在 OpenAI 官方库使用
Nebula Lab 完全兼容 OpenAI API 格式,您可以直接使用官方 OpenAI SDK,只需简单修改配置即可无缝切换。

支持的官方 SDK:
- Python(`openai`)
- Node.js(`openai`)
- .NET(`OpenAI`)
- Go(`go-openai`)
- Java(第三方)
- PHP(第三方)
- Ruby(第三方)
Python SDK
安装
复制
pip install openai
基础配置
复制
from openai import OpenAI

# Point the official OpenAI client at the Nebula Lab gateway; only
# base_url differs from a stock OpenAI setup.
client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://llm.ai-nebula.com/v1"
)

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "Hello!"}
    ]
)

print(response.choices[0].message.content)
环境变量配置
复制
import os
from openai import OpenAI

# The SDK reads OPENAI_API_KEY and OPENAI_BASE_URL from the environment
# when OpenAI() is constructed without arguments.
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
os.environ["OPENAI_BASE_URL"] = "https://llm.ai-nebula.com/v1"

client = OpenAI()

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Explain quantum computing"}]
)
异步使用
复制
import asyncio
from openai import AsyncOpenAI


async def main():
    """Issue one chat completion through the async client and print it."""
    client = AsyncOpenAI(
        api_key="YOUR_API_KEY",
        base_url="https://llm.ai-nebula.com/v1"
    )
    response = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)

asyncio.run(main())
流式输出
复制
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://llm.ai-nebula.com/v1"
)

# stream=True makes the call return an iterator of incremental chunks
# instead of a single final response.
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a short story"}],
    stream=True
)

for chunk in stream:
    # delta.content is None on role/finish chunks, so guard before printing.
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
Node.js SDK
安装
复制
npm install openai
基础配置
复制
import OpenAI from 'openai';

// Same official SDK; only baseURL changes to route through Nebula Lab.
const openai = new OpenAI({
  apiKey: 'YOUR_API_KEY',
  baseURL: 'https://llm.ai-nebula.com/v1'
});

const response = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Hello!' }]
});

console.log(response.choices[0].message.content);
流式输出
复制
// NOTE(review): reuses the `openai` client created in the basic-config example.
// stream: true returns an async iterable of delta chunks.
const stream = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Tell me a joke' }],
  stream: true
});

for await (const chunk of stream) {
  // Optional chaining skips chunks that carry no content delta.
  if (chunk.choices[0]?.delta?.content) {
    process.stdout.write(chunk.choices[0].delta.content);
  }
}
TypeScript 支持
复制
import OpenAI from 'openai';
import type { ChatCompletionCreateParamsNonStreaming } from 'openai/resources/chat/completions';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY!,
  baseURL: 'https://llm.ai-nebula.com/v1'
});

// Typing the params object gives compile-time checking of the model name
// and request options.
const params: ChatCompletionCreateParamsNonStreaming = {
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Hello TypeScript!' }],
  temperature: 0.7
};

const response = await openai.chat.completions.create(params);
.NET SDK
安装
复制
dotnet add package OpenAI
基础配置
复制
using System.ClientModel;
using OpenAI;
using OpenAI.Chat;

// The OpenAIClient overload that accepts OpenAIClientOptions takes an
// ApiKeyCredential, not a raw string, for the API key.
var client = new OpenAIClient(new ApiKeyCredential("YOUR_API_KEY"), new OpenAIClientOptions
{
    Endpoint = new Uri("https://llm.ai-nebula.com/v1")
});

// GetChatClient binds the model once; subsequent calls omit the model name.
var chatClient = client.GetChatClient("gpt-4o");
var response = await chatClient.CompleteChatAsync("Hello!");
Console.WriteLine(response.Value.Content[0].Text);
Go SDK
安装
复制
go get github.com/sashabaranov/go-openai
基础配置
复制
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// DefaultConfig sets up standard OpenAI-style auth with the given key;
	// overriding BaseURL routes requests through the Nebula Lab gateway.
	config := openai.DefaultConfig("YOUR_API_KEY")
	config.BaseURL = "https://llm.ai-nebula.com/v1"
	client := openai.NewClientWithConfig(config)

	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: "gpt-4o",
			Messages: []openai.ChatCompletionMessage{
				{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
			},
		},
	)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}

	fmt.Println(resp.Choices[0].Message.Content)
}
模型切换
复制
from openai import OpenAI  # was missing: snippet did not compile standalone

# One client instance reaches every provider; switching vendors is just a
# different `model` string -- no other code changes.
client = OpenAI(
    api_key="YOUR_API_KEY",
    base_url="https://llm.ai-nebula.com/v1"
)

# GPT model
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}]
)

# Claude model
response = client.chat.completions.create(
    model="claude-sonnet-4-20250514",
    messages=[{"role": "user", "content": "Hello"}]
)

# Gemini model
response = client.chat.completions.create(
    model="gemini-2.5-pro",
    messages=[{"role": "user", "content": "Hello"}]
)
高级功能
Function Calling
复制
# Tool definitions follow the OpenAI function-calling schema: each entry
# declares a name, a description, and JSON-Schema parameters.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "获取指定城市的天气信息",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "城市名称"}
                },
                "required": ["location"]
            }
        }
    }
]

# NOTE(review): assumes `client` was created as in the basic-config example.
# tool_choice="auto" lets the model decide whether to invoke the tool.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "北京天气怎么样?"}],
    tools=tools,
    tool_choice="auto"
)
图像输入
复制
# Vision input: `content` becomes a list mixing text parts and image_url parts.
# NOTE(review): assumes `client` was created as in the basic-config example.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "这张图片里有什么?"},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://example.com/image.jpg"}
                }
            ]
        }
    ]
)
嵌入向量
复制
# Embeddings use the same client; the response carries one vector per input.
# NOTE(review): assumes `client` was created as in the basic-config example.
response = client.embeddings.create(
    model="text-embedding-3-small",
    input="要嵌入的文本内容"
)

embedding = response.data[0].embedding
print(f"向量维度:{len(embedding)}")
错误处理
复制
from openai import (
    OpenAI,
    APIError,
    APIConnectionError,
    RateLimitError,
    InternalServerError
)

# NOTE(review): assumes `client` was created as in the basic-config example.
# Specific exception classes are caught first; APIError is their common base
# in the openai SDK, so its handler must come last.
try:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}]
    )
except RateLimitError:
    print("请求频率超限,请稍后重试")
except APIConnectionError:
    print("网络连接错误")
except InternalServerError:
    print("服务器内部错误")
except APIError as e:
    print(f"API 错误:{e}")
迁移指南
从 OpenAI 迁移
复制
# Original configuration (direct to OpenAI)
client = OpenAI(api_key="sk-...")

# Switched to Nebula Lab: swap the key and add base_url -- nothing else changes.
client = OpenAI(
    api_key="YOUR_NEBULA_KEY",
    base_url="https://llm.ai-nebula.com/v1"
)
除客户端初始化配置外,其余代码无需任何修改——方法调用、参数格式、响应处理全部保持不变。
