基础教程
本教程将带您从零开始学习如何使用 New API 构建您的第一个 AI 应用。
🎯 学习目标
完成本教程后,您将能够:
- 理解 New API 的基本概念
- 配置和管理 API Token
- 调用聊天接口进行文本生成
- 处理流式响应
- 实现简单的聊天机器人
📋 前置要求
- 已部署 New API 服务
- 拥有管理员账户
- 基础的编程知识(Python/JavaScript)
- 了解 HTTP API 基础概念
🚀 第一步:创建 API Token
1. 登录管理界面
访问 http://your-domain.com 并使用管理员账户登录。
2. 创建 Token
- 点击左侧菜单的 控制台 > Token 管理
- 点击 创建新的 Token 按钮
- 填写 Token 信息:
    - 名称: My First App
    - 过期时间: 选择合适的过期时间
    - 额度: 设置使用额度限制
- 点击 确认创建
- 重要: 复制并保存生成的 Token,它只会显示一次
3. Token 安全
```bash
# 好的做法:使用环境变量
export NEW_API_TOKEN="your_token_here"

# 避免的做法:硬编码在代码中
# token = "sk-1234567890abcdef"  # ❌ 不要这样做
```
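读取环境变量时,建议在程序启动阶段就检查 Token 是否存在,避免带着空值去请求接口。下面是一个最小的示意片段(假设 Token 已按上面的方式存入 NEW_API_TOKEN 环境变量):

```python
import os
import sys

# 启动时检查环境变量,缺失则立即退出,避免后续请求返回 401
token = os.getenv('NEW_API_TOKEN')
if not token:
    sys.exit("未设置 NEW_API_TOKEN 环境变量,请先执行 export NEW_API_TOKEN=...")

# 后续所有请求统一用该 Token 构造请求头
headers = {'Authorization': f'Bearer {token}'}
```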
💬 第二步:第一个聊天请求

Python 示例
```python
import requests
import os

# 从环境变量获取 Token
token = os.getenv('NEW_API_TOKEN')
base_url = 'http://your-domain.com/api'

def chat_with_ai(message):
    """发送聊天请求"""
    headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    data = {
        'model': 'gpt-3.5-turbo',
        'messages': [
            {'role': 'user', 'content': message}
        ]
    }

    response = requests.post(
        f'{base_url}/chat/completions',
        headers=headers,
        json=data
    )

    if response.status_code == 200:
        result = response.json()
        return result['choices'][0]['message']['content']
    else:
        return f"错误: {response.status_code} - {response.text}"

# 测试
if __name__ == '__main__':
    response = chat_with_ai('你好,请介绍一下自己')
    print(response)
```
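上面的示例只取了 choices[0].message.content。接口返回的是 OpenAI 兼容格式的 JSON,通常还带有 usage 字段,记录本次请求消耗的令牌数。下面的片段仅作结构示意(字段以实际返回为准,数值为虚构):

```python
# 一个典型的 OpenAI 兼容聊天响应结构(示意,数值为虚构)
result = {
    'id': 'chatcmpl-xxx',
    'model': 'gpt-3.5-turbo',
    'choices': [
        {
            'index': 0,
            'message': {'role': 'assistant', 'content': '你好!我是一个 AI 助手……'},
            'finish_reason': 'stop'
        }
    ],
    'usage': {'prompt_tokens': 12, 'completion_tokens': 25, 'total_tokens': 37}
}

content = result['choices'][0]['message']['content']  # 回复文本
usage = result.get('usage', {})                       # 本次请求的令牌消耗
print(content)
print(f"本次消耗令牌: {usage.get('total_tokens', '未知')}")
```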
JavaScript 示例

```javascript
// 使用 fetch API
async function chatWithAI(message) {
  const token = process.env.NEW_API_TOKEN;
  const baseUrl = 'http://your-domain.com/api';

  const response = await fetch(`${baseUrl}/chat/completions`, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${token}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'user', content: message }
      ]
    })
  });

  if (response.ok) {
    const result = await response.json();
    return result.choices[0].message.content;
  } else {
    throw new Error(`错误: ${response.status} - ${await response.text()}`);
  }
}

// 测试
chatWithAI('你好,请介绍一下自己')
  .then(response => console.log(response))
  .catch(error => console.error(error));
```

🌊 第三步:处理流式响应
流式响应可以实时显示生成的内容,提供更好的用户体验。
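启用 stream: true 后,接口以 SSE(Server-Sent Events)文本格式逐行返回:每行以 data: 前缀开头,内容是一个 JSON 增量,流结束时返回 data: [DONE]。下面用一行虚构的示例数据演示后文代码所做的解析:

```python
import json

# 一行虚构的 SSE 数据,格式与流式接口返回的单行一致
sample_line = 'data: {"choices": [{"delta": {"content": "春"}}]}'

if sample_line.startswith('data: '):
    payload = sample_line[6:]            # 去掉 'data: ' 前缀
    if payload != '[DONE]':              # [DONE] 表示流已结束
        delta = json.loads(payload)['choices'][0]['delta']
        print(delta.get('content', ''))  # 输出增量内容:春
```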
Python 流式示例
```python
import requests
import json
import os

def stream_chat(message):
    """流式聊天"""
    token = os.getenv('NEW_API_TOKEN')
    base_url = 'http://your-domain.com/api'

    headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    data = {
        'model': 'gpt-3.5-turbo',
        'messages': [
            {'role': 'user', 'content': message}
        ],
        'stream': True  # 启用流式
    }

    response = requests.post(
        f'{base_url}/chat/completions',
        headers=headers,
        json=data,
        stream=True
    )

    full_response = ""
    for line in response.iter_lines():
        if line:
            line = line.decode('utf-8')
            if line.startswith('data: '):
                data_str = line[6:]  # 移除 'data: ' 前缀
                if data_str == '[DONE]':
                    break
                try:
                    chunk = json.loads(data_str)
                    if 'choices' in chunk and len(chunk['choices']) > 0:
                        delta = chunk['choices'][0].get('delta', {})
                        if 'content' in delta:
                            content = delta['content']
                            full_response += content
                            print(content, end='', flush=True)  # 实时输出
                except json.JSONDecodeError:
                    continue

    print()  # 换行
    return full_response

# 测试
if __name__ == '__main__':
    stream_chat('请写一首关于春天的诗')
```

JavaScript 流式示例
```javascript
async function streamChat(message) {
  const token = process.env.NEW_API_TOKEN;
  const baseUrl = 'http://your-domain.com/api';

  const response = await fetch(`${baseUrl}/chat/completions`, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${token}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'user', content: message }
      ],
      stream: true
    })
  });

  if (!response.ok) {
    throw new Error(`HTTP error! status: ${response.status}`);
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullResponse = '';

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      const chunk = decoder.decode(value);
      const lines = chunk.split('\n');

      for (const line of lines) {
        if (line.startsWith('data: ')) {
          const dataStr = line.slice(6);
          if (dataStr === '[DONE]') {
            return fullResponse;
          }
          try {
            const data = JSON.parse(dataStr);
            if (data.choices && data.choices[0]) {
              const delta = data.choices[0].delta;
              if (delta && delta.content) {
                const content = delta.content;
                fullResponse += content;
                process.stdout.write(content); // 实时输出
              }
            }
          } catch (e) {
            // 忽略解析错误
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
    console.log(); // 换行
  }

  return fullResponse;
}

// 测试
streamChat('请写一首关于春天的诗')
  .then(response => console.log('完整响应:', response))
  .catch(error => console.error('错误:', error));
```

🤖 第四步:构建简单聊天机器人
现在让我们构建一个简单的命令行聊天机器人。
Python 聊天机器人
```python
import requests
import json
import os
from typing import List, Dict

class SimpleChatBot:
    def __init__(self, token: str, base_url: str, model: str = 'gpt-3.5-turbo'):
        self.token = token
        self.base_url = base_url
        self.model = model
        self.conversation_history: List[Dict] = []

    def add_message(self, role: str, content: str):
        """添加消息到对话历史"""
        self.conversation_history.append({'role': role, 'content': content})
        # 保持对话历史在合理长度内
        if len(self.conversation_history) > 10:
            self.conversation_history = self.conversation_history[-10:]

    def chat(self, message: str, stream: bool = False) -> str:
        """发送聊天消息"""
        self.add_message('user', message)

        headers = {
            'Authorization': f'Bearer {self.token}',
            'Content-Type': 'application/json'
        }
        data = {
            'model': self.model,
            'messages': self.conversation_history,
            'stream': stream
        }

        if stream:
            return self._stream_response(headers, data)
        else:
            return self._normal_response(headers, data)

    def _normal_response(self, headers: Dict, data: Dict) -> str:
        """处理普通响应"""
        response = requests.post(
            f'{self.base_url}/chat/completions',
            headers=headers,
            json=data
        )

        if response.status_code == 200:
            result = response.json()
            assistant_message = result['choices'][0]['message']['content']
            self.add_message('assistant', assistant_message)
            return assistant_message
        else:
            raise Exception(f"API 错误: {response.status_code} - {response.text}")

    def _stream_response(self, headers: Dict, data: Dict) -> str:
        """处理流式响应"""
        response = requests.post(
            f'{self.base_url}/chat/completions',
            headers=headers,
            json=data,
            stream=True
        )

        full_response = ""
        for line in response.iter_lines():
            if line:
                line = line.decode('utf-8')
                if line.startswith('data: '):
                    data_str = line[6:]
                    if data_str == '[DONE]':
                        break
                    try:
                        chunk = json.loads(data_str)
                        if 'choices' in chunk and len(chunk['choices']) > 0:
                            delta = chunk['choices'][0].get('delta', {})
                            if 'content' in delta:
                                content = delta['content']
                                full_response += content
                                print(content, end='', flush=True)
                    except json.JSONDecodeError:
                        continue

        print()  # 换行
        self.add_message('assistant', full_response)
        return full_response

    def clear_history(self):
        """清空对话历史"""
        self.conversation_history = []

    def run_interactive(self):
        """运行交互式聊天"""
        print("🤖 New API 聊天机器人")
        print("输入 'quit' 退出,'clear' 清空历史")
        print("-" * 40)

        while True:
            try:
                user_input = input("\n您: ").strip()
                if user_input.lower() == 'quit':
                    print("再见!")
                    break
                elif user_input.lower() == 'clear':
                    self.clear_history()
                    print("对话历史已清空")
                    continue
                elif not user_input:
                    continue

                print("\n助手: ", end='', flush=True)
                self.chat(user_input, stream=True)
            except KeyboardInterrupt:
                print("\n\n再见!")
                break
            except Exception as e:
                print(f"\n错误: {e}")

# 使用示例
if __name__ == '__main__':
    token = os.getenv('NEW_API_TOKEN')
    base_url = 'http://your-domain.com/api'

    bot = SimpleChatBot(token, base_url)
    bot.run_interactive()
```
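除了交互模式,也可以在自己的程序里直接调用 chat() 方法,多轮消息会自动携带对话历史。下面是一个简单的调用示意(依赖上面已定义的 SimpleChatBot 类和 NEW_API_TOKEN 环境变量):

```python
import os

# 以编程方式使用上面定义的 SimpleChatBot
bot = SimpleChatBot(os.getenv('NEW_API_TOKEN'), 'http://your-domain.com/api')

print(bot.chat('用一句话介绍你自己'))      # 第一轮
print(bot.chat('把刚才的介绍翻译成英文'))  # 第二轮,自动带上第一轮的历史

bot.clear_history()  # 需要时清空历史,开始新会话
```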
📊 第五步:监控使用情况

获取使用统计
```python
import requests
import os

def get_usage_stats():
    """获取使用统计"""
    token = os.getenv('NEW_API_TOKEN')
    base_url = 'http://your-domain.com/api'

    headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }

    response = requests.get(f'{base_url}/usage', headers=headers)

    if response.status_code == 200:
        stats = response.json()
        print("使用统计:")
        print(f"  提示令牌: {stats['usage']['prompt_tokens']}")
        print(f"  完成令牌: {stats['usage']['completion_tokens']}")
        print(f"  总令牌: {stats['usage']['total_tokens']}")
        print(f"  费用: ${stats['usage']['cost']:.4f}")
    else:
        print(f"获取统计失败: {response.status_code}")

# 测试
get_usage_stats()
```
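如果您部署的版本没有提供这样的用量接口,或返回字段不同,也可以在客户端自行累计:每次聊天响应中的 usage 字段都包含本次消耗的令牌数。下面是一个简单的累计示意(usage 字段结构以实际返回为准):

```python
# 在客户端累计令牌消耗的简单示意
total_tokens = 0

def record_usage(result: dict) -> None:
    """从单次聊天响应(response.json() 的结果)中累加 total_tokens"""
    global total_tokens
    total_tokens += result.get('usage', {}).get('total_tokens', 0)

# 用法示例:每次拿到响应后调用一次(数值为虚构)
record_usage({'usage': {'prompt_tokens': 10, 'completion_tokens': 20, 'total_tokens': 30}})
record_usage({'usage': {'prompt_tokens': 8, 'completion_tokens': 15, 'total_tokens': 23}})
print(f"累计消耗令牌: {total_tokens}")  # 53
```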
🔧 第六步:错误处理

完善的错误处理
```python
import os
import time
import requests
from typing import Optional

class NewAPIError(Exception):
    """New API 自定义错误"""
    def __init__(self, message: str, code: Optional[str] = None, status_code: Optional[int] = None):
        super().__init__(message)
        self.code = code
        self.status_code = status_code

def safe_chat(message: str, max_retries: int = 3) -> str:
    """带重试的安全聊天函数"""
    token = os.getenv('NEW_API_TOKEN')
    base_url = 'http://your-domain.com/api'

    for attempt in range(max_retries):
        try:
            headers = {
                'Authorization': f'Bearer {token}',
                'Content-Type': 'application/json'
            }
            data = {
                'model': 'gpt-3.5-turbo',
                'messages': [
                    {'role': 'user', 'content': message}
                ]
            }

            response = requests.post(
                f'{base_url}/chat/completions',
                headers=headers,
                json=data,
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                return result['choices'][0]['message']['content']
            elif response.status_code == 401:
                raise NewAPIError("认证失败,请检查 Token", "invalid_api_key", 401)
            elif response.status_code == 429:
                print(f"请求过于频繁,等待重试... (尝试 {attempt + 1}/{max_retries})")
                time.sleep(2 ** attempt)  # 指数退避
                continue
            elif response.status_code == 500:
                print(f"服务器错误,等待重试... (尝试 {attempt + 1}/{max_retries})")
                time.sleep(2 ** attempt)
                continue
            else:
                content_type = response.headers.get('content-type', '')
                error_data = response.json() if content_type.startswith('application/json') else {}
                raise NewAPIError(
                    error_data.get('error', {}).get('message', f'HTTP {response.status_code}'),
                    error_data.get('error', {}).get('code'),
                    response.status_code
                )
        except requests.exceptions.Timeout:
            print(f"请求超时,重试中... (尝试 {attempt + 1}/{max_retries})")
            time.sleep(2 ** attempt)
            continue
        except requests.exceptions.ConnectionError:
            print(f"连接错误,重试中... (尝试 {attempt + 1}/{max_retries})")
            time.sleep(2 ** attempt)
            continue

    raise NewAPIError(f"在 {max_retries} 次尝试后仍然失败")

# 测试
try:
    response = safe_chat("你好")
    print(response)
except NewAPIError as e:
    print(f"API 错误: {e}")
except Exception as e:
    print(f"未知错误: {e}")
```

🎉 总结
恭喜!您已经学会了:
✅ 创建和管理 API Token
✅ 发送基本的聊天请求
✅ 处理流式响应
✅ 构建交互式聊天机器人
✅ 监控使用情况
✅ 实现错误处理和重试机制
📚 下一步
💡 进阶提示: 尝试修改聊天机器人,添加更多功能,如:
- 保存对话历史到文件(见本节末尾的示意代码)
- 支持多用户会话
- 添加命令系统(如 /help, /model 等)
- 集成其他 AI 功能(图像生成、语音等)
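以『保存对话历史到文件』为例,下面是一个最小的示意(假设沿用第四步的 SimpleChatBot,文件名 history.json 仅为示例):

```python
import json

def save_history(bot, path='history.json'):
    """把 SimpleChatBot 的对话历史保存为 JSON 文件"""
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(bot.conversation_history, f, ensure_ascii=False, indent=2)

def load_history(bot, path='history.json'):
    """从 JSON 文件恢复对话历史"""
    with open(path, 'r', encoding='utf-8') as f:
        bot.conversation_history = json.load(f)
```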