feat(chat): support OpenAI-compatible API calls

- Add a handleOpenAICompatibleChat handler
- Support the /v1/chat/completions endpoint used by third-party relay services
- Refine handler selection logic (apiFormat -> isCodex -> Claude native)
- Filter out messages with empty content to avoid API errors
- Normalize the base URL to avoid double slashes
- Support multimodal messages and tool calls
parent 99ca472dd2
commit 6e37e61420
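For context, the routing rule described in the commit message boils down to one endpoint choice. This is a minimal sketch, not code from the diff: resolveEndpoint is a hypothetical helper, and the trailing-slash handling mirrors the normalizeBaseUrl function added below.

// Hypothetical helper illustrating the dispatch order:
//   apiFormat === 'openai' -> /v1/chat/completions (third-party relays)
//   isCodex                -> /v1/responses        (CCH Codex Response API)
//   otherwise              -> /v1/messages         (Claude native)
function resolveEndpoint(apiFormat: 'claude' | 'openai', isCodex: boolean, baseUrl: string): string {
  const base = baseUrl.replace(/\/+$/, ''); // same normalization as normalizeBaseUrl in the diff
  if (apiFormat === 'openai') return `${base}/v1/chat/completions`;
  if (isCodex) return `${base}/v1/responses`;
  return `${base}/v1/messages`;
}

// e.g. resolveEndpoint('openai', true, 'https://claude.leocoder.cn/')
//      -> 'https://claude.leocoder.cn/v1/chat/completions'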
@@ -72,6 +72,11 @@ function isCodexModel(modelId: string): boolean {
  return modelId.startsWith('gpt-') && modelId.includes('codex');
}

// Normalize the base URL (strip trailing slashes to avoid double slashes when concatenating paths)
function normalizeBaseUrl(url: string): string {
  return url.replace(/\/+$/, '');
}

// Default system prompt - used to produce more detailed, better-structured replies
const DEFAULT_SYSTEM_PROMPT = `你是一个专业、友好的 AI 助手。请遵循以下规则来回复用户:

@@ -282,6 +287,7 @@ export async function POST(request: Request) {
    async start(controller) {
      try {
        const cchUrl = settings.cchUrl || process.env.CCH_DEFAULT_URL || 'https://claude.leocoder.cn/';
        const apiFormat = (settings.apiFormat as 'claude' | 'openai') || 'claude';

        // Get the system prompt (layered mode)
        // 1. Always use DEFAULT_SYSTEM_PROMPT as the base

@@ -312,8 +318,41 @@ export async function POST(request: Request) {
        let totalInputTokens = 0;
        let totalOutputTokens = 0;

-       if (isCodex) {
-         // ==================== Codex model handling (OpenAI format) ====================
        // [Important] Handler selection priority:
        // 1. Check apiFormat === 'openai' first: if the user selected the "OpenAI compatible" format,
        //    all models (including Codex models) go through the /v1/chat/completions endpoint,
        //    because third-party relay services usually only support the OpenAI-compatible /v1/chat/completions endpoint
        // 2. Then check isCodex: only Claude-native format + a Codex model uses the /v1/responses endpoint
        // 3. Otherwise it is the plain Claude-native format, which uses the /v1/messages endpoint

        if (apiFormat === 'openai') {
          // ==================== OpenAI-compatible format handling ====================
          // When the user selects "OpenAI compatible", every model takes this branch
          // Third-party relay services uniformly use the /v1/chat/completions endpoint
          console.log('[API/chat] Using OpenAI-compatible format, model:', useModel, '(isCodex:', isCodex, ')');
          const result = await handleOpenAICompatibleChat({
            cchUrl,
            apiKey: decryptedApiKey,
            model: useModel,
            systemPrompt,
            temperature,
            historyMessages,
            message,
            tools: tools || (conversation.tools as string[]) || [],
            controller,
            encoder,
            images,
          });

          fullContent = result.fullContent;
          thinkingContent = result.thinkingContent;
          totalInputTokens = result.inputTokens;
          totalOutputTokens = result.outputTokens;
        } else if (isCodex) {
          // ==================== Codex model handling (Codex Response API) ====================
          // Only Claude-native format + a Codex model uses the /v1/responses endpoint
          // This is the Codex Response API specific to the CCH project
          console.log('[API/chat] Using Codex Response API (Claude-native format + Codex model)');
          const result = await handleCodexChat({
            cchUrl,
            apiKey: decryptedApiKey,
@@ -332,7 +371,8 @@ export async function POST(request: Request) {
          totalInputTokens = result.inputTokens;
          totalOutputTokens = result.outputTokens;
        } else {
-         // ==================== Claude model handling (original logic) ====================
          // ==================== Claude-native format handling ====================
          console.log('[API/chat] Using Claude-native format (/v1/messages)');
          const result = await handleClaudeChat({
            cchUrl,
            apiKey: decryptedApiKey,

@@ -476,13 +516,15 @@ async function handleCodexChat(params: CodexChatParams): Promise<{
    images,
  } = params;

- // Build the input in the Codex Response API format
  // Build the input in the Codex Response API format (filtering out messages with empty content)
  const inputItems: CodexInputItem[] = [
-   ...historyMessages.map((msg) => ({
-     type: 'message' as const,
-     role: msg.role as 'user' | 'assistant',
-     content: msg.content,
-   })),
    ...historyMessages
      .filter((msg) => msg.content && msg.content.trim() !== '')
      .map((msg) => ({
        type: 'message' as const,
        role: msg.role as 'user' | 'assistant',
        content: msg.content,
      })),
  ];

  // Append the current user message (with multimodal content support)

@@ -556,7 +598,7 @@ async function handleCodexChat(params: CodexChatParams): Promise<{
  }

  // Use the Codex Response API endpoint
- const response = await fetch(`${cchUrl}/v1/responses`, {
  const response = await fetch(`${normalizeBaseUrl(cchUrl)}/v1/responses`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',

@@ -796,11 +838,13 @@ async function handleClaudeChat(params: ClaudeChatParams): Promise<{
    images,
  } = params;

- // Build the message history
- const messageHistory: APIMessage[] = historyMessages.map((msg) => ({
-   role: msg.role as 'user' | 'assistant',
-   content: msg.content,
- }));
  // Build the message history (filtering out messages with empty content)
  const messageHistory: APIMessage[] = historyMessages
    .filter((msg) => msg.content && msg.content.trim() !== '')
    .map((msg) => ({
      role: msg.role as 'user' | 'assistant',
      content: msg.content,
    }));

  // Append the current user message (with multimodal content support)
  if (images && images.length > 0) {

@@ -900,7 +944,7 @@ async function handleClaudeChat(params: ClaudeChatParams): Promise<{
    requestBody.temperature = temperature;
  }

- const response = await fetch(`${cchUrl}/v1/messages`, {
  const response = await fetch(`${normalizeBaseUrl(cchUrl)}/v1/messages`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',

@@ -1126,6 +1170,462 @@ async function handleClaudeChat(params: ClaudeChatParams): Promise<{
  };
}

// ==================== OpenAI-compatible format handler ====================
interface OpenAICompatibleChatParams {
  cchUrl: string;
  apiKey: string;
  model: string;
  systemPrompt: string;
  temperature: number;
  historyMessages: { role: string; content: string }[];
  message: string;
  tools: string[];
  controller: ReadableStreamDefaultController;
  encoder: TextEncoder;
  images?: {
    type: 'image';
    media_type: string;
    data: string;
  }[];
}

// OpenAI message format
interface OpenAIMessageContent {
  type: 'text' | 'image_url';
  text?: string;
  image_url?: {
    url: string;
  };
}

interface OpenAICompatibleMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string | OpenAIMessageContent[] | null;
  tool_calls?: {
    id: string;
    type: 'function';
    function: {
      name: string;
      arguments: string;
    };
  }[];
  tool_call_id?: string;
}

async function handleOpenAICompatibleChat(params: OpenAICompatibleChatParams): Promise<{
  fullContent: string;
  thinkingContent: string;
  inputTokens: number;
  outputTokens: number;
}> {
  const {
    cchUrl,
    apiKey,
    model,
    systemPrompt,
    temperature,
    historyMessages,
    message,
    tools,
    controller,
    encoder,
    images,
  } = params;

  // Build the message history in OpenAI format (filtering out messages with empty content)
  const openaiMessages: OpenAICompatibleMessage[] = [
    { role: 'system', content: systemPrompt },
    ...historyMessages
      .filter((msg) => msg.content && msg.content.trim() !== '')
      .map((msg) => ({
        role: msg.role as 'user' | 'assistant',
        content: msg.content,
      })),
  ];

  // Append the current user message (with multimodal support)
  if (images && images.length > 0) {
    console.log('[handleOpenAICompatibleChat] Building multimodal message with', images.length, 'images');
    const multimodalContent: OpenAIMessageContent[] = [];

    // Add the images
    for (const img of images) {
      console.log('[handleOpenAICompatibleChat] Adding image:', {
        type: img.type,
        media_type: img.media_type,
        dataLength: img.data?.length || 0,
      });
      multimodalContent.push({
        type: 'image_url',
        image_url: {
          url: `data:${img.media_type};base64,${img.data}`,
        },
      });
    }

    // Add the text
    if (message) {
      multimodalContent.push({
        type: 'text',
        text: message,
      });
    }

    openaiMessages.push({
      role: 'user',
      content: multimodalContent,
    });
  } else {
    console.log('[handleOpenAICompatibleChat] No images, using simple text message');
    openaiMessages.push({
      role: 'user',
      content: message,
    });
  }

  // Build tool definitions in OpenAI format
  const openaiTools = buildOpenAIToolDefinitions(tools);

  let fullContent = '';
  let thinkingContent = ''; // Collects the reasoning inside <think> tags
  let totalInputTokens = 0;
  let totalOutputTokens = 0;
  let loopCount = 0;
  const maxLoops = 10;

  // State for handling <think> tags (which may be split across chunks)
  let isInThinkingMode = false;
  let pendingBuffer = ''; // Holds text while a tag may still be split across chunks

  while (loopCount < maxLoops) {
    loopCount++;

    // Build the request body
    const requestBody: Record<string, unknown> = {
      model,
      messages: openaiMessages,
      stream: true,
      temperature,
    };

    if (openaiTools.length > 0) {
      requestBody.tools = openaiTools;
    }

    console.log('[handleOpenAICompatibleChat] Sending to OpenAI-compatible API:', {
      model,
      messagesCount: openaiMessages.length,
      url: `${normalizeBaseUrl(cchUrl)}/v1/chat/completions`,
    });

    const response = await fetch(`${normalizeBaseUrl(cchUrl)}/v1/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
      },
      body: JSON.stringify(requestBody),
    });

    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`OpenAI-compatible API error: ${response.status} - ${errorText}`);
    }

    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error('No response body');
    }

    // Collect the content of the current round
    let currentTextContent = '';
    const toolCalls: { id: string; name: string; arguments: string }[] = [];
    const currentToolCallsMap: Map<number, { id: string; name: string; arguments: string }> = new Map();
    let stopReason: string | null = null;

    const decoder = new TextDecoder();
    let buffer = '';

    // Process the streaming response
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop() || '';

      for (const line of lines) {
        if (line.startsWith('data: ')) {
          const data = line.slice(6).trim();
          if (data === '[DONE]') continue;
          if (!data) continue;

          try {
            const event = JSON.parse(data);

            // Handle text content (including <think> tag filtering)
            if (event.choices?.[0]?.delta?.content) {
              const rawContent = event.choices[0].delta.content;

              // Append the new content to the pending buffer
              pendingBuffer += rawContent;

              // Process the buffered content
              while (pendingBuffer.length > 0) {
                if (isInThinkingMode) {
                  // In thinking mode, look for the closing </think> tag
                  const endTagIndex = pendingBuffer.indexOf('</think>');

                  if (endTagIndex !== -1) {
                    // Closing tag found: extract the thinking content
                    const thinkPart = pendingBuffer.slice(0, endTagIndex);
                    if (thinkPart) {
                      thinkingContent += thinkPart;
                      // Emit a thinking event (so the frontend can show the collapsed reasoning)
                      controller.enqueue(encoder.encode(`data: ${JSON.stringify({
                        type: 'thinking',
                        content: thinkPart,
                      })}\n\n`));
                    }
                    // Drop the processed content and the closing tag
                    pendingBuffer = pendingBuffer.slice(endTagIndex + 8); // 8 = '</think>'.length
                    isInThinkingMode = false;
                  } else {
                    // No closing tag found; it may still arrive split across chunks
                    // Keep the last 7 characters in case </think> was cut off (7 = '</think>'.length - 1)
                    if (pendingBuffer.length > 7) {
                      const safePart = pendingBuffer.slice(0, -7);
                      const keepPart = pendingBuffer.slice(-7);

                      if (safePart) {
                        thinkingContent += safePart;
                        controller.enqueue(encoder.encode(`data: ${JSON.stringify({
                          type: 'thinking',
                          content: safePart,
                        })}\n\n`));
                      }
                      pendingBuffer = keepPart;
                    }
                    // Wait for more data
                    break;
                  }
                } else {
                  // Not in thinking mode; look for the opening <think> tag
                  const startTagIndex = pendingBuffer.indexOf('<think>');

                  if (startTagIndex !== -1) {
                    // Opening tag found: first emit the plain text before the tag
                    const textPart = pendingBuffer.slice(0, startTagIndex);
                    if (textPart) {
                      currentTextContent += textPart;
                      fullContent += textPart;
                      controller.enqueue(encoder.encode(`data: ${JSON.stringify({
                        type: 'text',
                        content: textPart,
                      })}\n\n`));
                    }
                    // Drop the processed content and the opening tag
                    pendingBuffer = pendingBuffer.slice(startTagIndex + 7); // 7 = '<think>'.length
                    isInThinkingMode = true;
                  } else {
                    // No opening tag found; check whether a partial tag may sit at the end
                    // Keep the last 6 characters in case <think> was cut off (6 = '<think>'.length - 1)
                    const potentialTagStart = pendingBuffer.lastIndexOf('<');

                    if (potentialTagStart !== -1 && potentialTagStart > pendingBuffer.length - 7) {
                      // Possibly a partial <think> tag; hold on to it
                      const safePart = pendingBuffer.slice(0, potentialTagStart);
                      const keepPart = pendingBuffer.slice(potentialTagStart);

                      if (safePart) {
                        currentTextContent += safePart;
                        fullContent += safePart;
                        controller.enqueue(encoder.encode(`data: ${JSON.stringify({
                          type: 'text',
                          content: safePart,
                        })}\n\n`));
                      }
                      pendingBuffer = keepPart;
                      // Wait for more data
                      break;
                    } else {
                      // Safe to emit everything
                      currentTextContent += pendingBuffer;
                      fullContent += pendingBuffer;
                      controller.enqueue(encoder.encode(`data: ${JSON.stringify({
                        type: 'text',
                        content: pendingBuffer,
                      })}\n\n`));
                      pendingBuffer = '';
                    }
                  }
                }
              }
            }

            // Handle tool calls
            if (event.choices?.[0]?.delta?.tool_calls) {
              for (const toolCall of event.choices[0].delta.tool_calls) {
                const index = toolCall.index ?? 0;

                if (!currentToolCallsMap.has(index)) {
                  // A new tool call starts
                  currentToolCallsMap.set(index, {
                    id: toolCall.id || '',
                    name: toolCall.function?.name || '',
                    arguments: '',
                  });

                  if (toolCall.id) {
                    controller.enqueue(encoder.encode(`data: ${JSON.stringify({
                      type: 'tool_use_start',
                      id: toolCall.id,
                      name: toolCall.function?.name || '',
                    })}\n\n`));
                  }
                }

                const current = currentToolCallsMap.get(index)!;

                // Update the tool call info
                if (toolCall.id) current.id = toolCall.id;
                if (toolCall.function?.name) current.name = toolCall.function.name;
                if (toolCall.function?.arguments) {
                  current.arguments += toolCall.function.arguments;
                }
              }
            }

            // Handle the finish reason
            if (event.choices?.[0]?.finish_reason) {
              stopReason = event.choices[0].finish_reason;
            }

            // Handle usage info
            if (event.usage) {
              totalInputTokens = event.usage.prompt_tokens || 0;
              totalOutputTokens = event.usage.completion_tokens || 0;
            }
          } catch (e) {
            console.error('[handleOpenAICompatibleChat] Parse error:', e, 'Line:', line);
          }
        }
      }
    }

    // After the stream ends, flush whatever is left in pendingBuffer
    if (pendingBuffer.length > 0) {
      if (isInThinkingMode) {
        // Still in thinking mode, which means the </think> tag was never closed
        // Treat the remaining content as thinking content
        thinkingContent += pendingBuffer;
        controller.enqueue(encoder.encode(`data: ${JSON.stringify({
          type: 'thinking',
          content: pendingBuffer,
        })}\n\n`));
      } else {
        // Plain text mode: emit the remaining content
        currentTextContent += pendingBuffer;
        fullContent += pendingBuffer;
        controller.enqueue(encoder.encode(`data: ${JSON.stringify({
          type: 'text',
          content: pendingBuffer,
        })}\n\n`));
      }
      pendingBuffer = '';
    }

    // Collect all tool calls
    for (const [, tc] of currentToolCallsMap) {
      if (tc.id && tc.name) {
        toolCalls.push(tc);
        controller.enqueue(encoder.encode(`data: ${JSON.stringify({
          type: 'tool_use_complete',
          id: tc.id,
          name: tc.name,
          input: JSON.parse(tc.arguments || '{}'),
        })}\n\n`));
      }
    }

    // Check whether tools need to be executed
    if ((stopReason === 'tool_calls' || toolCalls.length > 0) && toolCalls.length > 0) {
      // Append the assistant message to the history
      const assistantMessage: OpenAICompatibleMessage = {
        role: 'assistant',
        content: currentTextContent || null,
        tool_calls: toolCalls.map((tc) => ({
          id: tc.id,
          type: 'function' as const,
          function: {
            name: tc.name,
            arguments: tc.arguments,
          },
        })),
      };
      openaiMessages.push(assistantMessage);

      // Execute all tools and collect the results
      for (const tc of toolCalls) {
        controller.enqueue(encoder.encode(`data: ${JSON.stringify({
          type: 'tool_execution_start',
          id: tc.id,
          name: tc.name,
        })}\n\n`));

        // Parse the tool arguments
        let toolInput: Record<string, unknown> = {};
        try {
          toolInput = JSON.parse(tc.arguments || '{}');
        } catch {
          console.error('[handleOpenAICompatibleChat] Failed to parse tool arguments');
        }

        // Execute the tool
        const result = await executeTool(tc.name, toolInput);

        controller.enqueue(encoder.encode(`data: ${JSON.stringify({
          type: 'tool_execution_result',
          id: tc.id,
          name: tc.name,
          success: result.success,
          result: result.displayResult,
          images: result.images,
        })}\n\n`));

        // Show the tool result to the user
        const toolDisplayText = `\n\n${result.displayResult}\n\n`;
        fullContent += toolDisplayText;
        controller.enqueue(encoder.encode(`data: ${JSON.stringify({
          type: 'text',
          content: toolDisplayText,
        })}\n\n`));

        // Append the tool result to the message history (OpenAI format)
        openaiMessages.push({
          role: 'tool',
          content: result.fullResult,
          tool_call_id: tc.id,
        });
      }

      // Continue the loop so the AI can keep replying based on the tool results
      continue;
    }

    // No tool calls: exit the loop
    break;
  }

  return {
    fullContent,
    thinkingContent,
    inputTokens: totalInputTokens,
    outputTokens: totalOutputTokens,
  };
}

// Build Claude tool definitions
function buildClaudeToolDefinitions(toolIds: string[]) {
  const toolMap: Record<string, object> = {
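For reviewers, a minimal sketch of how a frontend might consume the stream this route writes to its controller. The event type values ('text', 'thinking', 'tool_use_start', 'tool_execution_result', and so on) match the payloads enqueued above; the '/api/chat' path and the request body shape are assumptions, not taken from this diff.

// Hypothetical client-side reader; adjust the endpoint and body to the real route.
async function readChatStream(
  body: unknown,
  onEvent: (event: { type: string; content?: string }) => void,
): Promise<void> {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  if (!res.ok || !res.body) throw new Error(`Request failed: ${res.status}`);

  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() || '';
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      const data = line.slice(6).trim();
      if (!data) continue;
      onEvent(JSON.parse(data)); // e.g. { type: 'text', content: '...' } or { type: 'thinking', ... }
    }
  }
}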