import { NextResponse } from 'next/server'; import { db } from '@/drizzle/db'; import { conversations, messages, userSettings, type GeneratedImageData } from '@/drizzle/schema'; import { eq } from 'drizzle-orm'; import { nanoid } from 'nanoid'; import { executeTool } from '@/services/tools'; import { getCurrentUser } from '@/lib/auth'; import { decryptApiKey } from '@/lib/crypto'; import { parseDocument, type ParsedDocument } from '@/lib/document-parser'; // PDF 文档数据(从前端传来,直接传给 Claude API) interface PdfDocumentData { name: string; size: number; data: string; // Base64 编码 media_type: 'application/pdf'; } // Office 文档数据(从前端传来,需要后端解析) interface OfficeDocumentData { name: string; size: number; data: string; // Base64 编码 type: 'word' | 'excel'; mimeType: string; } interface ChatRequest { conversationId: string; message: string; displayMessage?: string; // 原始用户输入(用于数据库存储和显示) model?: string; tools?: string[]; enableThinking?: boolean; // 用户上传的图片(发送给 AI) images?: { type: 'image'; media_type: string; data: string; }[]; // 用户上传的图片 URL(用于保存到数据库显示) uploadedImages?: string[]; // 用户上传的文档(用于保存到数据库) uploadedDocuments?: { name: string; size: number; type: string; content: string; }[]; // PDF 文档(直接传给 Claude API 使用原生 document 类型) pdfDocuments?: PdfDocumentData[]; // Office 文档(Word/Excel,需要后端解析) officeDocuments?: OfficeDocumentData[]; } // 消息内容块类型(Claude) interface ContentBlock { type: 'text' | 'tool_use' | 'tool_result' | 'thinking'; text?: string; thinking?: string; id?: string; name?: string; input?: Record; tool_use_id?: string; content?: string; } // API 消息类型(Claude) interface APIMessage { role: 'user' | 'assistant'; content: string | ContentBlock[]; } // OpenAI 消息类型 interface OpenAIMessage { role: 'system' | 'user' | 'assistant' | 'tool'; content: string | null; tool_calls?: OpenAIToolCall[]; tool_call_id?: string; } // OpenAI 工具调用 interface OpenAIToolCall { id: string; type: 'function'; function: { name: string; arguments: string; }; } // 判断是否为 Codex 模型 function 
isCodexModel(modelId: string): boolean { return modelId.startsWith('gpt-') && modelId.includes('codex'); } // 判断是否为 Gemini 图片生成模型 function isGeminiImageModel(modelId: string): boolean { // 支持的图片生成模型列表 const imageModels = [ 'gemini-2.0-flash-preview-image-generation', 'gemini-3-pro-image-preview', 'imagen-3.0-generate-002', ]; return imageModels.some(model => modelId.includes(model)) || modelId.includes('image-generation') || modelId.includes('imagen'); } // 规范化 URL(移除末尾斜杠,避免拼接时出现双斜杠) function normalizeBaseUrl(url: string): string { return url.replace(/\/+$/, ''); } /** * 创建安全的 Stream 写入器 * 用于处理客户端断开连接时的安全写入,避免 "Controller is already closed" 错误 */ function createSafeStreamWriter( controller: ReadableStreamDefaultController, encoder: TextEncoder ) { let isClosed = false; return { /** * 安全地向 stream 写入数据 * 如果 controller 已关闭,则静默忽略 */ write(data: object): boolean { if (isClosed) return false; try { controller.enqueue(encoder.encode(`data: ${JSON.stringify(data)}\n\n`)); return true; } catch { // Controller 已关闭,标记状态并静默处理 isClosed = true; return false; } }, /** * 安全地关闭 stream */ close(): boolean { if (isClosed) return false; try { controller.close(); isClosed = true; return true; } catch { isClosed = true; return false; } }, /** * 检查 stream 是否已关闭 */ get closed(): boolean { return isClosed; } }; } // 默认系统提示词 - 用于生成更详细、更有结构的回复 const DEFAULT_SYSTEM_PROMPT = `你是一个专业、友好的 AI 助手。请遵循以下规则来回复用户: ## 📅 当前时间 **重要**:今天的日期是 {{CURRENT_DATE}}。当你需要搜索实时信息或提及日期时,请使用这个准确的日期。 ## 🎯 核心原则:专注回答最新问题 **重要**:你必须只回答用户最新(最后一条)的问题。请遵循以下规则: 1. **只回答最新问题**:无论对话历史中有多少问题,你只需要回答用户最新发送的那一条消息 2. **智能判断相关性**: - 如果最新问题与之前的对话主题相关(如追问、补充),可以参考历史上下文 - 如果最新问题是一个全新的、无关的话题,直接回答,不要提及之前的对话 3. **绝不重复回答**:已经回答过的问题不要再次回答,即使用户没有明确表示已收到答案 4. **一个问题一个回答**:每次只专注于一个问题,不要试图同时回答多个问题 ## 回复风格 - 使用中文回复,除非用户明确要求其他语言 - 回复要详细、有深度,不要过于简短 - 语气友好、专业,像一个耐心的老师 ## 格式规范 - 使用 Markdown 格式化回复 - 对于代码,使用代码块并标明语言,例如 \`\`\`python - 使用标题(##、###)来组织长回复 - 使用列表(有序或无序)来列举要点 - 使用粗体或斜体强调重要内容 ## 回答结构 当回答技术问题时,请按以下结构: 1. **简要概述**:先给出简洁的答案 2. 
**详细解释**:深入解释原理或概念 3. **代码示例**:如果适用,提供完整、可运行的代码 4. **注意事项**:指出常见错误或最佳实践 5. **延伸阅读**:如果有相关主题,可以简要提及 ## 代码规范 - 代码要完整、可运行,不要省略关键部分 - 添加适当的注释解释关键逻辑 - 使用有意义的变量名 - 如果代码较长,先展示完整代码,再逐段解释 ## 工具使用 - 当需要查询实时信息时,请使用 web_search 工具 - 当需要获取网页内容时,请使用 web_fetch 工具 - **工具调用原则**:只为最新问题调用必要的工具,不要为历史问题调用工具 ## 工具结果总结(重要) 使用工具后,请按以下方式总结和回答: 1. **直接给出答案**:不要重复展示工具返回的原始数据,直接用自然语言总结关键信息 2. **引用来源**:在回答末尾用"来源"或"参考"标注信息来源网站 3. **格式示例**: - ✅ 正确:"今天北京天气晴朗,气温15°C。(来源:weather.com)" - ❌ 错误:把整个搜索结果复制出来 ## 特别注意 - 如果问题不明确,先确认理解是否正确 - 如果有多种方案,说明各自的优缺点 - 承认不确定的地方,不要编造信息 ## 文档深度分析规范(重要) 当用户上传文档/文件并要求分析时,请按以下框架进行**深度分析**,不要简单总结: ### 1. 📋 文档概览 - 文档类型、主题、作者/来源 - 核心论点或目的 - 目标受众和适用场景 ### 2. 🏗️ 内容结构分析 - 文档的整体结构和逻辑框架 - 各章节/段落的主要内容概述 - 内容之间的逻辑关系和递进层次 ### 3. 💡 核心观点提取 - 列出文档中的所有关键观点(至少5-10个要点) - 每个观点配以原文引用或具体依据 - 分析观点的创新性、独特性和价值 ### 4. 🔍 深度思考与洞察 - 作者的写作意图和深层目的 - 文档中隐含的假设或前提条件 - 潜在的偏见、局限性或争议点 - 与相关领域知识的联系和对比 ### 5. 🎯 实践价值分析 - 文档的实用性和可操作性评估 - 具体的应用场景和使用建议 - 实施步骤、注意事项和潜在风险 ### 6. ⚖️ 批判性评价 - 优点、亮点和值得学习之处 - 不足之处和可改进的建议 - 与同类内容/观点的横向对比 ### 7. 📝 总结与延伸 - 核心要点的结构化总结 - 延伸阅读/深入学习的建议 - 相关问题的探讨和思考 **文档分析注意事项**: - 分析要详尽深入,每个部分至少2-3段有实质内容的文字 - 使用具体的原文引用来支持分析观点 - 提供独到见解和批判性思考,不要只做简单摘要复述 - 整体分析字数不少于1500字,确保分析的深度和广度 - 使用清晰的标题层级和列表格式,便于阅读 - 分析文档时**不需要执行代码**,除非用户明确要求数据处理或可视化`; // POST /api/chat - 发送消息并获取 AI 回复 export async function POST(request: Request) { try { const body: ChatRequest = await request.json(); const { conversationId, message, displayMessage, model, tools, enableThinking, images, uploadedImages, uploadedDocuments, pdfDocuments, officeDocuments } = body; // 调试日志:确认接收到的图片和文档数据 console.log('[API/chat] Received request with:', { conversationId, messageLength: message?.length, displayMessageLength: displayMessage?.length, model, tools, enableThinking, imagesCount: images?.length || 0, uploadedImagesCount: uploadedImages?.length || 0, uploadedDocumentsCount: uploadedDocuments?.length || 0, pdfDocumentsCount: pdfDocuments?.length || 0, officeDocumentsCount: officeDocuments?.length || 0, images: images ? 
images.map(img => ({ type: img.type, media_type: img.media_type, dataLength: img.data?.length || 0, })) : undefined, pdfDocuments: pdfDocuments ? pdfDocuments.map(doc => ({ name: doc.name, size: doc.size, dataLength: doc.data?.length || 0, })) : undefined, officeDocuments: officeDocuments ? officeDocuments.map(doc => ({ name: doc.name, size: doc.size, type: doc.type, dataLength: doc.data?.length || 0, })) : undefined, }); // 获取当前登录用户 const user = await getCurrentUser(); if (!user) { return NextResponse.json( { error: '请先登录后再使用聊天功能' }, { status: 401 } ); } // 获取该用户的设置 const settings = await db.query.userSettings.findFirst({ where: eq(userSettings.userId, user.userId), }); if (!settings?.cchApiKey) { return NextResponse.json( { error: '请先在设置中配置您的 API Key 才能使用聊天功能', code: 'API_KEY_NOT_CONFIGURED' }, { status: 400 } ); } // 解密 API Key const decryptedApiKey = decryptApiKey(settings.cchApiKey); // 解密秘塔 API Key(如果已配置) const decryptedMetasoApiKey = settings.metasoApiKey ? decryptApiKey(settings.metasoApiKey) : undefined; // 获取对话信息 const conversation = await db.query.conversations.findFirst({ where: eq(conversations.conversationId, conversationId), }); if (!conversation) { return NextResponse.json( { error: 'Conversation not found' }, { status: 404 } ); } // 获取对话历史消息 const historyMessages = await db.query.messages.findMany({ where: eq(messages.conversationId, conversationId), orderBy: (messages, { asc }) => [asc(messages.createdAt)], }); // 保存用户消息(包括上传的图片和文档) // 使用 displayMessage(原始用户输入)作为显示内容,如果没有则使用 message const userMessageId = nanoid(); await db.insert(messages).values({ messageId: userMessageId, conversationId, role: 'user', content: displayMessage || message, // 使用原始用户输入作为显示内容 status: 'completed', uploadedImages: uploadedImages && uploadedImages.length > 0 ? uploadedImages : null, uploadedDocuments: uploadedDocuments && uploadedDocuments.length > 0 ? 
uploadedDocuments : null, }); // 准备 AI 消息 ID const assistantMessageId = nanoid(); // 判断使用的模型类型 const useModel = model || conversation.model; const isCodex = isCodexModel(useModel); const isGeminiImage = isGeminiImageModel(useModel); // 创建 SSE 响应 const encoder = new TextEncoder(); const stream = new ReadableStream({ async start(controller) { // 创建安全的 stream 写入器用于最终的 done/error 事件 const safeWriter = createSafeStreamWriter(controller, encoder); try { const cchUrl = settings.cchUrl || process.env.CCH_DEFAULT_URL || 'https://claude.leocoder.cn/'; const apiFormat = (settings.apiFormat as 'claude' | 'openai') || 'claude'; // 获取系统提示词(叠加模式) // 1. 始终使用 DEFAULT_SYSTEM_PROMPT 作为基础 // 2. 如果对话有关联助手的提示词(conversation.systemPrompt),则叠加到默认提示词后面 // 3. 助手提示词替代设置页面的自定义提示词,不替代默认提示词 const currentDate = new Date().toLocaleDateString('zh-CN', { year: 'numeric', month: 'long', day: 'numeric', weekday: 'long', }); const basePrompt = DEFAULT_SYSTEM_PROMPT.replace('{{CURRENT_DATE}}', currentDate); // 叠加助手提示词 let systemPrompt = basePrompt; if (conversation.systemPrompt) { systemPrompt = `${basePrompt}\n\n---\n\n## 🎭 当前助手角色设定\n\n${conversation.systemPrompt}`; console.log('[API/chat] 🎭 使用助手提示词:', conversation.systemPrompt.substring(0, 100) + '...'); } else { console.log('[API/chat] 📝 无助手提示词,使用默认提示词'); } // 获取温度参数 const temperature = parseFloat(conversation.temperature || settings.temperature || '0.7'); // 解析 Office 文档(Word/Excel) const parsedOfficeDocuments: ParsedDocument[] = []; const documentParseErrors: { name: string; error: string }[] = []; // 记录解析失败的文档 if (officeDocuments && officeDocuments.length > 0) { console.log('[API/chat] 📄 开始解析 Office 文档,数量:', officeDocuments.length); for (const doc of officeDocuments) { try { const parsed = await parseDocument(doc.data, doc.name, doc.mimeType); if (parsed) { parsedOfficeDocuments.push(parsed); console.log('[API/chat] ✅ 成功解析文档:', doc.name, '内容长度:', parsed.content.length); } } catch (parseError) { const errorMsg = parseError instanceof Error ? 
parseError.message : '未知错误'; console.error('[API/chat] ❌ 解析文档失败:', doc.name, parseError); documentParseErrors.push({ name: doc.name, error: errorMsg }); } } } let fullContent = ''; let thinkingContent = ''; let totalInputTokens = 0; let totalOutputTokens = 0; let usedTools: string[] = []; // 收集使用过的工具名称 let generatedImages: GeneratedImageData[] = []; // Gemini 生成的图片 // 如果有文档解析失败,将警告添加到内容开头 if (documentParseErrors.length > 0) { const errorMessages = documentParseErrors.map(e => `- ${e.name}: ${e.error}`).join('\n'); const warningText = `⚠️ 以下文档解析失败,无法分析其内容:\n${errorMessages}\n\n`; fullContent = warningText; safeWriter.write({ type: 'text', content: warningText, }); } // 【重要】处理器选择优先级说明: // 0. 首先检查 isGeminiImage:Gemini 图片生成模型有专门的处理逻辑 // 1. 然后检查 apiFormat === 'openai':如果用户选择了 "OpenAI 兼容" 格式, // 则所有模型(包括 Codex 模型)都统一使用 /v1/chat/completions 端点 // 这是因为第三方中转站通常只支持 OpenAI 兼容的 /v1/chat/completions 端点 // 2. 然后检查 isCodex:如果是 Claude 原生格式 + Codex 模型,才使用 /v1/responses 端点 // 3. 最后是普通的 Claude 原生格式,使用 /v1/messages 端点 if (isGeminiImage) { // ==================== Gemini 图片生成模型处理 ==================== // Gemini 图片生成模型使用专门的 Gemini API 端点 console.log('[API/chat] 使用 Gemini 图片生成模型:', useModel); const result = await handleGeminiImageChat({ cchUrl, apiKey: decryptedApiKey, model: useModel, systemPrompt, temperature, historyMessages, message, controller, encoder, images, }); fullContent = result.fullContent; generatedImages = result.generatedImages; totalInputTokens = result.inputTokens; totalOutputTokens = result.outputTokens; } else if (apiFormat === 'openai') { // ==================== OpenAI 兼容格式处理 ==================== // 当用户选择 "OpenAI 兼容" 时,无论什么模型都走这个分支 // 第三方中转站统一使用 /v1/chat/completions 端点 console.log('[API/chat] 使用 OpenAI 兼容格式,模型:', useModel, '(isCodex:', isCodex, ')'); const result = await handleOpenAICompatibleChat({ cchUrl, apiKey: decryptedApiKey, model: useModel, systemPrompt, temperature, historyMessages, message, tools: tools || (conversation.tools as string[]) || [], controller, 
encoder, images, metasoApiKey: decryptedMetasoApiKey, pdfDocuments, parsedOfficeDocuments, }); fullContent = result.fullContent; thinkingContent = result.thinkingContent; totalInputTokens = result.inputTokens; totalOutputTokens = result.outputTokens; usedTools = result.usedTools; } else if (isCodex) { // ==================== Codex 模型处理(使用 Codex Response API) ==================== // 仅当使用 Claude 原生格式 + Codex 模型时,才使用 /v1/responses 端点 // 这是 CCH 项目特有的 Codex Response API console.log('[API/chat] 使用 Codex Response API (Claude 原生格式 + Codex 模型)'); const result = await handleCodexChat({ cchUrl, apiKey: decryptedApiKey, model: useModel, systemPrompt, temperature, historyMessages, message, tools: tools || (conversation.tools as string[]) || [], controller, encoder, images, // 传递用户上传的图片 metasoApiKey: decryptedMetasoApiKey, pdfDocuments, parsedOfficeDocuments, }); fullContent = result.fullContent; totalInputTokens = result.inputTokens; totalOutputTokens = result.outputTokens; usedTools = result.usedTools; } else { // ==================== Claude 原生格式处理 ==================== console.log('[API/chat] 使用 Claude 原生格式 (/v1/messages)'); const result = await handleClaudeChat({ cchUrl, apiKey: decryptedApiKey, model: useModel, systemPrompt, temperature, historyMessages, message, tools: tools || (conversation.tools as string[]) || [], enableThinking: enableThinking ?? conversation.enableThinking ?? false, controller, encoder, images, // 传递用户上传的图片 metasoApiKey: decryptedMetasoApiKey, pdfDocuments, parsedOfficeDocuments, }); fullContent = result.fullContent; thinkingContent = result.thinkingContent; totalInputTokens = result.inputTokens; totalOutputTokens = result.outputTokens; usedTools = result.usedTools; } // 保存 AI 回复到数据库 await db.insert(messages).values({ messageId: assistantMessageId, conversationId, role: 'assistant', content: fullContent, thinkingContent: thinkingContent || null, usedTools: usedTools.length > 0 ? usedTools : null, generatedImages: generatedImages.length > 0 ? 
generatedImages : null, inputTokens: totalInputTokens, outputTokens: totalOutputTokens, status: 'completed', }); // 更新对话信息 const titleSource = displayMessage || message; await db .update(conversations) .set({ messageCount: (conversation.messageCount || 0) + 2, totalTokens: (conversation.totalTokens || 0) + totalInputTokens + totalOutputTokens, lastMessageAt: new Date(), updatedAt: new Date(), title: (conversation.messageCount || 0) === 0 ? titleSource.slice(0, 50) + (titleSource.length > 50 ? '...' : '') : conversation.title, }) .where(eq(conversations.conversationId, conversationId)); // 发送完成事件 safeWriter.write({ type: 'done', messageId: assistantMessageId, inputTokens: totalInputTokens, outputTokens: totalOutputTokens, usedTools: usedTools.length > 0 ? usedTools : undefined, }); safeWriter.close(); } catch (error) { console.error('Stream error:', error); const errorMessage = error instanceof Error ? error.message : 'Unknown error'; safeWriter.write({ type: 'error', error: errorMessage, }); safeWriter.close(); } }, }); return new Response(stream, { headers: { 'Content-Type': 'text/event-stream', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', }, }); } catch (error) { console.error('Chat API error:', error); return NextResponse.json( { error: 'Failed to process chat request' }, { status: 500 } ); } } // ==================== Codex 模型处理函数(使用 Codex Response API 格式)==================== interface CodexChatParams { cchUrl: string; apiKey: string; model: string; systemPrompt: string; temperature: number; historyMessages: { role: string; content: string }[]; message: string; tools: string[]; controller: ReadableStreamDefaultController; encoder: TextEncoder; // 用户上传的图片 images?: { type: 'image'; media_type: string; data: string; }[]; // 秘塔 API Key metasoApiKey?: string; // PDF 文档(直接传给 API) pdfDocuments?: PdfDocumentData[]; // 解析后的 Office 文档 parsedOfficeDocuments?: ParsedDocument[]; } // Codex Response API 的输入项类型 interface CodexInputItem { type: 'message'; role: 
'user' | 'assistant' | 'system'; content: string | CodexMultimodalContent[]; } // Codex 多模态内容类型 type CodexMultimodalContent = | { type: 'input_text'; text: string } | { type: 'input_image'; image_url: string }; // Codex Response API 的工具调用类型 interface CodexFunctionCall { call_id: string; name: string; arguments: string; } async function handleCodexChat(params: CodexChatParams): Promise<{ fullContent: string; inputTokens: number; outputTokens: number; usedTools: string[]; }> { const { cchUrl, apiKey, model, systemPrompt, temperature, historyMessages, message, tools, controller, encoder, images, metasoApiKey, pdfDocuments, parsedOfficeDocuments, } = params; // 创建安全的 stream 写入器 const safeWriter = createSafeStreamWriter(controller, encoder); // 构建 Codex Response API 格式的输入(过滤空内容的消息) const inputItems: CodexInputItem[] = [ ...historyMessages .filter((msg) => msg.content && msg.content.trim() !== '') .map((msg) => ({ type: 'message' as const, role: msg.role as 'user' | 'assistant', content: msg.content, })), ]; // 添加当前用户消息(支持多模态内容) const hasImages = images && images.length > 0; const hasPdfDocuments = pdfDocuments && pdfDocuments.length > 0; const hasOfficeDocuments = parsedOfficeDocuments && parsedOfficeDocuments.length > 0; // 构建消息文本(包含用户消息、PDF 提示和解析后的 Office 文档内容) let textContent = message; // Codex/OpenAI 格式不支持原生 PDF,添加提示信息 if (hasPdfDocuments) { const pdfInfo = pdfDocuments!.map(doc => `[PDF 文档: ${doc.name}]`).join('\n'); textContent = `${textContent}\n\n注意:用户上传了以下 PDF 文件,但当前 API 格式不支持原生 PDF 解析。建议切换到 Claude 原生格式以获得完整的 PDF 支持。\n${pdfInfo}`; console.log('[handleCodexChat] PDF documents detected but not supported in Codex format'); } // 添加解析后的 Office 文档内容 if (hasOfficeDocuments) { const officeContents = parsedOfficeDocuments!.map(doc => { const typeLabel = doc.type === 'word' ? 'Word' : doc.type === 'excel' ? 
'Excel' : '文档'; return `\n\n--- ${typeLabel} 文件:${doc.name} ---\n${doc.content}\n--- 文件结束 ---`; }).join('\n'); textContent = textContent + officeContents; console.log('[handleCodexChat] Added Office documents content, total length:', textContent.length); } if (hasImages) { console.log('[handleCodexChat] Building multimodal message with', images!.length, 'images'); // 如果有图片,构建多模态消息 const multimodalContent: CodexMultimodalContent[] = []; // 先添加图片 for (const img of images!) { console.log('[handleCodexChat] Adding image:', { type: img.type, media_type: img.media_type, dataLength: img.data?.length || 0, }); // Codex/OpenAI 格式:使用 data URL const dataUrl = `data:${img.media_type};base64,${img.data}`; multimodalContent.push({ type: 'input_image', image_url: dataUrl, }); } // 再添加文本 if (textContent) { multimodalContent.push({ type: 'input_text', text: textContent, }); } console.log('[handleCodexChat] Multimodal content blocks:', multimodalContent.length); inputItems.push({ type: 'message' as const, role: 'user' as const, content: multimodalContent, }); } else { console.log('[handleCodexChat] No images, using text message'); // 没有图片,使用文本消息 inputItems.push({ type: 'message' as const, role: 'user' as const, content: textContent, }); } // 构建 Codex Response API 格式的工具定义 const codexTools = buildCodexToolDefinitions(tools); let fullContent = ''; let totalInputTokens = 0; let totalOutputTokens = 0; const usedTools: string[] = []; // 收集使用过的工具名称 let loopCount = 0; const maxLoops = 10; while (loopCount < maxLoops) { loopCount++; // 构建 Codex Response API 请求体 const requestBody: Record = { model, input: inputItems, stream: true, instructions: systemPrompt, temperature, }; if (codexTools.length > 0) { requestBody.tools = codexTools; } // 使用 Codex Response API 端点 const response = await fetch(`${normalizeBaseUrl(cchUrl)}/v1/responses`, { method: 'POST', headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${apiKey}`, }, body: JSON.stringify(requestBody), }); if 
(!response.ok) { const errorText = await response.text(); throw new Error(`CCH API error: ${response.status} - ${errorText}`); } const reader = response.body?.getReader(); if (!reader) { throw new Error('No response body'); } // 收集当前轮次的内容 let currentTextContent = ''; const functionCalls: CodexFunctionCall[] = []; let currentFunctionCall: { call_id: string; name: string; arguments: string } | null = null; let hasToolUse = false; const decoder = new TextDecoder(); let buffer = ''; // 处理 Codex Response API 流式响应 while (true) { const { done, value } = await reader.read(); if (done) break; buffer += decoder.decode(value, { stream: true }); const lines = buffer.split('\n'); buffer = lines.pop() || ''; for (const line of lines) { // Codex Response API 使用 "event: xxx" 和 "data: xxx" 格式 if (line.startsWith('event: ')) { // 事件类型行,继续读取下一行的 data continue; } if (line.startsWith('data: ')) { const data = line.slice(6); if (data === '[DONE]') continue; try { const event = JSON.parse(data); // 处理不同的事件类型 if (event.type === 'response.output_text.delta') { // 文本增量 const delta = event.delta || ''; currentTextContent += delta; fullContent += delta; safeWriter.write({ type: 'text', content: delta, }); } else if (event.type === 'response.content_part.delta') { // 内容部分增量(另一种格式) const delta = event.delta?.text || event.delta || ''; if (delta) { currentTextContent += delta; fullContent += delta; safeWriter.write({ type: 'text', content: delta, }); } } else if (event.type === 'response.function_call_arguments.delta') { // 函数调用参数增量 if (currentFunctionCall) { currentFunctionCall.arguments += event.delta || ''; } } else if (event.type === 'response.output_item.added') { // 新输出项添加 const item = event.item; if (item?.type === 'function_call') { currentFunctionCall = { call_id: item.call_id || '', name: item.name || '', arguments: '', }; hasToolUse = true; safeWriter.write({ type: 'tool_use_start', id: item.call_id, name: item.name, }); } } else if (event.type === 'response.output_item.done') { // 
输出项完成 const item = event.item; if (item?.type === 'function_call' && currentFunctionCall) { // 更新函数调用信息 currentFunctionCall.call_id = item.call_id || currentFunctionCall.call_id; currentFunctionCall.name = item.name || currentFunctionCall.name; currentFunctionCall.arguments = item.arguments || currentFunctionCall.arguments; functionCalls.push({ ...currentFunctionCall }); safeWriter.write({ type: 'tool_use_complete', id: currentFunctionCall.call_id, name: currentFunctionCall.name, input: JSON.parse(currentFunctionCall.arguments || '{}'), }); currentFunctionCall = null; } } else if (event.type === 'response.completed') { // 响应完成,提取 usage 信息 const usage = event.response?.usage; if (usage) { totalInputTokens = usage.input_tokens || 0; totalOutputTokens = usage.output_tokens || 0; } } } catch (e) { console.error('Parse error:', e, 'Line:', line); } } } } // 检查是否需要执行工具 if (hasToolUse && functionCalls.length > 0) { // 将助手消息添加到输入历史 if (currentTextContent) { inputItems.push({ type: 'message', role: 'assistant', content: currentTextContent, }); } // 执行所有工具并收集结果 for (const fc of functionCalls) { // 收集工具名称(避免重复) if (!usedTools.includes(fc.name)) { usedTools.push(fc.name); // 发送实时工具使用事件 safeWriter.write({ type: 'tool_used', toolName: fc.name, }); } // 发送工具执行开始事件 safeWriter.write({ type: 'tool_execution_start', id: fc.call_id, name: fc.name, }); // 解析工具参数 let toolInput: Record = {}; try { toolInput = JSON.parse(fc.arguments || '{}'); } catch { console.error('Failed to parse tool arguments'); } // 执行工具 const result = await executeTool(fc.name, toolInput, { metasoApiKey }); // 发送工具执行结果事件 safeWriter.write({ type: 'tool_execution_result', id: fc.call_id, name: fc.name, success: result.success, result: result.displayResult, images: result.images, }); // 如果有搜索图片结果,发送专门的图片事件 if (result.searchImages && result.searchImages.length > 0) { safeWriter.write({ type: 'tool_search_images', id: fc.call_id, name: fc.name, searchImages: result.searchImages, }); } // 如果有搜索视频结果,发送专门的视频事件 if 
(result.searchVideos && result.searchVideos.length > 0) { safeWriter.write({ type: 'tool_search_videos', id: fc.call_id, name: fc.name, searchVideos: result.searchVideos, }); } // 将工具结果显示给用户 const toolDisplayText = `\n\n${result.displayResult}\n\n`; fullContent += toolDisplayText; safeWriter.write({ type: 'text', content: toolDisplayText, }); // 将工具结果添加到输入历史(Codex 格式) inputItems.push({ type: 'message', role: 'user', content: `Function ${fc.name} result: ${result.fullResult}`, }); } // 继续循环,让 AI 基于工具结果继续回复 continue; } // 如果没有工具调用,则结束循环 break; } return { fullContent, inputTokens: totalInputTokens, outputTokens: totalOutputTokens, usedTools, }; } // ==================== Claude 模型处理函数 ==================== interface ClaudeChatParams { cchUrl: string; apiKey: string; model: string; systemPrompt: string; temperature: number; historyMessages: { role: string; content: string }[]; message: string; tools: string[]; enableThinking: boolean; controller: ReadableStreamDefaultController; encoder: TextEncoder; // 用户上传的图片 images?: { type: 'image'; media_type: string; data: string; }[]; // 秘塔 API Key metasoApiKey?: string; // PDF 文档(直接传给 Claude API 使用原生 document 类型) pdfDocuments?: PdfDocumentData[]; // 解析后的 Office 文档 parsedOfficeDocuments?: ParsedDocument[]; } async function handleClaudeChat(params: ClaudeChatParams): Promise<{ fullContent: string; thinkingContent: string; inputTokens: number; outputTokens: number; usedTools: string[]; }> { const { cchUrl, apiKey, model, systemPrompt, temperature, historyMessages, message, tools, enableThinking, controller, encoder, images, metasoApiKey, pdfDocuments, parsedOfficeDocuments, } = params; // 创建安全的 stream 写入器 const safeWriter = createSafeStreamWriter(controller, encoder); // 构建消息历史(过滤空内容的消息) const messageHistory: APIMessage[] = historyMessages .filter((msg) => msg.content && msg.content.trim() !== '') .map((msg) => ({ role: msg.role as 'user' | 'assistant', content: msg.content, })); // 添加当前用户消息(支持多模态内容:图片、PDF、Office文档) const hasImages 
= images && images.length > 0; const hasPdfDocuments = pdfDocuments && pdfDocuments.length > 0; const hasOfficeDocuments = parsedOfficeDocuments && parsedOfficeDocuments.length > 0; if (hasImages || hasPdfDocuments || hasOfficeDocuments) { console.log('[handleClaudeChat] Building multimodal message with:', { images: images?.length || 0, pdfDocuments: pdfDocuments?.length || 0, officeDocuments: parsedOfficeDocuments?.length || 0, }); // 构建多模态消息 const multimodalContent: ContentBlock[] = []; // 1. 添加 PDF 文档(使用 Claude 原生 document 类型) if (hasPdfDocuments) { for (const pdf of pdfDocuments!) { console.log('[handleClaudeChat] Adding PDF document:', pdf.name, 'size:', pdf.size); multimodalContent.push({ type: 'document' as unknown as 'text', // @ts-expect-error - Claude API 支持 document 类型但 TypeScript 类型定义不完整 source: { type: 'base64', media_type: 'application/pdf', data: pdf.data, }, }); } } // 2. 添加图片 if (hasImages) { for (const img of images!) { console.log('[handleClaudeChat] Adding image:', { type: img.type, media_type: img.media_type, dataLength: img.data?.length || 0, }); multimodalContent.push({ type: 'image' as unknown as 'text', // @ts-expect-error - Claude API 支持 image 类型但 TypeScript 类型定义不完整 source: { type: 'base64', media_type: img.media_type, data: img.data, }, }); } } // 3. 构建消息文本(包含用户消息和解析后的 Office 文档内容) let textContent = message; if (hasOfficeDocuments) { const officeContents = parsedOfficeDocuments!.map(doc => { const typeLabel = doc.type === 'word' ? 'Word' : doc.type === 'excel' ? 'Excel' : '文档'; return `\n\n--- ${typeLabel} 文件:${doc.name} ---\n${doc.content}\n--- 文件结束 ---`; }).join('\n'); textContent = message + officeContents; console.log('[handleClaudeChat] Added Office documents content, total length:', textContent.length); } // 4. 
添加文本内容 if (textContent) { multimodalContent.push({ type: 'text', text: textContent, }); } console.log('[handleClaudeChat] Multimodal content blocks:', multimodalContent.length); messageHistory.push({ role: 'user', content: multimodalContent, }); } else { console.log('[handleClaudeChat] No multimodal content, using simple text message'); // 没有多模态内容,使用简单文本消息 messageHistory.push({ role: 'user', content: message, }); } // 构建工具定义 const toolDefinitions = buildClaudeToolDefinitions(tools); let currentMessages = [...messageHistory]; let fullContent = ''; let thinkingContent = ''; let totalInputTokens = 0; let totalOutputTokens = 0; const usedTools: string[] = []; // 收集使用过的工具名称 let loopCount = 0; const maxLoops = 10; let hasToolResults = false; while (loopCount < maxLoops) { loopCount++; // 构建请求体 const requestBody: Record = { model, max_tokens: 8192, stream: true, system: systemPrompt, messages: currentMessages, }; // 调试日志:查看发送给 Claude API 的消息内容 console.log('[handleClaudeChat] Sending to Claude API:', { model, messagesCount: currentMessages.length, lastMessage: currentMessages.length > 0 ? { role: currentMessages[currentMessages.length - 1].role, contentType: typeof currentMessages[currentMessages.length - 1].content, contentIsArray: Array.isArray(currentMessages[currentMessages.length - 1].content), contentLength: Array.isArray(currentMessages[currentMessages.length - 1].content) ? 
(currentMessages[currentMessages.length - 1].content as unknown[]).length : (currentMessages[currentMessages.length - 1].content as string).length, } : null, }); if (toolDefinitions.length > 0) { requestBody.tools = toolDefinitions; } // 添加思考模式(Codex 不支持) if (enableThinking && !hasToolResults) { requestBody.thinking = { type: 'enabled', budget_tokens: 4096, }; } else { requestBody.temperature = temperature; } const response = await fetch(`${normalizeBaseUrl(cchUrl)}/v1/messages`, { method: 'POST', headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey, 'anthropic-version': '2023-06-01', }, body: JSON.stringify(requestBody), }); if (!response.ok) { const errorText = await response.text(); throw new Error(`CCH API error: ${response.status} - ${errorText}`); } const reader = response.body?.getReader(); if (!reader) { throw new Error('No response body'); } // 收集当前轮次的内容 let currentTextContent = ''; let currentThinkingContent = ''; const toolCalls: { id: string; name: string; input: Record }[] = []; let currentToolUse: { id: string; name: string; inputJson: string } | null = null; let stopReason: string | null = null; const decoder = new TextDecoder(); let buffer = ''; // 处理流式响应 while (true) { const { done, value } = await reader.read(); if (done) break; buffer += decoder.decode(value, { stream: true }); const lines = buffer.split('\n'); buffer = lines.pop() || ''; for (const line of lines) { if (line.startsWith('data: ')) { const data = line.slice(6); if (data === '[DONE]') continue; try { const event = JSON.parse(data); if (event.type === 'content_block_delta') { const delta = event.delta; if (delta.type === 'thinking_delta') { currentThinkingContent += delta.thinking || ''; thinkingContent += delta.thinking || ''; safeWriter.write({ type: 'thinking', content: delta.thinking || '', }); } else if (delta.type === 'text_delta') { currentTextContent += delta.text || ''; fullContent += delta.text || ''; safeWriter.write({ type: 'text', content: delta.text || '', 
}); } else if (delta.type === 'input_json_delta') { if (currentToolUse) { currentToolUse.inputJson += delta.partial_json || ''; } } } else if (event.type === 'message_delta') { if (event.usage) { totalOutputTokens += event.usage.output_tokens || 0; } if (event.delta?.stop_reason) { stopReason = event.delta.stop_reason; } } else if (event.type === 'message_start') { if (event.message?.usage) { totalInputTokens += event.message.usage.input_tokens || 0; } } else if (event.type === 'content_block_start') { if (event.content_block?.type === 'tool_use') { currentToolUse = { id: event.content_block.id, name: event.content_block.name, inputJson: '', }; safeWriter.write({ type: 'tool_use_start', id: event.content_block.id, name: event.content_block.name, }); } } else if (event.type === 'content_block_stop') { if (currentToolUse) { try { const toolInput = JSON.parse(currentToolUse.inputJson || '{}'); toolCalls.push({ id: currentToolUse.id, name: currentToolUse.name, input: toolInput, }); safeWriter.write({ type: 'tool_use_complete', id: currentToolUse.id, name: currentToolUse.name, input: toolInput, }); } catch (e) { console.error('Failed to parse tool input:', e); } currentToolUse = null; } } } catch (e) { console.error('Parse error:', e); } } } } // 检查是否需要执行工具 if (stopReason === 'tool_use' && toolCalls.length > 0) { // 构建助手消息的内容块 const assistantContent: ContentBlock[] = []; if (currentThinkingContent) { assistantContent.push({ type: 'thinking', thinking: currentThinkingContent, }); } if (currentTextContent) { assistantContent.push({ type: 'text', text: currentTextContent, }); } for (const tc of toolCalls) { assistantContent.push({ type: 'tool_use', id: tc.id, name: tc.name, input: tc.input, }); } currentMessages.push({ role: 'assistant', content: assistantContent, }); // 执行所有工具并收集结果 const toolResults: ContentBlock[] = []; for (const tc of toolCalls) { // 收集工具名称(避免重复) if (!usedTools.includes(tc.name)) { usedTools.push(tc.name); // 发送实时工具使用事件 safeWriter.write({ type: 
'tool_used', toolName: tc.name, }); } safeWriter.write({ type: 'tool_execution_start', id: tc.id, name: tc.name, }); const result = await executeTool(tc.name, tc.input, { metasoApiKey }); if (result.requiresPyodide) { safeWriter.write({ type: 'pyodide_execution_required', id: tc.id, name: tc.name, code: result.code, language: result.language, }); toolResults.push({ type: 'tool_result', tool_use_id: tc.id, content: '代码正在浏览器端执行中,请稍候...', }); continue; } safeWriter.write({ type: 'tool_execution_result', id: tc.id, name: tc.name, success: result.success, result: result.displayResult, images: result.images, }); // 如果有搜索图片结果,发送专门的图片事件 if (result.searchImages && result.searchImages.length > 0) { safeWriter.write({ type: 'tool_search_images', id: tc.id, name: tc.name, searchImages: result.searchImages, }); } // 如果有搜索视频结果,发送专门的视频事件 if (result.searchVideos && result.searchVideos.length > 0) { safeWriter.write({ type: 'tool_search_videos', id: tc.id, name: tc.name, searchVideos: result.searchVideos, }); } const toolDisplayText = `\n\n${result.displayResult}\n\n`; fullContent += toolDisplayText; safeWriter.write({ type: 'text', content: toolDisplayText, }); toolResults.push({ type: 'tool_result', tool_use_id: tc.id, content: result.fullResult, }); } currentMessages.push({ role: 'user', content: toolResults, }); hasToolResults = true; continue; } break; } return { fullContent, thinkingContent, inputTokens: totalInputTokens, outputTokens: totalOutputTokens, usedTools, }; } // ==================== OpenAI 兼容格式处理函数 ==================== interface OpenAICompatibleChatParams { cchUrl: string; apiKey: string; model: string; systemPrompt: string; temperature: number; historyMessages: { role: string; content: string }[]; message: string; tools: string[]; controller: ReadableStreamDefaultController; encoder: TextEncoder; images?: { type: 'image'; media_type: string; data: string; }[]; // 秘塔 API Key metasoApiKey?: string; // PDF 文档 pdfDocuments?: PdfDocumentData[]; // 解析后的 Office 文档 
parsedOfficeDocuments?: ParsedDocument[]; } // OpenAI 消息格式 interface OpenAIMessageContent { type: 'text' | 'image_url'; text?: string; image_url?: { url: string; }; } interface OpenAICompatibleMessage { role: 'system' | 'user' | 'assistant' | 'tool'; content: string | OpenAIMessageContent[] | null; tool_calls?: { id: string; type: 'function'; function: { name: string; arguments: string; }; }[]; tool_call_id?: string; } async function handleOpenAICompatibleChat(params: OpenAICompatibleChatParams): Promise<{ fullContent: string; thinkingContent: string; inputTokens: number; outputTokens: number; usedTools: string[]; }> { const { cchUrl, apiKey, model, systemPrompt, temperature, historyMessages, message, tools, controller, encoder, images, metasoApiKey, pdfDocuments, parsedOfficeDocuments, } = params; // 创建安全的 stream 写入器 const safeWriter = createSafeStreamWriter(controller, encoder); // 构建 OpenAI 格式的消息历史(过滤空内容的消息) const openaiMessages: OpenAICompatibleMessage[] = [ { role: 'system', content: systemPrompt }, ...historyMessages .filter((msg) => msg.content && msg.content.trim() !== '') .map((msg) => ({ role: msg.role as 'user' | 'assistant', content: msg.content, })), ]; // 添加当前用户消息(支持多模态) const hasImages = images && images.length > 0; const hasPdfDocuments = pdfDocuments && pdfDocuments.length > 0; const hasOfficeDocuments = parsedOfficeDocuments && parsedOfficeDocuments.length > 0; // 构建消息文本(包含用户消息、PDF 提示和解析后的 Office 文档内容) let textContent = message; // OpenAI 格式不支持原生 PDF,添加提示信息 if (hasPdfDocuments) { const pdfInfo = pdfDocuments!.map(doc => `[PDF 文档: ${doc.name}]`).join('\n'); textContent = `${textContent}\n\n注意:用户上传了以下 PDF 文件,但当前 API 格式不支持原生 PDF 解析。建议切换到 Claude 原生格式以获得完整的 PDF 支持。\n${pdfInfo}`; console.log('[handleOpenAICompatibleChat] PDF documents detected but not supported in OpenAI format'); } // 添加解析后的 Office 文档内容 if (hasOfficeDocuments) { const officeContents = parsedOfficeDocuments!.map(doc => { const typeLabel = doc.type === 'word' ? 
'Word' : doc.type === 'excel' ? 'Excel' : '文档'; return `\n\n--- ${typeLabel} 文件:${doc.name} ---\n${doc.content}\n--- 文件结束 ---`; }).join('\n'); textContent = textContent + officeContents; console.log('[handleOpenAICompatibleChat] Added Office documents content, total length:', textContent.length); } if (hasImages) { console.log('[handleOpenAICompatibleChat] Building multimodal message with', images!.length, 'images'); const multimodalContent: OpenAIMessageContent[] = []; // 添加图片 for (const img of images!) { console.log('[handleOpenAICompatibleChat] Adding image:', { type: img.type, media_type: img.media_type, dataLength: img.data?.length || 0, }); multimodalContent.push({ type: 'image_url', image_url: { url: `data:${img.media_type};base64,${img.data}`, }, }); } // 添加文本 if (textContent) { multimodalContent.push({ type: 'text', text: textContent, }); } openaiMessages.push({ role: 'user', content: multimodalContent, }); } else { console.log('[handleOpenAICompatibleChat] No images, using text message'); openaiMessages.push({ role: 'user', content: textContent, }); } // 构建 OpenAI 格式的工具定义 const openaiTools = buildOpenAIToolDefinitions(tools); let fullContent = ''; let thinkingContent = ''; // 用于收集 标签中的思考内容 let totalInputTokens = 0; let totalOutputTokens = 0; const usedTools: string[] = []; // 收集使用过的工具名称 let loopCount = 0; const maxLoops = 10; // 用于处理 标签的状态变量(跨 chunk 处理) let isInThinkingMode = false; let pendingBuffer = ''; // 用于处理标签可能跨 chunk 的情况 while (loopCount < maxLoops) { loopCount++; // 构建请求体 const requestBody: Record = { model, messages: openaiMessages, stream: true, temperature, }; if (openaiTools.length > 0) { requestBody.tools = openaiTools; } console.log('[handleOpenAICompatibleChat] Sending to OpenAI-compatible API:', { model, messagesCount: openaiMessages.length, url: `${normalizeBaseUrl(cchUrl)}/v1/chat/completions`, }); const response = await fetch(`${normalizeBaseUrl(cchUrl)}/v1/chat/completions`, { method: 'POST', headers: { 'Content-Type': 
'application/json', 'Authorization': `Bearer ${apiKey}`, }, body: JSON.stringify(requestBody), }); if (!response.ok) { const errorText = await response.text(); throw new Error(`OpenAI-compatible API error: ${response.status} - ${errorText}`); } const reader = response.body?.getReader(); if (!reader) { throw new Error('No response body'); } // 收集当前轮次的内容 let currentTextContent = ''; const toolCalls: { id: string; name: string; arguments: string }[] = []; const currentToolCallsMap: Map = new Map(); let stopReason: string | null = null; const decoder = new TextDecoder(); let buffer = ''; // 处理流式响应 while (true) { const { done, value } = await reader.read(); if (done) break; buffer += decoder.decode(value, { stream: true }); const lines = buffer.split('\n'); buffer = lines.pop() || ''; for (const line of lines) { if (line.startsWith('data: ')) { const data = line.slice(6).trim(); if (data === '[DONE]') continue; if (!data) continue; try { const event = JSON.parse(data); // 处理文本内容(包含 标签过滤) if (event.choices?.[0]?.delta?.content) { const rawContent = event.choices[0].delta.content; // 将新内容追加到待处理缓冲区 pendingBuffer += rawContent; // 处理缓冲区中的内容 while (pendingBuffer.length > 0) { if (isInThinkingMode) { // 在 thinking 模式中,查找 结束标签 const endTagIndex = pendingBuffer.indexOf(''); if (endTagIndex !== -1) { // 找到结束标签,提取 thinking 内容 const thinkPart = pendingBuffer.slice(0, endTagIndex); if (thinkPart) { thinkingContent += thinkPart; // 发送 thinking 事件(让前端可以展示折叠的思考内容) safeWriter.write({ type: 'thinking', content: thinkPart, }); } // 移除已处理的内容和结束标签 pendingBuffer = pendingBuffer.slice(endTagIndex + 8); // 8 = ''.length isInThinkingMode = false; } else { // 没找到结束标签,检查是否可能是部分标签 // 保留最后 7 个字符以防 被截断(7 = ' 7) { const safePart = pendingBuffer.slice(0, -7); const keepPart = pendingBuffer.slice(-7); if (safePart) { thinkingContent += safePart; safeWriter.write({ type: 'thinking', content: safePart, }); } pendingBuffer = keepPart; } // 等待更多数据 break; } } else { // 不在 thinking 模式,查找 开始标签 const 
startTagIndex = pendingBuffer.indexOf(''); if (startTagIndex !== -1) { // 找到开始标签,先输出标签前的普通文本 const textPart = pendingBuffer.slice(0, startTagIndex); if (textPart) { currentTextContent += textPart; fullContent += textPart; safeWriter.write({ type: 'text', content: textPart, }); } // 移除已处理的内容和开始标签 pendingBuffer = pendingBuffer.slice(startTagIndex + 7); // 7 = ''.length isInThinkingMode = true; } else { // 没找到开始标签,检查是否可能是部分标签 // 保留最后 6 个字符以防 被截断(6 = ' pendingBuffer.length - 7) { // 可能是部分的 标签,保留这部分 const safePart = pendingBuffer.slice(0, potentialTagStart); const keepPart = pendingBuffer.slice(potentialTagStart); if (safePart) { currentTextContent += safePart; fullContent += safePart; safeWriter.write({ type: 'text', content: safePart, }); } pendingBuffer = keepPart; // 等待更多数据 break; } else { // 安全输出所有内容 currentTextContent += pendingBuffer; fullContent += pendingBuffer; safeWriter.write({ type: 'text', content: pendingBuffer, }); pendingBuffer = ''; } } } } } // 处理工具调用 if (event.choices?.[0]?.delta?.tool_calls) { for (const toolCall of event.choices[0].delta.tool_calls) { const index = toolCall.index ?? 
0; if (!currentToolCallsMap.has(index)) { // 新的工具调用开始 currentToolCallsMap.set(index, { id: toolCall.id || '', name: toolCall.function?.name || '', arguments: '', }); if (toolCall.id) { safeWriter.write({ type: 'tool_use_start', id: toolCall.id, name: toolCall.function?.name || '', }); } } const current = currentToolCallsMap.get(index)!; // 更新工具调用信息 if (toolCall.id) current.id = toolCall.id; if (toolCall.function?.name) current.name = toolCall.function.name; if (toolCall.function?.arguments) { current.arguments += toolCall.function.arguments; } } } // 处理结束原因 if (event.choices?.[0]?.finish_reason) { stopReason = event.choices[0].finish_reason; } // 处理 usage 信息 if (event.usage) { totalInputTokens = event.usage.prompt_tokens || 0; totalOutputTokens = event.usage.completion_tokens || 0; } } catch (e) { console.error('[handleOpenAICompatibleChat] Parse error:', e, 'Line:', line); } } } } // 流结束后,处理可能残留在 pendingBuffer 中的内容 if (pendingBuffer.length > 0) { if (isInThinkingMode) { // 如果还在 thinking 模式,说明 标签没有正常闭合 // 将剩余内容作为 thinking 内容处理 thinkingContent += pendingBuffer; safeWriter.write({ type: 'thinking', content: pendingBuffer, }); } else { // 普通文本模式,输出剩余内容 currentTextContent += pendingBuffer; fullContent += pendingBuffer; safeWriter.write({ type: 'text', content: pendingBuffer, }); } pendingBuffer = ''; } // 收集所有工具调用 for (const [, tc] of currentToolCallsMap) { if (tc.id && tc.name) { toolCalls.push(tc); safeWriter.write({ type: 'tool_use_complete', id: tc.id, name: tc.name, input: JSON.parse(tc.arguments || '{}'), }); } } // 检查是否需要执行工具 if ((stopReason === 'tool_calls' || toolCalls.length > 0) && toolCalls.length > 0) { // 将助手消息添加到历史 const assistantMessage: OpenAICompatibleMessage = { role: 'assistant', content: currentTextContent || null, tool_calls: toolCalls.map((tc) => ({ id: tc.id, type: 'function' as const, function: { name: tc.name, arguments: tc.arguments, }, })), }; openaiMessages.push(assistantMessage); // 执行所有工具并收集结果 for (const tc of toolCalls) { // 收集工具名称(避免重复) 
if (!usedTools.includes(tc.name)) { usedTools.push(tc.name); // 发送实时工具使用事件 safeWriter.write({ type: 'tool_used', toolName: tc.name, }); } safeWriter.write({ type: 'tool_execution_start', id: tc.id, name: tc.name, }); // 解析工具参数 let toolInput: Record = {}; try { toolInput = JSON.parse(tc.arguments || '{}'); } catch { console.error('[handleOpenAICompatibleChat] Failed to parse tool arguments'); } // 执行工具 const result = await executeTool(tc.name, toolInput, { metasoApiKey }); safeWriter.write({ type: 'tool_execution_result', id: tc.id, name: tc.name, success: result.success, result: result.displayResult, images: result.images, }); // 如果有搜索图片结果,发送专门的图片事件 if (result.searchImages && result.searchImages.length > 0) { safeWriter.write({ type: 'tool_search_images', id: tc.id, name: tc.name, searchImages: result.searchImages, }); } // 如果有搜索视频结果,发送专门的视频事件 if (result.searchVideos && result.searchVideos.length > 0) { safeWriter.write({ type: 'tool_search_videos', id: tc.id, name: tc.name, searchVideos: result.searchVideos, }); } // 将工具结果显示给用户 const toolDisplayText = `\n\n${result.displayResult}\n\n`; fullContent += toolDisplayText; safeWriter.write({ type: 'text', content: toolDisplayText, }); // 将工具结果添加到消息历史(OpenAI 格式) openaiMessages.push({ role: 'tool', content: result.fullResult, tool_call_id: tc.id, }); } // 继续循环,让 AI 基于工具结果继续回复 continue; } // 如果没有工具调用,结束循环 break; } return { fullContent, thinkingContent, inputTokens: totalInputTokens, outputTokens: totalOutputTokens, usedTools, }; } // ==================== Gemini 图片生成模型处理函数 ==================== interface GeminiImageChatParams { cchUrl: string; apiKey: string; model: string; systemPrompt: string; temperature: number; historyMessages: { role: string; content: string }[]; message: string; controller: ReadableStreamDefaultController; encoder: TextEncoder; // 用户上传的图片 images?: { type: 'image'; media_type: string; data: string; }[]; } async function handleGeminiImageChat(params: GeminiImageChatParams): Promise<{ fullContent: 
string; generatedImages: GeneratedImageData[]; inputTokens: number; outputTokens: number; }> { const { cchUrl, apiKey, model, systemPrompt, temperature, historyMessages, message, controller, encoder, images, } = params; // 创建安全的 stream 写入器 const safeWriter = createSafeStreamWriter(controller, encoder); // 发送生成开始事件 safeWriter.write({ type: 'image_generation_start', model, }); // 构建 Gemini API 请求内容 // Gemini 使用 contents 数组格式 const contents: Array<{ role: 'user' | 'model'; parts: Array<{ text?: string; inlineData?: { mimeType: string; data: string } }>; }> = []; // 添加历史消息 for (const msg of historyMessages) { if (msg.content && msg.content.trim() !== '') { contents.push({ role: msg.role === 'assistant' ? 'model' : 'user', parts: [{ text: msg.content }], }); } } // 构建当前用户消息(支持多模态) const currentParts: Array<{ text?: string; inlineData?: { mimeType: string; data: string } }> = []; // 如果有图片,先添加图片 if (images && images.length > 0) { for (const img of images) { currentParts.push({ inlineData: { mimeType: img.media_type, data: img.data, }, }); } } // 添加文本消息 if (message) { currentParts.push({ text: message }); } contents.push({ role: 'user', parts: currentParts, }); // 构建请求体 const requestBody = { contents, systemInstruction: systemPrompt ? 
{ parts: [{ text: systemPrompt }] } : undefined, generationConfig: { temperature, responseModalities: ['TEXT', 'IMAGE'], // 请求同时返回文本和图片 }, }; console.log('[handleGeminiImageChat] Sending request to Gemini API:', { model, contentsCount: contents.length, hasImages: images && images.length > 0, }); // 发送请求到 Gemini API // Gemini API 端点格式: /v1beta/models/{model}:generateContent const geminiEndpoint = `${normalizeBaseUrl(cchUrl)}/v1beta/models/${model}:generateContent`; const response = await fetch(geminiEndpoint, { method: 'POST', headers: { 'Content-Type': 'application/json', 'x-goog-api-key': apiKey, }, body: JSON.stringify(requestBody), }); if (!response.ok) { const errorText = await response.text(); throw new Error(`Gemini API error: ${response.status} - ${errorText}`); } // 解析 Gemini 响应 const geminiResponse = await response.json(); let fullContent = ''; const generatedImages: GeneratedImageData[] = []; let totalInputTokens = 0; let totalOutputTokens = 0; // 提取 usage 信息 if (geminiResponse.usageMetadata) { totalInputTokens = geminiResponse.usageMetadata.promptTokenCount || 0; totalOutputTokens = geminiResponse.usageMetadata.candidatesTokenCount || 0; } // 解析候选响应 if (geminiResponse.candidates && geminiResponse.candidates.length > 0) { const candidate = geminiResponse.candidates[0]; if (candidate.content && candidate.content.parts) { for (const part of candidate.content.parts) { // 处理文本内容 if (part.text) { fullContent += part.text; safeWriter.write({ type: 'text', content: part.text, }); } // 处理生成的图片 if (part.inlineData) { const imageData: GeneratedImageData = { mimeType: part.inlineData.mimeType, data: part.inlineData.data, }; generatedImages.push(imageData); // 发送生成图片事件 safeWriter.write({ type: 'generated_image', image: imageData, index: generatedImages.length - 1, }); console.log('[handleGeminiImageChat] Generated image:', { mimeType: imageData.mimeType, dataLength: imageData.data.length, }); } } } } // 发送生成完成事件 safeWriter.write({ type: 'image_generation_complete', 
imageCount: generatedImages.length, }); console.log('[handleGeminiImageChat] Response processed:', { textLength: fullContent.length, imageCount: generatedImages.length, inputTokens: totalInputTokens, outputTokens: totalOutputTokens, }); return { fullContent, generatedImages, inputTokens: totalInputTokens, outputTokens: totalOutputTokens, }; } // 构建 Claude 工具定义 function buildClaudeToolDefinitions(toolIds: string[]) { const toolMap: Record = { web_search: { name: 'web_search', description: '搜索互联网获取最新信息。当用户询问时事、新闻、天气、实时数据等需要最新信息的问题时,请使用此工具。', input_schema: { type: 'object', properties: { query: { type: 'string', description: '搜索查询关键词', }, }, required: ['query'], }, }, web_fetch: { name: 'web_fetch', description: '获取指定 URL 的网页内容。当用户提供了具体的网址并想了解该页面的内容时,请使用此工具。', input_schema: { type: 'object', properties: { url: { type: 'string', description: '要获取内容的完整 URL', }, }, required: ['url'], }, }, mita_search: { name: 'mita_search', description: '秘塔AI智能搜索。支持网页搜索、图片搜索和视频搜索三种模式。当需要搜索高质量的中文内容或需要更精准的搜索结果时使用网页搜索;当用户明确要求搜索图片或需要图片素材时使用图片搜索;当用户需要搜索视频内容时使用视频搜索。', input_schema: { type: 'object', properties: { query: { type: 'string', description: '搜索查询关键词', }, scope: { type: 'string', enum: ['webpage', 'image', 'video'], description: '搜索类型:webpage(网页搜索,默认)、image(图片搜索)或 video(视频搜索)', }, size: { type: 'number', description: '返回结果数量,网页搜索默认10,图片搜索默认5,视频搜索默认5', }, }, required: ['query'], }, }, mita_reader: { name: 'mita_reader', description: '秘塔AI网页读取。获取网页内容并返回结构化的Markdown格式,适合阅读长文章。', input_schema: { type: 'object', properties: { url: { type: 'string', description: '要读取的网页URL', }, }, required: ['url'], }, }, youdao_translate: { name: 'youdao_translate', description: '有道智云高质量多语言翻译。当用户需要翻译文本、句子、段落或询问某个词/短语的翻译时,请使用此工具。支持100+种语言互译,包括中英日韩法德西俄阿拉伯语等。', input_schema: { type: 'object', properties: { text: { type: 'string', description: '待翻译的文本内容', }, from: { type: 'string', description: '源语言代码,如 auto(自动检测)/en(英语)/zh-CHS(简体中文)/ja(日语)/ko(韩语)/fr(法语)/de(德语) 等,默认auto', }, to: { type: 'string', description: 
'目标语言代码,如 zh-CHS(简体中文)/en(英语)/ja(日语)/ko(韩语)/fr(法语)/de(德语) 等,默认zh-CHS',
          },
        },
        required: ['text'],
      },
    },
  };
  return toolIds
    .filter((id) => toolMap[id])
    .map((id) => toolMap[id]);
}

// Build OpenAI function-calling tool definitions for the enabled tool ids;
// unknown ids are silently dropped.
function buildOpenAIToolDefinitions(toolIds: string[]) {
  // FIX(garbled source): generic args restored — bare `Record` does not compile.
  const toolMap: Record<string, unknown> = {
    web_search: {
      type: 'function',
      function: {
        name: 'web_search',
        description: '搜索互联网获取最新信息。当用户询问时事、新闻、天气、实时数据等需要最新信息的问题时,请使用此工具。',
        parameters: {
          type: 'object',
          properties: {
            query: {
              type: 'string',
              description: '搜索查询关键词',
            },
          },
          required: ['query'],
        },
      },
    },
    web_fetch: {
      type: 'function',
      function: {
        name: 'web_fetch',
        description: '获取指定 URL 的网页内容。当用户提供了具体的网址并想了解该页面的内容时,请使用此工具。',
        parameters: {
          type: 'object',
          properties: {
            url: {
              type: 'string',
              description: '要获取内容的完整 URL',
            },
          },
          required: ['url'],
        },
      },
    },
    mita_search: {
      type: 'function',
      function: {
        name: 'mita_search',
        description: '秘塔AI智能搜索。支持网页搜索、图片搜索和视频搜索三种模式。当需要搜索高质量的中文内容或需要更精准的搜索结果时使用网页搜索;当用户明确要求搜索图片或需要图片素材时使用图片搜索;当用户需要搜索视频内容时使用视频搜索。',
        parameters: {
          type: 'object',
          properties: {
            query: {
              type: 'string',
              description: '搜索查询关键词',
            },
            scope: {
              type: 'string',
              enum: ['webpage', 'image', 'video'],
              description: '搜索类型:webpage(网页搜索,默认)、image(图片搜索)或 video(视频搜索)',
            },
            size: {
              type: 'number',
              description: '返回结果数量,网页搜索默认10,图片搜索默认5,视频搜索默认5',
            },
          },
          required: ['query'],
        },
      },
    },
    mita_reader: {
      type: 'function',
      function: {
        name: 'mita_reader',
        description: '秘塔AI网页读取。获取网页内容并返回结构化的Markdown格式,适合阅读长文章。',
        parameters: {
          type: 'object',
          properties: {
            url: {
              type: 'string',
              description: '要读取的网页URL',
            },
          },
          required: ['url'],
        },
      },
    },
    youdao_translate: {
      type: 'function',
      function: {
        name: 'youdao_translate',
        description: '有道智云高质量多语言翻译。当用户需要翻译文本、句子、段落或询问某个词/短语的翻译时,请使用此工具。支持100+种语言互译,包括中英日韩法德西俄阿拉伯语等。',
        parameters: {
          type: 'object',
          properties: {
            text: {
              type: 'string',
              description: '待翻译的文本内容',
            },
            from: {
              type: 'string',
              description: '源语言代码,如 auto(自动检测)/en(英语)/zh-CHS(简体中文)/ja(日语)/ko(韩语)/fr(法语)/de(德语) 等,默认auto',
            },
            to: {
              type: 'string',
              description: '目标语言代码,如 zh-CHS(简体中文)/en(英语)/ja(日语)/ko(韩语)/fr(法语)/de(德语) 等,默认zh-CHS',
            },
          },
          required: ['text'],
        },
      },
    },
  };
  return toolIds
    .filter((id) => toolMap[id])
    .map((id) => toolMap[id]);
}

// Build Codex Response API tool definitions (flat function shape, no nested
// `function` wrapper) for the enabled tool ids; unknown ids are dropped.
function buildCodexToolDefinitions(toolIds: string[]) {
  // FIX(garbled source): generic args restored — bare `Record` does not compile.
  const toolMap: Record<string, unknown> = {
    web_search: {
      type: 'function',
      name: 'web_search',
      description: '搜索互联网获取最新信息。当用户询问时事、新闻、天气、实时数据等需要最新信息的问题时,请使用此工具。',
      parameters: {
        type: 'object',
        properties: {
          query: {
            type: 'string',
            description: '搜索查询关键词',
          },
        },
        required: ['query'],
      },
    },
    web_fetch: {
      type: 'function',
      name: 'web_fetch',
      description: '获取指定 URL 的网页内容。当用户提供了具体的网址并想了解该页面的内容时,请使用此工具。',
      parameters: {
        type: 'object',
        properties: {
          url: {
            type: 'string',
            description: '要获取内容的完整 URL',
          },
        },
        required: ['url'],
      },
    },
    mita_search: {
      type: 'function',
      name: 'mita_search',
      description: '秘塔AI智能搜索。支持网页搜索、图片搜索和视频搜索三种模式。当需要搜索高质量的中文内容或需要更精准的搜索结果时使用网页搜索;当用户明确要求搜索图片或需要图片素材时使用图片搜索;当用户需要搜索视频内容时使用视频搜索。',
      parameters: {
        type: 'object',
        properties: {
          query: {
            type: 'string',
            description: '搜索查询关键词',
          },
          scope: {
            type: 'string',
            enum: ['webpage', 'image', 'video'],
            description: '搜索类型:webpage(网页搜索,默认)、image(图片搜索)或 video(视频搜索)',
          },
          size: {
            type: 'number',
            description: '返回结果数量,网页搜索默认10,图片搜索默认5,视频搜索默认5',
          },
        },
        required: ['query'],
      },
    },
    mita_reader: {
      type: 'function',
      name: 'mita_reader',
      description: '秘塔AI网页读取。获取网页内容并返回结构化的Markdown格式,适合阅读长文章。',
      parameters: {
        type: 'object',
        properties: {
          url: {
            type: 'string',
            description: '要读取的网页URL',
          },
        },
        required: ['url'],
      },
    },
    youdao_translate: {
      type: 'function',
      name: 'youdao_translate',
      description: '有道智云高质量多语言翻译。当用户需要翻译文本、句子、段落或询问某个词/短语的翻译时,请使用此工具。支持100+种语言互译,包括中英日韩法德西俄阿拉伯语等。',
      parameters: {
        type: 'object',
        properties: {
          text: {
            type: 'string',
            description: '待翻译的文本内容',
          },
          from: {
            type: 'string',
            description: '源语言代码,如 auto(自动检测)/en(英语)/zh-CHS(简体中文)/ja(日语)/ko(韩语)/fr(法语)/de(德语) 等,默认auto',
          },
          to: {
            type: 'string',
            description: '目标语言代码,如 zh-CHS(简体中文)/en(英语)/ja(日语)/ko(韩语)/fr(法语)/de(德语) 等,默认zh-CHS',
          },
        },
        required: ['text'],
      },
    },
  };
  return toolIds
    .filter((id) => toolMap[id])
    .map((id) => toolMap[id]);
}