Compare commits

..

10 Commits

Author SHA1 Message Date
a2a7fd46c3 chore: 版本号更新到v3.0.6 2026-04-15 09:52:31 +08:00
baf5913bfb fix: SiliconFlow平台Function Calling第二轮调用兼容
问题:SiliconFlow平台不支持标准tool消息类型,第二轮调用返回参数无效

修复:将tool消息转换为user消息格式
- 收集所有tool消息的内容
- 合并为一个用户消息发送给模型
- 添加明确的提示让模型直接根据结果回答

版本: v3.0.6
2026-04-15 09:52:19 +08:00
ae08e01e55 fix: Kimi模型伪工具调用格式过滤
修复Kimi-K2.5模型在第二轮调用时输出伪工具调用格式的问题:
- 添加系统提示告诉模型直接根据工具结果回答
- 过滤 <|tool_calls_section_begin|> 等内部格式标记
- 清理多余空行

版本: v3.0.1
2026-04-15 09:45:08 +08:00
9048d94e33 fix: 添加详细日志诊断工具调用消息格式 2026-04-15 02:25:05 +08:00
291de733a4 fix: chat_with_tool_results不重复添加tool结果,修正消息格式 2026-04-15 01:03:10 +08:00
10f67a807a fix: get_agent_config添加supports_vision和supports_function_calling字段 2026-04-14 19:20:17 +08:00
d9ac2c78f6 feat: 对话区左侧显示Agent信息 2026-04-14 19:14:31 +08:00
4ac67b5816 feat: v3.0 Function Calling模式 - LLM自主调用工具 2026-04-14 18:39:12 +08:00
527885f3d6 fix: 工具按钮放附件右边、输入框左边 2026-04-14 17:19:52 +08:00
c21270195a feat: 工具按钮放输入框右边,面板向上弹出 2026-04-14 17:15:56 +08:00
6 changed files with 600 additions and 324 deletions

View File

@@ -1,6 +1,6 @@
"""
AI对话系统 v2.0.0 - 主应用
支持大模型池、Agent管理、渠道独立绑定、思考功能开关
AI对话系统 v3.0.0 - 主应用
支持大模型池、Agent管理、渠道独立绑定、思考功能开关、Function Calling(LLM自主调用工具)
"""
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, HTTPException, Request
from fastapi.responses import HTMLResponse, JSONResponse
@@ -33,7 +33,7 @@ logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# 创建应用
app = FastAPI(title="AI对话系统 v2.0", version="2.0.0")
app = FastAPI(title="AI对话系统 v3.0", version="3.0.6")
# 静态文件和模板
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -122,6 +122,7 @@ async def get_providers(db: Session = Depends(get_db)):
"thinking_model": p.thinking_model,
"supports_vision": p.supports_vision,
"vision_model": p.vision_model,
"supports_function_calling": p.supports_function_calling,
"max_tokens": p.max_tokens,
"temperature": p.temperature,
"is_active": p.is_active,
@@ -832,7 +833,7 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
conversation_id = data.get("conversation_id")
enable_thinking = data.get("enable_thinking", True)
agent_id_override = data.get("agent_id")
disabled_tools = data.get("disabled_tools", []) # 禁用的工具列表
# v3.0: 移除 disabled_tools,由LLM自主决定
if agent_id_override:
agent = agent_service.get_agent(agent_id_override)
@@ -846,48 +847,41 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
if not message.strip() and not files:
continue
# 处理文件内容,添加到消息
image_contents = [] # 图片内容(用于视觉模型)
text_contents = [] # 文本文件内容
image_paths = [] # 图片服务器路径(用于历史记录显示)
# 处理文件内容
image_contents = []
text_contents = []
image_paths = []
if files:
for f in files:
if f.get('type') and f['type'].startswith('image/'):
# 图片:记录 base64 数据,用于视觉模型
image_contents.append({
'name': f['name'],
'type': f['type'],
'data': f.get('content', '') # base64 数据
'data': f.get('content', '')
})
# 记录服务器路径(用于历史记录)
if f.get('serverPath'):
image_paths.append({
'name': f['name'],
'type': f['type'],
'url': f['serverPath'] # 服务器文件路径
'url': f['serverPath']
})
# 不添加文件名文本,图片信息保存在 extra_data 中
elif f.get('content'):
# 文本文件:直接添加内容,不带文件名前缀
text_contents.append(f['content'][:3000])
if len(f['content']) > 3000:
text_contents[-1] += "...(内容过长已截断)"
# 如果有文本文件内容,追加到消息后面
if text_contents:
for content in text_contents:
message += f"\n\n{content}"
# 保存图片和文件信息到 extra_data(用于历史记录)
# 保存文件信息到 extra_data
extra_data_for_msg = None
if image_paths:
# 图片保存服务器路径URL历史记录可以显示
extra_data_for_msg = {
'images': image_paths,
'files': [{'name': f['name'], 'type': f['type']} for f in files if not f['type'].startswith('image/')]
}
elif image_contents:
# 没有服务器路径但有问题(可能上传失败)
extra_data_for_msg = {
'images': [{'name': i['name'], 'type': i['type']} for i in image_contents],
'files': [{'name': f['name'], 'type': f['type']} for f in files if not f['type'].startswith('image/')]
@@ -896,8 +890,9 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
# 1. 获取Agent配置
agent_config = agent_service.get_agent_config(current_agent_id)
agent_tools = agent_config.get('agent', {}).get('tools', [])
supports_function_calling = agent_config.get('provider', {}).get('supports_function_calling', False)
# 2. 获取或创建会话(先有 conversation_id)
# 2. 获取或创建会话
if conversation_id:
conversation = conv_service.get_conversation(conversation_id)
else:
@@ -908,12 +903,12 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
"conversation_id": conversation_id
})
# 3. 广播用户消息(前端立即看到)
# 3. 广播用户消息
await manager.send_to_user(MAIN_USER_ID, {
"type": "user_message",
"conversation_id": conversation_id,
"message": {
"id": None, # 临时,后面会保存
"id": None,
"role": "user",
"content": message,
"source": "web",
@@ -921,118 +916,45 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
}
})
# 4. 执行搜索并发送搜索结果
search_context = None
search_results_for_client = None # 用于发送给前端和保存
logger.info(f"检查搜索条件: agent_tools={agent_tools}, disabled_tools={disabled_tools}")
if 'search' in agent_tools and 'search' not in disabled_tools:
logger.info("搜索条件满足,开始执行搜索")
tool_service = ToolService(db)
search_tool = tool_service.get_default_tool('search')
logger.info(f"获取到搜索工具: {search_tool.name if search_tool else 'None'}")
if search_tool and search_tool.config.get('api_key'):
import httpx
import time
start_time = time.time()
try:
logger.info(f"执行搜索: query={message}")
tavily_url = "https://api.tavily.com/search"
config = search_tool.config
payload = {
"api_key": config.get('api_key'),
"query": message,
"max_results": config.get('max_results', 5),
"search_depth": config.get('search_depth', 'basic')
}
with httpx.Client(timeout=30) as client:
resp = client.post(tavily_url, json=payload)
search_result = resp.json()
duration_ms = int((time.time() - start_time) * 1000)
if search_result.get("results"):
# 构建搜索上下文给LLM
max_for_llm = config.get('max_results', 5)
search_context = "\n\n【搜索结果】\n"
for i, r in enumerate(search_result["results"][:max_for_llm], 1):
search_context += f"{i}. {r.get('title', 'N/A')}\n {r.get('content', r.get('snippet', 'N/A'))[:200]}\n 来源: {r.get('url', 'N/A')}\n"
logger.info(f"搜索完成: {len(search_result['results'])} 条结果,使用 {min(len(search_result['results']), max_for_llm)}")
# 发送搜索结果给前端(按配置的数量)
max_display = config.get('max_results', 5)
search_results_for_client = [
{
"title": r.get('title', 'N/A'),
"snippet": r.get('content', r.get('snippet', ''))[:150],
"url": r.get('url', 'N/A')
}
for r in search_result["results"][:max_display]
]
await websocket.send_json({
"type": "search_results",
"conversation_id": conversation_id,
"results": search_results_for_client,
"query": message
})
# 更新统计和日志
tool_service.increment_stats(search_tool.id, True)
tool_service.log_usage({
'tool_id': search_tool.id,
'tool_type': 'search',
'query': message,
'success': True,
'result_summary': f'{len(search_result["results"])} results',
'conversation_id': conversation_id,
'agent_id': current_agent_id,
'duration_ms': duration_ms
})
except Exception as e:
duration_ms = int((time.time() - start_time) * 1000)
logger.error(f"搜索失败: {e}")
tool_service.increment_stats(search_tool.id, False)
tool_service.log_usage({
'tool_id': search_tool.id,
'tool_type': 'search',
'query': message,
'success': False,
'error_message': str(e),
'conversation_id': conversation_id,
'duration_ms': duration_ms
})
# 5. 保存用户消息到数据库
extra_data_to_save = None
if search_results_for_client:
extra_data_to_save = {'search_results': search_results_for_client, 'search_query': message}
if extra_data_for_msg:
if extra_data_to_save:
extra_data_to_save.update(extra_data_for_msg)
else:
extra_data_to_save = extra_data_for_msg
# 4. 保存用户消息
user_msg = conv_service.add_message(
conversation_id=conversation.id,
role='user',
content=message,
source='web',
extra_data=extra_data_to_save
extra_data=extra_data_for_msg
)
# 6. 获取对话历史(包含刚保存的用户消息)
# 5. 获取对话历史
history = conv_service.get_conversation_history(conversation_id, limit=agent_config['agent'].get('max_history', 20))
# 7. 如果有搜索结果,添加到消息中
if search_context:
modified_system_prompt = agent_config['agent'].get('system_prompt', '') + "\n\n如果提供了搜索结果,请基于搜索结果回答用户问题,并注明信息来源。"
agent_config['agent']['system_prompt'] = modified_system_prompt
history.append({"role": "system", "content": f"以下是搜索到的相关信息,请参考这些内容回答用户问题:{search_context}"})
# 6. 构建工具 schemaFunction Calling
tools_schema = []
if supports_function_calling and agent_tools:
# 搜索工具
if 'search' in agent_tools:
tool_service = ToolService(db)
search_tool = tool_service.get_default_tool('search')
if search_tool and search_tool.config.get('api_key'):
tools_schema.append({
"type": "function",
"function": {
"name": "web_search",
"description": "搜索互联网获取实时信息、新闻、数据等。当用户询问需要最新信息的问题时使用此工具。",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "搜索关键词或问题"
}
},
"required": ["query"]
}
}
})
# 8. 调用LLM返回回复
# 7. 调用LLMFunction Calling模式
if not agent_config or not agent_config.get('provider'):
await websocket.send_json({
"type": "error",
@@ -1041,17 +963,184 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
continue
try:
response, thinking_content = await llm_service.chat(
messages=history,
provider_config=agent_config['provider'],
agent_config=agent_config['agent'],
enable_thinking=enable_thinking,
images=image_contents # 传递图片数据给多模态模型
)
response = None
thinking_content = None
tool_calls_record = []
# 第一阶段让LLM决定是否调用工具
if tools_schema:
response, thinking_content, tool_calls = await llm_service.chat_with_tools(
messages=history,
provider_config=agent_config['provider'],
agent_config=agent_config['agent'],
tools=tools_schema,
enable_thinking=enable_thinking,
images=image_contents
)
# 如果LLM请求调用工具
if tool_calls:
logger.info(f"LLM请求调用工具: {tool_calls}")
# 发送工具调用通知给前端
await websocket.send_json({
"type": "tool_calls",
"conversation_id": conversation_id,
"tool_calls": [
{"name": tc['name'], "arguments": tc['arguments']}
for tc in tool_calls
]
})
# 执行工具调用
tool_results = []
tool_service = ToolService(db)
search_tool = tool_service.get_default_tool('search')
for tc in tool_calls:
if tc['name'] == 'web_search':
query = tc['arguments'].get('query', message)
logger.info(f"执行搜索: query={query}")
import httpx
import time
start_time = time.time()
try:
tavily_url = "https://api.tavily.com/search"
config = search_tool.config
payload = {
"api_key": config.get('api_key'),
"query": query,
"max_results": config.get('max_results', 5),
"search_depth": config.get('search_depth', 'basic')
}
with httpx.Client(timeout=30) as client:
resp = client.post(tavily_url, json=payload)
search_result = resp.json()
duration_ms = int((time.time() - start_time) * 1000)
if search_result.get("results"):
# 构建搜索结果
search_content = []
for i, r in enumerate(search_result["results"][:5], 1):
search_content.append({
"title": r.get('title', 'N/A'),
"content": r.get('content', r.get('snippet', ''))[:300],
"url": r.get('url', 'N/A')
})
tool_results.append({
"tool_call_id": tc['id'],
"content": json.dumps(search_content)
})
# 发送搜索结果给前端
await websocket.send_json({
"type": "search_results",
"conversation_id": conversation_id,
"results": [
{"title": r.get('title'), "snippet": r.get('content', '')[:150], "url": r.get('url')}
for r in search_result["results"][:5]
],
"query": query
})
# 记录日志
tool_service.increment_stats(search_tool.id, True)
tool_service.log_usage({
'tool_id': search_tool.id,
'tool_type': 'search',
'query': query,
'success': True,
'result_summary': f'{len(search_result["results"])} results',
'conversation_id': conversation_id,
'agent_id': current_agent_id,
'duration_ms': duration_ms
})
tool_calls_record.append({
"name": "web_search",
"query": query,
"results_count": len(search_result["results"])
})
except Exception as e:
logger.error(f"搜索失败: {e}")
duration_ms = int((time.time() - start_time) * 1000)
tool_service.increment_stats(search_tool.id, False)
tool_service.log_usage({
'tool_id': search_tool.id,
'tool_type': 'search',
'query': query,
'success': False,
'error_message': str(e),
'conversation_id': conversation_id,
'duration_ms': duration_ms
})
tool_results.append({
"tool_call_id": tc['id'],
"content": json.dumps({"error": str(e)})
})
# 将工具调用消息添加到历史
# 注意:这里需要将 assistant 的 tool_calls 消息添加到历史
# 但我们用的是简化的历史格式,需要重新构建
# 第二阶段将工具结果返回给LLM
if tool_results:
# 重新获取完整历史(包含工具调用)
history_with_tools = history.copy()
# 添加 assistant 的 tool_calls 消息
history_with_tools.append({
"role": "assistant",
"content": None,
"tool_calls": [
{
"id": tc['id'],
"type": "function",
"function": {
"name": tc['name'],
"arguments": json.dumps(tc['arguments'])
}
}
for tc in tool_calls
]
})
# 添加工具结果
for tr in tool_results:
history_with_tools.append({
"role": "tool",
"tool_call_id": tr['tool_call_id'],
"content": tr['content']
})
response, thinking_content = await llm_service.chat_with_tool_results(
messages=history_with_tools,
provider_config=agent_config['provider'],
agent_config=agent_config['agent'],
enable_thinking=enable_thinking
)
# 如果不支持 Function Calling 或没有工具,直接调用普通 chat
if response is None:
response, thinking_content = await llm_service.chat(
messages=history,
provider_config=agent_config['provider'],
agent_config=agent_config['agent'],
enable_thinking=enable_thinking,
images=image_contents
)
logger.info(f"LLM响应: response长度={len(response)}, thinking长度={len(thinking_content) if thinking_content else 0}")
# 保存AI回复
extra_data_to_save = None
if tool_calls_record:
extra_data_to_save = {'tool_calls': tool_calls_record}
assistant_msg = conv_service.add_message(
conversation_id=conversation.id,
role='assistant',
@@ -1059,7 +1148,8 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
source='web',
thinking_content=thinking_content if thinking_content else None,
agent_id=current_agent_id,
model_used=agent_config['provider'].get('default_model')
model_used=agent_config['provider'].get('default_model'),
extra_data=extra_data_to_save
)
# 发送AI回复
@@ -1074,6 +1164,7 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
"source": "web",
"agent_id": current_agent_id,
"agent_name": agent_config['agent'].get('display_name'),
"tool_calls": tool_calls_record, # v3.0: 返回工具调用记录
"created_at": assistant_msg.created_at.isoformat()
}
})

View File

@@ -36,6 +36,9 @@ class LLMProvider(Base):
supports_vision = Column(Boolean, default=False) # 是否支持图片理解(多模态)
vision_model = Column(String(100), nullable=True) # 视觉模型名(如与默认模型不同)
# Function Calling 支持
supports_function_calling = Column(Boolean, default=False) # 是否支持函数调用(工具自主调用)
# 配额和限制
max_tokens = Column(Integer, default=4096)
temperature = Column(Float, default=0.7)

View File

@@ -137,6 +137,9 @@ class AgentService:
'api_key': provider.api_key if provider else None,
'supports_thinking': provider.supports_thinking if provider else False,
'thinking_model': provider.thinking_model if provider else None,
'supports_vision': provider.supports_vision if provider else False,
'vision_model': provider.vision_model if provider else None,
'supports_function_calling': provider.supports_function_calling if provider else False,
'default_model': provider.default_model if provider else 'auto',
'max_tokens': provider.max_tokens if provider else 4096,
'temperature': provider.temperature if provider else 0.7,

View File

@@ -382,5 +382,261 @@ class LLMService:
yield {"type": "content", "text": buffer}
async def chat_with_tools(
    self,
    messages: List[Dict],
    provider_config: dict,
    agent_config: dict,
    tools: List[Dict] = None,
    enable_thinking: bool = True,
    images: List[Dict] = None
) -> Tuple[str, Optional[str], Optional[List[Dict]]]:
    """
    Function-Calling chat: ask the LLM whether it wants to invoke a tool.

    Args:
        messages: conversation history (OpenAI chat format)
        provider_config: LLM provider config (api_base, api_key, default_model,
            supports_function_calling, max_tokens, temperature)
        agent_config: agent config (system_prompt, model_override,
            temperature_override)
        tools: tool definitions, OpenAI Function Calling format
        enable_thinking: forwarded only to the plain-chat fallback; the FC
            request itself does not use it
        images: image payloads for multimodal models ({'name','type','data'},
            where 'data' is a data-URL / base64 string)

    Returns:
        (content, thinking, tool_calls):
            - content: assistant reply text, or None when tool calls were
              requested
            - thinking: reasoning text (always None on the FC path here)
            - tool_calls: list of {'id','name','arguments'} the caller must
              execute (then feed back via chat_with_tool_results), or None

    Raises:
        httpx.HTTPStatusError: non-2xx response from the provider.
        ValueError: response body lacks a 'choices' entry.
    """
    api_base = provider_config.get('api_base')
    api_key = provider_config.get('api_key')
    model = agent_config.get('model_override') or provider_config.get('default_model', 'auto')
    supports_function_calling = provider_config.get('supports_function_calling', False)
    max_tokens = provider_config.get('max_tokens', 4096)
    # Bug fix: the previous `override or default` expression discarded a
    # legitimate temperature override of 0 (deterministic sampling). Only
    # fall back to the provider default when no override is set at all.
    temperature = agent_config.get('temperature_override')
    if temperature is None:
        temperature = provider_config.get('temperature', 0.7)

    # No Function Calling support (or no tools): delegate to plain chat.
    if not supports_function_calling or not tools:
        response, thinking = await self.chat(messages, provider_config, agent_config, enable_thinking, images)
        return response, thinking, None

    # Build the outgoing message list; prepend the system prompt if absent.
    final_messages = messages.copy()
    system_prompt = agent_config.get('system_prompt', '你是一个有用的AI助手。')
    if final_messages and final_messages[0]['role'] != 'system':
        final_messages.insert(0, {"role": "system", "content": system_prompt})

    # Multimodal: attach the images to the most recent user message.
    if images:
        for i in range(len(final_messages) - 1, -1, -1):
            if final_messages[i]['role'] == 'user':
                original_text = final_messages[i]['content']
                multimodal_content = [{"type": "text", "text": original_text if original_text else "请描述这张图片"}]
                for img in images:
                    multimodal_content.append({
                        "type": "image_url",
                        "image_url": {"url": img['data']}
                    })
                final_messages[i]['content'] = multimodal_content
                break

    # First-stage call: let the LLM decide whether to invoke a tool.
    url = f"{api_base.rstrip('/')}/chat/completions"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": model,
        "messages": final_messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "tools": tools
    }
    logger.info(f"Function Calling调用: url={url}, model={model}, tools={len(tools)}")

    tool_calls_record = []
    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(url, headers=headers, json=payload)
            if response.status_code != 200:
                logger.error(f"API返回错误: status={response.status_code}, body={response.text[:500]}")
            response.raise_for_status()
            data = response.json()
            if 'choices' not in data or len(data['choices']) == 0:
                raise ValueError("API响应格式错误缺少choices")
            message = data['choices'][0]['message']

            # Tool-call branch: hand the parsed calls back to the caller.
            if message.get('tool_calls'):
                logger.info(f"LLM请求调用工具: {len(message['tool_calls'])}")
                # Keep the assistant tool_calls stub in the history so a
                # follow-up request can reference it.
                final_messages.append({
                    "role": "assistant",
                    "content": None,
                    "tool_calls": message['tool_calls']
                })
                for tc in message['tool_calls']:
                    # Robustness: some providers return an empty string for
                    # zero-argument calls, which json.loads would reject.
                    raw_args = tc['function'].get('arguments') or '{}'
                    tool_calls_record.append({
                        "id": tc['id'],
                        "name": tc['function']['name'],
                        "arguments": json.loads(raw_args)
                    })
                return None, None, tool_calls_record

            # No tool call: return the assistant content directly.
            # Robustness: some APIs return content=null; normalize to ''.
            content = message.get('content') or ''
            thinking_content = None  # thinking extraction not implemented on this path
            return content, thinking_content, None
    except httpx.HTTPStatusError as e:
        logger.error(f"HTTP错误: {e.response.status_code}, {e.response.text}")
        raise
    except Exception as e:
        logger.error(f"Function Calling调用异常: {type(e).__name__}: {e}")
        raise
async def chat_with_tool_results(
    self,
    messages: List[Dict],
    provider_config: dict,
    agent_config: dict,
    enable_thinking: bool = True
) -> Tuple[str, Optional[str]]:
    """
    Second-stage call: send the history (including tool calls/results) back
    to the LLM to produce the final answer.

    Note: platforms such as SiliconFlow reject the standard `tool` message
    role, so tool results are converted into one plain user message before
    the request is sent.

    Args:
        messages: full history already containing the assistant tool_calls
            message and the role="tool" result messages
        provider_config: LLM provider config
        agent_config: agent config
        enable_thinking: accepted for signature parity; not used here

    Returns:
        (content, thinking): final reply text (pseudo tool-call markup
        stripped) and thinking, which is always None on this path.
    """
    import re

    api_base = provider_config.get('api_base')
    api_key = provider_config.get('api_key')
    model = agent_config.get('model_override') or provider_config.get('default_model', 'auto')
    max_tokens = provider_config.get('max_tokens', 4096)
    # Bug fix: `override or default` discarded a legitimate override of 0.
    temperature = agent_config.get('temperature_override')
    if temperature is None:
        temperature = provider_config.get('temperature', 0.7)

    # Convert tool-protocol messages into plain chat messages, since many
    # OpenAI-compatible platforms (e.g. SiliconFlow) reject role="tool".
    converted_messages = []
    tool_results_content = []
    for msg in messages:
        role = msg.get('role')
        if role in ('system', 'user'):
            converted_messages.append(msg)
        elif role == 'assistant':
            # Drop the assistant tool_calls stub; unsupported platforms
            # would reject it. Ordinary assistant replies are kept.
            if not msg.get('tool_calls'):
                converted_messages.append(msg)
        elif role == 'tool':
            # Collect tool outputs; merged into one user message below.
            tool_results_content.append(msg.get('content', ''))

    # Append the collected tool results as a single user message with an
    # explicit instruction to answer from them directly.
    if tool_results_content:
        combined_results = "\n\n".join([
            f"【搜索结果 {i+1}\n{result}"
            for i, result in enumerate(tool_results_content)
        ])
        converted_messages.append({
            "role": "user",
            "content": f"以下是搜索工具返回的结果:\n\n{combined_results}\n\n请根据以上搜索结果回答我之前的问题,不要再说\"让我搜索一下\",直接给出回答。如果搜索结果不足以回答,请说明。"
        })

    final_messages = converted_messages

    url = f"{api_base.rstrip('/')}/chat/completions"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": model,
        "messages": final_messages,
        "temperature": temperature,
        "max_tokens": max_tokens
    }
    logger.info(f"工具结果返回LLM: url={url}, model={model}, 消息数={len(final_messages)}")
    # Debug aid: log a short preview of every outgoing message.
    for i, msg in enumerate(final_messages):
        role = msg.get('role')
        content_preview = str(msg.get('content', ''))[:100] if msg.get('content') else 'None'
        if role == 'tool':
            logger.info(f"消息[{i}] role={role}, tool_call_id={msg.get('tool_call_id')}, content长度={len(msg.get('content',''))}")
        elif role == 'assistant' and msg.get('tool_calls'):
            logger.info(f"消息[{i}] role={role}, tool_calls={len(msg['tool_calls'])}")
        else:
            logger.info(f"消息[{i}] role={role}, content={content_preview}...")

    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(url, headers=headers, json=payload)
            if response.status_code != 200:
                logger.error(f"API返回错误: status={response.status_code}, body={response.text[:500]}")
            response.raise_for_status()
            data = response.json()
            # Bug fix: content may come back as null, which would make the
            # re.sub calls below raise TypeError. Normalize to ''.
            content = data['choices'][0]['message']['content'] or ''

            # Filter pseudo tool-call markup that some models (e.g. Kimi)
            # leak into the final answer.
            tool_pattern = r'<\|tool_calls_section_begin\|>.*?<\|tool_calls_section_end\|>'
            content = re.sub(tool_pattern, '', content, flags=re.DOTALL)
            tool_call_pattern = r'<\|tool_call_begin\|>.*?<\|tool_call_end\|>'
            content = re.sub(tool_call_pattern, '', content, flags=re.DOTALL)
            # Strip any unmatched leftover markers the paired patterns missed.
            for marker in (
                '<|tool_calls_section_begin|>', '<|tool_calls_section_end|>',
                '<|tool_call_begin|>', '<|tool_call_end|>',
                '<|tool_call_argument_begin|>', '<|tool_call_argument_end|>',
            ):
                content = content.replace(marker, '')
            # Collapse runs of blank lines left behind by the filtering.
            content = re.sub(r'\n{3,}', '\n\n', content).strip()
            return content, None
    except Exception as e:
        logger.error(f"工具结果调用异常: {e}")
        raise
# Global singleton: the shared LLMService instance used by the app layer.
llm_service = LLMService()

View File

@@ -58,8 +58,8 @@
</div>
<div class="card-body">
<table class="table">
<thead><tr><th>名称</th><th>API地址</th><th>默认模型</th><th>思考</th><th>视觉</th><th>状态</th><th>操作</th></tr></thead>
<tbody id="providers-list"><tr><td colspan="7" class="text-center">加载中...</td></tr></tbody>
<thead><tr><th>名称</th><th>API地址</th><th>默认模型</th><th>思考</th><th>视觉</th><th>FC</th><th>状态</th><th>操作</th></tr></thead>
<tbody id="providers-list"><tr><td colspan="8" class="text-center">加载中...</td></tr></tbody>
</table>
</div>
</div>
@@ -164,6 +164,8 @@
<div class="thinking-config"><div class="row"><div class="col-md-6 form-check"><input type="checkbox" class="form-check-input" id="provider-supports-thinking"><label class="form-check-label">支持原生思考</label></div><div class="col-md-6"><label class="form-label">思考模型名</label><input type="text" class="form-control" id="provider-thinking-model"></div></div></div>
<hr><h6>视觉能力</h6>
<div class="thinking-config"><div class="row"><div class="col-md-6 form-check"><input type="checkbox" class="form-check-input" id="provider-supports-vision"><label class="form-check-label">支持图片理解</label></div><div class="col-md-6"><label class="form-label">视觉模型名</label><input type="text" class="form-control" id="provider-vision-model" placeholder="留空则使用默认模型"></div></div><small class="text-muted mt-2 d-block">启用后可上传图片让AI识别分析内容</small></div>
<hr><h6>Function Calling</h6>
<div class="thinking-config"><div class="form-check"><input type="checkbox" class="form-check-input" id="provider-supports-function-calling"><label class="form-check-label">支持函数调用</label></div><small class="text-muted mt-2 d-block">启用后LLM可自主决定何时调用工具更智能</small></div>
<div class="mt-3"><button type="button" class="btn btn-outline-primary" onclick="fetchProviderModels()"><i class="ri-refresh-line"></i> 获取模型</button><button type="button" class="btn btn-outline-secondary" onclick="testProviderConnection()"><i class="ri-link"></i> 测试连接</button></div>
<div class="mt-2" id="provider-models-preview"></div><div class="mt-2" id="provider-test-result"></div>
</form></div>
@@ -331,6 +333,7 @@
<td><strong>${p.name}</strong></td><td><small>${p.api_base||'-'}</small></td><td>${p.default_model||'auto'}</td>
<td>${p.supports_thinking?'<span class="badge bg-success">支持</span>':'<span class="badge bg-secondary">不支持</span>'}</td>
<td>${p.supports_vision?'<span class="badge bg-info">支持</span>':'<span class="badge bg-secondary">不支持</span>'}</td>
<td>${p.supports_function_calling?'<span class="badge bg-primary">支持</span>':'<span class="badge bg-secondary">不支持</span>'}</td>
<td>${p.is_active?'<span class="badge bg-success">启用</span>':'<span class="badge bg-secondary">禁用</span>'}</td>
<td><button class="btn btn-sm btn-outline-primary" onclick="editProvider(${p.id})"><i class="ri-edit-line"></i></button>
<button class="btn btn-sm btn-outline-danger" onclick="deleteProvider(${p.id},'${p.name}')"><i class="ri-delete-bin-line"></i></button></td>
@@ -349,6 +352,7 @@
document.getElementById('provider-active').checked = true;
document.getElementById('provider-supports-thinking').checked = false;
document.getElementById('provider-supports-vision').checked = false;
document.getElementById('provider-supports-function-calling').checked = false;
document.getElementById('provider-models-preview').innerHTML = '';
document.getElementById('provider-test-result').innerHTML = '';
new bootstrap.Modal(document.getElementById('providerModal')).show();
@@ -371,6 +375,7 @@
document.getElementById('provider-thinking-model').value = p.thinking_model || '';
document.getElementById('provider-supports-vision').checked = p.supports_vision;
document.getElementById('provider-vision-model').value = p.vision_model || '';
document.getElementById('provider-supports-function-calling').checked = p.supports_function_calling;
new bootstrap.Modal(document.getElementById('providerModal')).show();
}
@@ -389,7 +394,8 @@
supports_thinking: document.getElementById('provider-supports-thinking').checked,
thinking_model: document.getElementById('provider-thinking-model').value,
supports_vision: document.getElementById('provider-supports-vision').checked,
vision_model: document.getElementById('provider-vision-model').value
vision_model: document.getElementById('provider-vision-model').value,
supports_function_calling: document.getElementById('provider-supports-function-calling').checked
};
const res = await fetch(id ? `/api/v2/providers/${id}` : '/api/v2/providers', { method: id ? 'PUT' : 'POST', headers: {'Content-Type':'application/json'}, body: JSON.stringify(data) });
const result = await res.json();

View File

@@ -131,38 +131,26 @@
/* 快捷语句 - 横向扁平 */
.quick-phrases-bar { display: flex; align-items: center; gap: 8px; margin-top: 12px; position: relative; }
/* 工具折叠面板 */
.tools-collapsible { position: relative; margin-bottom: 8px; }
.tools-toggle-btn {
padding: 8px 12px; background: #f5f5f5; border: 1px solid #e0e0e0; border-radius: 8px;
cursor: pointer; display: inline-flex; align-items: center; gap: 4px; font-size: 13px; color: #666;
transition: all 0.2s;
}
.tools-toggle-btn:hover { background: #e8e8e8; border-color: #10a37f; color: #10a37f; }
.tools-toggle-btn.active { background: #e8f5e9; border-color: #10a37f; color: #10a37f; }
.tools-badge {
background: #10a37f; color: white; font-size: 11px; padding: 1px 5px; border-radius: 10px;
min-width: 16px; text-align: center;
}
.tools-panel {
display: none; position: absolute; bottom: 100%; left: 0; margin-bottom: 8px;
background: white; border: 1px solid #e0e0e0; border-radius: 12px;
box-shadow: 0 4px 20px rgba(0,0,0,0.15); min-width: 200px; z-index: 100;
}
.tools-panel.show { display: block; }
.tools-panel-header {
display: flex; justify-content: space-between; align-items: center;
padding: 12px 16px; border-bottom: 1px solid #eee; font-weight: 500; color: #333;
}
.tools-panel-header button { background: none; border: none; color: #999; cursor: pointer; padding: 4px; }
.tools-panel-header button:hover { color: #666; }
.tools-panel-content { padding: 12px 16px; max-height: 300px; overflow-y: auto; }
.tool-item { display: flex; align-items: center; gap: 8px; padding: 8px 0; }
.tool-item input { width: 16px; height: 16px; }
.tool-item label { cursor: pointer; font-size: 14px; color: #333; display: flex; align-items: center; gap: 6px; }
.tool-item label i { color: #10a37f; }
.tool-item.disabled { opacity: 0.5; }
.tool-item.disabled label { color: #999; cursor: not-allowed; }
/* 工具调用记录显示 */
.tool-call-record { margin-top: 8px; padding: 8px 12px; background: #e8f5e9; border-radius: 8px; font-size: 12px; color: #10a37f; }
.tool-call-record i { margin-right: 4px; }
/* Agent信息侧边栏 */
.agent-info-sidebar { width: 200px; background: #f8f9fa; border-right: 1px solid #e0e0e0; padding: 16px; display: flex; flex-direction: column; }
.agent-info-header { display: flex; align-items: center; gap: 8px; margin-bottom: 12px; }
.agent-avatar { width: 48px; height: 48px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 12px; display: flex; align-items: center; justify-content: center; color: white; font-size: 24px; }
.agent-name-area { flex: 1; }
.agent-name-area h3 { font-size: 16px; margin: 0; color: #333; }
.agent-name-area small { color: #999; font-size: 12px; }
.agent-info-section { margin-top: 16px; }
.agent-info-section h4 { font-size: 13px; color: #666; margin: 0 0 8px 0; font-weight: 500; }
.agent-info-section p { font-size: 12px; color: #333; line-height: 1.5; margin: 0; }
.agent-capabilities { display: flex; flex-wrap: wrap; gap: 6px; margin-top: 8px; }
.capability-tag { padding: 4px 8px; background: #e8f5e9; border-radius: 6px; font-size: 11px; color: #10a37f; }
.capability-tag.disabled { background: #f5f5f5; color: #999; }
.agent-model-info { margin-top: 12px; padding: 8px; background: white; border-radius: 8px; border: 1px solid #e0e0e0; }
.agent-model-info label { font-size: 11px; color: #999; }
.agent-model-info span { font-size: 12px; color: #333; display: block; margin-top: 2px; }
.add-phrase-btn { padding: 6px 10px; background: #f0f0f0; border: 1px solid #ddd; border-radius: 6px; cursor: pointer; font-size: 12px; color: #666; white-space: nowrap; flex-shrink: 0; }
.add-phrase-btn:hover { background: #e8e8e8; }
.phrase-list-wrapper { flex: 1; overflow-x: auto; overflow-y: hidden; scrollbar-width: thin; }
@@ -211,7 +199,7 @@
<div class="main-content">
<div class="chat-header">
<h1>AI 对话 v2.0</h1>
<h1>AI 对话 v3.0</h1>
<div class="header-controls">
<div class="agent-selector">
<select id="agentSelect" onchange="switchAgent()"><option value="">加载中...</option></select>
@@ -220,35 +208,49 @@
</div>
</div>
<div class="messages-container" id="messagesContainer">
<div class="welcome"><h2>👋 开始对话</h2><p>选择Agent开始聊天</p></div>
<!-- 对话区域左侧Agent信息 + 右侧消息 -->
<div class="chat-area" style="display:flex;flex:1;overflow:hidden;">
<!-- Agent信息侧边栏 -->
<div class="agent-info-sidebar" id="agentInfoSidebar">
<div class="agent-info-header">
<div class="agent-avatar" id="agentAvatar">🤖</div>
<div class="agent-name-area">
<h3 id="agentDisplayName">加载中...</h3>
<small id="agentName">agent-name</small>
</div>
</div>
<div class="agent-info-section">
<h4>简介</h4>
<p id="agentDescription">-</p>
</div>
<div class="agent-info-section">
<h4>能力</h4>
<div class="agent-capabilities" id="agentCapabilities">
<!-- 动态渲染 -->
</div>
</div>
<div class="agent-model-info">
<label>模型</label>
<span id="agentModelInfo">-</span>
</div>
</div>
<!-- 消息容器 -->
<div class="messages-container" id="messagesContainer" style="flex:1;">
<div class="welcome"><h2>👋 开始对话</h2><p>选择Agent开始聊天</p></div>
</div>
</div>
<div class="input-container">
<div class="input-area">
<!-- 工具选择折叠按钮 -->
<div class="tools-collapsible">
<button class="tools-toggle-btn" onclick="toggleToolsPanel()" title="工具设置">
<i class="ri-tools-line"></i>
<span class="tools-badge" id="toolsBadge" style="display:none;">0</span>
</button>
<div class="tools-panel" id="toolsPanel">
<div class="tools-panel-header">
<span>工具设置</span>
<button onclick="toggleToolsPanel()"><i class="ri-close-line"></i></button>
</div>
<div class="tools-panel-content" id="toolsPanelContent"></div>
</div>
<!-- 工具警告提示 -->
<div id="tool-warning-tip" style="display:none;margin-top:8px;padding:8px 12px;background:#fff3cd;border:1px solid #ffc107;border-radius:6px;font-size:13px;color:#856404;"></div>
</div>
<div class="input-row">
<input type="file" id="fileInput" multiple accept="image/*,.pdf,.txt,.md,.json,.csv,.doc,.docx" style="display:none" onchange="handleFileUpload(event)">
<button class="upload-btn" onclick="document.getElementById('fileInput').click()" title="上传文件"><i class="ri-attachment-2"></i></button>
<textarea id="messageInput" placeholder="输入消息..." rows="1"></textarea>
<button class="send-btn" id="sendBtn" onclick="sendMessage()"><i class="ri-send-plane-fill"></i></button>
</div>
<!-- 工具警告提示 -->
<div id="tool-warning-tip" style="display:none;margin-top:4px;padding:6px 10px;background:#fff3cd;border:1px solid #ffc107;border-radius:6px;font-size:12px;color:#856404;"></div>
<div class="file-preview-area" id="filePreviewArea"></div>
<div class="quick-phrases-bar">
<button class="add-phrase-btn" onclick="showAddPhraseModal()"><i class="ri-add-line"></i> 添加</button>
@@ -324,7 +326,6 @@
document.addEventListener('DOMContentLoaded', () => {
loadProviders(); // 加载大模型池
loadToolsData(); // 加载工具列表
loadAgents();
loadQuickPhrases();
connectWebSocket();
@@ -338,8 +339,8 @@
const res = await fetch('/api/v2/providers');
const data = await res.json();
providers = data.providers || [];
// 加载后检查工具支持如果agents已加载
if (agents.length > 0) showToolWarning();
// 加载后更新Agent信息侧边栏如果agents已加载
if (agents.length > 0) renderAgentInfoSidebar();
} catch (e) { console.error('加载Provider失败:', e); }
}
@@ -359,117 +360,6 @@
return agentTools.includes(toolType);
}
// Collect the display labels of tools the user has enabled but the current
// Agent does not support. Only enabled (non-disabled) checkboxes are checked;
// a checkbox with no label falls back to its tool-type string.
function getUnsupportedTools() {
    const enabledBoxes = Array.from(
        document.querySelectorAll('.tool-checkbox:not([disabled])')
    );
    return enabledBoxes
        .filter(cb => cb.checked && !checkAgentToolSupport(cb.dataset.toolType))
        .map(cb =>
            cb.closest('.tool-item')?.querySelector('label')?.textContent?.trim()
            || cb.dataset.toolType
        );
}
// Render the tool-selection panel from the system tool list.
// Tools not supported by the current Agent are rendered unchecked and disabled;
// supported tools default to checked.
function renderToolToggles() {
    const container = document.getElementById('toolsPanelContent');
    if (!container || toolsData.length === 0) return;
    // Tool types supported by the currently selected Agent
    const agent = agents.find(a => a.id === currentAgentId);
    const agentTools = agent?.tools || [];
    // Build one checkbox row per active system tool
    let html = '';
    toolsData.filter(t => t.is_active).forEach(t => {
        const toolType = t.tool_type || 'unknown';
        const isSupported = agentTools.includes(toolType);
        const icon = getToolIconFrontend(toolType);
        const disabledClass = isSupported ? '' : 'disabled';
        // NOTE(review): t.name is interpolated into innerHTML unescaped —
        // presumably tool names are admin-configured; confirm before allowing
        // user-supplied names.
        html += `<div class="tool-item ${disabledClass}">
            <input type="checkbox" class="tool-checkbox" id="tool-${toolType}" data-tool-type="${toolType}" ${isSupported ? 'checked' : ''} ${!isSupported ? 'disabled' : ''} onchange="showToolWarning()">
            <label for="tool-${toolType}"><i class="${icon}"></i> ${t.name}</label>
        </div>`;
    });
    container.innerHTML = html || '<div class="text-muted" style="padding:8px;">暂无可用工具</div>';
    // Keep the selected-count badge in sync with the rendered checkboxes
    updateToolsBadge();
}
// Show the number of currently selected tools on the toggle button;
// the badge is hidden entirely when no tool is selected.
function updateToolsBadge() {
    const badge = document.getElementById('toolsBadge');
    const count = document.querySelectorAll('.tool-checkbox:checked').length;
    if (count > 0) badge.textContent = count;
    badge.style.display = count > 0 ? 'inline' : 'none';
}
// Toggle the tools panel open/closed and mirror the state on its button.
function toggleToolsPanel() {
    document.getElementById('toolsPanel').classList.toggle('show');
    document.querySelector('.tools-toggle-btn').classList.toggle('active');
}
// Map a tool type to its Remix Icon CSS class; unknown types fall back
// to the generic tools icon.
function getToolIconFrontend(toolType) {
    switch (toolType) {
        case 'search':     return 'ri-search-line';
        case 'calculator': return 'ri-calculator-line';
        case 'code':       return 'ri-code-line';
        case 'image':      return 'ri-image-line';
        case 'web':        return 'ri-global-line';
        default:           return 'ri-tools-line';
    }
}
// System tool registry fetched from the backend; consumed by renderToolToggles().
let toolsData = [];
// Fetch the tool list from /api/v2/tools and (re)render the selection panel.
// On failure the error is logged and toolsData keeps its previous value.
async function loadToolsData() {
    try {
        const res = await fetch('/api/v2/tools');
        const data = await res.json();
        toolsData = data.tools || [];
        renderToolToggles();
    } catch (e) { console.error('加载工具列表失败:', e); }
}
// Refresh the badge count, then warn the user (and disable sending) when any
// enabled tool is not supported by the current Agent; clear the warning and
// re-enable sending otherwise.
function showToolWarning() {
    updateToolsBadge(); // keep the selected-count badge in sync
    const warningDiv = document.getElementById('tool-warning-tip');
    const unsupported = getUnsupportedTools();
    if (unsupported.length === 0) {
        warningDiv.style.display = 'none';
        document.getElementById('sendBtn').disabled = false;
        return;
    }
    const agent = agents.find(a => a.id === currentAgentId);
    const agentName = agent?.display_name || agent?.name || '当前Agent';
    warningDiv.innerHTML = `<i class="ri-alert-line"></i> <strong>${agentName}</strong> 不支持 <strong>${unsupported.join('、')}</strong> 工具请关闭或切换Agent`;
    warningDiv.style.display = 'block';
    // Block sending until the conflicting tools are turned off
    document.getElementById('sendBtn').disabled = true;
}
// 加载Agent
async function loadAgents() {
try {
@@ -479,13 +369,58 @@
const defaultAgent = agents.find(a => a.is_default) || agents[0];
if (defaultAgent) currentAgentId = defaultAgent.id;
renderAgentSelect();
// 加载后检查工具支持
showToolWarning();
// 渲染工具选择区域
renderToolToggles();
renderAgentInfoSidebar(); // 渲染Agent信息侧边栏
} catch (e) { console.error('加载Agent失败:', e); }
}
// Render the Agent info sidebar: name, avatar, description, capability tags,
// and the effective model name for the currently selected Agent.
function renderAgentInfoSidebar() {
    const agent = agents.find(a => a.id === currentAgentId);
    if (!agent) return;
    // Name fields
    document.getElementById('agentDisplayName').textContent = agent.display_name || agent.name;
    document.getElementById('agentName').textContent = agent.name;
    // Avatar: first character of the display name (emoji or initial)
    // NOTE(review): charAt(0) yields only half of a surrogate-pair emoji —
    // confirm display names are plain text or switch to [...str][0] if needed.
    const avatar = document.getElementById('agentAvatar');
    avatar.textContent = agent.display_name?.charAt(0) || agent.name?.charAt(0) || '🤖';
    // Description
    document.getElementById('agentDescription').textContent = agent.description || '暂无描述';
    // Capability tags, derived from the bound provider's feature flags
    const capabilitiesHtml = [];
    const provider = providers.find(p => p.id === agent.llm_provider_id);
    if (provider) {
        if (provider.supports_thinking) {
            capabilitiesHtml.push('<span class="capability-tag"><i class="ri-lightbulb-line"></i> 思考</span>');
        }
        if (provider.supports_vision) {
            capabilitiesHtml.push('<span class="capability-tag"><i class="ri-image-line"></i> 视觉</span>');
        }
        if (provider.supports_function_calling) {
            capabilitiesHtml.push('<span class="capability-tag"><i class="ri-tools-line"></i> 工具调用</span>');
        } else {
            capabilitiesHtml.push('<span class="capability-tag disabled"><i class="ri-tools-line"></i> 工具(手动)</span>');
        }
        // Effective model: per-agent override, else provider default, else 'auto'.
        // NOTE(review): this only runs when the provider is found, so the model
        // label keeps its previous value otherwise — confirm that is intended.
        const model = agent.model_override || provider.default_model || 'auto';
        document.getElementById('agentModelInfo').textContent = model;
    }
    // Tool-based capability tags (currently only 'search' is surfaced)
    const agentTools = agent.tools || [];
    if (agentTools.includes('search')) {
        capabilitiesHtml.push('<span class="capability-tag"><i class="ri-search-line"></i> 搜索</span>');
    }
    document.getElementById('agentCapabilities').innerHTML = capabilitiesHtml.join('') || '<span class="capability-tag disabled">基础对话</span>';
}
function renderAgentSelect() {
const select = document.getElementById('agentSelect');
select.innerHTML = agents.filter(a => a.is_active).map(a =>
@@ -505,8 +440,7 @@
if (ws?.readyState === WebSocket.OPEN) ws.send(JSON.stringify({ action: 'switch_agent', agent_id: currentAgentId }));
await createNewConversation();
showAgentSwitchNotice();
// 切换Agent后检查工具支持
showToolWarning();
renderAgentInfoSidebar(); // 更新侧边栏信息
}
}
@@ -1208,16 +1142,6 @@
return;
}
// 检查工具支持
const unsupported = getUnsupportedTools();
if (unsupported.length > 0) {
const agent = agents.find(a => a.id === currentAgentId);
const agentName = agent?.display_name || agent?.name || '当前Agent';
alert(`⚠️ ${agentName} 不支持 ${unsupported.join('、')} 工具\n\n请关闭不支持的工具或切换到支持该工具的Agent。`);
document.getElementById('sendBtn').disabled = false;
return;
}
document.getElementById('sendBtn').disabled = true;
input.value = '';
input.style.height = 'auto';
@@ -1230,13 +1154,7 @@
pendingFiles = [];
document.getElementById('filePreviewArea').innerHTML = '';
// 获取禁用的工具列表(所有系统工具 - 用户选中的工具)
const disabledTools = [];
const allToolTypes = toolsData.filter(t => t.is_active).map(t => t.tool_type);
const selectedTools = Array.from(document.querySelectorAll('.tool-checkbox:checked')).map(cb => cb.dataset.toolType);
allToolTypes.forEach(t => {
if (!selectedTools.includes(t)) disabledTools.push(t);
});
// v3.0: Function Calling模式不再需要 disabled_tools
// 发送消息(包含文件)
if (ws?.readyState === WebSocket.OPEN) {
@@ -1245,7 +1163,6 @@
message: msg,
conversation_id: currentConversationId,
agent_id: currentAgentId,
disabled_tools: disabledTools,
files: lastSentFiles || [] // 发送的文件列表
}));
}