Compare commits

...

9 Commits

Author SHA1 Message Date
ae08e01e55 fix: Kimi模型伪工具调用格式过滤
修复Kimi-K2.5模型在第二轮调用时输出伪工具调用格式的问题:
- 添加系统提示告诉模型直接根据工具结果回答
- 过滤 <|tool_calls_section_begin|> 等内部格式标记
- 清理多余空行

版本: v3.0.1
2026-04-15 09:45:08 +08:00
9048d94e33 fix: 添加详细日志诊断工具调用消息格式 2026-04-15 02:25:05 +08:00
291de733a4 fix: chat_with_tool_results不重复添加tool结果,修正消息格式 2026-04-15 01:03:10 +08:00
10f67a807a fix: get_agent_config添加supports_vision和supports_function_calling字段 2026-04-14 19:20:17 +08:00
d9ac2c78f6 feat: 对话区左侧显示Agent信息 2026-04-14 19:14:31 +08:00
4ac67b5816 feat: v3.0 Function Calling模式 - LLM自主调用工具 2026-04-14 18:39:12 +08:00
527885f3d6 fix: 工具按钮放附件右边、输入框左边 2026-04-14 17:19:52 +08:00
c21270195a feat: 工具按钮放输入框右边,面板向上弹出 2026-04-14 17:15:56 +08:00
db7d0fd586 feat: 工具选择折叠面板,不占用快捷语句区域 2026-04-14 17:11:07 +08:00
6 changed files with 572 additions and 262 deletions

View File

@@ -1,6 +1,6 @@
""" """
AI对话系统 v2.0.0 - 主应用 AI对话系统 v3.0.0 - 主应用
支持大模型池、Agent管理、渠道独立绑定、思考功能开关 支持大模型池、Agent管理、渠道独立绑定、思考功能开关、Function Calling(LLM自主调用工具)
""" """
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, HTTPException, Request from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, HTTPException, Request
from fastapi.responses import HTMLResponse, JSONResponse from fastapi.responses import HTMLResponse, JSONResponse
@@ -33,7 +33,7 @@ logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# 创建应用 # 创建应用
app = FastAPI(title="AI对话系统 v2.0", version="2.0.0") app = FastAPI(title="AI对话系统 v3.0", version="3.0.1")
# 静态文件和模板 # 静态文件和模板
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) BASE_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -122,6 +122,7 @@ async def get_providers(db: Session = Depends(get_db)):
"thinking_model": p.thinking_model, "thinking_model": p.thinking_model,
"supports_vision": p.supports_vision, "supports_vision": p.supports_vision,
"vision_model": p.vision_model, "vision_model": p.vision_model,
"supports_function_calling": p.supports_function_calling,
"max_tokens": p.max_tokens, "max_tokens": p.max_tokens,
"temperature": p.temperature, "temperature": p.temperature,
"is_active": p.is_active, "is_active": p.is_active,
@@ -832,7 +833,7 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
conversation_id = data.get("conversation_id") conversation_id = data.get("conversation_id")
enable_thinking = data.get("enable_thinking", True) enable_thinking = data.get("enable_thinking", True)
agent_id_override = data.get("agent_id") agent_id_override = data.get("agent_id")
disabled_tools = data.get("disabled_tools", []) # 禁用的工具列表 # v3.0: 移除 disabled_tools由LLM自主决定
if agent_id_override: if agent_id_override:
agent = agent_service.get_agent(agent_id_override) agent = agent_service.get_agent(agent_id_override)
@@ -846,48 +847,41 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
if not message.strip() and not files: if not message.strip() and not files:
continue continue
# 处理文件内容,添加到消息 # 处理文件内容
image_contents = [] # 图片内容(用于视觉模型) image_contents = []
text_contents = [] # 文本文件内容 text_contents = []
image_paths = [] # 图片服务器路径(用于历史记录显示) image_paths = []
if files: if files:
for f in files: for f in files:
if f.get('type') and f['type'].startswith('image/'): if f.get('type') and f['type'].startswith('image/'):
# 图片:记录 base64 数据,用于视觉模型
image_contents.append({ image_contents.append({
'name': f['name'], 'name': f['name'],
'type': f['type'], 'type': f['type'],
'data': f.get('content', '') # base64 数据 'data': f.get('content', '')
}) })
# 记录服务器路径(用于历史记录)
if f.get('serverPath'): if f.get('serverPath'):
image_paths.append({ image_paths.append({
'name': f['name'], 'name': f['name'],
'type': f['type'], 'type': f['type'],
'url': f['serverPath'] # 服务器文件路径 'url': f['serverPath']
}) })
# 不添加文件名文本,图片信息保存在 extra_data 中
elif f.get('content'): elif f.get('content'):
# 文本文件:直接添加内容,不带文件名前缀
text_contents.append(f['content'][:3000]) text_contents.append(f['content'][:3000])
if len(f['content']) > 3000: if len(f['content']) > 3000:
text_contents[-1] += "...(内容过长已截断)" text_contents[-1] += "...(内容过长已截断)"
# 如果有文本文件内容,追加到消息后面
if text_contents: if text_contents:
for content in text_contents: for content in text_contents:
message += f"\n\n{content}" message += f"\n\n{content}"
# 保存图片和文件信息到 extra_data(用于历史记录) # 保存文件信息到 extra_data
extra_data_for_msg = None extra_data_for_msg = None
if image_paths: if image_paths:
# 图片保存服务器路径URL历史记录可以显示
extra_data_for_msg = { extra_data_for_msg = {
'images': image_paths, 'images': image_paths,
'files': [{'name': f['name'], 'type': f['type']} for f in files if not f['type'].startswith('image/')] 'files': [{'name': f['name'], 'type': f['type']} for f in files if not f['type'].startswith('image/')]
} }
elif image_contents: elif image_contents:
# 没有服务器路径但有问题(可能上传失败)
extra_data_for_msg = { extra_data_for_msg = {
'images': [{'name': i['name'], 'type': i['type']} for i in image_contents], 'images': [{'name': i['name'], 'type': i['type']} for i in image_contents],
'files': [{'name': f['name'], 'type': f['type']} for f in files if not f['type'].startswith('image/')] 'files': [{'name': f['name'], 'type': f['type']} for f in files if not f['type'].startswith('image/')]
@@ -896,8 +890,9 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
# 1. 获取Agent配置 # 1. 获取Agent配置
agent_config = agent_service.get_agent_config(current_agent_id) agent_config = agent_service.get_agent_config(current_agent_id)
agent_tools = agent_config.get('agent', {}).get('tools', []) agent_tools = agent_config.get('agent', {}).get('tools', [])
supports_function_calling = agent_config.get('provider', {}).get('supports_function_calling', False)
# 2. 获取或创建会话(先有 conversation_id # 2. 获取或创建会话
if conversation_id: if conversation_id:
conversation = conv_service.get_conversation(conversation_id) conversation = conv_service.get_conversation(conversation_id)
else: else:
@@ -908,12 +903,12 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
"conversation_id": conversation_id "conversation_id": conversation_id
}) })
# 3. 广播用户消息(前端立即看到) # 3. 广播用户消息
await manager.send_to_user(MAIN_USER_ID, { await manager.send_to_user(MAIN_USER_ID, {
"type": "user_message", "type": "user_message",
"conversation_id": conversation_id, "conversation_id": conversation_id,
"message": { "message": {
"id": None, # 临时,后面会保存 "id": None,
"role": "user", "role": "user",
"content": message, "content": message,
"source": "web", "source": "web",
@@ -921,29 +916,102 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
} }
}) })
# 4. 执行搜索并发送搜索结果 # 4. 保存用户消息
search_context = None user_msg = conv_service.add_message(
search_results_for_client = None # 用于发送给前端和保存 conversation_id=conversation.id,
logger.info(f"检查搜索条件: agent_tools={agent_tools}, disabled_tools={disabled_tools}") role='user',
content=message,
source='web',
extra_data=extra_data_for_msg
)
if 'search' in agent_tools and 'search' not in disabled_tools: # 5. 获取对话历史
logger.info("搜索条件满足,开始执行搜索") history = conv_service.get_conversation_history(conversation_id, limit=agent_config['agent'].get('max_history', 20))
# 6. 构建工具 schemaFunction Calling
tools_schema = []
if supports_function_calling and agent_tools:
# 搜索工具
if 'search' in agent_tools:
tool_service = ToolService(db) tool_service = ToolService(db)
search_tool = tool_service.get_default_tool('search') search_tool = tool_service.get_default_tool('search')
logger.info(f"获取到搜索工具: {search_tool.name if search_tool else 'None'}")
if search_tool and search_tool.config.get('api_key'): if search_tool and search_tool.config.get('api_key'):
tools_schema.append({
"type": "function",
"function": {
"name": "web_search",
"description": "搜索互联网获取实时信息、新闻、数据等。当用户询问需要最新信息的问题时使用此工具。",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "搜索关键词或问题"
}
},
"required": ["query"]
}
}
})
# 7. 调用LLMFunction Calling模式
if not agent_config or not agent_config.get('provider'):
await websocket.send_json({
"type": "error",
"message": "Agent配置不完整"
})
continue
try:
response = None
thinking_content = None
tool_calls_record = []
# 第一阶段让LLM决定是否调用工具
if tools_schema:
response, thinking_content, tool_calls = await llm_service.chat_with_tools(
messages=history,
provider_config=agent_config['provider'],
agent_config=agent_config['agent'],
tools=tools_schema,
enable_thinking=enable_thinking,
images=image_contents
)
# 如果LLM请求调用工具
if tool_calls:
logger.info(f"LLM请求调用工具: {tool_calls}")
# 发送工具调用通知给前端
await websocket.send_json({
"type": "tool_calls",
"conversation_id": conversation_id,
"tool_calls": [
{"name": tc['name'], "arguments": tc['arguments']}
for tc in tool_calls
]
})
# 执行工具调用
tool_results = []
tool_service = ToolService(db)
search_tool = tool_service.get_default_tool('search')
for tc in tool_calls:
if tc['name'] == 'web_search':
query = tc['arguments'].get('query', message)
logger.info(f"执行搜索: query={query}")
import httpx import httpx
import time import time
start_time = time.time() start_time = time.time()
try: try:
logger.info(f"执行搜索: query={message}")
tavily_url = "https://api.tavily.com/search" tavily_url = "https://api.tavily.com/search"
config = search_tool.config config = search_tool.config
payload = { payload = {
"api_key": config.get('api_key'), "api_key": config.get('api_key'),
"query": message, "query": query,
"max_results": config.get('max_results', 5), "max_results": config.get('max_results', 5),
"search_depth": config.get('search_depth', 'basic') "search_depth": config.get('search_depth', 'basic')
} }
@@ -955,103 +1023,124 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
duration_ms = int((time.time() - start_time) * 1000) duration_ms = int((time.time() - start_time) * 1000)
if search_result.get("results"): if search_result.get("results"):
# 构建搜索上下文给LLM # 构建搜索结果
max_for_llm = config.get('max_results', 5) search_content = []
search_context = "\n\n【搜索结果】\n" for i, r in enumerate(search_result["results"][:5], 1):
for i, r in enumerate(search_result["results"][:max_for_llm], 1): search_content.append({
search_context += f"{i}. {r.get('title', 'N/A')}\n {r.get('content', r.get('snippet', 'N/A'))[:200]}\n 来源: {r.get('url', 'N/A')}\n"
logger.info(f"搜索完成: {len(search_result['results'])} 条结果,使用 {min(len(search_result['results']), max_for_llm)}")
# 发送搜索结果给前端(按配置的数量)
max_display = config.get('max_results', 5)
search_results_for_client = [
{
"title": r.get('title', 'N/A'), "title": r.get('title', 'N/A'),
"snippet": r.get('content', r.get('snippet', ''))[:150], "content": r.get('content', r.get('snippet', ''))[:300],
"url": r.get('url', 'N/A') "url": r.get('url', 'N/A')
} })
for r in search_result["results"][:max_display]
] tool_results.append({
"tool_call_id": tc['id'],
"content": json.dumps(search_content)
})
# 发送搜索结果给前端
await websocket.send_json({ await websocket.send_json({
"type": "search_results", "type": "search_results",
"conversation_id": conversation_id, "conversation_id": conversation_id,
"results": search_results_for_client, "results": [
"query": message {"title": r.get('title'), "snippet": r.get('content', '')[:150], "url": r.get('url')}
for r in search_result["results"][:5]
],
"query": query
}) })
# 更新统计和日志 # 记录日志
tool_service.increment_stats(search_tool.id, True) tool_service.increment_stats(search_tool.id, True)
tool_service.log_usage({ tool_service.log_usage({
'tool_id': search_tool.id, 'tool_id': search_tool.id,
'tool_type': 'search', 'tool_type': 'search',
'query': message, 'query': query,
'success': True, 'success': True,
'result_summary': f'{len(search_result["results"])} results', 'result_summary': f'{len(search_result["results"])} results',
'conversation_id': conversation_id, 'conversation_id': conversation_id,
'agent_id': current_agent_id, 'agent_id': current_agent_id,
'duration_ms': duration_ms 'duration_ms': duration_ms
}) })
tool_calls_record.append({
"name": "web_search",
"query": query,
"results_count": len(search_result["results"])
})
except Exception as e: except Exception as e:
duration_ms = int((time.time() - start_time) * 1000)
logger.error(f"搜索失败: {e}") logger.error(f"搜索失败: {e}")
duration_ms = int((time.time() - start_time) * 1000)
tool_service.increment_stats(search_tool.id, False) tool_service.increment_stats(search_tool.id, False)
tool_service.log_usage({ tool_service.log_usage({
'tool_id': search_tool.id, 'tool_id': search_tool.id,
'tool_type': 'search', 'tool_type': 'search',
'query': message, 'query': query,
'success': False, 'success': False,
'error_message': str(e), 'error_message': str(e),
'conversation_id': conversation_id, 'conversation_id': conversation_id,
'duration_ms': duration_ms 'duration_ms': duration_ms
}) })
tool_results.append({
"tool_call_id": tc['id'],
"content": json.dumps({"error": str(e)})
})
# 5. 保存用户消息到数据库 # 将工具调用消息添加到历史
extra_data_to_save = None # 注意:这里需要将 assistant 的 tool_calls 消息添加到历史
if search_results_for_client: # 但我们用的是简化的历史格式,需要重新构建
extra_data_to_save = {'search_results': search_results_for_client, 'search_query': message}
if extra_data_for_msg:
if extra_data_to_save:
extra_data_to_save.update(extra_data_for_msg)
else:
extra_data_to_save = extra_data_for_msg
user_msg = conv_service.add_message( # 第二阶段将工具结果返回给LLM
conversation_id=conversation.id, if tool_results:
role='user', # 重新获取完整历史(包含工具调用)
content=message, history_with_tools = history.copy()
source='web', # 添加 assistant 的 tool_calls 消息
extra_data=extra_data_to_save history_with_tools.append({
"role": "assistant",
"content": None,
"tool_calls": [
{
"id": tc['id'],
"type": "function",
"function": {
"name": tc['name'],
"arguments": json.dumps(tc['arguments'])
}
}
for tc in tool_calls
]
})
# 添加工具结果
for tr in tool_results:
history_with_tools.append({
"role": "tool",
"tool_call_id": tr['tool_call_id'],
"content": tr['content']
})
response, thinking_content = await llm_service.chat_with_tool_results(
messages=history_with_tools,
provider_config=agent_config['provider'],
agent_config=agent_config['agent'],
enable_thinking=enable_thinking
) )
# 6. 获取对话历史(包含刚保存的用户消息) # 如果不支持 Function Calling 或没有工具,直接调用普通 chat
history = conv_service.get_conversation_history(conversation_id, limit=agent_config['agent'].get('max_history', 20)) if response is None:
# 7. 如果有搜索结果,添加到消息中
if search_context:
modified_system_prompt = agent_config['agent'].get('system_prompt', '') + "\n\n如果提供了搜索结果,请基于搜索结果回答用户问题,并注明信息来源。"
agent_config['agent']['system_prompt'] = modified_system_prompt
history.append({"role": "system", "content": f"以下是搜索到的相关信息,请参考这些内容回答用户问题:{search_context}"})
# 8. 调用LLM返回回复
if not agent_config or not agent_config.get('provider'):
await websocket.send_json({
"type": "error",
"message": "Agent配置不完整"
})
continue
try:
response, thinking_content = await llm_service.chat( response, thinking_content = await llm_service.chat(
messages=history, messages=history,
provider_config=agent_config['provider'], provider_config=agent_config['provider'],
agent_config=agent_config['agent'], agent_config=agent_config['agent'],
enable_thinking=enable_thinking, enable_thinking=enable_thinking,
images=image_contents # 传递图片数据给多模态模型 images=image_contents
) )
logger.info(f"LLM响应: response长度={len(response)}, thinking长度={len(thinking_content) if thinking_content else 0}") logger.info(f"LLM响应: response长度={len(response)}, thinking长度={len(thinking_content) if thinking_content else 0}")
# 保存AI回复 # 保存AI回复
extra_data_to_save = None
if tool_calls_record:
extra_data_to_save = {'tool_calls': tool_calls_record}
assistant_msg = conv_service.add_message( assistant_msg = conv_service.add_message(
conversation_id=conversation.id, conversation_id=conversation.id,
role='assistant', role='assistant',
@@ -1059,7 +1148,8 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
source='web', source='web',
thinking_content=thinking_content if thinking_content else None, thinking_content=thinking_content if thinking_content else None,
agent_id=current_agent_id, agent_id=current_agent_id,
model_used=agent_config['provider'].get('default_model') model_used=agent_config['provider'].get('default_model'),
extra_data=extra_data_to_save
) )
# 发送AI回复 # 发送AI回复
@@ -1074,6 +1164,7 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
"source": "web", "source": "web",
"agent_id": current_agent_id, "agent_id": current_agent_id,
"agent_name": agent_config['agent'].get('display_name'), "agent_name": agent_config['agent'].get('display_name'),
"tool_calls": tool_calls_record, # v3.0: 返回工具调用记录
"created_at": assistant_msg.created_at.isoformat() "created_at": assistant_msg.created_at.isoformat()
} }
}) })

View File

@@ -36,6 +36,9 @@ class LLMProvider(Base):
supports_vision = Column(Boolean, default=False) # 是否支持图片理解(多模态) supports_vision = Column(Boolean, default=False) # 是否支持图片理解(多模态)
vision_model = Column(String(100), nullable=True) # 视觉模型名(如与默认模型不同) vision_model = Column(String(100), nullable=True) # 视觉模型名(如与默认模型不同)
# Function Calling 支持
supports_function_calling = Column(Boolean, default=False) # 是否支持函数调用(工具自主调用)
# 配额和限制 # 配额和限制
max_tokens = Column(Integer, default=4096) max_tokens = Column(Integer, default=4096)
temperature = Column(Float, default=0.7) temperature = Column(Float, default=0.7)

View File

@@ -137,6 +137,9 @@ class AgentService:
'api_key': provider.api_key if provider else None, 'api_key': provider.api_key if provider else None,
'supports_thinking': provider.supports_thinking if provider else False, 'supports_thinking': provider.supports_thinking if provider else False,
'thinking_model': provider.thinking_model if provider else None, 'thinking_model': provider.thinking_model if provider else None,
'supports_vision': provider.supports_vision if provider else False,
'vision_model': provider.vision_model if provider else None,
'supports_function_calling': provider.supports_function_calling if provider else False,
'default_model': provider.default_model if provider else 'auto', 'default_model': provider.default_model if provider else 'auto',
'max_tokens': provider.max_tokens if provider else 4096, 'max_tokens': provider.max_tokens if provider else 4096,
'temperature': provider.temperature if provider else 0.7, 'temperature': provider.temperature if provider else 0.7,

View File

@@ -382,5 +382,232 @@ class LLMService:
yield {"type": "content", "text": buffer} yield {"type": "content", "text": buffer}
async def chat_with_tools(
self,
messages: List[Dict],
provider_config: dict,
agent_config: dict,
tools: List[Dict] = None,
enable_thinking: bool = True,
images: List[Dict] = None
) -> Tuple[str, Optional[str], Optional[List[Dict]]]:
"""
支持Function Calling的对话
Args:
messages: 对话历史
provider_config: LLM Provider配置
agent_config: Agent配置
tools: 工具定义列表OpenAI Function Calling格式
enable_thinking: 是否启用思考
images: 图片数据列表
Returns:
Tuple[str, Optional[str], Optional[List[Dict]]]: (回复内容, 思考过程, 工具调用记录)
"""
api_base = provider_config.get('api_base')
api_key = provider_config.get('api_key')
model = agent_config.get('model_override') or provider_config.get('default_model', 'auto')
supports_function_calling = provider_config.get('supports_function_calling', False)
max_tokens = provider_config.get('max_tokens', 4096)
temperature = agent_config.get('temperature_override') or provider_config.get('temperature', 0.7)
# 如果不支持Function Calling直接调用普通chat
if not supports_function_calling or not tools:
response, thinking = await self.chat(messages, provider_config, agent_config, enable_thinking, images)
return response, thinking, None
# 构建消息
final_messages = messages.copy()
system_prompt = agent_config.get('system_prompt', '你是一个有用的AI助手。')
if final_messages and final_messages[0]['role'] != 'system':
final_messages.insert(0, {"role": "system", "content": system_prompt})
# 处理图片(多模态)
if images and len(images) > 0:
for i in range(len(final_messages) - 1, -1, -1):
if final_messages[i]['role'] == 'user':
original_text = final_messages[i]['content']
multimodal_content = [{"type": "text", "text": original_text if original_text else "请描述这张图片"}]
for img in images:
multimodal_content.append({
"type": "image_url",
"image_url": {"url": img['data']}
})
final_messages[i]['content'] = multimodal_content
break
# 第一次调用让LLM决定是否调用工具
url = f"{api_base.rstrip('/')}/chat/completions"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}
payload = {
"model": model,
"messages": final_messages,
"temperature": temperature,
"max_tokens": max_tokens,
"tools": tools # 传入工具定义
}
logger.info(f"Function Calling调用: url={url}, model={model}, tools={len(tools)}")
tool_calls_record = [] # 记录工具调用
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(url, headers=headers, json=payload)
if response.status_code != 200:
logger.error(f"API返回错误: status={response.status_code}, body={response.text[:500]}")
response.raise_for_status()
data = response.json()
if 'choices' not in data or len(data['choices']) == 0:
raise ValueError("API响应格式错误缺少choices")
message = data['choices'][0]['message']
# 检查是否有工具调用
if 'tool_calls' in message and message['tool_calls']:
logger.info(f"LLM请求调用工具: {len(message['tool_calls'])}")
# 将LLM的工具调用消息添加到历史
final_messages.append({
"role": "assistant",
"content": None,
"tool_calls": message['tool_calls']
})
# 记录工具调用
for tc in message['tool_calls']:
tool_calls_record.append({
"id": tc['id'],
"name": tc['function']['name'],
"arguments": json.loads(tc['function']['arguments'])
})
# 返回工具调用记录,由调用方执行工具
return None, None, tool_calls_record
# 没有工具调用,直接返回内容
content = message.get('content', '')
# 处理思考内容(如果有)
thinking_content = None
# 这里可以添加思考内容提取逻辑
return content, thinking_content, None
except httpx.HTTPStatusError as e:
logger.error(f"HTTP错误: {e.response.status_code}, {e.response.text}")
raise
except Exception as e:
logger.error(f"Function Calling调用异常: {type(e).__name__}: {e}")
raise
async def chat_with_tool_results(
self,
messages: List[Dict],
provider_config: dict,
agent_config: dict,
enable_thinking: bool = True
) -> Tuple[str, Optional[str]]:
"""
第二阶段调用:使用包含工具调用和结果的完整消息历史
Args:
messages: 已包含assistant tool_calls和tool结果的完整消息历史
provider_config: LLM Provider配置
agent_config: Agent配置
Returns:
Tuple[str, Optional[str]]: (回复内容, 思考过程)
"""
api_base = provider_config.get('api_base')
api_key = provider_config.get('api_key')
model = agent_config.get('model_override') or provider_config.get('default_model', 'auto')
max_tokens = provider_config.get('max_tokens', 4096)
temperature = agent_config.get('temperature_override') or provider_config.get('temperature', 0.7)
# 消息历史已经包含了assistant的tool_calls和tool结果直接使用
final_messages = messages.copy()
# 添加提示:告诉模型直接根据工具结果回答,不要再调用工具
# 添加一个系统级别的提示
tool_hint = {
"role": "system",
"content": "请根据工具返回的结果直接回答用户的问题,不要再调用任何工具或搜索。如果结果不足以回答问题,请根据现有信息给出最好的回答,并说明信息的局限性。"
}
# 在工具结果之后添加提示
final_messages.append(tool_hint)
# 调用LLM生成最终回复
url = f"{api_base.rstrip('/')}/chat/completions"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}
payload = {
"model": model,
"messages": final_messages,
"temperature": temperature,
"max_tokens": max_tokens
}
logger.info(f"工具结果返回LLM: url={url}, model={model}, 消息数={len(final_messages)}")
# 打印消息内容(调试)
for i, msg in enumerate(final_messages):
role = msg.get('role')
content_preview = str(msg.get('content', ''))[:100] if msg.get('content') else 'None'
if role == 'tool':
logger.info(f"消息[{i}] role={role}, tool_call_id={msg.get('tool_call_id')}, content长度={len(msg.get('content',''))}")
elif role == 'assistant' and msg.get('tool_calls'):
logger.info(f"消息[{i}] role={role}, tool_calls={len(msg['tool_calls'])}")
else:
logger.info(f"消息[{i}] role={role}, content={content_preview}...")
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(url, headers=headers, json=payload)
if response.status_code != 200:
logger.error(f"API返回错误: status={response.status_code}, body={response.text[:500]}")
response.raise_for_status()
data = response.json()
content = data['choices'][0]['message']['content']
# 过滤掉伪工具调用格式某些模型如Kimi会输出这种内部格式
# 模式:<|tool_calls_section_begin|>...<|tool_calls_section_end|>
import re
tool_pattern = r'<\|tool_calls_section_begin\|>.*?<\|tool_calls_section_end\|>'
content = re.sub(tool_pattern, '', content, flags=re.DOTALL)
# 也过滤单个 tool_call 格式
tool_call_pattern = r'<\|tool_call_begin\|>.*?<\|tool_call_end\|>'
content = re.sub(tool_call_pattern, '', content, flags=re.DOTALL)
# 清理可能残留的格式标记
content = content.replace('<|tool_calls_section_begin|>', '')
content = content.replace('<|tool_calls_section_end|>', '')
content = content.replace('<|tool_call_begin|>', '')
content = content.replace('<|tool_call_end|>', '')
content = content.replace('<|tool_call_argument_begin|>', '')
content = content.replace('<|tool_call_argument_end|>', '')
# 清理多余空行
content = re.sub(r'\n{3,}', '\n\n', content).strip()
return content, None
except Exception as e:
logger.error(f"工具结果调用异常: {e}")
raise
# 全局实例 # 全局实例
llm_service = LLMService() llm_service = LLMService()

View File

@@ -58,8 +58,8 @@
</div> </div>
<div class="card-body"> <div class="card-body">
<table class="table"> <table class="table">
<thead><tr><th>名称</th><th>API地址</th><th>默认模型</th><th>思考</th><th>视觉</th><th>状态</th><th>操作</th></tr></thead> <thead><tr><th>名称</th><th>API地址</th><th>默认模型</th><th>思考</th><th>视觉</th><th>FC</th><th>状态</th><th>操作</th></tr></thead>
<tbody id="providers-list"><tr><td colspan="7" class="text-center">加载中...</td></tr></tbody> <tbody id="providers-list"><tr><td colspan="8" class="text-center">加载中...</td></tr></tbody>
</table> </table>
</div> </div>
</div> </div>
@@ -164,6 +164,8 @@
<div class="thinking-config"><div class="row"><div class="col-md-6 form-check"><input type="checkbox" class="form-check-input" id="provider-supports-thinking"><label class="form-check-label">支持原生思考</label></div><div class="col-md-6"><label class="form-label">思考模型名</label><input type="text" class="form-control" id="provider-thinking-model"></div></div></div> <div class="thinking-config"><div class="row"><div class="col-md-6 form-check"><input type="checkbox" class="form-check-input" id="provider-supports-thinking"><label class="form-check-label">支持原生思考</label></div><div class="col-md-6"><label class="form-label">思考模型名</label><input type="text" class="form-control" id="provider-thinking-model"></div></div></div>
<hr><h6>视觉能力</h6> <hr><h6>视觉能力</h6>
<div class="thinking-config"><div class="row"><div class="col-md-6 form-check"><input type="checkbox" class="form-check-input" id="provider-supports-vision"><label class="form-check-label">支持图片理解</label></div><div class="col-md-6"><label class="form-label">视觉模型名</label><input type="text" class="form-control" id="provider-vision-model" placeholder="留空则使用默认模型"></div></div><small class="text-muted mt-2 d-block">启用后可上传图片让AI识别分析内容</small></div> <div class="thinking-config"><div class="row"><div class="col-md-6 form-check"><input type="checkbox" class="form-check-input" id="provider-supports-vision"><label class="form-check-label">支持图片理解</label></div><div class="col-md-6"><label class="form-label">视觉模型名</label><input type="text" class="form-control" id="provider-vision-model" placeholder="留空则使用默认模型"></div></div><small class="text-muted mt-2 d-block">启用后可上传图片让AI识别分析内容</small></div>
<hr><h6>Function Calling</h6>
<div class="thinking-config"><div class="form-check"><input type="checkbox" class="form-check-input" id="provider-supports-function-calling"><label class="form-check-label">支持函数调用</label></div><small class="text-muted mt-2 d-block">启用后LLM可自主决定何时调用工具更智能</small></div>
<div class="mt-3"><button type="button" class="btn btn-outline-primary" onclick="fetchProviderModels()"><i class="ri-refresh-line"></i> 获取模型</button><button type="button" class="btn btn-outline-secondary" onclick="testProviderConnection()"><i class="ri-link"></i> 测试连接</button></div> <div class="mt-3"><button type="button" class="btn btn-outline-primary" onclick="fetchProviderModels()"><i class="ri-refresh-line"></i> 获取模型</button><button type="button" class="btn btn-outline-secondary" onclick="testProviderConnection()"><i class="ri-link"></i> 测试连接</button></div>
<div class="mt-2" id="provider-models-preview"></div><div class="mt-2" id="provider-test-result"></div> <div class="mt-2" id="provider-models-preview"></div><div class="mt-2" id="provider-test-result"></div>
</form></div> </form></div>
@@ -331,6 +333,7 @@
<td><strong>${p.name}</strong></td><td><small>${p.api_base||'-'}</small></td><td>${p.default_model||'auto'}</td> <td><strong>${p.name}</strong></td><td><small>${p.api_base||'-'}</small></td><td>${p.default_model||'auto'}</td>
<td>${p.supports_thinking?'<span class="badge bg-success">支持</span>':'<span class="badge bg-secondary">不支持</span>'}</td> <td>${p.supports_thinking?'<span class="badge bg-success">支持</span>':'<span class="badge bg-secondary">不支持</span>'}</td>
<td>${p.supports_vision?'<span class="badge bg-info">支持</span>':'<span class="badge bg-secondary">不支持</span>'}</td> <td>${p.supports_vision?'<span class="badge bg-info">支持</span>':'<span class="badge bg-secondary">不支持</span>'}</td>
<td>${p.supports_function_calling?'<span class="badge bg-primary">支持</span>':'<span class="badge bg-secondary">不支持</span>'}</td>
<td>${p.is_active?'<span class="badge bg-success">启用</span>':'<span class="badge bg-secondary">禁用</span>'}</td> <td>${p.is_active?'<span class="badge bg-success">启用</span>':'<span class="badge bg-secondary">禁用</span>'}</td>
<td><button class="btn btn-sm btn-outline-primary" onclick="editProvider(${p.id})"><i class="ri-edit-line"></i></button> <td><button class="btn btn-sm btn-outline-primary" onclick="editProvider(${p.id})"><i class="ri-edit-line"></i></button>
<button class="btn btn-sm btn-outline-danger" onclick="deleteProvider(${p.id},'${p.name}')"><i class="ri-delete-bin-line"></i></button></td> <button class="btn btn-sm btn-outline-danger" onclick="deleteProvider(${p.id},'${p.name}')"><i class="ri-delete-bin-line"></i></button></td>
@@ -349,6 +352,7 @@
document.getElementById('provider-active').checked = true; document.getElementById('provider-active').checked = true;
document.getElementById('provider-supports-thinking').checked = false; document.getElementById('provider-supports-thinking').checked = false;
document.getElementById('provider-supports-vision').checked = false; document.getElementById('provider-supports-vision').checked = false;
document.getElementById('provider-supports-function-calling').checked = false;
document.getElementById('provider-models-preview').innerHTML = ''; document.getElementById('provider-models-preview').innerHTML = '';
document.getElementById('provider-test-result').innerHTML = ''; document.getElementById('provider-test-result').innerHTML = '';
new bootstrap.Modal(document.getElementById('providerModal')).show(); new bootstrap.Modal(document.getElementById('providerModal')).show();
@@ -371,6 +375,7 @@
document.getElementById('provider-thinking-model').value = p.thinking_model || ''; document.getElementById('provider-thinking-model').value = p.thinking_model || '';
document.getElementById('provider-supports-vision').checked = p.supports_vision; document.getElementById('provider-supports-vision').checked = p.supports_vision;
document.getElementById('provider-vision-model').value = p.vision_model || ''; document.getElementById('provider-vision-model').value = p.vision_model || '';
document.getElementById('provider-supports-function-calling').checked = p.supports_function_calling;
new bootstrap.Modal(document.getElementById('providerModal')).show(); new bootstrap.Modal(document.getElementById('providerModal')).show();
} }
@@ -389,7 +394,8 @@
supports_thinking: document.getElementById('provider-supports-thinking').checked, supports_thinking: document.getElementById('provider-supports-thinking').checked,
thinking_model: document.getElementById('provider-thinking-model').value, thinking_model: document.getElementById('provider-thinking-model').value,
supports_vision: document.getElementById('provider-supports-vision').checked, supports_vision: document.getElementById('provider-supports-vision').checked,
vision_model: document.getElementById('provider-vision-model').value vision_model: document.getElementById('provider-vision-model').value,
supports_function_calling: document.getElementById('provider-supports-function-calling').checked
}; };
const res = await fetch(id ? `/api/v2/providers/${id}` : '/api/v2/providers', { method: id ? 'PUT' : 'POST', headers: {'Content-Type':'application/json'}, body: JSON.stringify(data) }); const res = await fetch(id ? `/api/v2/providers/${id}` : '/api/v2/providers', { method: id ? 'PUT' : 'POST', headers: {'Content-Type':'application/json'}, body: JSON.stringify(data) });
const result = await res.json(); const result = await res.json();

View File

@@ -130,10 +130,27 @@
/* 快捷语句 - 横向扁平 */ /* 快捷语句 - 横向扁平 */
.quick-phrases-bar { display: flex; align-items: center; gap: 8px; margin-top: 12px; position: relative; } .quick-phrases-bar { display: flex; align-items: center; gap: 8px; margin-top: 12px; position: relative; }
.tool-toggle-item { display: inline-flex; align-items: center; gap: 4px; }
.tool-toggle-item input { width: 14px; height: 14px; } /* 工具调用记录显示 */
.tool-toggle-item label { font-size: 12px; color: #666; } .tool-call-record { margin-top: 8px; padding: 8px 12px; background: #e8f5e9; border-radius: 8px; font-size: 12px; color: #10a37f; }
.tool-toggle-item label i { color: #10a37f; } .tool-call-record i { margin-right: 4px; }
/* Agent信息侧边栏 */
.agent-info-sidebar { width: 200px; background: #f8f9fa; border-right: 1px solid #e0e0e0; padding: 16px; display: flex; flex-direction: column; }
.agent-info-header { display: flex; align-items: center; gap: 8px; margin-bottom: 12px; }
.agent-avatar { width: 48px; height: 48px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 12px; display: flex; align-items: center; justify-content: center; color: white; font-size: 24px; }
.agent-name-area { flex: 1; }
.agent-name-area h3 { font-size: 16px; margin: 0; color: #333; }
.agent-name-area small { color: #999; font-size: 12px; }
.agent-info-section { margin-top: 16px; }
.agent-info-section h4 { font-size: 13px; color: #666; margin: 0 0 8px 0; font-weight: 500; }
.agent-info-section p { font-size: 12px; color: #333; line-height: 1.5; margin: 0; }
.agent-capabilities { display: flex; flex-wrap: wrap; gap: 6px; margin-top: 8px; }
.capability-tag { padding: 4px 8px; background: #e8f5e9; border-radius: 6px; font-size: 11px; color: #10a37f; }
.capability-tag.disabled { background: #f5f5f5; color: #999; }
.agent-model-info { margin-top: 12px; padding: 8px; background: white; border-radius: 8px; border: 1px solid #e0e0e0; }
.agent-model-info label { font-size: 11px; color: #999; }
.agent-model-info span { font-size: 12px; color: #333; display: block; margin-top: 2px; }
.add-phrase-btn { padding: 6px 10px; background: #f0f0f0; border: 1px solid #ddd; border-radius: 6px; cursor: pointer; font-size: 12px; color: #666; white-space: nowrap; flex-shrink: 0; } .add-phrase-btn { padding: 6px 10px; background: #f0f0f0; border: 1px solid #ddd; border-radius: 6px; cursor: pointer; font-size: 12px; color: #666; white-space: nowrap; flex-shrink: 0; }
.add-phrase-btn:hover { background: #e8e8e8; } .add-phrase-btn:hover { background: #e8e8e8; }
.phrase-list-wrapper { flex: 1; overflow-x: auto; overflow-y: hidden; scrollbar-width: thin; } .phrase-list-wrapper { flex: 1; overflow-x: auto; overflow-y: hidden; scrollbar-width: thin; }
@@ -182,7 +199,7 @@
<div class="main-content"> <div class="main-content">
<div class="chat-header"> <div class="chat-header">
<h1>AI 对话 v2.0</h1> <h1>AI 对话 v3.0</h1>
<div class="header-controls"> <div class="header-controls">
<div class="agent-selector"> <div class="agent-selector">
<select id="agentSelect" onchange="switchAgent()"><option value="">加载中...</option></select> <select id="agentSelect" onchange="switchAgent()"><option value="">加载中...</option></select>
@@ -191,9 +208,38 @@
</div> </div>
</div> </div>
<div class="messages-container" id="messagesContainer"> <!-- 对话区域左侧Agent信息 + 右侧消息 -->
<div class="chat-area" style="display:flex;flex:1;overflow:hidden;">
<!-- Agent信息侧边栏 -->
<div class="agent-info-sidebar" id="agentInfoSidebar">
<div class="agent-info-header">
<div class="agent-avatar" id="agentAvatar">🤖</div>
<div class="agent-name-area">
<h3 id="agentDisplayName">加载中...</h3>
<small id="agentName">agent-name</small>
</div>
</div>
<div class="agent-info-section">
<h4>简介</h4>
<p id="agentDescription">-</p>
</div>
<div class="agent-info-section">
<h4>能力</h4>
<div class="agent-capabilities" id="agentCapabilities">
<!-- 动态渲染 -->
</div>
</div>
<div class="agent-model-info">
<label>模型</label>
<span id="agentModelInfo">-</span>
</div>
</div>
<!-- 消息容器 -->
<div class="messages-container" id="messagesContainer" style="flex:1;">
<div class="welcome"><h2>👋 开始对话</h2><p>选择Agent开始聊天</p></div> <div class="welcome"><h2>👋 开始对话</h2><p>选择Agent开始聊天</p></div>
</div> </div>
</div>
<div class="input-container"> <div class="input-container">
<div class="input-area"> <div class="input-area">
@@ -203,9 +249,10 @@
<textarea id="messageInput" placeholder="输入消息..." rows="1"></textarea> <textarea id="messageInput" placeholder="输入消息..." rows="1"></textarea>
<button class="send-btn" id="sendBtn" onclick="sendMessage()"><i class="ri-send-plane-fill"></i></button> <button class="send-btn" id="sendBtn" onclick="sendMessage()"><i class="ri-send-plane-fill"></i></button>
</div> </div>
<!-- 工具警告提示 -->
<div id="tool-warning-tip" style="display:none;margin-top:4px;padding:6px 10px;background:#fff3cd;border:1px solid #ffc107;border-radius:6px;font-size:12px;color:#856404;"></div>
<div class="file-preview-area" id="filePreviewArea"></div> <div class="file-preview-area" id="filePreviewArea"></div>
<div class="quick-phrases-bar"> <div class="quick-phrases-bar">
<div id="toolToggleArea"></div>
<button class="add-phrase-btn" onclick="showAddPhraseModal()"><i class="ri-add-line"></i> 添加</button> <button class="add-phrase-btn" onclick="showAddPhraseModal()"><i class="ri-add-line"></i> 添加</button>
<div class="phrase-list-wrapper" id="phraseListWrapper" onwheel="scrollPhrases(event)"> <div class="phrase-list-wrapper" id="phraseListWrapper" onwheel="scrollPhrases(event)">
<div class="phrase-list" id="quickPhrasesList"></div> <div class="phrase-list" id="quickPhrasesList"></div>
@@ -279,7 +326,6 @@
document.addEventListener('DOMContentLoaded', () => { document.addEventListener('DOMContentLoaded', () => {
loadProviders(); // 加载大模型池 loadProviders(); // 加载大模型池
loadToolsData(); // 加载工具列表
loadAgents(); loadAgents();
loadQuickPhrases(); loadQuickPhrases();
connectWebSocket(); connectWebSocket();
@@ -293,8 +339,8 @@
const res = await fetch('/api/v2/providers'); const res = await fetch('/api/v2/providers');
const data = await res.json(); const data = await res.json();
providers = data.providers || []; providers = data.providers || [];
// 加载后检查工具支持如果agents已加载 // 加载后更新Agent信息侧边栏如果agents已加载
if (agents.length > 0) showToolWarning(); if (agents.length > 0) renderAgentInfoSidebar();
} catch (e) { console.error('加载Provider失败:', e); } } catch (e) { console.error('加载Provider失败:', e); }
} }
@@ -314,99 +360,6 @@
return agentTools.includes(toolType); return agentTools.includes(toolType);
} }
// 获取当前Agent不支持的工具列表用户已启用但Agent不支持
function getUnsupportedTools() {
const unsupported = [];
// 检查所有工具checkbox
const toolCheckboxes = document.querySelectorAll('.tool-checkbox');
toolCheckboxes.forEach(cb => {
if (cb.checked && !checkAgentToolSupport(cb.dataset.toolType)) {
// 获取工具显示名称
const label = cb.nextElementSibling?.textContent?.trim() || cb.dataset.toolType;
unsupported.push(label);
}
});
return unsupported;
}
// 渲染工具选择区域(根据系统工具列表)
function renderToolToggles() {
const container = document.getElementById('toolToggleArea');
if (!container || toolsData.length === 0) return;
// 获取当前Agent支持的工具
const agent = agents.find(a => a.id === currentAgentId);
const agentTools = agent?.tools || [];
// 渲染工具checkbox列表
let html = '';
toolsData.filter(t => t.is_active).forEach(t => {
const toolType = t.tool_type || 'unknown';
const isSupported = agentTools.includes(toolType);
const icon = getToolIconFrontend(toolType);
html += `<div class="tool-toggle-item" style="display:inline-flex;align-items:center;gap:4px;margin-right:12px;">
<input type="checkbox" class="tool-checkbox" id="tool-${toolType}" data-tool-type="${toolType}" ${isSupported ? 'checked' : ''} onchange="showToolWarning()">
<label for="tool-${toolType}" style="cursor:pointer;font-size:12px;"><i class="${icon}"></i> ${t.name}</label>
</div>`;
});
// 保留原有的结构只更新工具checkbox部分
const existingWarning = document.getElementById('tool-warning-tip');
container.innerHTML = html;
if (existingWarning) container.appendChild(existingWarning);
}
// 前端工具图标
function getToolIconFrontend(toolType) {
const icons = {
'search': 'ri-search-line',
'calculator': 'ri-calculator-line',
'code': 'ri-code-line',
'image': 'ri-image-line',
'web': 'ri-global-line'
};
return icons[toolType] || 'ri-tools-line';
}
// 加载工具列表
let toolsData = [];
async function loadToolsData() {
try {
const res = await fetch('/api/v2/tools');
const data = await res.json();
toolsData = data.tools || [];
renderToolToggles();
} catch (e) { console.error('加载工具列表失败:', e); }
}
// 显示工具不支持提示
function showToolWarning() {
const unsupported = getUnsupportedTools();
const warningDiv = document.getElementById('tool-warning-tip');
if (unsupported.length > 0) {
const agent = agents.find(a => a.id === currentAgentId);
const agentName = agent?.display_name || agent?.name || '当前Agent';
const msg = `<i class="ri-alert-line"></i> <strong>${agentName}</strong> 不支持 <strong>${unsupported.join('、')}</strong> 工具请关闭或切换Agent`;
if (warningDiv) {
warningDiv.innerHTML = msg;
warningDiv.style.display = 'block';
} else {
const newWarning = document.createElement('div');
newWarning.id = 'tool-warning-tip';
newWarning.style.cssText = 'margin-top:8px;padding:8px 12px;background:#fff3cd;border:1px solid #ffc107;border-radius:6px;font-size:13px;color:#856404;';
newWarning.innerHTML = msg;
document.getElementById('toolToggleArea').appendChild(newWarning);
}
// 禁用发送按钮
document.getElementById('sendBtn').disabled = true;
} else {
if (warningDiv) warningDiv.style.display = 'none';
document.getElementById('sendBtn').disabled = false;
}
}
// 加载Agent // 加载Agent
async function loadAgents() { async function loadAgents() {
try { try {
@@ -416,13 +369,58 @@
const defaultAgent = agents.find(a => a.is_default) || agents[0]; const defaultAgent = agents.find(a => a.is_default) || agents[0];
if (defaultAgent) currentAgentId = defaultAgent.id; if (defaultAgent) currentAgentId = defaultAgent.id;
renderAgentSelect(); renderAgentSelect();
// 加载后检查工具支持 renderAgentInfoSidebar(); // 渲染Agent信息侧边栏
showToolWarning();
// 渲染工具选择区域
renderToolToggles();
} catch (e) { console.error('加载Agent失败:', e); } } catch (e) { console.error('加载Agent失败:', e); }
} }
// 渲染Agent信息侧边栏
function renderAgentInfoSidebar() {
const agent = agents.find(a => a.id === currentAgentId);
if (!agent) return;
// 更新名称
document.getElementById('agentDisplayName').textContent = agent.display_name || agent.name;
document.getElementById('agentName').textContent = agent.name;
// 更新头像用emoji或首字母
const avatar = document.getElementById('agentAvatar');
avatar.textContent = agent.display_name?.charAt(0) || agent.name?.charAt(0) || '🤖';
// 更新描述
document.getElementById('agentDescription').textContent = agent.description || '暂无描述';
// 更新能力标签
const capabilitiesHtml = [];
// 检查思考能力
const provider = providers.find(p => p.id === agent.llm_provider_id);
if (provider) {
if (provider.supports_thinking) {
capabilitiesHtml.push('<span class="capability-tag"><i class="ri-lightbulb-line"></i> 思考</span>');
}
if (provider.supports_vision) {
capabilitiesHtml.push('<span class="capability-tag"><i class="ri-image-line"></i> 视觉</span>');
}
if (provider.supports_function_calling) {
capabilitiesHtml.push('<span class="capability-tag"><i class="ri-tools-line"></i> 工具调用</span>');
} else {
capabilitiesHtml.push('<span class="capability-tag disabled"><i class="ri-tools-line"></i> 工具(手动)</span>');
}
// 更新模型信息
const model = agent.model_override || provider.default_model || 'auto';
document.getElementById('agentModelInfo').textContent = model;
}
// 检查工具配置
const agentTools = agent.tools || [];
if (agentTools.includes('search')) {
capabilitiesHtml.push('<span class="capability-tag"><i class="ri-search-line"></i> 搜索</span>');
}
document.getElementById('agentCapabilities').innerHTML = capabilitiesHtml.join('') || '<span class="capability-tag disabled">基础对话</span>';
}
function renderAgentSelect() { function renderAgentSelect() {
const select = document.getElementById('agentSelect'); const select = document.getElementById('agentSelect');
select.innerHTML = agents.filter(a => a.is_active).map(a => select.innerHTML = agents.filter(a => a.is_active).map(a =>
@@ -442,8 +440,7 @@
if (ws?.readyState === WebSocket.OPEN) ws.send(JSON.stringify({ action: 'switch_agent', agent_id: currentAgentId })); if (ws?.readyState === WebSocket.OPEN) ws.send(JSON.stringify({ action: 'switch_agent', agent_id: currentAgentId }));
await createNewConversation(); await createNewConversation();
showAgentSwitchNotice(); showAgentSwitchNotice();
// 切换Agent后检查工具支持 renderAgentInfoSidebar(); // 更新侧边栏信息
showToolWarning();
} }
} }
@@ -1145,16 +1142,6 @@
return; return;
} }
// 检查工具支持
const unsupported = getUnsupportedTools();
if (unsupported.length > 0) {
const agent = agents.find(a => a.id === currentAgentId);
const agentName = agent?.display_name || agent?.name || '当前Agent';
alert(`⚠️ ${agentName} 不支持 ${unsupported.join('、')} 工具\n\n请关闭不支持的工具或切换到支持该工具的Agent。`);
document.getElementById('sendBtn').disabled = false;
return;
}
document.getElementById('sendBtn').disabled = true; document.getElementById('sendBtn').disabled = true;
input.value = ''; input.value = '';
input.style.height = 'auto'; input.style.height = 'auto';
@@ -1167,13 +1154,7 @@
pendingFiles = []; pendingFiles = [];
document.getElementById('filePreviewArea').innerHTML = ''; document.getElementById('filePreviewArea').innerHTML = '';
// 获取禁用的工具列表(所有系统工具 - 用户选中的工具) // v3.0: Function Calling模式不再需要 disabled_tools
const disabledTools = [];
const allToolTypes = toolsData.filter(t => t.is_active).map(t => t.tool_type);
const selectedTools = Array.from(document.querySelectorAll('.tool-checkbox:checked')).map(cb => cb.dataset.toolType);
allToolTypes.forEach(t => {
if (!selectedTools.includes(t)) disabledTools.push(t);
});
// 发送消息(包含文件) // 发送消息(包含文件)
if (ws?.readyState === WebSocket.OPEN) { if (ws?.readyState === WebSocket.OPEN) {
@@ -1182,7 +1163,6 @@
message: msg, message: msg,
conversation_id: currentConversationId, conversation_id: currentConversationId,
agent_id: currentAgentId, agent_id: currentAgentId,
disabled_tools: disabledTools,
files: lastSentFiles || [] // 发送的文件列表 files: lastSentFiles || [] // 发送的文件列表
})); }));
} }