Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 291de733a4 | |
| | 10f67a807a | |
```diff
@@ -1121,7 +1121,6 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
                 messages=history_with_tools,
                 provider_config=agent_config['provider'],
                 agent_config=agent_config['agent'],
-                tool_results=tool_results,
                 enable_thinking=enable_thinking
             )
```
```diff
@@ -137,6 +137,9 @@ class AgentService:
             'api_key': provider.api_key if provider else None,
             'supports_thinking': provider.supports_thinking if provider else False,
             'thinking_model': provider.thinking_model if provider else None,
+            'supports_vision': provider.supports_vision if provider else False,
+            'vision_model': provider.vision_model if provider else None,
+            'supports_function_calling': provider.supports_function_calling if provider else False,
             'default_model': provider.default_model if provider else 'auto',
             'max_tokens': provider.max_tokens if provider else 4096,
             'temperature': provider.temperature if provider else 0.7,
```
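The three added lines expose the provider's vision and function-calling capabilities alongside the existing thinking flags. As a minimal sketch of how a caller might branch on these flags when assembling a request, assuming the config dict keys shown in the hunk above — the `build_request_options` helper and the `tools` argument are hypothetical, not part of this diff:

```python
from typing import Dict, List, Optional


def build_request_options(provider_config: Dict, tools: Optional[List[Dict]] = None) -> Dict:
    """Hypothetical helper: derive request options from the provider config dict
    assembled in AgentService (keys as shown in the hunk above)."""
    options: Dict = {
        "model": provider_config.get("default_model", "auto"),
        "max_tokens": provider_config.get("max_tokens", 4096),
        "temperature": provider_config.get("temperature", 0.7),
    }
    # Only attach tool definitions when the provider declares function-calling support.
    if tools and provider_config.get("supports_function_calling", False):
        options["tools"] = tools
    # Prefer the dedicated vision model when one is configured (flags added in this hunk).
    if provider_config.get("supports_vision", False) and provider_config.get("vision_model"):
        options["model"] = provider_config["vision_model"]
    return options
```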
```diff
@@ -514,17 +514,15 @@ class LLMService:
         messages: List[Dict],
         provider_config: dict,
         agent_config: dict,
-        tool_results: List[Dict],
         enable_thinking: bool = True
     ) -> Tuple[str, Optional[str]]:
         """
-        Second-stage call: return the tool execution results to the LLM
+        Second-stage call: use the full message history that already contains the tool calls and their results
 
         Args:
-            messages: conversation history (including tool calls and results)
+            messages: complete message history that already contains the assistant tool_calls and tool results
             provider_config: LLM provider configuration
             agent_config: agent configuration
-            tool_results: tool execution results [{"tool_call_id": "xxx", "content": "..."}]
 
         Returns:
             Tuple[str, Optional[str]]: (reply content, thinking process)
```
```diff
@@ -535,14 +533,8 @@ class LLMService:
         max_tokens = provider_config.get('max_tokens', 4096)
         temperature = agent_config.get('temperature_override') or provider_config.get('temperature', 0.7)
 
-        # Append the tool results to the message history
+        # The message history already contains the assistant tool_calls and the tool results, so use it directly
         final_messages = messages.copy()
-        for result in tool_results:
-            final_messages.append({
-                "role": "tool",
-                "tool_call_id": result['tool_call_id'],
-                "content": result['content']
-            })
 
         # Call the LLM to generate the final reply
         url = f"{api_base.rstrip('/')}/chat/completions"
```
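After this change the second-stage call no longer appends tool results itself; the caller is expected to hand over a history that already interleaves the assistant's `tool_calls` entry with the matching `role: "tool"` messages, in the same shape the removed loop used to append. A minimal illustrative sketch of such a `history_with_tools` list — the IDs, tool name, and contents are made up for illustration:

```python
# Illustrative only: the shape of the message history the second-stage call now expects.
history_with_tools = [
    {"role": "user", "content": "What's the weather in Berlin?"},
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": "call_abc123",
                "type": "function",
                "function": {"name": "get_weather", "arguments": '{"city": "Berlin"}'},
            }
        ],
    },
    # One tool message per tool call, matched by tool_call_id — the same fields
    # ("role", "tool_call_id", "content") the removed loop used to build.
    {"role": "tool", "tool_call_id": "call_abc123", "content": '{"temperature_c": 18}'},
]
```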
```diff
@@ -557,14 +549,14 @@ class LLMService:
             "max_tokens": max_tokens
         }
 
-        logger.info(f"Returning tool results to LLM: url={url}, model={model}")
+        logger.info(f"Returning tool results to LLM: url={url}, model={model}, message count={len(final_messages)}")
 
         try:
             async with httpx.AsyncClient(timeout=60.0) as client:
                 response = await client.post(url, headers=headers, json=payload)
 
                 if response.status_code != 200:
-                    logger.error(f"API returned an error: status={response.status_code}")
+                    logger.error(f"API returned an error: status={response.status_code}, body={response.text[:500]}")
                     response.raise_for_status()
 
                 data = response.json()
```
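The hunk ends just before the response is unpacked. Purely as a hedged sketch of how the `Tuple[str, Optional[str]]` return could be assembled from `data`, assuming an OpenAI-compatible `/chat/completions` response body — the `reasoning_content` field name is an assumption, not something this diff shows:

```python
from typing import Optional, Tuple


def extract_reply(data: dict) -> Tuple[str, Optional[str]]:
    """Hedged sketch: pull (reply content, thinking process) out of an
    OpenAI-compatible /chat/completions response body.
    'reasoning_content' is an assumed field name, not shown in this diff."""
    message = data["choices"][0]["message"]
    content = message.get("content") or ""
    thinking = message.get("reasoning_content")  # None if the provider sends no reasoning text
    return content, thinking
```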