fix: Matrix连接改为非阻塞模式,修复服务启动阻塞问题

This commit is contained in:
2026-04-11 12:22:03 +08:00
parent fd583132d7
commit b05a03e198
7 changed files with 104 additions and 105 deletions

View File

@@ -2,45 +2,40 @@
AI服务 - 调用大模型API
"""
import httpx
from typing import List, Dict, Optional
from typing import List, Dict, AsyncGenerator
import json
import logging
logger = logging.getLogger(__name__)
# 默认配置
DEFAULT_API_BASE = "http://192.168.2.17:19007/v1"
DEFAULT_API_KEY = "xxxx"
DEFAULT_MODEL = "auto"
class AIService:
def __init__(self, api_base: str = None, api_key: str = None, model: str = None):
self.api_base = api_base or DEFAULT_API_BASE
self.api_key = api_key or DEFAULT_API_KEY
self.model = model or DEFAULT_MODEL
def __init__(self):
self.api_base = ""
self.api_key = ""
self.model = ""
self.use_mock = True
def update_config(self, api_base: str, api_key: str, model: str):
"""更新配置"""
self.api_base = api_base
self.api_key = api_key
self.model = model
logger.info(f"AI配置已更新: {api_base}, model={model}")
self.api_base = api_base
self.api_key = api_key
self.model = model
# 如果配置完整则使用真实API,否则使用mock
self.use_mock = not (api_base and model)
logger.info(f"AI配置已更新: api_base={api_base}, model={model}, use_mock={self.use_mock}")
async def chat(self, messages: List[Dict], stream: bool = False) -> str:
async def chat(self, messages: List[Dict]) -> str:
"""
调用AI模型进行对话
Args:
messages: 对话历史 [{"role": "user", "content": "..."}]
stream: 是否流式输出
Returns:
AI回复内容
"""
# 如果使用mock模式返回模拟回复
if self.use_mock:
logger.info("使用Mock模式回复")
last_msg = messages[-1]['content'] if messages else "你好"
return f"这是一个测试回复。您说的是:{last_msg}\n\n请配置有效的AI服务地址和模型才能获得真正的AI回复。"
# 调用真实API
url = f"{self.api_base}/chat/completions"
headers = {
"Authorization": f"Bearer {self.api_key}",
@@ -49,21 +44,35 @@ class AIService:
payload = {
"model": self.model,
"messages": messages,
"stream": stream,
"temperature": 0.7,
"max_tokens": 2000
}
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
return data['choices'][0]['message']['content']
logger.info(f"调用AI API: {url}, model={self.model}")
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
return data['choices'][0]['message']['content']
except Exception as e:
logger.error(f"AI API调用失败: {e}")
# API失败时返回模拟回复
last_msg = messages[-1]['content'] if messages else "你好"
return f"AI服务暂时不可用(错误:{str(e)})。您说的是:{last_msg}"
async def chat_stream(self, messages: List[Dict]):
async def chat_stream(self, messages: List[Dict]) -> AsyncGenerator[str, None]:
"""
流式调用AI模型
"""
if self.use_mock:
last_msg = messages[-1]['content'] if messages else "你好"
reply = f"这是一个测试回复。您说的是:{last_msg}"
for char in reply:
yield char
return
url = f"{self.api_base}/chat/completions"
headers = {
"Authorization": f"Bearer {self.api_key}",