Compare commits

..

5 Commits

3 changed files with 249 additions and 27 deletions

View File

@@ -3,7 +3,7 @@ AI对话系统 v2.0.0 - 主应用
支持大模型池、Agent管理、渠道独立绑定、思考功能开关
"""
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Depends, HTTPException, Request
from fastapi.responses import HTMLResponse
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from sqlalchemy.orm import Session
@@ -13,6 +13,9 @@ import json
import logging
from datetime import datetime
import os
import base64
import uuid
import time
# 使用新的数据模型
from models_v2 import (
@@ -34,7 +37,14 @@ app = FastAPI(title="AI对话系统 v2.0", version="2.0.0")
# 静态文件和模板
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UPLOADS_DIR = os.path.join(BASE_DIR, "uploads", "images")
# 确保上传目录存在
os.makedirs(UPLOADS_DIR, exist_ok=True)
# 静态文件服务
app.mount("/static", StaticFiles(directory=os.path.join(BASE_DIR, "static")), name="static")
app.mount("/uploads", StaticFiles(directory=os.path.join(BASE_DIR, "uploads")), name="uploads")
templates = Jinja2Templates(directory=os.path.join(BASE_DIR, "templates"))
# WebSocket连接管理
@@ -575,6 +585,56 @@ async def perform_search(data: dict, db: Session = Depends(get_db)):
return {"success": False, "message": "不支持的搜索提供商"}
# ==================== 图片上传 API ====================
@app.post("/api/v2/upload-image")
async def upload_image(data: dict):
    """Upload a base64-encoded image and persist it under UPLOADS_DIR.

    Args:
        data: JSON body with 'image' — either a raw base64 string or a
              ``data:image/...;base64,`` data URL.

    Returns:
        dict: ``{'success': True, 'path': url, 'name': filename}`` on
        success, or ``{'success': False, 'message': reason}`` on failure.
    """
    # Extensions we are willing to write to disk; anything else falls back to
    # .png so a crafted MIME type (e.g. 'image/svg+xml') cannot inject odd
    # characters into the generated filename.
    allowed_exts = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'bmp'}
    try:
        image_data = data.get('image')
        if not image_data:
            return {"success": False, "message": "缺少图片数据"}
        # Parse the base64 payload; a data URL carries the MIME type.
        if image_data.startswith('data:image/'):
            header, base64_content = image_data.split(',', 1)
            mime = header.split(':')[1].split(';')[0]  # e.g. 'image/png'
            ext = mime.split('/')[1] if '/' in mime else 'png'
        else:
            base64_content = image_data
            ext = 'png'
        if ext.lower() not in allowed_exts:
            ext = 'png'
        # Cheap pre-check: base64 inflates data by ~4/3, so a payload longer
        # than this cannot decode to <= 10MB — reject before decoding to avoid
        # holding an oversized blob in memory.
        if len(base64_content) > 10 * 1024 * 1024 * 4 // 3 + 4:
            return {"success": False, "message": "图片大小超过10MB限制"}
        image_bytes = base64.b64decode(base64_content)
        # Enforce the exact 10MB limit on the decoded bytes.
        if len(image_bytes) > 10 * 1024 * 1024:
            return {"success": False, "message": "图片大小超过10MB限制"}
        # Unique, collision-resistant filename: timestamp + random hex id.
        timestamp = int(time.time())
        unique_id = uuid.uuid4().hex[:8]
        safe_name = f"{timestamp}_{unique_id}.{ext}"
        file_path = os.path.join(UPLOADS_DIR, safe_name)
        with open(file_path, 'wb') as f:
            f.write(image_bytes)
        # Return the URL path served by the /uploads static mount.
        url_path = f"/uploads/images/{safe_name}"
        logger.info(f"图片已保存: {file_path}, URL: {url_path}")
        return {"success": True, "path": url_path, "name": safe_name}
    except Exception as e:
        logger.error(f"图片上传失败: {e}")
        return {"success": False, "message": str(e)}
# ==================== 对话 API保留原有 ====================
@app.get("/api/conversations")
@@ -787,6 +847,7 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
# 处理文件内容,添加到消息
image_contents = [] # 图片内容(用于视觉模型)
text_contents = [] # 文本文件内容
image_paths = [] # 图片服务器路径(用于历史记录显示)
if files:
for f in files:
if f.get('type') and f['type'].startswith('image/'):
@@ -796,6 +857,13 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
'type': f['type'],
'data': f.get('content', '') # base64 数据
})
# 记录服务器路径(用于历史记录)
if f.get('serverPath'):
image_paths.append({
'name': f['name'],
'type': f['type'],
'url': f['serverPath'] # 服务器文件路径
})
# 不添加文件名文本,图片信息保存在 extra_data 中
elif f.get('content'):
# 文本文件:直接添加内容,不带文件名前缀
@@ -808,10 +876,16 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
for content in text_contents:
message += f"\n\n{content}"
# 保存图片信息到 extra_data用于历史记录
# 保存图片和文件信息到 extra_data(用于历史记录)
extra_data_for_msg = None
if image_contents:
# 只保存图片 URL不保存完整 base64
if image_paths:
# 图片保存服务器路径URL历史记录可以显示
extra_data_for_msg = {
'images': image_paths,
'files': [{'name': f['name'], 'type': f['type']} for f in files if not f['type'].startswith('image/')]
}
elif image_contents:
# 没有服务器路径但有图片(可能上传失败)
extra_data_for_msg = {
'images': [{'name': i['name'], 'type': i['type']} for i in image_contents],
'files': [{'name': f['name'], 'type': f['type']} for f in files if not f['type'].startswith('image/')]
@@ -969,7 +1043,8 @@ async def websocket_endpoint(websocket: WebSocket, user_id: str):
messages=history,
provider_config=agent_config['provider'],
agent_config=agent_config['agent'],
enable_thinking=enable_thinking
enable_thinking=enable_thinking,
images=image_contents # 传递图片数据给多模态模型
)
logger.info(f"LLM响应: response长度={len(response)}, thinking长度={len(thinking_content) if thinking_content else 0}")

View File

@@ -98,11 +98,19 @@ class LLMService:
messages: List[Dict],
provider_config: dict,
agent_config: dict,
enable_thinking: bool = True
enable_thinking: bool = True,
images: List[Dict] = None # 图片数据列表 [{'name', 'type', 'data': base64}]
) -> Tuple[str, Optional[str]]:
"""
调用AI模型进行对话
Args:
messages: 对话历史
provider_config: LLM Provider配置
agent_config: Agent配置
enable_thinking: 是否启用思考
images: 图片数据列表(用于多模态模型)
Returns:
Tuple[str, Optional[str]]: (回复内容, 思考过程)
"""
@@ -123,6 +131,22 @@ class LLMService:
if final_messages and final_messages[0]['role'] != 'system':
final_messages.insert(0, {"role": "system", "content": system_prompt})
# 如果有图片,构建多模态消息(只修改最后一条用户消息)
if images and len(images) > 0:
# 找到最后一条用户消息
for i in range(len(final_messages) - 1, -1, -1):
if final_messages[i]['role'] == 'user':
original_text = final_messages[i]['content']
# 构建多模态内容
multimodal_content = [{"type": "text", "text": original_text if original_text else "请描述这张图片"}]
for img in images:
multimodal_content.append({
"type": "image_url",
"image_url": {"url": img['data']} # base64 data URL
})
final_messages[i]['content'] = multimodal_content
break
thinking_content = None
# 处理思考功能
@@ -208,7 +232,7 @@ class LLMService:
temperature: float = 0.7
) -> str:
"""调用API"""
url = f"{api_base}/chat/completions"
url = f"{api_base.rstrip('/')}/chat/completions"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
@@ -220,13 +244,33 @@ class LLMService:
"max_tokens": max_tokens
}
# 打印请求详情(调试)
logger.info(f"调用LLM: url={url}, model={model}")
logger.info(f"消息数量: {len(messages)}, 第一条消息类型: {type(messages[0].get('content'))}")
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
return data['choices'][0]['message']['content']
try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(url, headers=headers, json=payload)
# 检查HTTP状态
if response.status_code != 200:
logger.error(f"API返回错误: status={response.status_code}, body={response.text[:500]}")
response.raise_for_status()
data = response.json()
# 检查响应格式
if 'choices' not in data or len(data['choices']) == 0:
logger.error(f"API响应格式错误: {data}")
raise ValueError("API响应格式错误缺少choices")
return data['choices'][0]['message']['content']
except httpx.HTTPStatusError as e:
logger.error(f"HTTP错误: {e.response.status_code}, {e.response.text}")
raise
except Exception as e:
logger.error(f"API调用异常: {type(e).__name__}: {e}")
raise
async def chat_stream(
self,

View File

@@ -146,6 +146,17 @@
.modal-buttons { display: flex; gap: 12px; justify-content: flex-end; }
.modal-buttons button { padding: 8px 16px; border-radius: 8px; cursor: pointer; }
/* 图片放大弹窗 */
.image-lightbox { position: fixed; top: 0; left: 0; right: 0; bottom: 0; background: rgba(0,0,0,0.9); display: none; align-items: center; justify-content: center; z-index: 2000; cursor: zoom-out; }
.image-lightbox.show { display: flex; }
.image-lightbox img { max-width: 90%; max-height: 90%; border-radius: 8px; box-shadow: 0 0 30px rgba(255,255,255,0.2); }
.image-lightbox-close { position: absolute; top: 20px; right: 20px; width: 40px; height: 40px; background: rgba(255,255,255,0.2); border-radius: 50%; display: flex; align-items: center; justify-content: center; color: #fff; font-size: 20px; cursor: pointer; transition: background 0.2s; }
.image-lightbox-close:hover { background: rgba(255,255,255,0.3); }
/* 对话中的图片可点击 */
.uploaded-image img { cursor: zoom-in; transition: transform 0.2s; }
.uploaded-image img:hover { transform: scale(1.02); }
.welcome { display: flex; flex-direction: column; align-items: center; justify-content: center; height: 100%; color: #666; }
.welcome h2 { font-size: 28px; margin-bottom: 16px; color: #333; }
@@ -211,6 +222,34 @@
</div>
</div>
<!-- 图片放大弹窗 -->
<div class="image-lightbox" id="imageLightbox" onclick="closeImageLightbox()">
<div class="image-lightbox-close"><i class="ri-close-line"></i></div>
<img id="lightboxImage" src="" alt="放大图片">
</div>
<!-- 隐藏的图片上传API处理 -->
<script>
// Upload a base64-encoded image to the server; resolves to the stored
// file path on success, or null on any failure (network or API error).
async function uploadImageToServer(base64Data, fileName) {
    const payload = { image: base64Data, name: fileName };
    try {
        const resp = await fetch('/api/v2/upload-image', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(payload)
        });
        const result = await resp.json();
        return result.success ? result.path : null;
    } catch (err) {
        console.error('图片上传失败:', err);
        return null;
    }
}
</script>
<!-- Markdown渲染库 -->
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
<script>
@@ -393,19 +432,50 @@
html += '</div>';
div.innerHTML = html;
// 如果是用户消息且有搜索结果在设置innerHTML后追加
// 如果是用户消息且有额外数据(搜索结果、图片、文件)在设置innerHTML后追加
if (role === 'user' && extraData) {
console.log('Processing extraData for user message:', extraData);
console.log('search_results exists:', extraData.search_results);
const bodyDiv = div.querySelector('.message-body');
// 处理图片如果有服务器URL显示图片
if (extraData.images && extraData.images.length > 0) {
let imagesHtml = '<div class="history-images" style="margin-top:8px;display:flex;gap:8px;flex-wrap:wrap;">';
for (const img of extraData.images) {
if (img.url) {
// 有服务器URL显示真实图片
imagesHtml += `<div class="history-image" style="display:inline-block;">
<img src="${img.url}" style="max-width:300px;max-height:200px;border-radius:8px;cursor:zoom-in;" onclick="openImageLightbox('${img.url}')">
</div>`;
} else {
// 没有URL显示占位符
imagesHtml += `<div class="history-image-placeholder" style="padding:8px 12px;background:#f0f0f0;border-radius:8px;display:flex;align-items:center;gap:6px;font-size:13px;color:#666;">
<i class="ri-image-line" style="color:#10a37f;"></i>
<span>${escapeHtml(img.name || '图片')}</span>
</div>`;
}
}
imagesHtml += '</div>';
if (bodyDiv) bodyDiv.insertAdjacentHTML('beforeend', imagesHtml);
}
// 处理文本文件
if (extraData.files && extraData.files.length > 0) {
let filesHtml = '<div class="history-files" style="margin-top:8px;">';
for (const f of extraData.files) {
filesHtml += `<div class="history-file-placeholder" style="padding:6px 10px;background:#f5f5f5;border-radius:6px;margin-bottom:4px;display:flex;align-items:center;gap:6px;font-size:12px;color:#666;">
<i class="ri-file-text-line" style="color:#10a37f;"></i>
<span>${escapeHtml(f.name || '文件')}</span>
</div>`;
}
filesHtml += '</div>';
if (bodyDiv) bodyDiv.insertAdjacentHTML('beforeend', filesHtml);
}
// 处理搜索结果
if (extraData.search_results && extraData.search_results.length > 0) {
console.log('Building search results HTML for', extraData.search_results.length, 'results');
const searchHtml = buildSearchResultsHtml(extraData.search_results, extraData.search_query || content);
const bodyDiv = div.querySelector('.message-body');
console.log('bodyDiv found:', bodyDiv != null);
if (bodyDiv) {
bodyDiv.insertAdjacentHTML('beforeend', searchHtml);
console.log('Search results HTML inserted');
}
if (bodyDiv) bodyDiv.insertAdjacentHTML('beforeend', searchHtml);
}
}
@@ -852,13 +922,15 @@
lastSentFiles = files.map(f => ({
name: f.name,
type: f.type,
content: f.content
content: f.content,
serverPath: f.serverPath // 服务器路径(用于历史记录)
}));
for (const f of files) {
if (f.type.startsWith('image/')) {
// 图片直接显示
html += `<div class="uploaded-image" style="margin-bottom:8px"><img src="${f.content}" style="max-width:300px;border-radius:8px"></div>`;
// 图片直接显示用服务器路径或base64
const imgSrc = f.serverPath || f.content;
html += `<div class="uploaded-image" style="margin-bottom:8px"><img src="${f.content}" style="max-width:300px;border-radius:8px" onclick="openImageLightbox('${imgSrc}')"></div>`;
} else {
// 文本文件显示名称和内容摘要
html += `<div class="uploaded-file" style="padding:8px;background:#f5f5f5;border-radius:6px;margin-bottom:8px">`;
@@ -883,7 +955,7 @@
}
// 文件上传处理
function handleFileUpload(event) {
async function handleFileUpload(event) {
const files = event.target.files;
const previewArea = document.getElementById('filePreviewArea');
@@ -892,13 +964,23 @@
// 读取文件内容
const reader = new FileReader();
reader.onload = (e) => {
reader.onload = async (e) => {
const base64Content = e.target.result;
// 图片:先上传到服务器保存
let serverPath = null;
if (file.type.startsWith('image/')) {
serverPath = await uploadImageToServer(base64Content, file.name);
console.log('图片上传结果:', serverPath);
}
const fileData = {
id: fileId,
name: file.name,
type: file.type,
size: file.size,
content: e.target.result
content: base64Content, // base64数据用于多模态模型
serverPath: serverPath // 服务器路径(用于历史记录显示)
};
pendingFiles.push(fileData);
@@ -909,8 +991,9 @@
if (file.type.startsWith('image/')) {
previewItem.classList.add('image-preview');
// 预览用本地base64显示更快
previewItem.innerHTML = `
<img src="${e.target.result}" alt="${file.name}">
<img src="${base64Content}" alt="${file.name}" style="cursor:pointer" onclick="openImageLightbox('${serverPath || base64Content}')">
<button class="file-remove" onclick="removeFile('${fileId}')"><i class="ri-close-line"></i></button>
`;
} else {
@@ -1014,6 +1097,26 @@
}
document.getElementById('newPhraseInput').addEventListener('keydown', e => { if (e.key === 'Enter') addPhrase(); if (e.key === 'Escape') hidePhraseModal(); });
// 图片放大弹窗
// Show the fullscreen lightbox with the given image source.
function openImageLightbox(imageSrc) {
    const img = document.getElementById('lightboxImage');
    img.src = imageSrc;
    document.getElementById('imageLightbox').classList.add('show');
}

// Hide the fullscreen lightbox.
function closeImageLightbox() {
    document.getElementById('imageLightbox').classList.remove('show');
}

// Close the lightbox when the Escape key is pressed.
document.addEventListener('keydown', (evt) => {
    if (evt.key === 'Escape') {
        closeImageLightbox();
    }
});
</script>
</body>
</html>