Compare commits

...

1 Commit

Author SHA1 Message Date
44cca59ec6 fix: 修正思考模式 - 使用thinking参数而非切换模型 2026-04-26 11:47:12 +08:00
2 changed files with 17 additions and 15 deletions

View File

@@ -5,7 +5,6 @@ const CONFIG = {
apiUrl: 'https://open.bigmodel.cn/api/paas/v4/chat/completions',
apiKey: '2259e33a1357460abe17919aaf81e73d.K44a8LPQTmFM5PKm',
model: 'glm-4.5-air',
thinkingModel: 'glm-z1-flash', // 智谱思考模型
maxTokens: 2048
};
@@ -418,8 +417,19 @@ async function streamGenerate(userMsgIndex) {
contentEl.innerHTML = '<span class="streaming-cursor">▌</span>';
try {
// 根据开关选择模型
const model = enableThinking ? CONFIG.thinkingModel : CONFIG.model;
// 构建请求体 - 统一使用 glm-4.5-air通过 thinking 参数控制
const requestBody = {
model: CONFIG.model,
messages: currentConversation.messages.slice(0, aiMessageIndex).map(m => ({
role: m.role,
content: m.content
})),
max_tokens: CONFIG.maxTokens,
stream: true,
thinking: {
type: enableThinking ? 'enabled' : 'disabled'
}
};
const response = await fetch(CONFIG.apiUrl, {
method: 'POST',
@@ -427,15 +437,7 @@ async function streamGenerate(userMsgIndex) {
'Content-Type': 'application/json',
'Authorization': `Bearer ${CONFIG.apiKey}`
},
body: JSON.stringify({
model: model,
messages: currentConversation.messages.slice(0, aiMessageIndex).map(m => ({
role: m.role,
content: m.content
})),
max_tokens: CONFIG.maxTokens,
stream: true
})
body: JSON.stringify(requestBody)
});
if (!response.ok) {

View File

@@ -8,12 +8,12 @@
<meta http-equiv="Pragma" content="no-cache">
<meta http-equiv="Expires" content="0">
<title>AI助手</title>
<link rel="stylesheet" href="style.css?v=2.2.2">
<link rel="stylesheet" href="style.css?v=2.2.3">
<link rel="manifest" href="manifest.json">
</head>
<body>
<div id="app"></div>
<script src="marked.min.js?v=2.2.2"></script>
<script src="app.js?v=2.2.2"></script>
<script src="marked.min.js?v=2.2.3"></script>
<script src="app.js?v=2.2.3"></script>
</body>
</html>