diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3597395289f7a..b0c6373c2afae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,31 @@
# Changelog
+### [Version 1.69.1](https://github.com/lobehub/lobe-chat/compare/v1.69.0...v1.69.1)
+
+Released on **2025-03-07**
+
+#### 💄 Styles
+
+- **misc**: Add Qwen QwQ model.
+
+
+
+
+Improvements and Fixes
+
+#### Styles
+
+- **misc**: Add Qwen QwQ model, closes [#6783](https://github.com/lobehub/lobe-chat/issues/6783) ([3d3c2ce](https://github.com/lobehub/lobe-chat/commit/3d3c2ce))
+
+
+
+
+
+[](#readme-top)
+
+
+
### [Version 1.69.0](https://github.com/lobehub/lobe-chat/compare/v1.68.11...v1.69.0)
Released on **2025-03-07**
diff --git a/changelog/v1.json b/changelog/v1.json
index 557d36fc238fe..b7fe5775c3825 100644
--- a/changelog/v1.json
+++ b/changelog/v1.json
@@ -1,4 +1,11 @@
[
+ {
+ "children": {
+ "improvements": ["Add Qwen QwQ model."]
+ },
+ "date": "2025-03-07",
+ "version": "1.69.1"
+ },
{
"children": {
"features": ["Support Anthropic Context Caching."]
diff --git a/package.json b/package.json
index 5e170dbb731c9..7c5ea19c14a1a 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "@lobehub/chat",
- "version": "1.69.0",
+ "version": "1.69.1",
"description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
"keywords": [
"framework",
diff --git a/src/config/aiModels/google.ts b/src/config/aiModels/google.ts
index d3b928fe31a68..91b3886718869 100644
--- a/src/config/aiModels/google.ts
+++ b/src/config/aiModels/google.ts
@@ -118,7 +118,6 @@ const googleChatModels: AIChatModelCard[] = [
description:
       'Gemini 2.0 Flash Thinking Exp is an experimental multimodal reasoning AI model from Google, able to reason about complex problems, with new thinking capabilities.',
displayName: 'Gemini 2.0 Flash Thinking Experimental',
- enabled: true,
id: 'gemini-2.0-flash-thinking-exp',
maxOutput: 65_536,
pricing: {
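
The google.ts change only removes `enabled: true` from the Gemini 2.0 Flash Thinking Experimental card. Assuming the consuming code treats a missing `enabled` flag as not enabled by default (an assumption about behaviour outside this diff), a minimal sketch of the effect:

```ts
// Illustrative only: assumes an absent `enabled` flag means the card is not
// surfaced by default; the actual handling lives outside this diff.
interface CardSketch {
  id: string;
  enabled?: boolean;
}

const defaultEnabled = (models: CardSketch[]): CardSketch[] =>
  models.filter((model) => model.enabled === true);

// Before this change 'gemini-2.0-flash-thinking-exp' passed the filter; now it does not.
```
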
diff --git a/src/config/aiModels/groq.ts b/src/config/aiModels/groq.ts
index 811b0c5474955..2a7c63b00ba7a 100644
--- a/src/config/aiModels/groq.ts
+++ b/src/config/aiModels/groq.ts
@@ -4,6 +4,20 @@ import { AIChatModelCard } from '@/types/aiModel';
// https://console.groq.com/docs/models
const groqChatModels: AIChatModelCard[] = [
+ {
+ abilities: {
+ functionCall: true,
+ reasoning: true,
+ },
+ contextWindowTokens: 131_072,
+ displayName: 'Qwen QwQ 32B',
+ id: 'qwen-qwq-32b',
+ pricing: {
+ input: 0.29,
+ output: 0.39,
+ },
+ type: 'chat',
+ },
{
abilities: {
functionCall: true,
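
For orientation, the sketch below restates the shape these entries appear to follow, inferred only from the fields used in this diff; the `ChatModelCardSketch` interface is illustrative, and the real `AIChatModelCard` type in `@/types/aiModel` may define more fields.

```ts
// Illustrative only: a reduced shape inferred from the fields used in this diff.
// The real AIChatModelCard type in src/types/aiModel.ts may define more fields.
interface ChatModelCardSketch {
  abilities?: {
    functionCall?: boolean;
    reasoning?: boolean;
  };
  contextWindowTokens: number;
  displayName: string;
  enabled?: boolean;
  id: string;
  pricing?: {
    currency?: string; // assumed to default to USD when omitted
    input: number;
    output: number;
  };
  type: 'chat';
}

// The new Groq entry from this diff, typed against the sketch above.
const qwenQwq32b: ChatModelCardSketch = {
  abilities: { functionCall: true, reasoning: true },
  contextWindowTokens: 131_072,
  displayName: 'Qwen QwQ 32B',
  id: 'qwen-qwq-32b',
  pricing: { input: 0.29, output: 0.39 },
  type: 'chat',
};
```
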
diff --git a/src/config/aiModels/novita.ts b/src/config/aiModels/novita.ts
index f0d3b6f7075fc..a3fe89a43f6ec 100644
--- a/src/config/aiModels/novita.ts
+++ b/src/config/aiModels/novita.ts
@@ -150,6 +150,29 @@ const novitaChatModels: AIChatModelCard[] = [
},
type: 'chat',
},
+ {
+ contextWindowTokens: 64_000,
+ displayName: 'Deepseek V3 Turbo',
+ id: 'deepseek/deepseek-v3-turbo',
+ pricing: {
+ input: 0.4,
+ output: 1.3,
+ },
+ type: 'chat',
+ },
+ {
+ abilities: {
+ reasoning: true,
+ },
+ contextWindowTokens: 64_000,
+ displayName: 'Deepseek R1 Turbo',
+ id: 'deepseek/deepseek-r1-turbo',
+ pricing: {
+ input: 0.7,
+ output: 2.5,
+ },
+ type: 'chat',
+ },
{
abilities: {
reasoning: true,
@@ -378,6 +401,19 @@ const novitaChatModels: AIChatModelCard[] = [
},
type: 'chat',
},
+ {
+ abilities: {
+ reasoning: true,
+ },
+ contextWindowTokens: 32_768,
+ displayName: 'QwQ 32B',
+ id: 'qwen/qwq-32b',
+ pricing: {
+ input: 0.18,
+ output: 0.2,
+ },
+ type: 'chat',
+ },
];
export const allModels = [...novitaChatModels];
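
As a quick sanity check on the new Novita entries, a lookup over the exported `allModels` array could be sketched as below; `findModelById` is a hypothetical helper, not part of the codebase.

```ts
import { allModels } from '@/config/aiModels/novita';

// Hypothetical helper: resolve a provider model card by its id.
const findModelById = (id: string) => allModels.find((model) => model.id === id);

// The ids added in this diff should now resolve with the expected fields.
console.log(findModelById('qwen/qwq-32b')?.abilities?.reasoning); // true
console.log(findModelById('deepseek/deepseek-v3-turbo')?.contextWindowTokens); // 64000
console.log(findModelById('deepseek/deepseek-r1-turbo')?.pricing?.output); // 2.5
```
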
diff --git a/src/config/aiModels/siliconcloud.ts b/src/config/aiModels/siliconcloud.ts
index 8e79b459e5a1b..259f603654af2 100644
--- a/src/config/aiModels/siliconcloud.ts
+++ b/src/config/aiModels/siliconcloud.ts
@@ -226,9 +226,25 @@ const siliconcloudChatModels: AIChatModelCard[] = [
},
contextWindowTokens: 32_768,
description:
-      'QwQ-32B-Preview is the latest experimental research model from Qwen, focused on improving AI reasoning capabilities. Through exploring complex mechanisms such as language mixing and recursive reasoning, its main strengths include strong reasoning and analysis, mathematical, and coding abilities. At the same time, it also has limitations such as language-switching issues, reasoning loops, safety considerations, and gaps in other capabilities.',
- displayName: 'QwQ 32B Preview',
+      'QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ is capable of thinking and reasoning, achieving significantly enhanced performance on downstream tasks, especially on hard problems. QwQ-32B is a medium-sized reasoning model that achieves competitive performance against state-of-the-art reasoning models such as DeepSeek-R1 and o1-mini. The model uses techniques such as RoPE, SwiGLU, RMSNorm, and Attention QKV bias, and has a 64-layer network with 40 Q attention heads (8 KV heads in the GQA architecture).',
+ displayName: 'QwQ 32B',
enabled: true,
+ id: 'Qwen/QwQ-32B',
+ pricing: {
+ currency: 'CNY',
+ input: 1,
+ output: 4,
+ },
+ type: 'chat',
+ },
+ {
+ abilities: {
+ reasoning: true,
+ },
+ contextWindowTokens: 32_768,
+ description:
+      'QwQ-32B-Preview is the latest experimental research model from Qwen, focused on improving AI reasoning capabilities. Through exploring complex mechanisms such as language mixing and recursive reasoning, its main strengths include strong reasoning and analysis, mathematical, and coding abilities. At the same time, it also has limitations such as language-switching issues, reasoning loops, safety considerations, and gaps in other capabilities.',
+ displayName: 'QwQ 32B Preview',
id: 'Qwen/QwQ-32B-Preview',
pricing: {
currency: 'CNY',
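
The SiliconCloud QwQ entries are priced in CNY, while the Groq and Novita entries omit `currency`. Under the assumption, not confirmed by this diff, that `input` and `output` are prices per million tokens and that the currency defaults to USD when omitted, a rough cost estimate could be sketched as follows; `estimateCost` is a hypothetical helper.

```ts
// Hypothetical helper, assuming pricing.input / pricing.output are per 1M tokens.
interface PricingSketch {
  currency?: string;
  input: number;
  output: number;
}

const estimateCost = (
  pricing: PricingSketch,
  inputTokens: number,
  outputTokens: number,
) => ({
  amount:
    (inputTokens / 1_000_000) * pricing.input +
    (outputTokens / 1_000_000) * pricing.output,
  currency: pricing.currency ?? 'USD', // assumed default when currency is omitted
});

// Example: 10k prompt tokens + 2k completion tokens on SiliconCloud's Qwen/QwQ-32B.
console.log(estimateCost({ currency: 'CNY', input: 1, output: 4 }, 10_000, 2_000));
// ≈ { amount: 0.018, currency: 'CNY' }
```
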