diff --git a/app.py b/app.py index 4de5104..e028271 100644 --- a/app.py +++ b/app.py @@ -20,6 +20,8 @@ DATA_DIR.mkdir(exist_ok=True) MODELS_FILE = DATA_DIR / 'models.json' GPUS_FILE = DATA_DIR / 'gpus.json' CPUS_FILE = DATA_DIR / 'cpus.json' +CATEGORIES_FILE = DATA_DIR / 'categories.json' +KNOWLEDGE_FILE = DATA_DIR / 'knowledge.json' def load_data(file_path): """加载JSON数据""" @@ -337,14 +339,140 @@ def api_stats(): models = load_data(MODELS_FILE) gpus = load_data(GPUS_FILE) cpus = load_data(CPUS_FILE) + categories = load_data(CATEGORIES_FILE) + knowledge = load_data(KNOWLEDGE_FILE) return jsonify({ 'models_count': len(models), 'gpus_count': len(gpus), 'cpus_count': len(cpus), + 'categories_count': len(categories), + 'knowledge_count': len(knowledge), 'latest_models': sorted(models, key=lambda x: x.get('created_at', ''), reverse=True)[:5] }) +# ============ 分类管理API ============ + +@app.route('/api/categories') +def api_categories(): + """获取分类列表""" + categories = load_data(CATEGORIES_FILE) + return jsonify(categories) + +@app.route('/api/categories', methods=['POST']) +def api_create_category(): + """创建新分类""" + data = request.get_json() + categories = load_data(CATEGORIES_FILE) + + import uuid + data['id'] = uuid.uuid4().hex[:12] + data['created_at'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + categories.append(data) + save_data(CATEGORIES_FILE, categories) + + return jsonify(data) + +@app.route('/api/categories/<category_id>', methods=['PUT']) +def api_update_category(category_id): + """更新分类""" + data = request.get_json() + categories = load_data(CATEGORIES_FILE) + + category = next((c for c in categories if c['id'] == category_id), None) + if not category: + return jsonify({'error': 'Category not found'}), 404 + + category.update(data) + category['updated_at'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + save_data(CATEGORIES_FILE, categories) + + return jsonify(category) + +@app.route('/api/categories/<category_id>', methods=['DELETE']) +def api_delete_category(category_id): + 
"""删除分类""" + categories = load_data(CATEGORIES_FILE) + categories = [c for c in categories if c['id'] != category_id] + save_data(CATEGORIES_FILE, categories) + + return jsonify({'success': True}) + +# ============ 知识库管理API ============ + +@app.route('/api/knowledge') +def api_knowledge(): + """获取知识列表""" + knowledge = load_data(KNOWLEDGE_FILE) + + # 搜索过滤 + keyword = request.args.get('q', '').strip().lower() + if keyword: + knowledge = [k for k in knowledge if keyword in k.get('title', '').lower() or + keyword in k.get('content', '').lower()] + + # 分类过滤 + category = request.args.get('category', '') + if category: + knowledge = [k for k in knowledge if k.get('category') == category] + + return jsonify(sorted(knowledge, key=lambda x: x.get('order', 0))) + +@app.route('/api/knowledge/') +def api_knowledge_detail(knowledge_id): + """获取单个知识详情""" + knowledge = load_data(KNOWLEDGE_FILE) + item = next((k for k in knowledge if k['id'] == knowledge_id), None) + + if not item: + return jsonify({'error': 'Knowledge not found'}), 404 + + return jsonify(item) + +@app.route('/api/knowledge', methods=['POST']) +def api_create_knowledge(): + """创建新知识""" + data = request.get_json() + knowledge = load_data(KNOWLEDGE_FILE) + + import uuid + data['id'] = uuid.uuid4().hex[:12] + data['created_at'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + if 'order' not in data: + data['order'] = len(knowledge) + + knowledge.append(data) + save_data(KNOWLEDGE_FILE, knowledge) + + return jsonify(data) + +@app.route('/api/knowledge/', methods=['PUT']) +def api_update_knowledge(knowledge_id): + """更新知识""" + data = request.get_json() + knowledge = load_data(KNOWLEDGE_FILE) + + item = next((k for k in knowledge if k['id'] == knowledge_id), None) + if not item: + return jsonify({'error': 'Knowledge not found'}), 404 + + item.update(data) + item['updated_at'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + save_data(KNOWLEDGE_FILE, knowledge) + + return jsonify(item) + +@app.route('/api/knowledge/', 
methods=['DELETE']) +def api_delete_knowledge(knowledge_id): + """删除知识""" + knowledge = load_data(KNOWLEDGE_FILE) + knowledge = [k for k in knowledge if k['id'] != knowledge_id] + save_data(KNOWLEDGE_FILE, knowledge) + + return jsonify({'success': True}) + if __name__ == '__main__': print("=" * 50) print("ParamHub - 参数百科") diff --git a/data/categories.json b/data/categories.json new file mode 100644 index 0000000..cd95bc8 --- /dev/null +++ b/data/categories.json @@ -0,0 +1,8 @@ +[ + {"id": "ai-models", "name": "AI模型", "icon": "ri-robot-line", "color": "blue", "description": "大语言模型、图像模型等AI模型参数", "order": 1}, + {"id": "gpus", "name": "GPU显卡", "icon": "ri-cpu-line", "color": "green", "description": "NVIDIA、AMD等GPU显卡规格参数", "order": 2}, + {"id": "cpus", "name": "CPU处理器", "icon": "ri-memory-line", "color": "purple", "description": "Intel、AMD等CPU处理器参数", "order": 3}, + {"id": "phones", "name": "手机", "icon": "ri-smartphone-line", "color": "orange", "description": "各品牌手机参数规格", "order": 4}, + {"id": "laptops", "name": "电脑", "icon": "ri-macbook-line", "color": "teal", "description": "笔记本电脑、台式机参数", "order": 5}, + {"id": "cars", "name": "汽车", "icon": "ri-car-line", "color": "red", "description": "新能源汽车、燃油车参数", "order": 6} +] \ No newline at end of file diff --git a/data/knowledge.json b/data/knowledge.json new file mode 100644 index 0000000..b4c8750 --- /dev/null +++ b/data/knowledge.json @@ -0,0 +1,9 @@ +[ + {"id": "k001", "title": "什么是参数量?", "category": "ai-models", "icon": "ri-calculator-line", "content": "参数量(Parameters)是衡量大模型规模的指标,表示模型中权重参数的数量。例如 GPT-3 有 175B 参数,即约1750亿个参数。", "detail": "参数量决定了模型的容量和表达能力。一般来说,参数量越大,模型能力越强,但也需要更多计算资源。\n\n常见规模分类:\n- 小模型:<1B (适合边缘设备)\n- 中模型:1B-10B (消费级GPU可运行)\n- 大模型:10B-100B (需要多GPU)\n- 超大模型:>100B (需要数据中心)", "order": 1}, + {"id": "k002", "title": "什么是上下文长度?", "category": "ai-models", "icon": "ri-text-wrap", "content": "上下文长度(Context Length)是模型能处理的输入文本最大长度。更长的上下文意味着模型可以理解更长的文档或对话历史。", "detail": "常见长度:\n- 4K:传统长度,适合简单对话\n- 32K:中等长度,适合长文档\n- 
128K:超长上下文,如GPT-4 Turbo\n- 200K:Claude 3的极限长度", "order": 2}, + {"id": "k003", "title": "什么是量化?", "category": "ai-models", "icon": "ri-scales-3-line", "content": "量化(Quantization)是将模型参数从高精度转换为低精度,减少显存占用和计算量。如FP16→INT8→INT4,精度损失可控,资源节省显著。", "detail": "量化效果:\n- FP32→FP16: 显存减半,精度基本不变\n- FP16→INT8: 显存再减半,精度略降\n- INT8→INT4: 显存再减半,需特殊技术\n\n推荐工具:llama.cpp、GPTQ、AWQ等", "order": 3}, + {"id": "k004", "title": "什么是MMLU?", "category": "ai-models", "icon": "ri-bar-chart-box-line", "content": "MMLU(Massive Multitask Language Understanding)是评估大模型综合能力的标准测试集,覆盖57个学科领域。", "detail": "分数参考:\n- 60-70%:入门级,如GPT-3\n- 70-80%:中等水平,如Llama 2 70B\n- 80-90%:优秀水平,如GPT-4、Claude 3", "order": 4}, + {"id": "k005", "title": "如何计算显存需求?", "category": "gpus", "icon": "ri-memory-line", "content": "模型显存需求 ≈ 参数量 × 每参数字节数 × 1.3(含KV Cache开销)", "detail": "计算公式:\n- FP32: 参数量 × 4字节 × 1.3\n- FP16: 参数量 × 2字节 × 1.3\n- INT8: 参数量 × 1字节 × 1.3\n- INT4: 参数量 × 0.5字节 × 1.3\n\n例如:7B模型FP16加载需要约 7 × 2 × 1.3 ≈ 18GB显存", "order": 1}, + {"id": "k006", "title": "GPU架构演进", "category": "gpus", "icon": "ri-history-line", "content": "NVIDIA GPU架构从Fermi到Hopper,每一代都有显著提升。了解架构有助于选择合适的GPU。", "detail": "主要架构:\n- Volta (2017): V100, 引入Tensor Core\n- Turing (2018): RTX 20系列, RT Core\n- Ampere (2020): A100, RTX 30系列\n- Hopper (2022): H100, FP8支持\n- Ada Lovelace (2022): RTX 40系列, L40S", "order": 2}, + {"id": "k007", "title": "CPU核心数选择", "category": "cpus", "icon": "ri-database-2-line", "content": "CPU核心数的选择取决于应用场景。更多核心适合并行任务,但单核性能也很重要。", "detail": "场景推荐:\n- 办公/日常:4-6核足够\n- 开发/编译:8-16核\n- 服务器/虚拟化:16-64核\n- 高性能计算:64核以上\n\n注意:AI训练主要依赖GPU,CPU主要用于数据预处理", "order": 1} +] \ No newline at end of file diff --git a/templates/admin.html b/templates/admin.html index 1214d1f..b53815e 100644 --- a/templates/admin.html +++ b/templates/admin.html @@ -6,10 +6,15 @@ 后台管理 - ParamHub + -