功能模块: - 仪表盘: 模型/GPU/CPU统计、开源/闭源分布、快速操作 - 模型管理: 查看模型列表、筛选搜索、添加新模型 - GPU管理: 查看GPU列表、厂商筛选、添加新GPU - CPU管理: 查看CPU列表 - 数据导出: 导出模型/GPU数据为JSON 技术: - Flask + Tailwind CSS - 解析TypeScript数据文件 - 端口: 19006
254 lines · 7.4 KiB · Python
"""
|
|
参数百科网站 - 后台管理系统
|
|
"""
|
|
|
|
from flask import Flask, render_template, jsonify, request
|
|
from flask_cors import CORS
|
|
import json
|
|
from pathlib import Path
|
|
from datetime import datetime
|
|
|
|
app = Flask(__name__)
|
|
CORS(app)
|
|
|
|
# 数据目录 - 读取 Next.js 项目的数据文件
|
|
DATA_DIR = Path(__file__).parent.parent / 'src' / 'data'
|
|
|
|
def load_models():
    """Load model entries from the Next.js TypeScript data file.

    Returns:
        A list of model dicts, or an empty list when the file is absent.
    """
    source = DATA_DIR / 'initial.ts'
    if not source.exists():
        return []
    # The data lives in a TS module, so it is parsed as text rather than imported.
    return parse_ts_data(source.read_text(encoding='utf-8'), 'models')
|
|
|
|
def load_gpus():
    """Load GPU entries from the Next.js TypeScript data file.

    Returns:
        A list of GPU dicts, or an empty list when the file is absent.
    """
    source = DATA_DIR / 'initial.ts'
    if not source.exists():
        return []
    return parse_ts_data(source.read_text(encoding='utf-8'), 'gpus')
|
|
|
|
def load_cpus():
    """Load CPU entries from the Next.js TypeScript data file.

    Returns:
        A list of CPU dicts, or an empty list when the file is absent.
    """
    source = DATA_DIR / 'initial.ts'
    if not source.exists():
        return []
    return parse_ts_data(source.read_text(encoding='utf-8'), 'cpus')
|
|
|
|
def parse_ts_data(content, data_type):
    """Extract one exported array from a TypeScript data module.

    Performs a best-effort, line-oriented parse of
    ``export const <name>: T[] = [ {...}, ... ]`` object literals.
    Only flat, one-level objects are handled; nested objects or multi-line
    values are not supported (matching the original parser's limits).

    Args:
        content: Full text of the TypeScript source file.
        data_type: Which export to extract: 'models', 'gpus' or 'cpus'.

    Returns:
        A list of dicts, one per top-level object literal; an empty list
        when data_type is unknown or the export is not found.
    """
    import re

    # Lazy capture stops at the first line consisting of a closing bracket.
    patterns = {
        'models': r'export const models: Model\[\]\s*=\s*\[([\s\S]*?)\n\]',
        'gpus': r'export const gpus: Gpu\[\]\s*=\s*\[([\s\S]*?)\n\]',
        'cpus': r'export const cpus: Cpu\[\]\s*=\s*\[([\s\S]*?)\n\]',
    }

    pattern = patterns.get(data_type)
    if not pattern:
        return []

    match = re.search(pattern, content)
    if not match:
        return []

    items = []
    current_item = {}
    in_object = 0  # nesting depth; key/value lines are only read inside an object

    for line in match.group(1).split('\n'):
        line = line.strip()
        if line == '{':
            current_item = {}
            in_object += 1
        elif line in ('},', '}'):
            if current_item:
                items.append(current_item)
            current_item = {}
            in_object = max(0, in_object - 1)
        elif ':' in line and in_object > 0:
            key, _, raw = line.partition(':')
            current_item[key.strip()] = _parse_ts_value(raw.strip().rstrip(','))

    return items


def _parse_ts_value(value):
    """Coerce a single TypeScript literal token to a Python value.

    Strings lose their quotes; null/true/false map to None/True/False;
    single-line arrays of quoted strings become lists; numbers become
    int/float. Anything unrecognized is returned unchanged as a string.
    """
    if value.startswith(('"', "'")):
        return value.strip('"\'')
    if value == 'null':
        return None
    if value == 'true':
        return True
    if value == 'false':
        return False
    if value.startswith('['):
        if value == '[]':
            return []
        parts = value.strip('[]').replace('"', '').split(',')
        return [v.strip() for v in parts if v.strip()]
    try:
        # Ints unless a '.' appears; unparseable tokens stay as strings.
        return int(value) if '.' not in value else float(value)
    except ValueError:
        return value
|
|
|
|
def save_models(models):
    """Persist model changes to a JSON file.

    NOTE: instead of rewriting the TypeScript source, edits are stored in a
    sibling ``data/models.json`` file to keep this admin tool simple.
    """
    out_dir = Path(__file__).parent.parent / 'data'
    out_dir.mkdir(exist_ok=True)
    payload = json.dumps(models, ensure_ascii=False, indent=2)
    (out_dir / 'models.json').write_text(payload, encoding='utf-8')
|
|
|
|
def save_gpus(gpus):
    """Persist GPU changes to a JSON file (``data/gpus.json``)."""
    out_dir = Path(__file__).parent.parent / 'data'
    out_dir.mkdir(exist_ok=True)
    payload = json.dumps(gpus, ensure_ascii=False, indent=2)
    (out_dir / 'gpus.json').write_text(payload, encoding='utf-8')
|
|
|
|
# ============ 页面路由 ============
|
|
|
|
@app.route('/')
def index():
    """Render the dashboard page (stats and quick actions)."""
    template = 'index.html'
    return render_template(template)
|
|
|
|
@app.route('/models')
def models_page():
    """Render the model management page."""
    template = 'models.html'
    return render_template(template)
|
|
|
|
@app.route('/gpus')
def gpus_page():
    """Render the GPU management page."""
    template = 'gpus.html'
    return render_template(template)
|
|
|
|
@app.route('/cpus')
def cpus_page():
    """Render the CPU management page."""
    template = 'cpus.html'
    return render_template(template)
|
|
|
|
# ============ API路由 ============
|
|
|
|
@app.route('/api/stats')
def api_stats():
    """Return aggregate dashboard statistics as JSON.

    Covers counts per category, the open/closed-source model split, and
    NVIDIA/AMD GPU tallies.
    """
    models = load_models()
    gpus = load_gpus()
    cpus = load_cpus()

    open_count = sum(1 for m in models if m.get('isOpenSource'))
    # Tally only the two vendors the dashboard displays.
    vendor_counts = {'NVIDIA': 0, 'AMD': 0}
    for gpu in gpus:
        vendor = gpu.get('manufacturer')
        if vendor in vendor_counts:
            vendor_counts[vendor] += 1

    return jsonify({
        'models_count': len(models),
        'gpus_count': len(gpus),
        'cpus_count': len(cpus),
        'open_source_models': open_count,
        'closed_source_models': len(models) - open_count,
        'nvidia_gpus': vendor_counts['NVIDIA'],
        'amd_gpus': vendor_counts['AMD'],
    })
|
|
|
|
@app.route('/api/models')
def api_models():
    """Return the full model list as JSON."""
    return jsonify(load_models())
|
|
|
|
@app.route('/api/models', methods=['POST'])
def api_add_model():
    """Create a model from the posted JSON body and persist it.

    Returns:
        JSON ``{'success': True, 'model': <new model>}``.

    Robustness fix: ``request.json`` raises when the body is missing or not
    declared as JSON; ``get_json(silent=True)`` yields ``None`` instead,
    which is normalized to ``{}`` so the field defaults below apply.
    """
    data = request.get_json(silent=True) or {}

    models = load_models()

    new_model = {
        'name': data.get('name', ''),
        # Derive a URL slug from the name when none is supplied.
        'slug': data.get('slug', data.get('name', '').lower().replace(' ', '-')),
        'organization': data.get('organization', ''),
        'parametersCount': data.get('parametersCount'),
        'architecture': data.get('architecture'),
        'contextLength': data.get('contextLength'),
        'isOpenSource': data.get('isOpenSource', False),
        'license': data.get('license'),
        'benchmarkMmlu': data.get('benchmarkMmlu'),
        'benchmarkHumaneval': data.get('benchmarkHumaneval'),
        'minVramFp16': data.get('minVramFp16'),
        'minVramInt8': data.get('minVramInt8'),
        'minVramInt4': data.get('minVramInt4'),
    }

    models.append(new_model)
    save_models(models)

    return jsonify({'success': True, 'model': new_model})
|
|
|
|
@app.route('/api/gpus')
def api_gpus():
    """Return the full GPU list as JSON."""
    return jsonify(load_gpus())
|
|
|
|
@app.route('/api/gpus', methods=['POST'])
def api_add_gpu():
    """Create a GPU from the posted JSON body and persist it.

    Returns:
        JSON ``{'success': True, 'gpu': <new gpu>}``.

    Robustness fix: ``request.json`` raises when the body is missing or not
    declared as JSON; ``get_json(silent=True)`` yields ``None`` instead,
    which is normalized to ``{}`` so the field defaults below apply.
    """
    data = request.get_json(silent=True) or {}

    gpus = load_gpus()

    new_gpu = {
        'name': data.get('name', ''),
        # Derive a URL slug from the name when none is supplied.
        'slug': data.get('slug', data.get('name', '').lower().replace(' ', '-')),
        'manufacturer': data.get('manufacturer', 'NVIDIA'),
        'architecture': data.get('architecture'),
        'cudaCores': data.get('cudaCores'),
        'tensorCores': data.get('tensorCores'),
        'memoryGb': data.get('memoryGb'),
        'memoryType': data.get('memoryType'),
        'memoryBandwidthGbps': data.get('memoryBandwidthGbps'),
        'fp32Tflops': data.get('fp32Tflops'),
        'fp16Tflops': data.get('fp16Tflops'),
        'tdpWatts': data.get('tdpWatts'),
        'priceUsd': data.get('priceUsd'),
        'recommendedFor': data.get('recommendedFor', []),
    }

    gpus.append(new_gpu)
    save_gpus(gpus)

    return jsonify({'success': True, 'gpu': new_gpu})
|
|
|
|
@app.route('/api/cpus')
def api_cpus():
    """Return the full CPU list as JSON."""
    return jsonify(load_cpus())
|
|
|
|
@app.route('/api/export/models')
def api_export_models():
    """Export the model data as a JSON payload."""
    return jsonify(load_models())
|
|
|
|
@app.route('/api/export/gpus')
def api_export_gpus():
    """Export the GPU data as a JSON payload."""
    return jsonify(load_gpus())
|
|
|
|
if __name__ == '__main__':
    # Startup banner. The two URL lines previously used needless f-strings
    # (no placeholders); printed output is unchanged.
    separator = "=" * 50
    print(separator)
    print("参数百科网站 - 后台管理系统")
    print(separator)
    print("访问地址: http://localhost:19006")
    print("前台地址: http://localhost:3000")
    print(separator)

    # NOTE(review): debug=True enables the Werkzeug debugger/reloader while
    # binding to 0.0.0.0 — do not expose this combination in production.
    app.run(host='0.0.0.0', port=19006, debug=True)