Optimized some code

main
mula.liu 2025-09-25 11:48:02 +08:00
parent 9fb07bb435
commit 91aeeca9c8
13 changed files with 410 additions and 81 deletions

BIN
.DS_Store vendored

Binary file not shown.

BIN
app.zip

Binary file not shown.


@@ -0,0 +1,145 @@
from fastapi import APIRouter, HTTPException, Depends
from app.core.auth import get_current_admin_user
from app.core.config import LLM_CONFIG, DEFAULT_RESET_PASSWORD, MAX_FILE_SIZE, MAX_IMAGE_SIZE
from pydantic import BaseModel
import os
import json
from pathlib import Path
router = APIRouter()
# Path to the persisted config file
CONFIG_FILE = Path(__file__).parent.parent.parent.parent / "config" / "system_config.json"
class SystemConfigModel(BaseModel):
model_name: str
system_prompt: str
DEFAULT_RESET_PASSWORD: str
MAX_FILE_SIZE: int # in bytes
MAX_IMAGE_SIZE: int # in bytes
class SystemConfigResponse(BaseModel):
model_name: str
system_prompt: str
DEFAULT_RESET_PASSWORD: str
MAX_FILE_SIZE: int
MAX_IMAGE_SIZE: int
message: str = ""
def load_config_from_file():
"""从文件加载配置,如果文件不存在则返回默认配置"""
try:
if CONFIG_FILE.exists():
with open(CONFIG_FILE, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception:
pass
# Return the default configuration
return {
'model_name': LLM_CONFIG['model_name'],
'system_prompt': LLM_CONFIG['system_prompt'],
'DEFAULT_RESET_PASSWORD': DEFAULT_RESET_PASSWORD,
'MAX_FILE_SIZE': MAX_FILE_SIZE,
'MAX_IMAGE_SIZE': MAX_IMAGE_SIZE
}
def save_config_to_file(config_data):
"""将配置保存到文件"""
try:
# Ensure the config directory exists
CONFIG_FILE.parent.mkdir(parents=True, exist_ok=True)
with open(CONFIG_FILE, 'w', encoding='utf-8') as f:
json.dump(config_data, f, ensure_ascii=False, indent=2)
return True
except Exception as e:
print(f"保存配置文件失败: {e}")
return False
@router.get("/admin/system-config", response_model=SystemConfigResponse)
async def get_system_config(current_user=Depends(get_current_admin_user)):
"""
获取系统配置
只有管理员才能访问
"""
try:
# Load the config from file first, then fill gaps from the in-memory config
config = load_config_from_file()
return SystemConfigResponse(
model_name=config.get('model_name', LLM_CONFIG['model_name']),
system_prompt=config.get('system_prompt', LLM_CONFIG['system_prompt']),
DEFAULT_RESET_PASSWORD=config.get('DEFAULT_RESET_PASSWORD', DEFAULT_RESET_PASSWORD),
MAX_FILE_SIZE=config.get('MAX_FILE_SIZE', MAX_FILE_SIZE),
MAX_IMAGE_SIZE=config.get('MAX_IMAGE_SIZE', MAX_IMAGE_SIZE),
message="配置获取成功"
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"获取配置失败: {str(e)}")
@router.put("/admin/system-config", response_model=SystemConfigResponse)
async def update_system_config(
config: SystemConfigModel,
current_user=Depends(get_current_admin_user)
):
"""
更新系统配置
只有管理员才能访问
"""
try:
# Prepare the config data to persist
config_data = {
'model_name': config.model_name,
'system_prompt': config.system_prompt,
'DEFAULT_RESET_PASSWORD': config.DEFAULT_RESET_PASSWORD,
'MAX_FILE_SIZE': config.MAX_FILE_SIZE,
'MAX_IMAGE_SIZE': config.MAX_IMAGE_SIZE
}
# Persist to file
if not save_config_to_file(config_data):
raise HTTPException(status_code=500, detail="配置保存到文件失败")
# Update the runtime configuration
LLM_CONFIG['model_name'] = config.model_name
LLM_CONFIG['system_prompt'] = config.system_prompt
# Update the module-level settings
import app.core.config as config_module
config_module.DEFAULT_RESET_PASSWORD = config.DEFAULT_RESET_PASSWORD
config_module.MAX_FILE_SIZE = config.MAX_FILE_SIZE
config_module.MAX_IMAGE_SIZE = config.MAX_IMAGE_SIZE
return SystemConfigResponse(
model_name=config.model_name,
system_prompt=config.system_prompt,
DEFAULT_RESET_PASSWORD=config.DEFAULT_RESET_PASSWORD,
MAX_FILE_SIZE=config.MAX_FILE_SIZE,
MAX_IMAGE_SIZE=config.MAX_IMAGE_SIZE,
message="配置更新成功,重启服务后完全生效"
)
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"更新配置失败: {str(e)}")
# Load the persisted config at application startup
def load_system_config():
"""Called at application startup to load the saved configuration"""
try:
config = load_config_from_file()
# Update the runtime configuration
LLM_CONFIG['model_name'] = config.get('model_name', LLM_CONFIG['model_name'])
LLM_CONFIG['system_prompt'] = config.get('system_prompt', LLM_CONFIG['system_prompt'])
# Update the remaining settings
import app.core.config as config_module
config_module.DEFAULT_RESET_PASSWORD = config.get('DEFAULT_RESET_PASSWORD', DEFAULT_RESET_PASSWORD)
config_module.MAX_FILE_SIZE = config.get('MAX_FILE_SIZE', MAX_FILE_SIZE)
config_module.MAX_IMAGE_SIZE = config.get('MAX_IMAGE_SIZE', MAX_IMAGE_SIZE)
print(f"系统配置加载成功: model={config.get('model_name')}")
except Exception as e:
print(f"加载系统配置失败,使用默认配置: {e}")


@@ -1,13 +1,14 @@
from fastapi import APIRouter, HTTPException, UploadFile, File, Form, Depends, BackgroundTasks
from fastapi.responses import StreamingResponse
from app.models.models import Meeting, TranscriptSegment, TranscriptionTaskStatus, CreateMeetingRequest, UpdateMeetingRequest, SpeakerTagUpdateRequest, BatchSpeakerTagUpdateRequest, TranscriptUpdateRequest, BatchTranscriptUpdateRequest, Tag
from app.core.database import get_db_connection
- from app.core.config import BASE_DIR, UPLOAD_DIR, AUDIO_DIR, MARKDOWN_DIR, ALLOWED_EXTENSIONS, ALLOWED_IMAGE_EXTENSIONS, MAX_FILE_SIZE, MAX_IMAGE_SIZE
from app.services.qiniu_service import qiniu_service
+ from app.core.config import BASE_DIR, AUDIO_DIR, MARKDOWN_DIR, ALLOWED_EXTENSIONS, ALLOWED_IMAGE_EXTENSIONS
+ import app.core.config as config_module
from app.services.llm_service import LLMService
from app.services.async_transcription_service import AsyncTranscriptionService
from app.services.async_llm_service import async_llm_service
- from app.core.auth import get_current_user, get_optional_current_user
+ from app.core.auth import get_current_user
from typing import List, Optional
from datetime import datetime
from pydantic import BaseModel
@@ -398,11 +399,13 @@ async def upload_audio(
detail=f"Unsupported file type. Allowed types: {', '.join(ALLOWED_EXTENSIONS)}"
)
- # Check file size
- if audio_file.size > MAX_FILE_SIZE:
+ # Check file size using dynamic config
+ max_file_size = getattr(config_module, 'MAX_FILE_SIZE', 100 * 1024 * 1024)
+ if audio_file.size > max_file_size:
+ max_size_mb = max_file_size // (1024 * 1024)
raise HTTPException(
status_code=400,
- detail="File size exceeds 100MB limit"
+ detail=f"File size exceeds {max_size_mb}MB limit"
)
# Check whether the meeting already has an audio file and transcription record
@@ -646,11 +649,13 @@ async def upload_image(
detail=f"Unsupported image type. Allowed types: {', '.join(ALLOWED_IMAGE_EXTENSIONS)}"
)
- # Check file size
- if image_file.size > MAX_IMAGE_SIZE:
+ # Check file size using dynamic config
+ max_image_size = getattr(config_module, 'MAX_IMAGE_SIZE', 10 * 1024 * 1024)
+ if image_file.size > max_image_size:
+ max_size_mb = max_image_size // (1024 * 1024)
raise HTTPException(
status_code=400,
- detail="Image size exceeds 10MB limit"
+ detail=f"Image size exceeds {max_size_mb}MB limit"
)
# Check if meeting exists and user has permission
@@ -767,6 +772,47 @@ def batch_update_transcript(meeting_id: int, request: BatchTranscriptUpdateReque
raise HTTPException(status_code=500, detail=f"Failed to update transcript: {str(e)}")
# AI summary endpoints
@router.post("/meetings/{meeting_id}/generate-summary-stream")
def generate_meeting_summary_stream(meeting_id: int, request: GenerateSummaryRequest, current_user: dict = Depends(get_current_user)):
"""生成会议AI总结流式输出"""
try:
# Verify that the meeting exists
with get_db_connection() as connection:
cursor = connection.cursor(dictionary=True)
cursor.execute("SELECT meeting_id FROM meetings WHERE meeting_id = %s", (meeting_id,))
if not cursor.fetchone():
raise HTTPException(status_code=404, detail="Meeting not found")
# Create the streaming generator
def generate_stream():
import json
for chunk in llm_service.generate_meeting_summary_stream(meeting_id, request.user_prompt):
if chunk.startswith("error:"):
# On error, send a JSON-escaped error event and stop
yield f"data: {json.dumps({'error': chunk[6:].strip()}, ensure_ascii=False)}\n\n"
break
else:
# Send a normal content chunk
yield f"data: {json.dumps({'content': chunk}, ensure_ascii=False)}\n\n"
# Send the end-of-stream marker
yield "data: {\"done\": true}\n\n"
return StreamingResponse(
generate_stream(),
media_type="text/plain",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Type": "text/plain; charset=utf-8"
}
)
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to start stream generation: {str(e)}")
@router.post("/meetings/{meeting_id}/generate-summary")
def generate_meeting_summary(meeting_id: int, request: GenerateSummaryRequest, current_user: dict = Depends(get_current_user)):
"""生成会议AI总结"""
@@ -887,12 +933,12 @@ def get_llm_task_status(task_id: str, current_user: dict = Depends(get_current_u
"""获取LLM任务状态包括进度"""
try:
status = async_llm_service.get_task_status(task_id)
if status.get('status') == 'not_found':
raise HTTPException(status_code=404, detail="Task not found")
return status
except HTTPException:
raise
except Exception as e:
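Note that the summary endpoint above emits `data: {...}` lines over a chunked plain-text response rather than a formal `text/event-stream`. A minimal client sketch follows; the base URL and token are placeholders, meeting 38 is assumed to exist, and the request body shape follows GenerateSummaryRequest as used in the endpoint.

# Hypothetical client that reassembles the streamed summary.
import json
import requests

BASE_URL = "http://localhost:8000"  # placeholder
HEADERS = {"Authorization": "Bearer <jwt>"}  # placeholder

resp = requests.post(
    f"{BASE_URL}/api/meetings/38/generate-summary-stream",
    headers=HEADERS,
    json={"user_prompt": ""},
    stream=True,
)
summary = ""
for line in resp.iter_lines(decode_unicode=True):
    if not line or not line.startswith("data: "):
        continue  # skip the blank separator lines between events
    event = json.loads(line[len("data: "):])
    if "error" in event:
        raise RuntimeError(event["error"])
    if event.get("done"):
        break
    summary += event.get("content", "")
print(summary)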


@@ -3,7 +3,7 @@ from fastapi import APIRouter, HTTPException, Depends
from app.models.models import UserInfo, PasswordChangeRequest, UserListResponse, CreateUserRequest, UpdateUserRequest, RoleInfo
from app.core.database import get_db_connection
from app.core.auth import get_current_user
- from app.core.config import DEFAULT_RESET_PASSWORD
+ import app.core.config as config_module
import hashlib
import datetime
import re
@@ -48,7 +48,7 @@ def create_user(request: CreateUserRequest, current_user: dict = Depends(get_cur
raise HTTPException(status_code=400, detail="用户名已存在")
# Use provided password or default password
- password = request.password if request.password else DEFAULT_RESET_PASSWORD
+ password = request.password if request.password else config_module.DEFAULT_RESET_PASSWORD
hashed_password = hash_password(password)
# Insert new user
@@ -150,7 +150,7 @@ def reset_password(user_id: int, current_user: dict = Depends(get_current_user))
raise HTTPException(status_code=404, detail="用户不存在")
# Hash password
- hashed_password = hash_password(DEFAULT_RESET_PASSWORD)
+ hashed_password = hash_password(config_module.DEFAULT_RESET_PASSWORD)
# Update user password
query = "UPDATE users SET password_hash = %s WHERE user_id = %s"


@@ -38,6 +38,19 @@ def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(securit
return user
def get_current_admin_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
"""获取当前管理员用户信息的依赖函数"""
user = get_current_user(credentials)
# Check whether the user is an admin (role_id = 1)
if user.get('role_id') != 1:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Admin access required",
)
return user
def get_optional_current_user(request: Request) -> Optional[dict]:
"""可选的用户认证(不强制要求登录)"""
auth_header = request.headers.get("Authorization")


@@ -60,8 +60,7 @@ QWEN_API_KEY = os.getenv('QWEN_API_KEY', 'sk-c2bf06ea56b4491ea3d1e37fdb472b8f')
# LLM configuration - Alibaba Qwen3 LLM
LLM_CONFIG = {
'model_name': os.getenv('LLM_MODEL_NAME', 'qwen-plus'),
- 'api_url': os.getenv('LLM_API_URL', 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation'),
- 'max_tokens': int(os.getenv('LLM_MAX_TOKENS', '2000')),
+ 'time_out': int(os.getenv('LLM_TIMEOUT', '120')),
'temperature': float(os.getenv('LLM_TEMPERATURE', '0.7')),
'top_p': float(os.getenv('LLM_TOP_P', '0.9')),
'system_prompt': """你是一个专业的会议记录分析助手。请根据提供的会议转录内容,生成简洁明了的会议总结。


@@ -1,28 +1,87 @@
import json
- import requests
- from typing import Optional, Dict, List
- from app.core.config import LLM_CONFIG, QWEN_API_KEY
+ import dashscope
+ from http import HTTPStatus
+ from typing import Optional, Dict, List, Generator
+ import app.core.config as config_module
from app.core.database import get_db_connection
class LLMService:
def __init__(self):
- self.api_key = QWEN_API_KEY
- self.model_name = LLM_CONFIG["model_name"]
- self.api_url = LLM_CONFIG["api_url"]
- self.system_prompt = LLM_CONFIG["system_prompt"]
- self.max_tokens = LLM_CONFIG["max_tokens"]
- self.temperature = LLM_CONFIG["temperature"]
- self.top_p = LLM_CONFIG["top_p"]
+ # Set the dashscope API key
+ dashscope.api_key = config_module.QWEN_API_KEY
+ @property
+ def model_name(self):
+ """Dynamically resolve the model name"""
+ return config_module.LLM_CONFIG["model_name"]
+ @property
+ def system_prompt(self):
+ """Dynamically resolve the system prompt"""
+ return config_module.LLM_CONFIG["system_prompt"]
+ @property
+ def time_out(self):
+ """Dynamically resolve the request timeout"""
+ return config_module.LLM_CONFIG["time_out"]
+ @property
+ def temperature(self):
+ """Dynamically resolve temperature"""
+ return config_module.LLM_CONFIG["temperature"]
+ @property
+ def top_p(self):
+ """Dynamically resolve top_p"""
+ return config_module.LLM_CONFIG["top_p"]
- def generate_meeting_summary(self, meeting_id: int, user_prompt: str = "") -> Optional[Dict]:
+ def generate_meeting_summary_stream(self, meeting_id: int, user_prompt: str = "") -> Generator[str, None, None]:
"""
- Generate a meeting summary
+ Generate a meeting summary (streaming)
Args:
meeting_id: meeting ID
user_prompt: additional user prompt
+ Yields:
+ str: streamed content chunks
"""
+ try:
+ # Fetch the meeting transcript
+ transcript_text = self._get_meeting_transcript(meeting_id)
+ if not transcript_text:
+ yield "error: unable to fetch the meeting transcript"
+ return
+ # Build the full prompt
+ full_prompt = self._build_prompt(transcript_text, user_prompt)
+ # Call the LLM API in streaming mode
+ full_content = ""
+ for chunk in self._call_llm_api_stream(full_prompt):
+ if chunk.startswith("error:"):
+ yield chunk
+ return
+ full_content += chunk
+ yield chunk
+ # Save the complete summary to the database
+ if full_content:
+ self._save_summary_to_db(meeting_id, full_content, user_prompt)
+ except Exception as e:
+ print(f"Streaming meeting summary error: {e}")
+ yield f"error: {str(e)}"
+ def generate_meeting_summary(self, meeting_id: int, user_prompt: str = "") -> Optional[Dict]:
+ """
+ Generate a meeting summary (non-streaming, kept for backward compatibility)
+ Args:
+ meeting_id: meeting ID
+ user_prompt: additional user prompt
Returns:
dict containing the summary, or None on failure
"""
@@ -31,13 +90,13 @@ class LLMService:
transcript_text = self._get_meeting_transcript(meeting_id)
if not transcript_text:
return {"error": "无法获取会议转录内容"}
# Build the full prompt
full_prompt = self._build_prompt(transcript_text, user_prompt)
# Call the LLM API
response = self._call_llm_api(full_prompt)
if response:
# Save the summary to the database
summary_id = self._save_summary_to_db(meeting_id, response, user_prompt)
@@ -48,7 +107,7 @@ class LLMService:
}
else:
return {"error": "大模型API调用失败"}
except Exception as e:
print(f"生成会议总结错误: {e}")
return {"error": str(e)}
@@ -95,52 +154,53 @@ class LLMService:
return prompt
- def _call_llm_api(self, prompt: str) -> Optional[str]:
- """Call the Alibaba Qwen3 LLM API"""
- headers = {
- "Authorization": f"Bearer {self.api_key}",
- "Content-Type": "application/json"
- }
- data = {
- "model": self.model_name,
- "input": {
- "messages": [
- {
- "role": "user",
- "content": prompt
- }
- ]
- },
- "parameters": {
- "max_tokens": self.max_tokens,
- "temperature": self.temperature,
- "top_p": self.top_p,
- "incremental_output": False
- }
- }
+ def _call_llm_api_stream(self, prompt: str) -> Generator[str, None, None]:
+ """Call the Alibaba Qwen3 LLM API (streaming)"""
try:
- response = requests.post(self.api_url, headers=headers, json=data, timeout=60)
- response.raise_for_status()
- result = response.json()
- # Handle the Alibaba Qwen API response format
- if result.get("output") and result["output"].get("text"):
- return result["output"]["text"]
- elif result.get("output") and result["output"].get("choices"):
- return result["output"]["choices"][0]["message"]["content"]
+ responses = dashscope.Generation.call(
+ model=self.model_name,
+ prompt=prompt,
+ stream=True,
+ timeout=self.time_out,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ incremental_output=True # enable incremental output mode
+ )
+ for response in responses:
+ if response.status_code == HTTPStatus.OK:
+ # Incremental output content
+ new_content = response.output.get('text', '')
+ if new_content:
+ yield new_content
+ else:
+ error_msg = f"Request failed with status code: {response.status_code}, Error: {response.message}"
+ print(error_msg)
+ yield f"error: {error_msg}"
+ break
+ except Exception as e:
+ error_msg = f"Streaming LLM API call error: {e}"
+ print(error_msg)
+ yield f"error: {error_msg}"
+ def _call_llm_api(self, prompt: str) -> Optional[str]:
+ """Call the Alibaba Qwen3 LLM API (non-streaming, kept for backward compatibility)"""
+ try:
+ response = dashscope.Generation.call(
+ model=self.model_name,
+ prompt=prompt,
+ timeout=self.time_out,
+ temperature=self.temperature,
+ top_p=self.top_p
+ )
+ if response.status_code == HTTPStatus.OK:
+ return response.output.get('text', '')
else:
print(f"API响应格式错误: {result}")
print(f"API调用失败: {response.status_code}, {response.message}")
return None
- except requests.exceptions.RequestException as e:
- print(f"API request error: {e}")
- return None
- except json.JSONDecodeError as e:
- print(f"JSON parse error: {e}")
- return None
except Exception as e:
print(f"调用大模型API错误: {e}")
return None


@@ -0,0 +1,7 @@
{
"model_name": "qwen-plus",
"system_prompt": "你是一个专业的会议记录分析助手。请根据提供的会议转录内容,生成简洁明了的会议总结。\n\n总结应该包括以下几部分生成MD二级目录\n1. 会议概述 - 简要说明会议的主要目的和背景(生成MD引用)\n2. 主要讨论点 - 列出会议中讨论的重要话题和内容\n3. 决策事项 - 明确记录会议中做出的决定和结论\n4. 待办事项 - 列出需要后续跟进的任务和责任人\n5. 关键信息 - 其他重要的信息点\n\n输出要求\n- 保持客观中性,不添加个人观点\n- 使用简洁、准确的中文表达\n- 按重要性排序各项内容\n- 如果某个部分没有相关内容,可以说明\"无相关内容\"\n- 总字数控制在500字以内",
"DEFAULT_RESET_PASSWORD": "123456",
"MAX_FILE_SIZE": 209715200,
"MAX_IMAGE_SIZE": 10485760
}
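For reference, the byte values decode to 200 MB (209715200 = 200 × 1024 × 1024) and 10 MB (10485760 = 10 × 1024 × 1024), so the persisted file limit is larger than the 100 MB fallback hard-coded in the upload endpoint above.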


@@ -39,8 +39,7 @@ services:
# LLM configuration
- QWEN_API_KEY=sk-c2bf06ea56b4491ea3d1e37fdb472b8f
- LLM_MODEL_NAME=qwen-plus
- LLM_API_URL=https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation
- LLM_MAX_TOKENS=2000
- LLM_TIMEOUT=120
- LLM_TEMPERATURE=0.7
- LLM_TOP_P=0.9
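LLM_API_URL and LLM_MAX_TOKENS are dropped here because the service now goes through the dashscope SDK, which resolves the Qwen endpoint itself and no longer passes an explicit max_tokens; the new LLM_TIMEOUT feeds the time_out entry in LLM_CONFIG.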


@@ -2,9 +2,10 @@ import uvicorn
from fastapi import FastAPI, Request, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
- from app.api.endpoints import auth, users, meetings, tags
+ from app.api.endpoints import auth, users, meetings, tags, admin
from app.core.config import UPLOAD_DIR, API_CONFIG, MAX_FILE_SIZE
from app.services.async_llm_service import async_llm_service
+ from app.api.endpoints.admin import load_system_config
import os
app = FastAPI(
@@ -13,6 +14,9 @@ app = FastAPI(
version="1.0.2"
)
+ # Load the system configuration
+ load_system_config()
# Add the CORS middleware
app.add_middleware(
CORSMiddleware,
@@ -31,6 +35,7 @@ app.include_router(auth.router, prefix="/api", tags=["Authentication"])
app.include_router(users.router, prefix="/api", tags=["Users"])
app.include_router(meetings.router, prefix="/api", tags=["Meetings"])
app.include_router(tags.router, prefix="/api", tags=["Tags"])
+ app.include_router(admin.router, prefix="/api", tags=["Admin"])
@app.get("/")
def read_root():

stream_test.html 100644 (1 line)

File diff suppressed because one or more lines are too long

test_stream_llm.py 100644 (54 lines)

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
"""
测试流式LLM服务
"""
import sys
import os
sys.path.append(os.path.dirname(__file__))
from app.services.llm_service import LLMService
def test_stream_generation():
"""测试流式生成功能"""
print("=== 测试流式LLM生成 ===")
llm_service = LLMService()
test_meeting_id = 38 # use an existing meeting ID
test_user_prompt = "请重点关注决策事项和待办任务"
print(f"开始为会议 {test_meeting_id} 生成流式总结...")
print("输出内容:")
print("-" * 50)
full_content = ""
chunk_count = 0
try:
for chunk in llm_service.generate_meeting_summary_stream(test_meeting_id, test_user_prompt):
if chunk.startswith("error:"):
print(f"\n生成过程中出现错误: {chunk}")
break
else:
print(chunk, end='', flush=True)
full_content += chunk
chunk_count += 1
print(f"\n\n-" * 50)
print(f"流式生成完成!")
print(f"总共接收到 {chunk_count} 个数据块")
print(f"完整内容长度: {len(full_content)} 字符")
# Compare against the traditional (non-streaming) method
print("\n=== Comparison: traditional generation ===")
result = llm_service.generate_meeting_summary(test_meeting_id, test_user_prompt)
if result.get("error"):
print(f"传统方式生成失败: {result['error']}")
else:
print("传统方式生成成功!")
print(f"内容长度: {len(result['content'])} 字符")
except Exception as e:
print(f"\n测试过程中出现异常: {e}")
if __name__ == '__main__':
test_stream_generation()
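Presumably the script is run from the backend root (so that the app package resolves) against a database that actually contains meeting 38, e.g. `python test_stream_llm.py`.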