# imetting_backend/test/test_stream_llm.py
#!/usr/bin/env python3
"""
Test script for the streaming LLM service.
"""
import sys
import os
# Prepend this file's directory so the `app.services...` import below
# resolves when the script is executed directly (not via the package).
sys.path.append(os.path.dirname(__file__))
from app.services.llm_service import LLMService
def test_stream_generation():
    """Exercise streaming LLM summary generation, then run the traditional
    (non-streaming) path on the same meeting for comparison.

    Prints each streamed chunk as it arrives, then summary statistics
    (chunk count, total length). Any ``error:``-prefixed chunk aborts the
    stream; unexpected exceptions are caught and reported.
    """
    print("=== 测试流式LLM生成 ===")
    llm_service = LLMService()
    test_meeting_id = 38  # assumes meeting 38 exists in the backing store — TODO confirm
    test_user_prompt = "请重点关注决策事项和待办任务"
    print(f"开始为会议 {test_meeting_id} 生成流式总结...")
    print("输出内容:")
    print("-" * 50)
    full_content = ""
    chunk_count = 0
    try:
        for chunk in llm_service.generate_meeting_summary_stream(test_meeting_id, test_user_prompt):
            # The service signals failure in-band with an "error:"-prefixed chunk.
            if chunk.startswith("error:"):
                print(f"\n生成过程中出现错误: {chunk}")
                break
            print(chunk, end='', flush=True)
            full_content += chunk
            chunk_count += 1
        # BUGFIX: was `print(f"\n\n-" * 50)`, which repeated the whole
        # "\n\n-" string 50 times; intent is two blank lines + a divider.
        print("\n\n" + "-" * 50)
        print("流式生成完成!")
        print(f"总共接收到 {chunk_count} 个数据块")
        print(f"完整内容长度: {len(full_content)} 字符")
        # Comparison run: same meeting/prompt through the non-streaming API.
        print("\n=== 对比测试传统生成方式 ===")
        result = llm_service.generate_meeting_summary(test_meeting_id, test_user_prompt)
        if result.get("error"):
            print(f"传统方式生成失败: {result['error']}")
        else:
            print("传统方式生成成功!")
            print(f"内容长度: {len(result['content'])} 字符")
    except Exception as e:
        # Best-effort test script: report the failure rather than crash.
        print(f"\n测试过程中出现异常: {e}")
# Script entry point: run the streaming-generation test when executed directly.
if __name__ == "__main__":
    test_stream_generation()