Added full-text search

main
mula.liu 2026-01-23 15:00:03 +08:00
parent 08f4115889
commit 2004ccce4e
22 changed files with 1318 additions and 352 deletions

Binary image, Before: 274 KiB (preview not shown)

Binary image, After: 6.9 KiB (preview not shown)

Binary image, After: 18 KiB (preview not shown)

View File

@@ -28,6 +28,7 @@ from app.schemas.response import success_response
from app.services.storage import storage_service
from app.services.log_service import log_service
from app.services.notification_service import notification_service
from app.services.search_service import search_service
from app.core.enums import OperationType
router = APIRouter()
@@ -145,6 +146,11 @@ async def save_file(
# 写入文件内容
await storage_service.write_file(file_path, file_data.content)
# 更新搜索索引 (仅限 Markdown)
if file_data.path.endswith('.md'):
file_title = Path(file_data.path).stem
await search_service.update_doc(project_id, file_data.path, file_title, file_data.content)
# 记录操作日志
await log_service.log_file_operation(
db=db,
@@ -188,6 +194,11 @@ async def operate_file(
if operation.action == "delete":
# 删除文件或文件夹
await storage_service.delete_file(current_path)
# 删除索引
if operation.path.endswith('.md'):
await search_service.remove_doc(project_id, operation.path)
# 记录日志
await log_service.log_file_operation(
db=db,
@@ -216,6 +227,21 @@ async def operate_file(
raise HTTPException(status_code=400, detail="缺少新路径参数")
new_path = storage_service.get_secure_path(project.storage_key, operation.new_path)
await storage_service.rename_file(current_path, new_path)
# 更新索引:先删除旧路径的索引,再按新路径重建(Whoosh 更新需要文件内容,因此重新读取)
# 目录重命名涉及递归索引更新,暂不处理,可通过重建索引功能补齐
if operation.path.endswith('.md') and operation.new_path.endswith('.md'):
# 简单处理:读取新文件内容并更新索引
try:
content = await storage_service.read_file(new_path)
file_title = Path(operation.new_path).stem
await search_service.remove_doc(project_id, operation.path)
await search_service.update_doc(project_id, operation.new_path, file_title, content)
except Exception:
# 忽略索引更新错误
pass
# 记录日志
await log_service.log_file_operation(
db=db,
@@ -245,6 +271,17 @@ async def operate_file(
raise HTTPException(status_code=400, detail="缺少目标路径参数")
new_path = storage_service.get_secure_path(project.storage_key, operation.new_path)
await storage_service.rename_file(current_path, new_path)
# 更新索引
if operation.path.endswith('.md') and operation.new_path.endswith('.md'):
try:
content = await storage_service.read_file(new_path)
file_title = Path(operation.new_path).stem
await search_service.remove_doc(project_id, operation.path)
await search_service.update_doc(project_id, operation.new_path, file_title, content)
except Exception:
pass
# 记录日志
await log_service.log_file_operation(
db=db,
@@ -297,6 +334,12 @@ async def operate_file(
# 创建文件
content = operation.content or ""
await storage_service.write_file(current_path, content)
# 更新索引
if operation.path.endswith('.md'):
file_title = Path(operation.path).stem
await search_service.update_doc(project_id, operation.path, file_title, content)
# 记录日志
await log_service.log_file_operation(
db=db,
@@ -374,11 +417,6 @@ async def upload_document(
):
"""
上传文档文件（PDF等）到项目目录
Args:
project_id: 项目ID
file: 上传的文件
target_dir: 目标目录相对路径,如 "docs" 或 "docs/manuals",空字符串表示根目录
""" """
project = await check_project_access(project_id, current_user, db, require_write=True) project = await check_project_access(project_id, current_user, db, require_write=True)
@@ -421,10 +459,6 @@ async def get_document_file(
):
"""
获取文档文件（PDF等）- 支持 HTTP Range 请求
Args:
project_id: 项目ID
path: 文件相对路径,如 "manual.pdf" 或 "docs/guide.pdf"
""" """
import re import re
import aiofiles import aiofiles
@ -565,6 +599,14 @@ async def import_documents(
relative_path = f"{target_path}/{file.filename}" if target_path else file.filename relative_path = f"{target_path}/{file.filename}" if target_path else file.filename
imported_files.append(relative_path) imported_files.append(relative_path)
# 更新索引
try:
text_content = content.decode('utf-8')
file_title = Path(file.filename).stem
await search_service.update_doc(project_id, relative_path, file_title, text_content)
except Exception:
pass
# 记录日志
await log_service.log_file_operation(
db=db,
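Note: the same index-update snippet recurs above at save, create, rename, and import. A minimal sketch of that pattern pulled into one best-effort helper (the helper name is hypothetical; search_service.update_doc is the method introduced by this commit):

from pathlib import Path

async def index_markdown(search_service, project_id: int, rel_path: str, content: str) -> None:
    # Only Markdown files are indexed; the title is the filename stem.
    if not rel_path.endswith(".md"):
        return
    try:
        await search_service.update_doc(project_id, rel_path, Path(rel_path).stem, content)
    except Exception:
        # Indexing is best-effort: a failed index update must not break the file operation.
        pass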

View File

@@ -1,21 +1,16 @@
"""
通知管理 API (Redis版)
"""
from fastapi import APIRouter, Depends, HTTPException
from typing import List, Union
from datetime import datetime
from app.core.database import get_db
from app.core.deps import get_current_user
from app.models.user import User
from app.schemas.notification import (
NotificationResponse,
NotificationCreate,
)
from app.schemas.response import success_response
from app.services.notification_service import notification_service
@@ -27,97 +22,75 @@ router = APIRouter()
async def get_notifications(
page: int = 1,
page_size: int = 20,
current_user: User = Depends(get_current_user),
):
"""获取当前用户的通知列表 (从 Redis 读取)"""
try:
skip = (page - 1) * page_size
notifications = await notification_service.get_user_notifications(
user_id=current_user.id,
limit=page_size,
skip=skip
)
# 获取总数 (ZCARD)
from app.core.redis_client import get_redis
redis = get_redis()
total = 0
if redis:
key = notification_service._get_order_key(current_user.id)
total = await redis.zcard(key)
return {
"code": 200,
"message": "success",
"data": notifications,
"total": total,
"page": page,
"page_size": page_size
}
except Exception as e:
# 降级处理,防止 500
print(f"Error fetching notifications: {e}")
return {
"code": 200,
"message": "success",
"data": [],
"total": 0,
"page": page,
"page_size": page_size
}
@router.get("/unread-count", response_model=dict) @router.get("/unread-count", response_model=dict)
async def get_unread_count( async def get_unread_count(
current_user: User = Depends(get_current_user), current_user: User = Depends(get_current_user),
db: AsyncSession = Depends(get_db)
): ):
"""获取未读通知数量""" """获取未读通知数量"""
result = await db.execute( try:
select(func.count()).select_from(Notification).where( count = await notification_service.get_unread_count(current_user.id)
Notification.user_id == current_user.id,
Notification.is_read == 0
)
)
count = result.scalar()
return success_response(data={"unread_count": count}) return success_response(data={"unread_count": count})
except Exception as e:
print(f"Error fetching unread count: {e}")
return success_response(data={"unread_count": 0})
@router.put("/{notification_id}/read", response_model=dict) @router.put("/{notification_id}/read", response_model=dict)
async def mark_as_read( async def mark_as_read(
notification_id: int, notification_id: str,
current_user: User = Depends(get_current_user), current_user: User = Depends(get_current_user),
db: AsyncSession = Depends(get_db)
): ):
"""标记单条通知为已读""" """标记单条通知为已读"""
result = await db.execute( await notification_service.mark_read(current_user.id, notification_id)
select(Notification).where(
Notification.id == notification_id,
Notification.user_id == current_user.id
)
)
notification = result.scalar_one_or_none()
if not notification:
raise HTTPException(status_code=404, detail="通知不存在")
if notification.is_read == 0:
notification.is_read = 1
notification.read_at = datetime.now()
await db.commit()
return success_response(message="已标记为已读") return success_response(message="已标记为已读")
@router.put("/read-all", response_model=dict) @router.put("/read-all", response_model=dict)
async def mark_all_as_read( async def mark_all_as_read(
current_user: User = Depends(get_current_user), current_user: User = Depends(get_current_user),
db: AsyncSession = Depends(get_db)
): ):
"""标记所有通知为已读""" """标记所有通知为已读"""
await db.execute( await notification_service.mark_all_read(current_user.id)
update(Notification)
.where(Notification.user_id == current_user.id, Notification.is_read == 0)
.values(is_read=1, read_at=datetime.now())
)
await db.commit()
return success_response(message="全部标记为已读") return success_response(message="全部标记为已读")
@@ -125,14 +98,14 @@ async def mark_all_as_read(
async def send_system_notification(
notification_in: NotificationCreate,
current_user: User = Depends(get_current_user),
):
"""发送系统通知(仅限超级管理员)"""
if not current_user.is_superuser:
raise HTTPException(status_code=403, detail="只有管理员可以发送系统通知")
# 这里我们传 db=None,因为 service 已经不需要 db 写操作了
await notification_service.create_notification(
db=None,
user_id=notification_in.user_id,
title=notification_in.title,
content=notification_in.content,
@@ -140,5 +113,4 @@ async def send_system_notification(
category="system",
link=notification_in.link
)
return success_response(message="系统通知发送成功")

View File

@@ -32,6 +32,22 @@ from app.core.enums import OperationType, ResourceType
router = APIRouter()
def get_document_count(storage_key: str) -> int:
"""计算项目中的文档数量(.md 和 .pdf"""
try:
project_path = storage_service.get_secure_path(storage_key)
if not project_path.exists():
return 0
md_count = len(list(project_path.rglob("*.md")))
pdf_count = len(list(project_path.rglob("*.pdf")))
# 排除 _assets 目录下的文件
assets_md = len(list((project_path / "_assets").rglob("*.md"))) if (project_path / "_assets").exists() else 0
assets_pdf = len(list((project_path / "_assets").rglob("*.pdf"))) if (project_path / "_assets").exists() else 0
return md_count + pdf_count - assets_md - assets_pdf
except Exception:
return 0
@router.get("/", response_model=dict) @router.get("/", response_model=dict)
async def get_my_projects( async def get_my_projects(
current_user: User = Depends(get_current_user), current_user: User = Depends(get_current_user),
@ -58,7 +74,11 @@ async def get_my_projects(
# 合并结果
all_projects = owned_projects + member_projects
projects_data = []
for p in all_projects:
p_dict = ProjectResponse.from_orm(p).dict()
p_dict['doc_count'] = get_document_count(p.storage_key)
projects_data.append(p_dict)
return success_response(data=projects_data)
@@ -73,7 +93,11 @@ async def get_owned_projects(
select(Project).where(Project.owner_id == current_user.id, Project.status == 1)
)
projects = result.scalars().all()
projects_data = []
for p in projects:
p_dict = ProjectResponse.from_orm(p).dict()
p_dict['doc_count'] = get_document_count(p.storage_key)
projects_data.append(p_dict)
return success_response(data=projects_data)
@@ -101,6 +125,7 @@ async def get_shared_projects(
project_dict['owner_name'] = owner.username
project_dict['owner_nickname'] = owner.nickname
project_dict['user_role'] = member.role  # 添加用户角色
project_dict['doc_count'] = get_document_count(project.storage_key)
projects_data.append(project_dict)
return success_response(data=projects_data)
@@ -188,7 +213,7 @@ async def get_project(
if not member and project.is_public != 1:
raise HTTPException(status_code=403, detail="无权访问该项目")
# 增加访问次数 (简单计数)
project.visit_count += 1
await db.commit()
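For comparison, get_document_count above runs several rglob passes and then subtracts the _assets hits; a single-pass sketch (not the project's code, and slightly broader in that it skips _assets at any depth) could look like:

from pathlib import Path

def count_documents(project_path: Path) -> int:
    # One walk over the tree, skipping anything under an _assets directory.
    total = 0
    for path in project_path.rglob("*"):
        if "_assets" in path.parts:
            continue
        if path.is_file() and path.suffix.lower() in (".md", ".pdf"):
            total += 1
    return total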

View File

@@ -1,128 +1,270 @@
"""
文档搜索相关 API
"""
from fastapi import APIRouter, Depends, HTTPException, Query, BackgroundTasks
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, or_
from typing import Optional, List
from pathlib import Path
import logging
from app.core.database import get_db
from app.core.deps import get_current_user
from app.models.user import User
from app.models.project import Project, ProjectMember
from app.services.search_service import search_service
from app.services.storage import storage_service
from app.schemas.response import success_response
router = APIRouter()
logger = logging.getLogger(__name__)
@router.get("/documents", response_model=dict)
async def search_documents(
keyword: str = Query(..., min_length=1, description="搜索关键词"),
project_id: Optional[int] = Query(None, description="限制在指定项目中搜索"),
current_user: User = Depends(get_current_user),
db: AsyncSession = Depends(get_db)
):
"""
文档搜索 (混合模式:Whoosh 全文检索 + 数据库项目搜索 + 文件系统文件名搜索 fallback)
"""
try:
if not keyword:
return success_response(data=[])
# 1. 确定搜索范围 (项目ID列表)
allowed_project_ids = []
if project_id:
# 检查指定项目的访问权限
result = await db.execute(select(Project).where(Project.id == project_id))
project = result.scalar_one_or_none()
if not project:
raise HTTPException(status_code=404, detail="项目不存在")
# 检查权限
if project.owner_id != current_user.id and project.is_public != 1:
member_result = await db.execute(
select(ProjectMember).where(
ProjectMember.project_id == project_id,
ProjectMember.user_id == current_user.id
)
)
if not member_result.scalar_one_or_none():
raise HTTPException(status_code=403, detail="无权访问该项目")
allowed_project_ids.append(str(project_id))
else:
# 获取所有可访问的项目
# 1. 用户创建的项目
owned_result = await db.execute(
select(Project.id).where(Project.owner_id == current_user.id, Project.status == 1)
)
allowed_project_ids.extend([str(pid) for pid in owned_result.scalars().all()])
# 2. 用户参与的项目
member_result = await db.execute(
select(ProjectMember.project_id)
.join(Project, ProjectMember.project_id == Project.id)
.where(
ProjectMember.user_id == current_user.id,
Project.status == 1
)
)
allowed_project_ids.extend([str(pid) for pid in member_result.scalars().all()])
# 去重
allowed_project_ids = list(set(allowed_project_ids))
if not allowed_project_ids:
return success_response(data=[])
# 2. 执行搜索
search_results = []
# A. 数据库项目搜索 (仅当未指定 project_id 时,或者需要搜项目本身)
# 如果前端指定了 project_id(通常是在项目内搜文件),不需要搜项目本身
if not project_id:
projects_query = select(Project).where(
Project.id.in_(allowed_project_ids),
or_(
Project.name.ilike(f"%{keyword}%"),
Project.description.ilike(f"%{keyword}%")
)
)
project_res = await db.execute(projects_query)
matched_projects = project_res.scalars().all()
for proj in matched_projects:
search_results.append({
"type": "project",
"project_id": proj.id,
"project_name": proj.name,
"project_description": proj.description or "",
"match_type": "项目名称/描述",
})
# B. Whoosh 全文检索
whoosh_results = []
try:
if project_id:
whoosh_results = await search_service.search(keyword, str(project_id))
else:
# 全局搜索
whoosh_results = await search_service.search(keyword, limit=50)
# 过滤权限
whoosh_results = [r for r in whoosh_results if str(r['project_id']) in allowed_project_ids]
except Exception as e:
logger.warning(f"Whoosh search failed: {e}")
pass
# 获取 Whoosh 结果涉及的项目 ID
whoosh_project_ids = set(res['project_id'] for res in whoosh_results if res.get('project_id'))
# 查询项目名称映射
project_name_map = {}
if whoosh_project_ids:
p_res = await db.execute(select(Project.id, Project.name).where(Project.id.in_(whoosh_project_ids)))
for pid, pname in p_res.all():
project_name_map[str(pid)] = pname
# 添加 Whoosh 结果
for res in whoosh_results:
pid_str = str(res['project_id'])
search_results.append({
"type": "file",
"project_id": res['project_id'],
"project_name": project_name_map.get(pid_str, "未知项目"),
"file_path": res['path'],
"file_name": res['title'],
"highlights": res.get('highlights'),
"match_type": "全文检索"
})
# C. 文件系统文件名搜索 (Fallback / Complementary)
# 为了保证未索引的文件也能通过文件名搜到
# 获取需要扫描的项目
projects_to_scan = []
if project_id:
# 单项目扫描
res = await db.execute(select(Project).where(Project.id == project_id))
p = res.scalar_one_or_none()
if p: projects_to_scan = [p]
elif len(search_results) < 20:
# 全局扫描:仅当结果较少时才进行全盘扫描,避免性能问题
# 这是一个简单的启发式策略
res = await db.execute(select(Project).where(Project.id.in_(allowed_project_ids)))
projects_to_scan = res.scalars().all()
# 已存在的文件路径集合 (用于去重)
existing_paths = set()
for res in search_results:
if res.get('type') == 'file':
# 统一 key 格式
existing_paths.add(f"{res['project_id']}:{res['file_path']}")
keyword_lower = keyword.lower()
for project in projects_to_scan:
try:
project_path = storage_service.get_secure_path(project.storage_key)
if not project_path.exists(): continue
# 查找文件名匹配
md_files = list(project_path.rglob("*.md"))
pdf_files = list(project_path.rglob("*.pdf"))
for file_path in md_files + pdf_files:
if "_assets" in file_path.parts: continue
if keyword_lower in file_path.name.lower():
rel_path = str(file_path.relative_to(project_path))
unique_key = f"{project.id}:{rel_path}"
if unique_key not in existing_paths:
search_results.append({
"type": "file",
"project_id": project.id,
"project_name": project.name,
"file_path": rel_path,
"file_name": file_path.name,
"match_type": "文件名匹配"
})
existing_paths.add(unique_key)
except Exception:
continue
return success_response(data=search_results[:100])
except Exception as e:
logger.error(f"Search API error: {e}")
return success_response(data=[], message="搜索服务暂时不可用")
async def rebuild_index_task(db: AsyncSession):
"""后台任务:重建索引"""
logger.info("Starting index rebuild...")
try:
# 获取所有项目
result = await db.execute(select(Project).where(Project.status == 1))
projects = result.scalars().all()
documents = []
for project in projects:
try:
# 遍历项目文件
project_root = storage_service.get_secure_path(project.storage_key)
if not project_root.exists():
continue
# 查找所有 .md 文件
md_files = list(project_root.rglob("*.md"))
for file_path in md_files:
if "_assets" in file_path.parts:
continue
try:
content = await storage_service.read_file(file_path)
relative_path = str(file_path.relative_to(project_root))
documents.append({
"project_id": project.id,
"path": relative_path,
"title": file_path.stem,
"content": content
})
except Exception:
continue
except Exception as e:
logger.error(f"Error processing project {project.id}: {e}")
continue
# 批量写入索引
import asyncio
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, search_service.rebuild_index_sync, documents)
logger.info(f"Index rebuild completed. Indexed {len(documents)} documents.")
except Exception as e:
logger.error(f"Index rebuild failed: {e}")
@router.post("/rebuild-index", response_model=dict)
async def rebuild_index(
background_tasks: BackgroundTasks,
current_user: User = Depends(get_current_user),
db: AsyncSession = Depends(get_db)
):
"""
重建搜索索引 (仅限超级管理员)
"""
if not current_user.is_superuser:
raise HTTPException(status_code=403, detail="权限不足")
background_tasks.add_task(rebuild_index_task, db)
return success_response(message="索引重建任务已启动")
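The endpoint above merges three sources (database project matches, Whoosh hits, filename scan) and dedupes files on a "project_id:path" key so the filename fallback never duplicates a full-text hit. A tiny self-contained sketch of that merge step with made-up sample data:

fulltext_hits = [
    {"project_id": 1, "file_path": "docs/guide.md", "match_type": "全文检索"},
]
filename_hits = [
    {"project_id": 1, "file_path": "docs/guide.md", "match_type": "文件名匹配"},
    {"project_id": 1, "file_path": "notes/plan.md", "match_type": "文件名匹配"},
]

results, seen = [], set()
for hit in fulltext_hits + filename_hits:
    key = f"{hit['project_id']}:{hit['file_path']}"
    if key not in seen:  # full-text hits win because they are appended first
        seen.add(key)
        results.append(hit)

print([r["file_path"] for r in results])  # ['docs/guide.md', 'notes/plan.md']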

View File

@@ -2,7 +2,7 @@
通知相关的 Pydantic Schema
"""
from pydantic import BaseModel, Field
from typing import Optional, List, Union
from datetime import datetime
@@ -19,14 +19,14 @@ class NotificationCreate(NotificationBase):
class NotificationUpdate(BaseModel):
is_read: Optional[bool] = None
class NotificationResponse(NotificationBase):
id: Union[str, int]
user_id: int
is_read: bool
created_at: Union[datetime, float]  # Redis returns float timestamp
read_at: Optional[datetime] = None
class Config:

View File

@@ -1,13 +1,25 @@
import logging
import json
import time
import uuid
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from app.models.project import ProjectMember
from app.core.redis_client import get_redis
from typing import List, Optional, Dict, Any
logger = logging.getLogger(__name__)
class NotificationService:
# 通知过期时间14天 (秒)
EXPIRATION_SECONDS = 14 * 24 * 60 * 60
def _get_order_key(self, user_id: int) -> str:
return f"notifications:order:{user_id}"
def _get_content_key(self, user_id: int) -> str:
return f"notifications:content:{user_id}"
async def create_notification(
self,
db: AsyncSession,
@@ -17,19 +29,41 @@ class NotificationService:
type: str = "info",
category: str = "system",
link: str = None
) -> Dict[str, Any]:
"""创建单条通知 (写入 Redis)"""
redis = get_redis()
if not redis:
return None
timestamp = time.time()
notification_id = str(uuid.uuid4())
notification_data = {
"id": notification_id,
"user_id": user_id,
"title": title,
"content": content,
"type": type,
"category": category,
"link": link,
"is_read": False,
"created_at": timestamp
}
json_data = json.dumps(notification_data, ensure_ascii=False)
order_key = self._get_order_key(user_id)
content_key = self._get_content_key(user_id)
async with redis.pipeline() as pipe:
pipe.hset(content_key, notification_id, json_data)
pipe.zadd(order_key, {notification_id: timestamp})
min_score = timestamp - self.EXPIRATION_SECONDS
pipe.zremrangebyscore(order_key, "-inf", min_score)
pipe.expire(order_key, self.EXPIRATION_SECONDS + 86400)
pipe.expire(content_key, self.EXPIRATION_SECONDS + 86400)
await pipe.execute()
return notification_data
async def broadcast_system_notification(
self,
@@ -40,17 +74,37 @@ class NotificationService:
link: str = None
):
"""向指定多个用户发送系统通知"""
redis = get_redis()
if not redis:
return
timestamp = time.time()
async with redis.pipeline() as pipe:
for uid in user_ids:
notification_id = str(uuid.uuid4())
notification_data = {
"id": notification_id,
"user_id": uid,
"title": title,
"content": content,
"type": "info",
"category": "system",
"link": link,
"is_read": False,
"created_at": timestamp
}
json_data = json.dumps(notification_data, ensure_ascii=False)
order_key = self._get_order_key(uid)
content_key = self._get_content_key(uid)
pipe.hset(content_key, notification_id, json_data)
pipe.zadd(order_key, {notification_id: timestamp})
pipe.expire(order_key, self.EXPIRATION_SECONDS + 86400)
pipe.expire(content_key, self.EXPIRATION_SECONDS + 86400)
await pipe.execute()
async def notify_project_members(
self,
@@ -62,7 +116,6 @@ class NotificationService:
link: str = None,
category: str = "project"
):
result = await db.execute(
select(ProjectMember.user_id).where(
ProjectMember.project_id == project_id,
@@ -71,20 +124,126 @@ class NotificationService:
)
member_ids = result.scalars().all()
logger.info(f"Notifying members of project {project_id}. Found {len(member_ids)} members to notify (excluding user {exclude_user_id}).")
if member_ids:
await self.broadcast_system_notification(
db,
title=title,
content=content,
user_ids=member_ids,
link=link
)
async def get_user_notifications(
self,
user_id: int,
limit: int = 50,
skip: int = 0
) -> List[Dict[str, Any]]:
"""获取用户通知列表"""
redis = get_redis()
if not redis:
return []
order_key = self._get_order_key(user_id)
content_key = self._get_content_key(user_id)
ids = await redis.zrevrange(order_key, skip, skip + limit - 1)
if not ids:
return []
json_strings = await redis.hmget(content_key, ids)
notifications = []
ids_to_remove = []
for i, json_str in enumerate(json_strings):
if json_str:
try:
notifications.append(json.loads(json_str))
except:
continue
else:
ids_to_remove.append(ids[i])
if ids_to_remove:
await redis.zrem(order_key, *ids_to_remove)
return notifications
async def get_unread_count(self, user_id: int) -> int:
"""获取未读通知数量"""
redis = get_redis()
if not redis:
return 0
content_key = self._get_content_key(user_id)
all_jsons = await redis.hvals(content_key)
count = 0
for js in all_jsons:
try:
if js:
data = json.loads(js)
if not data.get('is_read'):
count += 1
except:
pass
return count
async def mark_read(self, user_id: int, notification_id: str):
"""标记已读"""
redis = get_redis()
if not redis:
return
content_key = self._get_content_key(user_id)
json_str = await redis.hget(content_key, notification_id)
if json_str:
try:
data = json.loads(json_str)
data['is_read'] = True
await redis.hset(content_key, notification_id, json.dumps(data, ensure_ascii=False))
except:
pass
async def mark_all_read(self, user_id: int):
"""标记所有已读"""
redis = get_redis()
if not redis:
return
order_key = self._get_order_key(user_id)
content_key = self._get_content_key(user_id)
ids = await redis.zrange(order_key, 0, -1)
if not ids:
return
json_strings = await redis.hmget(content_key, ids)
updates = {}
for i, json_str in enumerate(json_strings):
if json_str:
try:
data = json.loads(json_str)
if not data.get('is_read'):
data['is_read'] = True
updates[ids[i]] = json.dumps(data, ensure_ascii=False)
except:
pass
if updates:
await redis.hset(content_key, mapping=updates)
async def delete_notification(self, user_id: int, notification_id: str):
redis = get_redis()
if not redis:
return
order_key = self._get_order_key(user_id)
content_key = self._get_content_key(user_id)
await redis.zrem(order_key, notification_id)
await redis.hdel(content_key, notification_id)
notification_service = NotificationService()
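The service keeps each user's notifications in two keys: a sorted set (notifications:order:{user_id}, scored by timestamp) for ordering and trimming, and a hash (notifications:content:{user_id}) for the JSON payloads. A minimal standalone sketch of that layout using redis-py's asyncio client (the localhost URL, TTL value, and sample data are illustrative assumptions):

import asyncio
import json
import time
import uuid

import redis.asyncio as aioredis

async def main():
    r = aioredis.from_url("redis://localhost:6379/0", decode_responses=True)
    user_id = 42
    order_key = f"notifications:order:{user_id}"
    content_key = f"notifications:content:{user_id}"

    # Write: HSET the JSON payload, ZADD the id with its timestamp as the score.
    nid = str(uuid.uuid4())
    payload = {"id": nid, "title": "hello", "is_read": False, "created_at": time.time()}
    async with r.pipeline() as pipe:
        pipe.hset(content_key, nid, json.dumps(payload, ensure_ascii=False))
        pipe.zadd(order_key, {nid: payload["created_at"]})
        pipe.expire(order_key, 15 * 24 * 3600)
        pipe.expire(content_key, 15 * 24 * 3600)
        await pipe.execute()

    # Read the newest page: ids from the sorted set, payloads from the hash.
    ids = await r.zrevrange(order_key, 0, 19)
    if ids:
        rows = await r.hmget(content_key, ids)
        print([json.loads(x) for x in rows if x])

asyncio.run(main())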

View File

@@ -0,0 +1,220 @@
import os
import shutil
import asyncio
import logging
from pathlib import Path
from typing import List, Dict, Any, Optional
from whoosh import index
from whoosh.fields import Schema, TEXT, ID, DATETIME
from whoosh.qparser import QueryParser, MultifieldParser
from whoosh.analysis import Tokenizer, Token
from whoosh.highlight import HtmlFormatter
import jieba
from app.core.config import settings
logger = logging.getLogger(__name__)
# 定义中文分词器
class ChineseTokenizer(Tokenizer):
def __call__(self, value, positions=False, chars=False, keeporiginal=False, removestops=True, start_pos=0, start_char=0, mode='', **kwargs):
t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs)
# jieba.cut_for_search returns a generator; yield one Whoosh Token per segment.
try:
seglist = jieba.cut_for_search(value)
for w in seglist:
t.original = t.text = w
t.boost = 1.0
if positions:
t.pos = start_pos
start_pos += 1
if chars:
t.startchar = start_char
t.endchar = start_char + len(w)
start_char += len(w)
yield t
except Exception as e:
logger.error(f"Jieba tokenization error: {e}")
def ChineseAnalyzer():
return ChineseTokenizer()
class SearchService:
def __init__(self):
# 索引存储路径 (使用配置文件中的存储根目录)
storage_root = Path(settings.STORAGE_ROOT)
# 如果是相对路径,转换为绝对路径 (相对于 backend 根目录,这里假设 settings 已经处理好或者我们手动处理)
# settings.STORAGE_ROOT 默认为 /data/...
# 本地开发时可能是 ./storage
if not storage_root.is_absolute():
backend_dir = Path(__file__).parent.parent.parent
storage_root = (backend_dir / storage_root).resolve()
self.index_dir = storage_root / "search_index"
try:
self.index_dir.mkdir(parents=True, exist_ok=True)
except Exception as e:
logger.error(f"Failed to create search index directory: {e}")
self.schema = Schema(
project_id=ID(stored=True),
path=ID(unique=True, stored=True),
title=TEXT(stored=True, analyzer=ChineseAnalyzer()),
content=TEXT(stored=True, analyzer=ChineseAnalyzer())
)
self.ix = None
try:
self._load_or_create_index()
except Exception as e:
logger.error(f"Failed to initialize search index: {e}")
def _load_or_create_index(self):
# 检查是否包含索引文件
if index.exists_in(str(self.index_dir)):
try:
self.ix = index.open_dir(str(self.index_dir))
except Exception as e:
logger.warning(f"Failed to open index, trying to recreate: {e}")
# 如果打开失败(例如损坏),尝试重建
shutil.rmtree(str(self.index_dir))
self.index_dir.mkdir(parents=True, exist_ok=True)
self.ix = index.create_in(str(self.index_dir), self.schema)
else:
self.ix = index.create_in(str(self.index_dir), self.schema)
def _add_document_sync(self, project_id: str, path: str, title: str, content: str):
if not self.ix:
return
try:
writer = self.ix.writer()
writer.update_document(
project_id=str(project_id),
path=path,
title=title,
content=content
)
writer.commit()
except Exception as e:
logger.error(f"Failed to add document to index: {e}")
# 如果是 LockError可能需要清理锁? 暂时忽略
async def add_document(self, project_id: str, path: str, title: str, content: str):
"""添加或更新文档索引 (Async)"""
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, self._add_document_sync, project_id, path, title, content)
def _delete_document_sync(self, unique_path: str):
if not self.ix:
return
try:
writer = self.ix.writer()
writer.delete_by_term('path', unique_path)
writer.commit()
except Exception as e:
logger.error(f"Failed to delete document from index: {e}")
async def delete_document(self, project_id: str, path: str):
"""删除文档索引 (Async)"""
unique_path = f"{project_id}:{path}"
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, self._delete_document_sync, unique_path)
def _delete_project_documents_sync(self, project_id: str):
if not self.ix:
return
try:
writer = self.ix.writer()
writer.delete_by_term('project_id', str(project_id))
writer.commit()
except Exception as e:
logger.error(f"Failed to delete project documents: {e}")
async def delete_project_documents(self, project_id: str):
"""删除项目下的所有文档索引 (Async)"""
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, self._delete_project_documents_sync, project_id)
def _search_sync(self, keyword: str, project_id: Optional[str] = None, limit: int = 20):
if not keyword or not self.ix:
return []
try:
with self.ix.searcher() as searcher:
parser = MultifieldParser(["title", "content"], schema=self.ix.schema)
query = parser.parse(keyword)
filter_query = None
if project_id:
filter_parser = QueryParser("project_id", schema=self.ix.schema)
filter_query = filter_parser.parse(str(project_id))
results = searcher.search(query, filter=filter_query, limit=limit)
results.formatter = HtmlFormatter(tagname="mark", classname="search-highlight", termclass="search-term")
search_results = []
for hit in results:
# 提取原始路径 (去掉 project_id 前缀)
full_path = hit.get("path", "")
if ":" in full_path:
_, real_path = full_path.split(":", 1)
else:
real_path = full_path
# 安全获取高亮
try:
highlights = hit.highlights("content") or hit.highlights("title") or hit.get("title", "")
except:
highlights = hit.get("title", "")
search_results.append({
"project_id": hit.get("project_id"),
"path": real_path,
"title": hit.get("title"),
"highlights": highlights,
"score": hit.score
})
return search_results
except Exception as e:
logger.error(f"Search failed: {e}")
# 返回空列表而不是抛出异常
return []
async def search(self, keyword: str, project_id: Optional[str] = None, limit: int = 20):
"""搜索文档 (Async)"""
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, self._search_sync, keyword, project_id, limit)
async def update_doc(self, project_id: int, path: str, title: str, content: str):
"""添加或更新文档 (对外接口)"""
unique_path = f"{project_id}:{path}"
await self.add_document(str(project_id), unique_path, title, content)
async def remove_doc(self, project_id: int, path: str):
"""删除文档 (对外接口)"""
await self.delete_document(str(project_id), path)
def rebuild_index_sync(self, documents: List[Dict]):
"""同步重建索引"""
if not self.ix:
return
try:
writer = self.ix.writer()
# 这里的 documents 必须包含 project_id, path, title, content
for doc in documents:
unique_path = f"{doc['project_id']}:{doc['path']}"
writer.update_document(
project_id=str(doc['project_id']),
path=unique_path,
title=doc['title'],
content=doc['content']
)
writer.commit()
except Exception as e:
logger.error(f"Rebuild index failed: {e}")
search_service = SearchService()
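For reference, a minimal self-contained sketch of the Whoosh + jieba pattern the service is built on (the temporary index directory and sample document are illustrative, not the project's code):

import tempfile

import jieba
from whoosh import index
from whoosh.analysis import Token, Tokenizer
from whoosh.fields import ID, TEXT, Schema
from whoosh.qparser import MultifieldParser

class JiebaTokenizer(Tokenizer):
    def __call__(self, value, positions=False, chars=False, keeporiginal=False,
                 removestops=True, start_pos=0, start_char=0, mode='', **kwargs):
        token = Token(positions, chars, removestops=removestops, mode=mode, **kwargs)
        for word in jieba.cut_for_search(value):
            token.original = token.text = word
            token.boost = 1.0
            if positions:
                token.pos = start_pos
                start_pos += 1
            if chars:
                token.startchar = start_char
                token.endchar = start_char + len(word)
                start_char += len(word)
            yield token

schema = Schema(
    path=ID(unique=True, stored=True),
    title=TEXT(stored=True, analyzer=JiebaTokenizer()),
    content=TEXT(stored=True, analyzer=JiebaTokenizer()),
)

index_dir = tempfile.mkdtemp()
ix = index.create_in(index_dir, schema)

# update_document upserts on the unique "path" field, mirroring update_doc above.
writer = ix.writer()
writer.update_document(path="1:docs/guide.md", title="安装指南",
                       content="全文检索使用 Whoosh 和 jieba 分词")
writer.commit()

with ix.searcher() as searcher:
    query = MultifieldParser(["title", "content"], schema=ix.schema).parse("检索")
    for hit in searcher.search(query, limit=10):
        print(hit["path"], hit["title"])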

View File

@@ -18,6 +18,7 @@ greenlet==3.2.4
h11==0.16.0
httptools==0.7.1
idna==3.11
jieba==0.42.1
loguru==0.7.2
Mako==1.3.10
MarkupSafe==3.0.3
@@ -43,3 +44,4 @@ uvicorn==0.27.0
uvloop==0.22.1
watchfiles==1.1.1
websockets==15.0.1
Whoosh==2.7.4

View File

@@ -22,9 +22,12 @@
"axios": "^1.6.2",
"bytemd": "^1.22.0",
"dayjs": "^1.11.10",
"github-slugger": "^2.0.0",
"mark.js": "^8.11.1",
"pdfjs-dist": "5.4.296",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-highlight-words": "^0.21.0",
"react-markdown": "^9.0.1",
"react-pdf": "^10.2.0",
"react-router-dom": "^6.20.1",

View File

@@ -5,11 +5,23 @@ import request from '@/utils/request'
/**
 * 搜索文档
 * @param {string} keyword - 搜索关键词
 * @param {string} project_id - 项目ID (可选,限制搜索范围)
 */
export function searchDocuments(keyword, project_id) {
return request({
url: '/search/documents',
method: 'get',
params: { keyword, project_id },
})
}
/**
* 重建搜索索引 (仅管理员)
*/
export function rebuildIndex() {
return request({
url: '/search/rebuild-index',
method: 'post',
})
}

View File

@@ -208,6 +208,7 @@ function AppHeader({ collapsed, onToggle }) {
<div className="header-left">
{/* Logo 区域 */}
<div className="header-logo">
<img src="/favicon.svg" alt="logo" style={{ width: 32, height: 32, marginRight: 8 }} />
<h2 style={{ margin: 0, color: '#1677ff', fontWeight: 'bold' }}>NexDocus</h2>
</div>

View File

@@ -1,3 +1,7 @@
.search-highlight {
background-color: #ffd54f !important;
color: black !important;
font-weight: bold;
padding: 0 2px;
border-radius: 2px;
}

View File

@@ -1,11 +1,13 @@
import { useState, useEffect } from 'react'
import { Card, Row, Col, Statistic, Table, Spin, Button, Tooltip, message } from 'antd'
import { UserOutlined, ProjectOutlined, FileTextOutlined, SyncOutlined } from '@ant-design/icons'
import { getDashboardStats } from '@/api/dashboard'
import { rebuildIndex } from '@/api/search'
import Toast from '@/components/Toast/Toast'
function Dashboard() {
const [loading, setLoading] = useState(true)
const [rebuilding, setRebuilding] = useState(false)
const [stats, setStats] = useState({
user_count: 0,
project_count: 0,
@@ -34,6 +36,19 @@
}
}
const handleRebuildIndex = async () => {
setRebuilding(true)
try {
await rebuildIndex()
message.success('索引重建任务已启动,请稍后搜索尝试')
} catch (error) {
console.error('Rebuild index error:', error)
message.error('重建索引失败: ' + (error.response?.data?.detail || error.message))
} finally {
setRebuilding(false)
}
}
const userColumns = [
{
title: '用户名',
@@ -117,7 +132,23 @@
<Col span={8}>
<Card>
<Statistic
title={
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center' }}>
<span>文档总数</span>
<Tooltip title="重建全文搜索索引(扫描所有文档)">
<Button
type="link"
size="small"
style={{ padding: 0, height: 'auto' }}
icon={<SyncOutlined spin={rebuilding} />}
onClick={handleRebuildIndex}
disabled={rebuilding}
>
重建索引
</Button>
</Tooltip>
</div>
}
value={stats.document_count}
prefix={<FileTextOutlined />}
valueStyle={{ color: '#cf1322' }}

View File

@@ -1,22 +1,38 @@
import { useState, useEffect, useRef, useMemo } from 'react'
import { useParams, useNavigate, useSearchParams } from 'react-router-dom'
import { Layout, Menu, Spin, FloatButton, Button, Tooltip, message, Anchor, Modal, Input, Switch, Space, Dropdown, Empty } from 'antd'
import { EditOutlined, VerticalAlignTopOutlined, ShareAltOutlined, MenuFoldOutlined, MenuUnfoldOutlined, FileTextOutlined, FolderOutlined, FilePdfOutlined, CopyOutlined, LockOutlined, CloudDownloadOutlined, CloudUploadOutlined, DownOutlined, SearchOutlined } from '@ant-design/icons'
import ReactMarkdown from 'react-markdown'
import remarkGfm from 'remark-gfm'
import rehypeRaw from 'rehype-raw'
import rehypeSlug from 'rehype-slug'
import rehypeHighlight from 'rehype-highlight'
import 'highlight.js/styles/github.css'
import Highlighter from 'react-highlight-words'
import GithubSlugger from 'github-slugger'
import { getProjectTree, getFileContent, getDocumentUrl } from '@/api/file'
import { gitPull, gitPush, getGitRepos } from '@/api/project'
import { getProjectShareInfo, updateShareSettings } from '@/api/share'
import { searchDocuments } from '@/api/search'
import VirtualPDFViewer from '@/components/PDFViewer/VirtualPDFViewer'
import Toast from '@/components/Toast/Toast'
import './DocumentPage.css'
const { Sider, Content } = Layout
//
const HighlightText = ({ text, keyword }) => {
if (!keyword || !text) return text;
return (
<Highlighter
highlightClassName="search-highlight"
searchWords={[keyword]}
autoEscape={true}
textToHighlight={text}
/>
)
}
function DocumentPage() {
const { projectId } = useParams()
const navigate = useNavigate()
@@ -32,13 +48,19 @@ function DocumentPage() {
const [shareInfo, setShareInfo] = useState(null)
const [hasPassword, setHasPassword] = useState(false)
const [password, setPassword] = useState('')
const [userRole, setUserRole] = useState('viewer')
const [pdfViewerVisible, setPdfViewerVisible] = useState(false)
const [pdfUrl, setPdfUrl] = useState('')
const [pdfFilename, setPdfFilename] = useState('')
const [viewMode, setViewMode] = useState('markdown')
const [gitRepos, setGitRepos] = useState([])
const [projectName, setProjectName] = useState('')
//
const [searchKeyword, setSearchKeyword] = useState('')
const [matchedFilePaths, setMatchedFilePaths] = useState(new Set())
const [isSearching, setIsSearching] = useState(false)
const contentRef = useRef(null)
useEffect(() => {
@@ -51,6 +73,64 @@
}
}, [projectId, userRole])
//
const handleSearch = async (value) => {
setSearchKeyword(value)
if (!value.trim()) {
setMatchedFilePaths(new Set())
return
}
setIsSearching(true)
try {
const res = await searchDocuments(value, projectId)
const paths = new Set(res.data.map(item => item.file_path))
setMatchedFilePaths(paths)
//
const keysToExpand = new Set(openKeys)
res.data.forEach(item => {
const parts = item.file_path.split('/')
let currentPath = ''
for (let i = 0; i < parts.length - 1; i++) {
currentPath = currentPath ? `${currentPath}/${parts[i]}` : parts[i]
keysToExpand.add(currentPath)
}
})
setOpenKeys(Array.from(keysToExpand))
} catch (error) {
console.error('Search error:', error)
} finally {
setIsSearching(false)
}
}
//
const filteredTreeData = useMemo(() => {
if (!searchKeyword.trim()) return fileTree
const loop = (data) => {
const result = []
for (const node of data) {
const titleMatch = node.title.toLowerCase().includes(searchKeyword.toLowerCase())
const contentMatch = matchedFilePaths.has(node.key)
if (node.children) {
const children = loop(node.children)
if (children.length > 0 || titleMatch) {
result.push({ ...node, children })
}
} else {
if (titleMatch || contentMatch) {
result.push(node)
}
}
}
return result
}
return loop(fileTree)
}, [fileTree, searchKeyword, matchedFilePaths])
const loadGitRepos = async () => {
try {
const res = await getGitRepos(projectId)
@@ -75,6 +155,12 @@
// URL
const fileParam = searchParams.get('file')
const keywordParam = searchParams.get('keyword')
if (keywordParam) {
handleSearch(keywordParam)
}
if (fileParam) {
setSelectedFile(fileParam)
@@ -131,6 +217,9 @@
//
const convertTreeToMenuItems = (nodes) => {
return nodes.map((node) => {
// -
const titleNode = node.title.replace('.md', '')
if (!node.isLeaf) {
//
return {
@@ -143,7 +232,7 @@
// Markdown
return {
key: node.key,
label: titleNode,
icon: <FileTextOutlined />,
}
} else if (node.title && node.title.endsWith('.pdf')) {
@@ -181,6 +270,7 @@
// markdown
useEffect(() => {
if (markdownContent) {
const slugger = new GithubSlugger()
const headings = []
const lines = markdownContent.split('\n')
@@ -189,15 +279,8 @@
if (match) {
const level = match[1].length
const title = match[2]
// 使用 github-slugger 生成 ID(与 rehype-slug 一致)
const key = slugger.slug(title)
headings.push({
key: `#${key}`,
@@ -586,7 +669,81 @@
}
}
const menuItems = convertTreeToMenuItems(filteredTreeData)
// Markdown
// 使 components
// 使 rehype
// 使
const markdownComponents = useMemo(() => {
if (!searchKeyword) {
return {
a: ({ node, href, children, ...props }) => {
const isExternal = href && (href.startsWith('http') || href.startsWith('//'));
return (
<a
href={href}
onClick={(e) => handleMarkdownLink(e, href)}
target={isExternal ? '_blank' : undefined}
rel={isExternal ? 'noopener noreferrer' : undefined}
{...props}
>
{children}
</a>
);
},
}
}
//
//
const highlightRenderer = (Tag) => ({ node, children, ...props }) => {
// children
if (typeof children === 'string') {
return <Tag {...props}><HighlightText text={children} keyword={searchKeyword} /></Tag>
}
//
if (Array.isArray(children)) {
const newChildren = children.map((child, idx) => {
if (typeof child === 'string') {
return <HighlightText key={idx} text={child} keyword={searchKeyword} />
}
return child
})
return <Tag {...props}>{newChildren}</Tag>
}
return <Tag {...props}>{children}</Tag>
}
return {
a: ({ node, href, children, ...props }) => {
const isExternal = href && (href.startsWith('http') || href.startsWith('//'));
return (
<a
href={href}
onClick={(e) => handleMarkdownLink(e, href)}
target={isExternal ? '_blank' : undefined}
rel={isExternal ? 'noopener noreferrer' : undefined}
{...props}
>
{typeof children === 'string' ? <HighlightText text={children} keyword={searchKeyword} /> : children}
</a>
);
},
p: highlightRenderer('p'),
li: highlightRenderer('li'),
h1: highlightRenderer('h1'),
h2: highlightRenderer('h2'),
h3: highlightRenderer('h3'),
h4: highlightRenderer('h4'),
h5: highlightRenderer('h5'),
h6: highlightRenderer('h6'),
span: highlightRenderer('span'),
td: highlightRenderer('td'),
th: highlightRenderer('th'),
div: highlightRenderer('div'),
}
}, [searchKeyword])
return (
<div className="project-docs-page">
@@ -623,6 +780,20 @@
</div>
</div>
{/* 搜索框 */}
<div style={{ padding: '12px 16px 4px' }}>
<Input.Search
placeholder="搜索文档内容..."
allowClear
value={searchKeyword}
onChange={(e) => setSearchKeyword(e.target.value)}
onSearch={handleSearch}
loading={isSearching}
enterButton
/>
</div>
{filteredTreeData.length > 0 ? (
<Menu
mode="inline"
selectedKeys={[selectedFile]}
@@ -632,6 +803,11 @@
onClick={handleMenuClick}
className="docs-menu"
/>
) : (
<div style={{ padding: '20px', textAlign: 'center', color: '#999' }}>
<Empty image={Empty.PRESENTED_IMAGE_SIMPLE} description="未找到匹配文档" />
</div>
)}
</Sider>
{/* 右侧内容区 */}
@@ -654,22 +830,7 @@
<ReactMarkdown
remarkPlugins={[remarkGfm]}
rehypePlugins={[rehypeRaw, rehypeSlug, rehypeHighlight]}
components={markdownComponents}
>
{markdownContent}
</ReactMarkdown>
@ -716,7 +877,7 @@ function DocumentPage() {
title: ( title: (
<div style={{ paddingLeft: `${(item.level - 1) * 12}px`, display: 'flex', alignItems: 'center', gap: '4px' }}> <div style={{ paddingLeft: `${(item.level - 1) * 12}px`, display: 'flex', alignItems: 'center', gap: '4px' }}>
<FileTextOutlined style={{ fontSize: '12px', color: '#8c8c8c' }} /> <FileTextOutlined style={{ fontSize: '12px', color: '#8c8c8c' }} />
{item.title} <HighlightText text={item.title} keyword={searchKeyword} />
</div>
),
}))}
@@ -743,6 +904,7 @@
</Layout>
{/* 分享模态框 */}
{/* ... keeping the modal ... */}
<Modal
title="分享项目"
open={shareModalVisible}

View File

@@ -88,12 +88,10 @@ function Login() {
<div className="intro-section">
<h1 className="intro-title">
团队协作<span className="highlight"> 文档管理平台</span>
</h1>
<p className="intro-desc">
全流程文档协作共享,让每一份文档都产生价值
</p>
</div>

View File

@@ -1,7 +1,7 @@
import { useState, useEffect, useRef, useMemo } from 'react'
import { useParams, useSearchParams } from 'react-router-dom'
import { Layout, Menu, Spin, FloatButton, Button, Modal, Input, message, Drawer, Anchor, Empty } from 'antd'
import { VerticalAlignTopOutlined, MenuOutlined, MenuFoldOutlined, MenuUnfoldOutlined, FileTextOutlined, FolderOutlined, FilePdfOutlined, LockOutlined, SearchOutlined } from '@ant-design/icons'
import { Viewer } from '@bytemd/react'
import gfm from '@bytemd/plugin-gfm'
import highlight from '@bytemd/plugin-highlight'
@@ -11,14 +11,32 @@ import gemoji from '@bytemd/plugin-gemoji'
import 'bytemd/dist/index.css'
import rehypeSlug from 'rehype-slug'
import 'highlight.js/styles/github.css'
import Mark from 'mark.js'
import Highlighter from 'react-highlight-words'
import GithubSlugger from 'github-slugger'
import { getPreviewInfo, getPreviewTree, getPreviewFile, verifyAccessPassword, getPreviewDocumentUrl } from '@/api/share'
import { searchDocuments } from '@/api/search'
import VirtualPDFViewer from '@/components/PDFViewer/VirtualPDFViewer'
import './PreviewPage.css'
const { Sider, Content } = Layout
// Keyword highlight helper (used for menu/Tree titles and TOC items)
const HighlightText = ({ text, keyword }) => {
if (!keyword || !text) return text;
return (
<Highlighter
highlightClassName="search-highlight"
searchWords={[keyword]}
autoEscape={true}
textToHighlight={text}
/>
)
}
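For context, HighlightText is reused below for the Tree/menu titles and the TOC entries; a minimal usage sketch (the DocLabel wrapper and its props are illustrative only, not part of this commit):

// Illustrative only: how a menu/TOC label can wrap its title with HighlightText
const DocLabel = ({ title, keyword }) => (
  <span style={{ display: 'flex', alignItems: 'center', gap: 4 }}>
    <FileTextOutlined style={{ fontSize: 12 }} />
    <HighlightText text={title} keyword={keyword} />
  </span>
)

Because autoEscape is set, the keyword is matched as a literal string, so search input containing regex characters such as ( or + cannot break the highlighter.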
function PreviewPage() { function PreviewPage() {
const { projectId } = useParams() const { projectId } = useParams()
const [searchParams] = useSearchParams()
const [projectInfo, setProjectInfo] = useState(null) const [projectInfo, setProjectInfo] = useState(null)
const [fileTree, setFileTree] = useState([]) const [fileTree, setFileTree] = useState([])
const [selectedFile, setSelectedFile] = useState('') const [selectedFile, setSelectedFile] = useState('')
@ -29,15 +47,22 @@ function PreviewPage() {
const [tocItems, setTocItems] = useState([]) const [tocItems, setTocItems] = useState([])
const [passwordModalVisible, setPasswordModalVisible] = useState(false) const [passwordModalVisible, setPasswordModalVisible] = useState(false)
const [password, setPassword] = useState('') const [password, setPassword] = useState('')
  const [accessPassword, setAccessPassword] = useState(null)
const [siderCollapsed, setSiderCollapsed] = useState(false) const [siderCollapsed, setSiderCollapsed] = useState(false)
const [mobileDrawerVisible, setMobileDrawerVisible] = useState(false) const [mobileDrawerVisible, setMobileDrawerVisible] = useState(false)
const [isMobile, setIsMobile] = useState(false) const [isMobile, setIsMobile] = useState(false)
const [pdfViewerVisible, setPdfViewerVisible] = useState(false) const [pdfViewerVisible, setPdfViewerVisible] = useState(false)
const [pdfUrl, setPdfUrl] = useState('') const [pdfUrl, setPdfUrl] = useState('')
const [pdfFilename, setPdfFilename] = useState('') const [pdfFilename, setPdfFilename] = useState('')
  const [viewMode, setViewMode] = useState('markdown') // 'markdown' or 'pdf'
  const [viewMode, setViewMode] = useState('markdown')
  // Full-text search state
const [searchKeyword, setSearchKeyword] = useState('')
const [matchedFilePaths, setMatchedFilePaths] = useState(new Set())
const [isSearching, setIsSearching] = useState(false)
const contentRef = useRef(null) const contentRef = useRef(null)
const viewerRef = useRef(null)
  // ByteMD plugins
const plugins = useMemo(() => [ const plugins = useMemo(() => [
@ -51,6 +76,22 @@ function PreviewPage() {
} }
], []) ], [])
  // Highlight the search keyword in the rendered Markdown with mark.js
useEffect(() => {
if (viewerRef.current && viewMode === 'markdown') {
const instance = new Mark(viewerRef.current)
instance.unmark()
if (searchKeyword.trim()) {
instance.mark(searchKeyword, {
element: 'span',
className: 'search-highlight',
exclude: ['pre', 'code', '.toc-content']
})
}
}
}, [markdownContent, searchKeyword, viewMode])
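The unmark/mark pattern above could be reused by any view that renders searchable Markdown; one possible extraction, shown only as a sketch with the same mark.js options as this commit (the hook name and signature are invented here):

// Sketch of a reusable hook around mark.js; not part of this commit
import { useEffect } from 'react'
import Mark from 'mark.js'

export function useSearchHighlight(containerRef, keyword) {
  useEffect(() => {
    if (!containerRef.current) return
    const instance = new Mark(containerRef.current)
    instance.unmark() // clear highlights from the previous keyword
    if (keyword && keyword.trim()) {
      instance.mark(keyword, {
        element: 'span',
        className: 'search-highlight',
        exclude: ['pre', 'code'], // skip code blocks, as the effect above does
      })
    }
    return () => instance.unmark() // clean up when the keyword or content changes
  }, [containerRef, keyword])
}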
  // Detect mobile viewport
useEffect(() => { useEffect(() => {
const checkMobile = () => { const checkMobile = () => {
@ -73,10 +114,8 @@ function PreviewPage() {
setProjectInfo(info) setProjectInfo(info)
if (info.has_password) { if (info.has_password) {
//
setPasswordModalVisible(true) setPasswordModalVisible(true)
} else { } else {
//
loadFileTree() loadFileTree()
} }
} catch (error) { } catch (error) {
@ -110,9 +149,52 @@ function PreviewPage() {
const tree = res.data || [] const tree = res.data || []
setFileTree(tree) setFileTree(tree)
// README.md
const readmeNode = findReadme(tree) const readmeNode = findReadme(tree)
if (readmeNode) {
// Check query params
const fileParam = searchParams.get('file')
const keywordParam = searchParams.get('keyword')
if (keywordParam) {
handleSearch(keywordParam)
}
if (fileParam) {
// Deep link to file
if (fileParam.toLowerCase().endsWith('.pdf')) {
let url = getPreviewDocumentUrl(projectId, fileParam)
            // Build the PDF preview URL with the same access_pass/token params as handleMenuClick
            // (duplicated here; a shared helper is sketched after this hunk)
const params = []
if (pwd || accessPassword) params.push(`access_pass=${encodeURIComponent(pwd || accessPassword)}`)
const token = localStorage.getItem('access_token')
if (token) params.push(`token=${encodeURIComponent(token)}`)
if (params.length > 0) url += `?${params.join('&')}`
setSelectedFile(fileParam)
setPdfUrl(url)
setPdfFilename(fileParam.split('/').pop())
setViewMode('pdf')
} else {
setSelectedFile(fileParam)
loadMarkdown(fileParam, pwd || accessPassword)
}
// Expand tree to file
const parts = fileParam.split('/')
const allParentPaths = []
let currentPath = ''
for (let i = 0; i < parts.length - 1; i++) {
currentPath = currentPath ? `${currentPath}/${parts[i]}` : parts[i]
allParentPaths.push(currentPath)
}
setOpenKeys(prev => [...new Set([...prev, ...allParentPaths])])
} else if (readmeNode) {
setSelectedFile(readmeNode.key) setSelectedFile(readmeNode.key)
loadMarkdown(readmeNode.key, pwd || accessPassword) loadMarkdown(readmeNode.key, pwd || accessPassword)
} }
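The deep-link branch above rebuilds the same access_pass/token query string that handleMenuClick builds further down. A small shared helper would remove the duplication; this is only a sketch that reuses calls already present in this file (buildPdfUrl itself is not in the commit):

// Sketch: shared builder for the authenticated PDF preview URL (not in this commit)
function buildPdfUrl(projectId, filePath, accessPassword) {
  let url = getPreviewDocumentUrl(projectId, filePath)
  const params = []
  if (accessPassword) {
    params.push(`access_pass=${encodeURIComponent(accessPassword)}`)
  }
  const token = localStorage.getItem('access_token')
  if (token) {
    params.push(`token=${encodeURIComponent(token)}`)
  }
  return params.length > 0 ? `${url}?${params.join('&')}` : url
}

Both the deep-link branch and handleMenuClick could then share this single call site for the URL logic.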
@ -125,7 +207,64 @@ function PreviewPage() {
} }
} }
  // Full-text search within the shared project
const handleSearch = async (value) => {
setSearchKeyword(value)
if (!value.trim()) {
setMatchedFilePaths(new Set())
return
}
setIsSearching(true)
try {
const res = await searchDocuments(value, projectId)
const paths = new Set(res.data.map(item => item.file_path))
setMatchedFilePaths(paths)
      // Auto-expand the parent folders of every matched file
const keysToExpand = new Set(openKeys)
res.data.forEach(item => {
const parts = item.file_path.split('/')
let currentPath = ''
for (let i = 0; i < parts.length - 1; i++) {
currentPath = currentPath ? `${currentPath}/${parts[i]}` : parts[i]
keysToExpand.add(currentPath)
}
})
setOpenKeys(Array.from(keysToExpand))
} catch (error) {
console.error('Search error:', error)
} finally {
setIsSearching(false)
}
}
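handleSearch depends on searchDocuments from @/api/search, which is not part of this diff. A minimal sketch of what that wrapper is assumed to look like (the request helper, route path, and parameter names are guesses and may differ from the real module):

// Assumed shape of src/api/search.js; not shown in this diff
import request from './request' // assumed shared axios-style instance used by the other api modules

export function searchDocuments(keyword, projectId) {
  return request.get('/api/search', {
    params: { keyword, project_id: projectId },
  })
}

Whatever the real implementation is, handleSearch only relies on the response exposing data as an array of objects with a file_path field.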
  // Filter the file tree: keep nodes whose title matches the keyword or whose content hit the full-text search
const filteredTreeData = useMemo(() => {
if (!searchKeyword.trim()) return fileTree
const loop = (data) => {
const result = []
for (const node of data) {
const titleMatch = node.title.toLowerCase().includes(searchKeyword.toLowerCase())
const contentMatch = matchedFilePaths.has(node.key)
if (node.children) {
const children = loop(node.children)
if (children.length > 0 || titleMatch) {
result.push({ ...node, children })
}
} else {
if (titleMatch || contentMatch) {
result.push(node)
}
}
}
return result
}
return loop(fileTree)
}, [fileTree, searchKeyword, matchedFilePaths])
const findReadme = (nodes) => { const findReadme = (nodes) => {
for (const node of nodes) { for (const node of nodes) {
if (node.title === 'README.md' && node.isLeaf) { if (node.title === 'README.md' && node.isLeaf) {
@ -135,9 +274,10 @@ function PreviewPage() {
return null return null
} }
//
const convertTreeToMenuItems = (nodes) => { const convertTreeToMenuItems = (nodes) => {
return nodes.map((node) => { return nodes.map((node) => {
const labelNode = node.title.replace('.md', '')
if (!node.isLeaf) { if (!node.isLeaf) {
return { return {
key: node.key, key: node.key,
@ -148,7 +288,7 @@ function PreviewPage() {
} else if (node.title && node.title.endsWith('.md')) { } else if (node.title && node.title.endsWith('.md')) {
return { return {
key: node.key, key: node.key,
          label: node.title.replace('.md', ''),
          label: labelNode,
icon: <FileTextOutlined />, icon: <FileTextOutlined />,
} }
} else if (node.title && node.title.endsWith('.pdf')) { } else if (node.title && node.title.endsWith('.pdf')) {
@ -162,7 +302,6 @@ function PreviewPage() {
}).filter(Boolean) }).filter(Boolean)
} }
// markdown
const loadMarkdown = async (filePath, pwd = null) => { const loadMarkdown = async (filePath, pwd = null) => {
setLoading(true) setLoading(true)
setTocItems([]) setTocItems([])
@ -170,12 +309,10 @@ function PreviewPage() {
const res = await getPreviewFile(projectId, filePath, pwd || accessPassword) const res = await getPreviewFile(projectId, filePath, pwd || accessPassword)
setMarkdownContent(res.data?.content || '') setMarkdownContent(res.data?.content || '')
//
if (isMobile) { if (isMobile) {
setMobileDrawerVisible(false) setMobileDrawerVisible(false)
} }
//
if (contentRef.current) { if (contentRef.current) {
contentRef.current.scrollTo({ top: 0, behavior: 'smooth' }) contentRef.current.scrollTo({ top: 0, behavior: 'smooth' })
} }
@ -192,9 +329,9 @@ function PreviewPage() {
} }
} }
// markdown
useEffect(() => { useEffect(() => {
if (markdownContent) { if (markdownContent) {
const slugger = new GithubSlugger()
const headings = [] const headings = []
const lines = markdownContent.split('\n') const lines = markdownContent.split('\n')
@ -203,7 +340,8 @@ function PreviewPage() {
if (match) { if (match) {
const level = match[1].length const level = match[1].length
const title = match[2] const title = match[2]
          const key = title.toLowerCase().replace(/[^a-z0-9\u4e00-\u9fa5]+/g, '-')
          // Use github-slugger so TOC anchor ids match the ones rehype-slug generates
          const key = slugger.slug(title)
headings.push({ headings.push({
key: `#${key}`, key: `#${key}`,
@ -217,7 +355,7 @@ function PreviewPage() {
setTocItems(headings) setTocItems(headings)
} }
}, [markdownContent]) }, [markdownContent])
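The point of this change: rehype-slug, which adds the id attributes to the rendered headings, uses github-slugger internally, so computing the TOC keys with the same slugger makes every anchor line up, including repeated headings. A quick illustration (example titles only):

// Example: github-slugger produces the same ids rehype-slug attaches to headings
import GithubSlugger from 'github-slugger'

const slugger = new GithubSlugger()
slugger.slug('Quick Start (v2)') // -> 'quick-start-v2' (punctuation dropped, no trailing dash)
slugger.slug('Quick Start (v2)') // -> 'quick-start-v2-1' (duplicate headings get a numeric suffix)

The previous lowercase/replace approach produced neither of those behaviours, so some TOC links pointed at ids that did not exist in the rendered page.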
//
const resolveRelativePath = (currentPath, relativePath) => { const resolveRelativePath = (currentPath, relativePath) => {
if (relativePath.startsWith('/')) { if (relativePath.startsWith('/')) {
return relativePath.substring(1) return relativePath.substring(1)
@ -240,7 +378,6 @@ function PreviewPage() {
return dirParts.join('/') return dirParts.join('/')
} }
// markdown
const handleMarkdownLink = (e, href) => { const handleMarkdownLink = (e, href) => {
if (!href || href.startsWith('http') || href.startsWith('//') || href.startsWith('#')) { if (!href || href.startsWith('http') || href.startsWith('//') || href.startsWith('#')) {
return return
@ -257,7 +394,6 @@ function PreviewPage() {
try { try {
decodedHref = decodeURIComponent(href) decodedHref = decodeURIComponent(href)
} catch (err) { } catch (err) {
// ignore
} }
const targetPath = resolveRelativePath(selectedFile, decodedHref) const targetPath = resolveRelativePath(selectedFile, decodedHref)
@ -278,7 +414,6 @@ function PreviewPage() {
handleMenuClick({ key: targetPath }) handleMenuClick({ key: targetPath })
} }
//
const handleContentClick = (e) => { const handleContentClick = (e) => {
const target = e.target.closest('a') const target = e.target.closest('a')
if (target) { if (target) {
@ -289,22 +424,17 @@ function PreviewPage() {
} }
} }
//
const handleMenuClick = ({ key }) => { const handleMenuClick = ({ key }) => {
setSelectedFile(key) setSelectedFile(key)
// PDF
if (key.toLowerCase().endsWith('.pdf')) { if (key.toLowerCase().endsWith('.pdf')) {
// PDF - 使API
let url = getPreviewDocumentUrl(projectId, key) let url = getPreviewDocumentUrl(projectId, key)
const params = [] const params = []
//
if (accessPassword) { if (accessPassword) {
params.push(`access_pass=${encodeURIComponent(accessPassword)}`) params.push(`access_pass=${encodeURIComponent(accessPassword)}`)
} }
// token
const token = localStorage.getItem('access_token') const token = localStorage.getItem('access_token')
if (token) { if (token) {
params.push(`token=${encodeURIComponent(token)}`) params.push(`token=${encodeURIComponent(token)}`)
@ -318,39 +448,16 @@ function PreviewPage() {
setPdfFilename(key.split('/').pop()) setPdfFilename(key.split('/').pop())
setViewMode('pdf') setViewMode('pdf')
} else { } else {
// Markdown
setViewMode('markdown') setViewMode('markdown')
loadMarkdown(key) loadMarkdown(key)
} }
} }
  const menuItems = convertTreeToMenuItems(fileTree)
  const menuItems = convertTreeToMenuItems(filteredTreeData)
//
const SiderContent = () => (
<>
<div className="preview-sider-header">
<h2>{projectInfo?.name || '项目预览'}</h2>
{projectInfo?.description && (
<p className="preview-project-desc">{projectInfo.description}</p>
)}
</div>
<Menu
mode="inline"
selectedKeys={[selectedFile]}
openKeys={openKeys}
onOpenChange={setOpenKeys}
items={menuItems}
onClick={handleMenuClick}
className="preview-menu"
/>
</>
)
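Removing the inline SiderContent component (deleted above) sidesteps a classic React pitfall that the new controlled Input.Search would otherwise hit: a component type declared inside the render function is recreated on every render, so React remounts its subtree and the search input drops focus on each keystroke. If the markup still needs to be shared between the Drawer and the Sider, a plain JSX variable inside PreviewPage avoids that (sketch only, trimmed to the essentials):

// Sketch: share the sider body without declaring a component during render (not in this commit)
const siderBody = (
  <>
    <div className="preview-sider-header">
      <h2>{projectInfo?.name || '项目预览'}</h2>
    </div>
    <Input.Search
      placeholder="搜索文档内容..."
      allowClear
      value={searchKeyword}
      onChange={(e) => setSearchKeyword(e.target.value)}
      onSearch={handleSearch}
      loading={isSearching}
    />
    {/* Menu / Empty fallback exactly as in the Drawer and Sider branches below */}
  </>
)

The commit instead duplicates the markup in both branches, which works but leaves two copies to keep in sync.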
return ( return (
<div className="preview-page"> <div className="preview-page">
<Layout className="preview-layout"> <Layout className="preview-layout">
{/* 移动端使用 Drawer桌面端使用 Sider */}
{isMobile ? ( {isMobile ? (
<> <>
<Button <Button
@ -362,12 +469,37 @@ function PreviewPage() {
目录索引 目录索引
</Button> </Button>
<Drawer <Drawer
              title={projectInfo?.name || '项目预览'}
              title={
                <div style={{ display: 'flex', alignItems: 'center', gap: 8 }}>
                  <img src="/favicon.svg" alt="logo" style={{ width: 24, height: 24 }} />
                  <span>{projectInfo?.name || '项目预览'}</span>
                </div>
              }
placement="left" placement="left"
onClose={() => setMobileDrawerVisible(false)} onClose={() => setMobileDrawerVisible(false)}
open={mobileDrawerVisible} open={mobileDrawerVisible}
width="80%" width="80%"
> >
<div className="preview-sider-header" style={{ padding: '0 0 16px' }}>
{projectInfo?.description && (
<p className="preview-project-desc">{projectInfo.description}</p>
)}
</div>
            {/* Search box */}
<div style={{ padding: '0 0 12px' }}>
<Input.Search
placeholder="搜索文档内容..."
allowClear
value={searchKeyword}
onChange={(e) => setSearchKeyword(e.target.value)}
onSearch={handleSearch}
loading={isSearching}
enterButton
/>
</div>
{filteredTreeData.length > 0 ? (
<Menu <Menu
mode="inline" mode="inline"
selectedKeys={[selectedFile]} selectedKeys={[selectedFile]}
@ -377,6 +509,11 @@ function PreviewPage() {
onClick={handleMenuClick} onClick={handleMenuClick}
className="preview-menu" className="preview-menu"
/> />
) : (
<div style={{ padding: '20px', textAlign: 'center', color: '#999' }}>
<Empty image={Empty.PRESENTED_IMAGE_SIMPLE} description="未找到匹配文档" />
</div>
)}
</Drawer> </Drawer>
</> </>
) : ( ) : (
@ -387,11 +524,47 @@ function PreviewPage() {
collapsed={siderCollapsed} collapsed={siderCollapsed}
collapsedWidth={0} collapsedWidth={0}
> >
            <SiderContent />
            <div className="preview-sider-header">
<div style={{ display: 'flex', alignItems: 'center', gap: 8, marginBottom: 8 }}>
<img src="/favicon.svg" alt="logo" style={{ width: 24, height: 24 }} />
<h2 style={{ margin: 0 }}>{projectInfo?.name || '项目预览'}</h2>
</div>
{projectInfo?.description && (
<p className="preview-project-desc">{projectInfo.description}</p>
)}
</div>
            {/* Search box */}
<div style={{ padding: '12px 16px 4px' }}>
<Input.Search
placeholder="搜索文档内容..."
allowClear
value={searchKeyword}
onChange={(e) => setSearchKeyword(e.target.value)}
onSearch={handleSearch}
loading={isSearching}
enterButton
/>
</div>
{filteredTreeData.length > 0 ? (
<Menu
mode="inline"
selectedKeys={[selectedFile]}
openKeys={openKeys}
onOpenChange={setOpenKeys}
items={menuItems}
onClick={handleMenuClick}
className="preview-menu"
/>
) : (
<div style={{ padding: '20px', textAlign: 'center', color: '#999' }}>
<Empty image={Empty.PRESENTED_IMAGE_SIMPLE} description="未找到匹配文档" />
</div>
)}
</Sider> </Sider>
)} )}
{/* 右侧内容区 */}
<Layout className="preview-content-layout"> <Layout className="preview-content-layout">
<Content className="preview-content" ref={contentRef}> <Content className="preview-content" ref={contentRef}>
<div className={`preview-content-wrapper ${viewMode === 'pdf' ? 'pdf-mode' : ''}`}> <div className={`preview-content-wrapper ${viewMode === 'pdf' ? 'pdf-mode' : ''}`}>
@ -407,7 +580,7 @@ function PreviewPage() {
filename={pdfFilename} filename={pdfFilename}
/> />
) : ( ) : (
<div className="markdown-body" onClick={handleContentClick}> <div className="markdown-body" onClick={handleContentClick} ref={viewerRef}>
<Viewer <Viewer
value={markdownContent} value={markdownContent}
plugins={plugins} plugins={plugins}
@ -416,7 +589,6 @@ function PreviewPage() {
)} )}
</div> </div>
{/* 返回顶部按钮 - 仅在markdown模式显示 */}
{viewMode === 'markdown' && ( {viewMode === 'markdown' && (
<FloatButton <FloatButton
icon={<VerticalAlignTopOutlined />} icon={<VerticalAlignTopOutlined />}
@ -431,7 +603,6 @@ function PreviewPage() {
)} )}
</Content> </Content>
{/* 右侧TOC面板仅桌面端且markdown模式显示 */}
{!isMobile && viewMode === 'markdown' && !tocCollapsed && ( {!isMobile && viewMode === 'markdown' && !tocCollapsed && (
<Sider width={250} theme="light" className="preview-toc-sider"> <Sider width={250} theme="light" className="preview-toc-sider">
<div className="toc-header"> <div className="toc-header">
@ -455,7 +626,7 @@ function PreviewPage() {
title: ( title: (
<div style={{ paddingLeft: `${(item.level - 1) * 12}px`, display: 'flex', alignItems: 'center', gap: '4px' }}> <div style={{ paddingLeft: `${(item.level - 1) * 12}px`, display: 'flex', alignItems: 'center', gap: '4px' }}>
<FileTextOutlined style={{ fontSize: '12px', color: '#8c8c8c' }} /> <FileTextOutlined style={{ fontSize: '12px', color: '#8c8c8c' }} />
                        {item.title}
                        <HighlightText text={item.title} keyword={searchKeyword} />
</div> </div>
), ),
}))} }))}
@ -468,7 +639,6 @@ function PreviewPage() {
)} )}
</Layout> </Layout>
{/* TOC展开按钮仅桌面端 */}
{!isMobile && tocCollapsed && ( {!isMobile && tocCollapsed && (
<Button <Button
type="primary" type="primary"
@ -481,7 +651,6 @@ function PreviewPage() {
)} )}
</Layout> </Layout>
{/* 密码验证模态框 */}
<Modal <Modal
title={ title={
<div style={{ display: 'flex', alignItems: 'center', gap: 8 }}> <div style={{ display: 'flex', alignItems: 'center', gap: 8 }}>
View File (ProjectList component)
@ -418,7 +418,7 @@ function ProjectList({ type = 'my' }) {
navigate(`/projects/${item.project_id}/docs`) navigate(`/projects/${item.project_id}/docs`)
} else if (item.type === 'file') { } else if (item.type === 'file') {
      // Open the matched file in the project's docs page (now carrying the search keyword)
      navigate(`/projects/${item.project_id}/docs?file=${encodeURIComponent(item.file_path)}`)
      navigate(`/projects/${item.project_id}/docs?file=${encodeURIComponent(item.file_path)}&keyword=${encodeURIComponent(searchKeyword)}`)
} }
} }
@ -530,7 +530,7 @@ function ProjectList({ type = 'my' }) {
<h3>{project.name}</h3> <h3>{project.name}</h3>
<p className="project-description">{project.description || '暂无描述'}</p> <p className="project-description">{project.description || '暂无描述'}</p>
<div className="project-meta"> <div className="project-meta">
                <span>访问: {project.visit_count}</span>
                <span>文档数: {project.doc_count || 0}</span>
{type === 'share' && project.owner_name && ( {type === 'share' && project.owner_name && (
<span style={{ marginLeft: 12 }}> <span style={{ marginLeft: 12 }}>
所有者: {project.owner_nickname || project.owner_name} 所有者: {project.owner_nickname || project.owner_name}
View File (Vite dev-server config)
@ -19,6 +19,7 @@ export default defineConfig({
}, },
}, },
server: { server: {
host: '0.0.0.0',
port: 5173, port: 5173,
open: true, open: true,
proxy: { proxy: {
View File (yarn.lock)
@ -3209,6 +3209,11 @@ hastscript@^9.0.0:
property-information "^7.0.0" property-information "^7.0.0"
space-separated-tokens "^2.0.0" space-separated-tokens "^2.0.0"
highlight-words-core@^1.2.0:
version "1.2.3"
resolved "https://registry.npmmirror.com/highlight-words-core/-/highlight-words-core-1.2.3.tgz#781f37b2a220bf998114e4ef8c8cb6c7a4802ea8"
integrity sha512-m1O9HW3/GNHxzSIXWw1wCNXXsgLlxrP0OI6+ycGUhiUHkikqW3OrwVHz+lxeNBe5yqLESdIcj8PowHQ2zLvUvQ==
highlight.js@^11.7.0, highlight.js@~11.11.0: highlight.js@^11.7.0, highlight.js@~11.11.0:
version "11.11.1" version "11.11.1"
resolved "https://registry.npmmirror.com/highlight.js/-/highlight.js-11.11.1.tgz" resolved "https://registry.npmmirror.com/highlight.js/-/highlight.js-11.11.1.tgz"
@ -3680,6 +3685,11 @@ make-event-props@^2.0.0:
resolved "https://registry.npmmirror.com/make-event-props/-/make-event-props-2.0.0.tgz#41f7a6e96841296d6835aebe94be86c25602f923" resolved "https://registry.npmmirror.com/make-event-props/-/make-event-props-2.0.0.tgz#41f7a6e96841296d6835aebe94be86c25602f923"
integrity sha512-G/hncXrl4Qt7mauJEXSg3AcdYzmpkIITTNl5I+rH9sog5Yw0kK6vseJjCaPfOXqOqQuPUP89Rkhfz5kPS8ijtw== integrity sha512-G/hncXrl4Qt7mauJEXSg3AcdYzmpkIITTNl5I+rH9sog5Yw0kK6vseJjCaPfOXqOqQuPUP89Rkhfz5kPS8ijtw==
mark.js@^8.11.1:
version "8.11.1"
resolved "https://registry.npmmirror.com/mark.js/-/mark.js-8.11.1.tgz#180f1f9ebef8b0e638e4166ad52db879beb2ffc5"
integrity sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==
markdown-table@^3.0.0: markdown-table@^3.0.0:
version "3.0.4" version "3.0.4"
resolved "https://registry.npmmirror.com/markdown-table/-/markdown-table-3.0.4.tgz" resolved "https://registry.npmmirror.com/markdown-table/-/markdown-table-3.0.4.tgz"
@ -4025,6 +4035,11 @@ mdast-util-to-string@^4.0.0:
dependencies: dependencies:
"@types/mdast" "^4.0.0" "@types/mdast" "^4.0.0"
memoize-one@^4.0.0:
version "4.1.0"
resolved "https://registry.npmmirror.com/memoize-one/-/memoize-one-4.1.0.tgz#a2387c58c03fff27ca390c31b764a79addf3f906"
integrity sha512-2GApq0yI/b22J2j9rhbrAlsHb0Qcz+7yWxeLG8h+95sl1XPUgeLimQSOdur4Vw7cUhrBHwaUZxWFZueojqNRzA==
meow@^13.0.0: meow@^13.0.0:
version "13.2.0" version "13.2.0"
resolved "https://registry.npmmirror.com/meow/-/meow-13.2.0.tgz#6b7d63f913f984063b3cc261b6e8800c4cd3474f" resolved "https://registry.npmmirror.com/meow/-/meow-13.2.0.tgz#6b7d63f913f984063b3cc261b6e8800c4cd3474f"
@ -5318,6 +5333,14 @@ react-easy-crop@^5.5.3:
normalize-wheel "^1.0.1" normalize-wheel "^1.0.1"
tslib "^2.0.1" tslib "^2.0.1"
react-highlight-words@^0.21.0:
version "0.21.0"
resolved "https://registry.npmmirror.com/react-highlight-words/-/react-highlight-words-0.21.0.tgz#a109acdf7dc6fac3ed7db82e9cba94e8d65c281c"
integrity sha512-SdWEeU9fIINArEPO1rO5OxPyuhdEKZQhHzZZP1ie6UeXQf+CjycT1kWaB+9bwGcVbR0NowuHK3RqgqNg6bgBDQ==
dependencies:
highlight-words-core "^1.2.0"
memoize-one "^4.0.0"
react-is@^16.13.1: react-is@^16.13.1:
version "16.13.1" version "16.13.1"
resolved "https://registry.npmmirror.com/react-is/-/react-is-16.13.1.tgz" resolved "https://registry.npmmirror.com/react-is/-/react-is-16.13.1.tgz"