"""FastAPI backend for the Dashboard Nanobot panel.

Wires up the HTTP app, CORS, routers, workspace directories, runtime/config
managers, and the request/response models and helpers used by the bot APIs.
"""

import asyncio
import json
import logging
import os
import re
import shutil
import tempfile
import time
import zipfile
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional
from urllib.parse import quote, unquote
from zoneinfo import ZoneInfo

import httpx
from pydantic import BaseModel
from fastapi import Depends, FastAPI, File, Form, HTTPException, Request, UploadFile, WebSocket, WebSocketDisconnect
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from sqlmodel import Session, select

from core.config_manager import BotConfigManager
from core.cache import cache
from core.database import engine, get_session, init_database
from core.docker_manager import BotDockerManager
from core.speech_service import (
    SpeechDisabledError,
    SpeechDurationError,
    SpeechServiceError,
    WhisperSpeechService,
)
from core.settings import (
    BOTS_WORKSPACE_ROOT,
    DATA_ROOT,
    DATABASE_ECHO,
    DATABASE_ENGINE,
    DATABASE_URL_DISPLAY,
    AGENT_MD_TEMPLATES_FILE,
    DEFAULT_AGENTS_MD,
    DEFAULT_BOT_SYSTEM_TIMEZONE,
    DEFAULT_IDENTITY_MD,
    DEFAULT_SOUL_MD,
    DEFAULT_TOOLS_MD,
    DEFAULT_USER_MD,
    PANEL_ACCESS_PASSWORD,
    PROJECT_ROOT,
    REDIS_ENABLED,
    REDIS_PREFIX,
    REDIS_URL,
    TOPIC_PRESET_TEMPLATES,
    TOPIC_PRESETS_TEMPLATES_FILE,
    load_agent_md_templates,
    load_topic_presets_template,
)
from models.bot import BotInstance, BotMessage, NanobotImage
from models.platform import BotActivityEvent, BotRequestUsage
from models.skill import BotSkillInstall, SkillMarketItem
from models.topic import TopicItem, TopicTopic
from api.platform_router import router as platform_router
from api.topic_router import router as topic_router
from clients.edge.errors import is_expected_edge_offline_error, log_edge_failure, summarize_edge_exception
from clients.edge.http import HttpEdgeClient
from services.topic_runtime import publish_runtime_topic_packet
from services.platform_service import (
    bind_usage_message,
    create_usage_request,
    fail_latest_usage,
    finalize_usage_from_packet,
    get_chat_pull_page_size,
    get_platform_settings_snapshot,
    get_speech_runtime_settings,
    prune_expired_activity_events,
    record_activity_event,
)
from providers.provision.edge import EdgeProvisionProvider
from providers.provision.local import LocalProvisionProvider
from providers.registry import ProviderRegistry
from providers.runtime.edge import EdgeRuntimeProvider
from providers.runtime.local import LocalRuntimeProvider
from providers.selector import get_provision_provider, get_runtime_provider
from providers.target import (
    ProviderTarget,
    normalize_provider_target,
    provider_target_from_config,
    provider_target_to_dict,
)
from providers.workspace.edge import EdgeWorkspaceProvider
from providers.workspace.local import LocalWorkspaceProvider
from services.bot_command_service import BotCommandService
from services.node_registry_service import ManagedNode, NodeRegistryService
from services.runtime_service import RuntimeService
from services.workspace_service import WorkspaceService

app = FastAPI(title="Dashboard Nanobot API")
logger = logging.getLogger("dashboard.backend")


def _apply_log_noise_guard() -> None:
    """Silence chatty third-party loggers below WARNING level."""
    for name in (
        "httpx",
        "httpcore",
        "uvicorn.access",
        "watchfiles.main",
        "watchfiles.watcher",
    ):
        logging.getLogger(name).setLevel(logging.WARNING)


_apply_log_noise_guard()

# Wide-open CORS: the panel is expected to be fronted/authenticated elsewhere
# (panel/bot password guards below), so all origins are allowed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
app.include_router(topic_router)
app.include_router(platform_router)

# Ensure working directories exist before any manager touches them.
os.makedirs(BOTS_WORKSPACE_ROOT, exist_ok=True)
os.makedirs(DATA_ROOT, exist_ok=True)

docker_manager = BotDockerManager(host_data_root=BOTS_WORKSPACE_ROOT)
config_manager = BotConfigManager(host_data_root=BOTS_WORKSPACE_ROOT)
speech_service = WhisperSpeechService()
# Exposed on app.state so request handlers and services can reach them.
app.state.docker_manager = docker_manager
app.state.speech_service = speech_service

# Bot ids are restricted to safe filesystem/URL characters.
BOT_ID_PATTERN = re.compile(r"^[A-Za-z0-9_]+$")
BOT_ACCESS_PASSWORD_HEADER = "X-Bot-Access-Password"


class ChannelConfigRequest(BaseModel):
    """Payload describing one messaging-channel binding for a bot."""

    channel_type: str
    external_app_id: Optional[str] = None
    app_secret: Optional[str] = None
    internal_port: Optional[int] = None
    is_active: bool = True
    extra_config: Optional[Dict[str, Any]] = None
class ChannelConfigUpdateRequest(BaseModel):
    """Partial update for a channel binding; every field is optional."""

    channel_type: Optional[str] = None
    external_app_id: Optional[str] = None
    app_secret: Optional[str] = None
    internal_port: Optional[int] = None
    is_active: Optional[bool] = None
    extra_config: Optional[Dict[str, Any]] = None


class BotCreateRequest(BaseModel):
    """Full payload for creating a bot instance (LLM, resources, markdown docs, channels)."""

    id: str
    name: str
    enabled: Optional[bool] = True
    access_password: Optional[str] = None
    llm_provider: str
    llm_model: str
    api_key: str
    image_tag: Optional[str] = None
    system_prompt: Optional[str] = None
    api_base: Optional[str] = None
    temperature: float = 0.2
    top_p: float = 1.0
    max_tokens: int = 8192
    cpu_cores: float = 1.0
    memory_mb: int = 1024
    storage_gb: int = 10
    system_timezone: Optional[str] = None
    soul_md: Optional[str] = None
    agents_md: Optional[str] = None
    user_md: Optional[str] = None
    tools_md: Optional[str] = None
    tools_config: Optional[Dict[str, Any]] = None
    env_params: Optional[Dict[str, str]] = None
    identity_md: Optional[str] = None
    channels: Optional[List[ChannelConfigRequest]] = None
    send_progress: Optional[bool] = None
    send_tool_hints: Optional[bool] = None
    node_id: Optional[str] = None
    transport_kind: Optional[str] = None
    runtime_kind: Optional[str] = None
    core_adapter: Optional[str] = None


class BotUpdateRequest(BaseModel):
    """Partial update for an existing bot; only supplied fields are applied."""

    name: Optional[str] = None
    enabled: Optional[bool] = None
    access_password: Optional[str] = None
    llm_provider: Optional[str] = None
    llm_model: Optional[str] = None
    api_key: Optional[str] = None
    api_base: Optional[str] = None
    image_tag: Optional[str] = None
    system_prompt: Optional[str] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_tokens: Optional[int] = None
    cpu_cores: Optional[float] = None
    memory_mb: Optional[int] = None
    storage_gb: Optional[int] = None
    system_timezone: Optional[str] = None
    soul_md: Optional[str] = None
    agents_md: Optional[str] = None
    user_md: Optional[str] = None
    tools_md: Optional[str] = None
    tools_config: Optional[Dict[str, Any]] = None
    env_params: Optional[Dict[str, str]] = None
    identity_md: Optional[str] = None
    send_progress: Optional[bool] = None
    send_tool_hints: Optional[bool] = None
    node_id: Optional[str] = None
    transport_kind: Optional[str] = None
    runtime_kind: Optional[str] = None
    core_adapter: Optional[str] = None
class BotDeployRequest(BaseModel):
    """Request to deploy a bot onto a managed node."""

    node_id: str
    runtime_kind: Optional[str] = None
    image_tag: Optional[str] = None
    auto_start: bool = False


class BotToolsConfigUpdateRequest(BaseModel):
    tools_config: Optional[Dict[str, Any]] = None


class BotMcpConfigUpdateRequest(BaseModel):
    mcp_servers: Optional[Dict[str, Any]] = None


class BotEnvParamsUpdateRequest(BaseModel):
    env_params: Optional[Dict[str, str]] = None


class BotPageAuthLoginRequest(BaseModel):
    password: str


class CommandRequest(BaseModel):
    """A chat command sent to a bot, optionally with attachment paths."""

    command: Optional[str] = None
    attachments: Optional[List[str]] = None


class MessageFeedbackRequest(BaseModel):
    feedback: Optional[str] = None  # up | down | null


class WorkspaceFileUpdateRequest(BaseModel):
    content: str


class PanelLoginRequest(BaseModel):
    password: Optional[str] = None


class SystemTemplatesUpdateRequest(BaseModel):
    agent_md_templates: Optional[Dict[str, str]] = None
    topic_presets: Optional[Dict[str, Any]] = None


def _normalize_packet_channel(packet: Dict[str, Any]) -> str:
    """Return the packet's channel name lowercased, folding dashboard aliases to "dashboard"."""
    raw = str(packet.get("channel") or packet.get("source") or "").strip().lower()
    if raw in {"dashboard", "dashboard_channel", "dashboard-channel"}:
        return "dashboard"
    return raw


def _normalize_media_item(bot_id: str, value: Any) -> str:
    """Normalize one media path to a workspace-relative, forward-slash form.

    Absolute paths under the bot workspace (container path or host root) are
    rewritten relative to the workspace; anything else is returned with the
    leading slash stripped.
    """
    raw = str(value or "").strip().replace("\\", "/")
    if not raw:
        return ""
    # Container-side workspace prefix used inside the nanobot image.
    if raw.startswith("/root/.nanobot/workspace/"):
        return raw[len("/root/.nanobot/workspace/") :].lstrip("/")
    root = _workspace_root(bot_id)
    if os.path.isabs(raw):
        try:
            # Only relativize paths actually inside the workspace root.
            if os.path.commonpath([root, raw]) == root:
                return os.path.relpath(raw, root).replace("\\", "/")
        except Exception:
            # commonpath raises on mixed drive/absolute forms; fall through.
            pass
    return raw.lstrip("/")
def _normalize_media_list(raw: Any, bot_id: str) -> List[str]:
    """Normalize a raw media list via _normalize_media_item, dropping empties."""
    if not isinstance(raw, list):
        return []
    rows: List[str] = []
    for v in raw:
        s = _normalize_media_item(bot_id, v)
        if s:
            rows.append(s)
    return rows


def _persist_runtime_packet(bot_id: str, packet: Dict[str, Any]) -> Optional[int]:
    """Persist a runtime packet from the dashboard channel into the database.

    Handles AGENT_STATE (state/last-action update), ASSISTANT_MESSAGE and
    non-progress BUS_EVENT (durable assistant chat rows + usage finalization),
    and USER_COMMAND (durable user chat rows + usage binding). Returns the id
    of the persisted BotMessage row, if any. Packets from other channels or of
    other types are ignored.
    """
    packet_type = str(packet.get("type", "")).upper()
    if packet_type not in {"AGENT_STATE", "ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}:
        return None
    source_channel = _normalize_packet_channel(packet)
    if source_channel != "dashboard":
        return None
    persisted_message_id: Optional[int] = None
    with Session(engine) as session:
        bot = session.get(BotInstance, bot_id)
        if not bot:
            return None
        if packet_type == "AGENT_STATE":
            payload = packet.get("payload") or {}
            state = str(payload.get("state") or "").strip()
            action = str(payload.get("action_msg") or payload.get("msg") or "").strip()
            if state:
                bot.current_state = state
            if action:
                bot.last_action = action[:4000]
        elif packet_type == "ASSISTANT_MESSAGE":
            bot.current_state = "IDLE"
            text_msg = str(packet.get("text") or "").strip()
            media_list = _normalize_media_list(packet.get("media"), bot_id)
            if text_msg or media_list:
                if text_msg:
                    # Collapse whitespace; column capped at 4000 chars.
                    bot.last_action = " ".join(text_msg.split())[:4000]
                message_row = BotMessage(
                    bot_id=bot_id,
                    role="assistant",
                    text=text_msg,
                    media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
                )
                session.add(message_row)
                # Flush to obtain the autogenerated message id for usage linkage.
                session.flush()
                persisted_message_id = message_row.id
                # NOTE(review): finalize placement assumed inside the
                # text/media guard (mirrors the BUS_EVENT branch) — confirm.
                usage_row = finalize_usage_from_packet(
                    session,
                    bot_id,
                    {
                        **packet,
                        "message_id": persisted_message_id,
                    },
                )
        elif packet_type == "USER_COMMAND":
            text_msg = str(packet.get("text") or "").strip()
            media_list = _normalize_media_list(packet.get("media"), bot_id)
            if text_msg or media_list:
                message_row = BotMessage(
                    bot_id=bot_id,
                    role="user",
                    text=text_msg,
                    media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
                )
                session.add(message_row)
                session.flush()
                persisted_message_id = message_row.id
                bind_usage_message(
                    session,
                    bot_id,
                    str(packet.get("request_id") or "").strip(),
                    persisted_message_id,
                )
        elif packet_type == "BUS_EVENT":
            # Dashboard channel emits BUS_EVENT for both progress and final replies.
            # Persist only non-progress events to keep durable chat history clean.
            is_progress = bool(packet.get("is_progress"))
            detail_text = str(packet.get("content") or packet.get("text") or "").strip()
            if not is_progress:
                text_msg = detail_text
                media_list = _normalize_media_list(packet.get("media"), bot_id)
                if text_msg or media_list:
                    bot.current_state = "IDLE"
                    if text_msg:
                        bot.last_action = " ".join(text_msg.split())[:4000]
                    message_row = BotMessage(
                        bot_id=bot_id,
                        role="assistant",
                        text=text_msg,
                        media_json=json.dumps(media_list, ensure_ascii=False) if media_list else None,
                    )
                    session.add(message_row)
                    session.flush()
                    persisted_message_id = message_row.id
                    usage_row = finalize_usage_from_packet(
                        session,
                        bot_id,
                        {
                            "text": text_msg,
                            "usage": packet.get("usage"),
                            "request_id": packet.get("request_id"),
                            "provider": packet.get("provider"),
                            "model": packet.get("model"),
                            "message_id": persisted_message_id,
                        },
                    )
        # NOTE(review): naive UTC timestamp; rest of the file treats naive
        # datetimes as UTC (see _serialize_bot_message_row).
        bot.updated_at = datetime.utcnow()
        session.add(bot)
        session.commit()
    # Topic mirroring runs with its own session built from the engine.
    publish_runtime_topic_packet(
        engine,
        bot_id,
        packet,
        source_channel,
        persisted_message_id,
        logger,
    )
    if persisted_message_id:
        packet["message_id"] = persisted_message_id
    if packet_type in {"ASSISTANT_MESSAGE", "USER_COMMAND", "BUS_EVENT"}:
        _invalidate_bot_messages_cache(bot_id)
        _invalidate_bot_detail_cache(bot_id)
    return persisted_message_id
class WSConnectionManager:
    """Tracks open WebSocket connections per bot and broadcasts packets to them."""

    def __init__(self):
        # bot_id -> list of live sockets subscribed to that bot.
        self.connections: Dict[str, List[WebSocket]] = {}

    async def connect(self, bot_id: str, websocket: WebSocket):
        """Accept the socket and register it under the bot id."""
        await websocket.accept()
        self.connections.setdefault(bot_id, []).append(websocket)

    def disconnect(self, bot_id: str, websocket: WebSocket):
        """Remove the socket; drop the bot's bucket when it becomes empty."""
        conns = self.connections.get(bot_id, [])
        if websocket in conns:
            conns.remove(websocket)
        if not conns and bot_id in self.connections:
            del self.connections[bot_id]

    async def broadcast(self, bot_id: str, data: Dict[str, Any]):
        """Send *data* to every socket of the bot; prune sockets that fail."""
        # Iterate a copy so disconnect() can mutate the underlying list.
        conns = list(self.connections.get(bot_id, []))
        for ws in conns:
            try:
                await ws.send_json(data)
            except Exception:
                # Best-effort: a dead socket is silently dropped.
                self.disconnect(bot_id, ws)
manager = WSConnectionManager()


def _broadcast_runtime_packet(bot_id: str, packet: Dict[str, Any], loop: Any) -> None:
    """Schedule a broadcast onto the main event loop from a worker thread."""
    asyncio.run_coroutine_threadsafe(manager.broadcast(bot_id, packet), loop)


PANEL_ACCESS_PASSWORD_HEADER = "x-panel-password"


def _extract_bot_id_from_api_path(path: str) -> Optional[str]:
    """Return the URL-decoded bot id from a ``/api/bots/{id}/...`` path, else None."""
    raw = str(path or "").strip()
    if not raw.startswith("/api/bots/"):
        return None
    rest = raw[len("/api/bots/") :]
    if not rest:
        return None
    bot_id_segment = rest.split("/", 1)[0].strip()
    if not bot_id_segment:
        return None
    try:
        decoded = unquote(bot_id_segment)
    except Exception:
        decoded = bot_id_segment
    return str(decoded).strip() or None


def _get_supplied_panel_password_http(request: Request) -> str:
    """Read the panel password from header, falling back to a query param."""
    header_value = str(request.headers.get(PANEL_ACCESS_PASSWORD_HEADER) or "").strip()
    if header_value:
        return header_value
    query_value = str(request.query_params.get("panel_access_password") or "").strip()
    return query_value


def _get_supplied_bot_access_password_http(request: Request) -> str:
    """Read the bot access password from header, falling back to a query param."""
    header_value = str(request.headers.get(BOT_ACCESS_PASSWORD_HEADER) or "").strip()
    if header_value:
        return header_value
    query_value = str(request.query_params.get("bot_access_password") or "").strip()
    return query_value


def _validate_panel_access_password(supplied: str) -> Optional[str]:
    """Return an error message if the panel password check fails, else None.

    When no panel password is configured the check always passes.
    """
    configured = str(PANEL_ACCESS_PASSWORD or "").strip()
    if not configured:
        return None
    candidate = str(supplied or "").strip()
    if not candidate:
        return "Panel access password required"
    if candidate != configured:
        return "Invalid panel access password"
    return None


def _validate_bot_access_password(bot: BotInstance, supplied: str) -> Optional[str]:
    """Return an error message if the per-bot password check fails, else None.

    Bots without a configured access_password are open.
    """
    configured = str(getattr(bot, "access_password", "") or "").strip()
    if not configured:
        return None
    candidate = str(supplied or "").strip()
    if not candidate:
        return "Bot access password required"
    if candidate != configured:
        return "Invalid bot access password"
    return None
def _is_panel_protected_api_path(path: str, method: str = "GET") -> bool:
    """True when the path requires the panel password (non-bot admin APIs)."""
    raw = str(path or "").strip()
    verb = str(method or "GET").strip().upper()
    if not raw.startswith("/api/"):
        return False
    # Auth/health endpoints must stay reachable without a password.
    if raw in {
        "/api/panel/auth/status",
        "/api/panel/auth/login",
        "/api/health",
        "/api/health/cache",
    }:
        return False
    if _is_bot_panel_management_api_path(raw, verb):
        return True
    # Other bot-scoped APIs are not protected by panel password.
    if _extract_bot_id_from_api_path(raw):
        return False
    return True


def _is_bot_panel_management_api_path(path: str, method: str = "GET") -> bool:
    """True for bot lifecycle-management endpoints (start/stop/enable/... or PUT/DELETE on the bot)."""
    raw = str(path or "").strip()
    verb = str(method or "GET").strip().upper()
    if not raw.startswith("/api/bots/"):
        return False
    bot_id = _extract_bot_id_from_api_path(raw)
    if not bot_id:
        return False
    return (
        raw.endswith("/start")
        or raw.endswith("/stop")
        or raw.endswith("/enable")
        or raw.endswith("/disable")
        or raw.endswith("/deactivate")
        or (verb in {"PUT", "DELETE"} and raw == f"/api/bots/{bot_id}")
    )


def _is_bot_enable_api_path(path: str, method: str = "GET") -> bool:
    """True for ``POST /api/bots/{id}/enable`` only."""
    raw = str(path or "").strip()
    verb = str(method or "GET").strip().upper()
    if verb != "POST":
        return False
    bot_id = _extract_bot_id_from_api_path(raw)
    if not bot_id:
        return False
    return raw == f"/api/bots/{bot_id}/enable"


@app.middleware("http")
async def bot_access_password_guard(request: Request, call_next):
    """Central auth/enable gate for all API requests.

    - CORS preflight (OPTIONS) passes through.
    - Non-bot panel APIs require the panel password when one is configured.
    - Bot management APIs accept either the panel password or, failing that,
      the bot's own access password.
    - Disabled bots only allow reads, the enable endpoint, and auth login.
    """
    if request.method.upper() == "OPTIONS":
        return await call_next(request)
    bot_id = _extract_bot_id_from_api_path(request.url.path)
    if not bot_id:
        if _is_panel_protected_api_path(request.url.path, request.method):
            panel_error = _validate_panel_access_password(_get_supplied_panel_password_http(request))
            if panel_error:
                return JSONResponse(status_code=401, content={"detail": panel_error})
        return await call_next(request)
    with Session(engine) as session:
        bot = session.get(BotInstance, bot_id)
        if not bot:
            return JSONResponse(status_code=404, content={"detail": "Bot not found"})
        if _is_bot_panel_management_api_path(request.url.path, request.method):
            panel_error = _validate_panel_access_password(_get_supplied_panel_password_http(request))
            if panel_error:
                # Panel password failed; fall back to the bot's own password.
                bot_error = _validate_bot_access_password(bot, _get_supplied_bot_access_password_http(request))
                if bot_error:
                    return JSONResponse(status_code=401, content={"detail": bot_error})
        enabled = bool(getattr(bot, "enabled", True))
        if not enabled:
            is_enable_api = _is_bot_enable_api_path(request.url.path, request.method)
            is_read_api = request.method.upper() == "GET"
            is_auth_login = request.method.upper() == "POST" and request.url.path == f"/api/bots/{bot_id}/auth/login"
            if not (is_enable_api or is_read_api or is_auth_login):
                return JSONResponse(status_code=403, content={"detail": "Bot is disabled. Enable it first."})
    # NOTE(review): downstream handler runs after the session closes — the
    # session scope above was inferred from the mangled source; confirm.
    return await call_next(request)
@app.get("/api/panel/auth/status")
def get_panel_auth_status():
    """Report whether a panel access password is configured."""
    configured = str(PANEL_ACCESS_PASSWORD or "").strip()
    return {"enabled": bool(configured)}


@app.post("/api/panel/auth/login")
def panel_login(payload: PanelLoginRequest):
    """Validate the supplied panel password; 401 on mismatch."""
    configured = str(PANEL_ACCESS_PASSWORD or "").strip()
    if not configured:
        return {"success": True, "enabled": False}
    supplied = str(payload.password or "").strip()
    # NOTE(review): plain `!=` comparison is not timing-safe; consider
    # secrets.compare_digest for password checks.
    if supplied != configured:
        raise HTTPException(status_code=401, detail="Invalid panel access password")
    return {"success": True, "enabled": True}


def docker_callback(bot_id: str, packet: Dict[str, Any]):
    """Runtime-monitor callback (worker thread): persist the packet, then broadcast it.

    Broadcasting is skipped until the startup hook has captured the main loop.
    """
    _persist_runtime_packet(bot_id, packet)
    loop = getattr(app.state, "main_loop", None)
    if not loop or not loop.is_running():
        return
    asyncio.run_coroutine_threadsafe(manager.broadcast(bot_id, packet), loop)


def _cache_key_bots_list() -> str:
    return "bots:list"


def _cache_key_bot_detail(bot_id: str) -> str:
    return f"bot:detail:{bot_id}"


def _cache_key_bot_messages(bot_id: str, limit: int) -> str:
    # v2 namespace: bumped when the serialized message shape changed.
    return f"bot:messages:v2:{bot_id}:limit:{limit}"


def _cache_key_bot_messages_page(bot_id: str, limit: int, before_id: Optional[int]) -> str:
    """Key for one page of messages; cursor is "latest" when no before_id is given."""
    cursor = str(int(before_id)) if isinstance(before_id, int) and before_id > 0 else "latest"
    return f"bot:messages:page:v2:{bot_id}:before:{cursor}:limit:{limit}"
def _serialize_bot_message_row(bot_id: str, row: BotMessage) -> Dict[str, Any]:
    """Serialize a BotMessage row for the API; naive timestamps are treated as UTC."""
    created_at = row.created_at
    if created_at.tzinfo is None:
        created_at = created_at.replace(tzinfo=timezone.utc)
    return {
        "id": row.id,
        "bot_id": row.bot_id,
        "role": row.role,
        "text": row.text,
        "media": _parse_message_media(bot_id, getattr(row, "media_json", None)),
        "feedback": str(getattr(row, "feedback", "") or "").strip() or None,
        "ts": int(created_at.timestamp() * 1000),  # epoch milliseconds
    }


def _resolve_local_day_range(date_text: str, tz_offset_minutes: Optional[int]) -> tuple[datetime, datetime]:
    """Convert a local YYYY-MM-DD day into a [start, end) UTC datetime range.

    *tz_offset_minutes* follows the JS ``getTimezoneOffset`` convention
    (minutes to ADD to local time to get UTC). Raises HTTPException(400) on
    malformed inputs.
    """
    try:
        local_day = datetime.strptime(str(date_text or "").strip(), "%Y-%m-%d")
    except ValueError as exc:
        raise HTTPException(status_code=400, detail="Invalid date, expected YYYY-MM-DD") from exc
    offset_minutes = 0
    if tz_offset_minutes is not None:
        try:
            offset_minutes = int(tz_offset_minutes)
        except (TypeError, ValueError) as exc:
            raise HTTPException(status_code=400, detail="Invalid timezone offset") from exc
    utc_start = local_day + timedelta(minutes=offset_minutes)
    utc_end = utc_start + timedelta(days=1)
    return utc_start, utc_end


def _cache_key_images() -> str:
    return "images:list"


def _invalidate_bot_detail_cache(bot_id: str) -> None:
    """Drop the cached bots list and this bot's detail entry."""
    cache.delete(_cache_key_bots_list(), _cache_key_bot_detail(bot_id))


def _invalidate_bot_messages_cache(bot_id: str) -> None:
    """Drop all cached message pages for this bot.

    BUGFIX: the previous prefix ``bot:messages:{bot_id}:`` matched neither of
    the actual key namespaces (``bot:messages:v2:{bot_id}:...`` and
    ``bot:messages:page:v2:{bot_id}:...`` from the key builders), so cached
    chat pages were never invalidated after new messages were persisted.
    """
    cache.delete_prefix(f"bot:messages:v2:{bot_id}:")
    cache.delete_prefix(f"bot:messages:page:v2:{bot_id}:")


def _invalidate_images_cache() -> None:
    cache.delete(_cache_key_images())


@app.on_event("startup")
async def on_startup():
    """Startup hook: capture the loop, init DB/cache, migrate bot provider
    targets to the edge transport, and restore runtime monitors for bots that
    were RUNNING when the backend last stopped.
    """
    app.state.main_loop = asyncio.get_running_loop()
    _provider_target_overrides.clear()
    logger.info(
        "startup project_root=%s db_engine=%s db_echo=%s db_url=%s redis=%s panel_password=%s",
        PROJECT_ROOT,
        DATABASE_ENGINE,
        DATABASE_ECHO,
        DATABASE_URL_DISPLAY,
        "enabled" if cache.ping() else ("disabled" if REDIS_ENABLED else "not_configured"),
        "enabled" if str(PANEL_ACCESS_PASSWORD or "").strip() else "disabled",
    )
    init_database()
    # Flush every cache entry: DB may have changed while the backend was down.
    cache.delete_prefix("")
    with Session(engine) as session:
        node_registry_service.load_from_session(session)
        node_registry_service.upsert_node(session, _local_managed_node())
        pruned_events = prune_expired_activity_events(session, force=True)
        if pruned_events > 0:
            session.commit()
        target_dirty = False
        for bot in session.exec(select(BotInstance)).all():
            _migrate_bot_resources_store(bot.id)
            target = _resolve_bot_provider_target_for_instance(bot)
            # Coerce legacy non-edge targets onto the edge transport.
            if str(target.transport_kind or "").strip().lower() != "edge":
                target = normalize_provider_target(
                    {
                        "node_id": target.node_id,
                        "transport_kind": "edge",
                        "runtime_kind": target.runtime_kind,
                        "core_adapter": target.core_adapter,
                    },
                    fallback=_default_provider_target(),
                )
                _set_bot_provider_target(bot.id, target)
            # Sync denormalized columns on the bot row when they drift.
            if (
                str(getattr(bot, "node_id", "") or "").strip().lower() != target.node_id
                or str(getattr(bot, "transport_kind", "") or "").strip().lower() != target.transport_kind
                or str(getattr(bot, "runtime_kind", "") or "").strip().lower() != target.runtime_kind
                or str(getattr(bot, "core_adapter", "") or "").strip().lower() != target.core_adapter
            ):
                _apply_provider_target_to_bot(bot, target)
                session.add(bot)
                target_dirty = True
        if target_dirty:
            session.commit()
        running_bots = session.exec(select(BotInstance).where(BotInstance.docker_status == "RUNNING")).all()
        for bot in running_bots:
            try:
                runtime_service.ensure_monitor(app_state=app.state, bot=bot)
                # Resume tracking of the latest still-pending request, if any.
                pending_usage = session.exec(
                    select(BotRequestUsage)
                    .where(BotRequestUsage.bot_id == str(bot.id or "").strip())
                    .where(BotRequestUsage.status == "PENDING")
                    .order_by(BotRequestUsage.started_at.desc(), BotRequestUsage.id.desc())
                    .limit(1)
                ).first()
                if pending_usage and str(getattr(pending_usage, "request_id", "") or "").strip():
                    runtime_service.sync_edge_monitor_packets(
                        app_state=app.state,
                        bot=bot,
                        request_id=str(pending_usage.request_id or "").strip(),
                    )
            except HTTPException as exc:
                logger.warning(
                    "Skip runtime monitor restore on startup for bot_id=%s due to unavailable runtime backend: %s",
                    str(bot.id or ""),
                    str(getattr(exc, "detail", "") or exc),
                )
            except Exception:
                logger.exception("Failed to restore runtime monitor on startup for bot_id=%s", str(bot.id or ""))


def _provider_defaults(provider: str) -> tuple[str, str]:
    """Map a user-supplied provider name to (canonical provider, default API base)."""
    p = provider.lower().strip()
    if p in {"openrouter"}:
        return "openrouter", "https://openrouter.ai/api/v1"
    if p in {"dashscope", "aliyun", "qwen", "aliyun-qwen"}:
        return "dashscope", "https://dashscope.aliyuncs.com/compatible-mode/v1"
    if p in {"xunfei", "iflytek", "xfyun"}:
        return "openai", "https://spark-api-open.xf-yun.com/v1"
    if p in {"kimi", "moonshot"}:
        return "kimi", "https://api.moonshot.cn/v1"
    if p in {"minimax"}:
        return "minimax", "https://api.minimax.chat/v1"
    if p in {"vllm"}:
        return "openai", ""
    return p, ""


@app.get("/api/system/defaults")
def get_system_defaults():
    """Return template, limit, workspace, chat, topic and speech defaults for the UI."""
    md_templates = load_agent_md_templates()
    topic_presets = load_topic_presets_template()
    platform_settings = get_platform_settings_snapshot()
    speech_settings = get_speech_runtime_settings()
    return {
        "templates": {
            "soul_md": md_templates.get("soul_md") or DEFAULT_SOUL_MD,
            "agents_md": md_templates.get("agents_md") or DEFAULT_AGENTS_MD,
            "user_md": md_templates.get("user_md") or DEFAULT_USER_MD,
            "tools_md": md_templates.get("tools_md") or DEFAULT_TOOLS_MD,
            "identity_md": md_templates.get("identity_md") or DEFAULT_IDENTITY_MD,
        },
        "limits": {
            "upload_max_mb": platform_settings.upload_max_mb,
        },
        "workspace": {
            "download_extensions": list(platform_settings.workspace_download_extensions),
            "allowed_attachment_extensions": list(platform_settings.allowed_attachment_extensions),
        },
        "bot": {
            "system_timezone": _get_default_system_timezone(),
        },
        "loading_page": platform_settings.loading_page.model_dump(),
        "chat": {
            "pull_page_size": platform_settings.chat_pull_page_size,
            "page_size": platform_settings.page_size,
            "command_auto_unlock_seconds": platform_settings.command_auto_unlock_seconds,
        },
        "topic_presets": topic_presets.get("presets") or TOPIC_PRESET_TEMPLATES,
        "speech": {
            "enabled": speech_settings["enabled"],
            "model": speech_settings["model"],
            "device": speech_settings["device"],
            "max_audio_seconds": speech_settings["max_audio_seconds"],
            "default_language": speech_settings["default_language"],
        },
    }
def _write_json_atomic(path: str, payload: Dict[str, Any]) -> None:
    """Serialize *payload* as pretty JSON and atomically replace *path*."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    tmp_path = f"{path}.tmp"
    with open(tmp_path, "w", encoding="utf-8") as handle:
        json.dump(payload, handle, ensure_ascii=False, indent=2)
    # os.replace is atomic on POSIX, so readers never observe a partial file.
    os.replace(tmp_path, path)


def _write_text_atomic(path: str, content: str) -> None:
    """Write *content* verbatim (no newline translation) and atomically replace *path*."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    tmp_path = f"{path}.tmp"
    with open(tmp_path, "w", encoding="utf-8", newline="") as handle:
        handle.write(str(content or ""))
    os.replace(tmp_path, path)


@app.get("/api/system/templates")
def get_system_templates():
    """Return the editable agent-markdown templates and topic presets."""
    return {
        "agent_md_templates": load_agent_md_templates(),
        "topic_presets": load_topic_presets_template(),
    }


@app.put("/api/system/templates")
def update_system_templates(payload: SystemTemplatesUpdateRequest):
    """Persist template overrides to disk, then echo back the stored values.

    Agent markdown values are normalized to LF line endings; topic presets
    must be an array of objects (400 otherwise).
    """
    if payload.agent_md_templates is not None:
        template_keys = ("agents_md", "soul_md", "user_md", "tools_md", "identity_md")
        sanitized_agent: Dict[str, str] = {
            key: str(payload.agent_md_templates.get(key, "") or "").replace("\r\n", "\n")
            for key in template_keys
        }
        _write_json_atomic(str(AGENT_MD_TEMPLATES_FILE), sanitized_agent)
    if payload.topic_presets is not None:
        presets = payload.topic_presets.get("presets") if isinstance(payload.topic_presets, dict) else None
        if presets is None:
            normalized_topic: Dict[str, Any] = {"presets": []}
        elif isinstance(presets, list):
            # Keep only dict rows; copy each so later mutation can't leak back.
            normalized_topic = {"presets": [dict(row) for row in presets if isinstance(row, dict)]}
        else:
            raise HTTPException(status_code=400, detail="topic_presets.presets must be an array")
        _write_json_atomic(str(TOPIC_PRESETS_TEMPLATES_FILE), normalized_topic)
    return {
        "status": "ok",
        "agent_md_templates": load_agent_md_templates(),
        "topic_presets": load_topic_presets_template(),
    }
@app.get("/api/health")
def get_health():
    """Liveness check: run a trivial query; 503 when the database is unreachable."""
    try:
        with Session(engine) as session:
            session.exec(select(BotInstance).limit(1)).first()
        return {"status": "ok", "database": DATABASE_ENGINE}
    except Exception as e:
        raise HTTPException(status_code=503, detail=f"database check failed: {e}")


@app.get("/api/health/cache")
def get_cache_health():
    """Report Redis cache configuration/reachability; "degraded" when configured but unreachable."""
    redis_url = str(REDIS_URL or "").strip()
    configured = bool(REDIS_ENABLED and redis_url)
    client_enabled = bool(getattr(cache, "enabled", False))
    reachable = bool(cache.ping()) if client_enabled else False
    status = "ok"
    if configured and not reachable:
        status = "degraded"
    return {
        "status": status,
        "cache": {
            "configured": configured,
            "enabled": client_enabled,
            "reachable": reachable,
            "prefix": REDIS_PREFIX,
        },
    }


def _config_json_path(bot_id: str) -> str:
    """Local filesystem path of the bot's config.json."""
    return os.path.join(_bot_data_root(bot_id), "config.json")


def _read_bot_config(bot_id: str) -> Dict[str, Any]:
    """Load the bot config dict from the edge node when available, else local disk.

    Returns {} on any read/parse failure (best-effort read).
    """
    if _resolve_edge_state_context(bot_id) is not None:
        data = _read_edge_state_data(bot_id=bot_id, state_key="config", default_payload={})
        return data if isinstance(data, dict) else {}
    path = _config_json_path(bot_id)
    if not os.path.isfile(path):
        return {}
    try:
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        return data if isinstance(data, dict) else {}
    except Exception:
        return {}


def _write_bot_config(bot_id: str, config_data: Dict[str, Any]) -> None:
    """Persist the bot config: prefer edge-node state, fall back to an atomic local write."""
    normalized = dict(config_data if isinstance(config_data, dict) else {})
    if _write_edge_state_data(bot_id=bot_id, state_key="config", data=normalized):
        return
    path = _config_json_path(bot_id)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    tmp = f"{path}.tmp"
    with open(tmp, "w", encoding="utf-8") as f:
        json.dump(normalized, f, ensure_ascii=False, indent=2)
    os.replace(tmp, path)


# In-memory per-bot target overrides; cleared on startup, consulted before the DB.
_provider_target_overrides: Dict[str, ProviderTarget] = {}


def _default_provider_target() -> ProviderTarget:
    """Build the fallback provider target from optional app.state defaults."""
    return normalize_provider_target(
        {
            "node_id": getattr(app.state, "provider_default_node_id", None),
            "transport_kind": getattr(app.state, "provider_default_transport_kind", None),
            "runtime_kind": getattr(app.state, "provider_default_runtime_kind", None),
            "core_adapter": getattr(app.state, "provider_default_core_adapter", None),
        },
        fallback=ProviderTarget(),
    )
def _read_bot_provider_target(bot_id: str, config_data: Optional[Dict[str, Any]] = None) -> ProviderTarget:
    """Resolve a bot's provider target: override map, then DB row, then config file."""
    normalized_bot_id = str(bot_id or "").strip()
    if normalized_bot_id and normalized_bot_id in _provider_target_overrides:
        return _provider_target_overrides[normalized_bot_id]
    if normalized_bot_id:
        with Session(engine) as session:
            bot = session.get(BotInstance, normalized_bot_id)
            if bot is not None:
                return normalize_provider_target(
                    {
                        "node_id": getattr(bot, "node_id", None),
                        "transport_kind": getattr(bot, "transport_kind", None),
                        "runtime_kind": getattr(bot, "runtime_kind", None),
                        "core_adapter": getattr(bot, "core_adapter", None),
                    },
                    fallback=_default_provider_target(),
                )
    # No DB row: fall back to the (possibly pre-loaded) config dict.
    raw_config = config_data if isinstance(config_data, dict) else _read_bot_config(bot_id)
    return provider_target_from_config(raw_config, fallback=_default_provider_target())


def _resolve_bot_provider_target_for_instance(bot: BotInstance) -> ProviderTarget:
    """Resolve the target for an in-memory bot row without a fresh DB query when possible."""
    normalized_bot_id = str(getattr(bot, "id", "") or "").strip()
    if normalized_bot_id and normalized_bot_id in _provider_target_overrides:
        return _provider_target_overrides[normalized_bot_id]
    inline_values = {
        "node_id": getattr(bot, "node_id", None),
        "transport_kind": getattr(bot, "transport_kind", None),
        "runtime_kind": getattr(bot, "runtime_kind", None),
        "core_adapter": getattr(bot, "core_adapter", None),
    }
    # Use the row's own columns when at least one is populated.
    if any(str(value or "").strip() for value in inline_values.values()):
        return normalize_provider_target(inline_values, fallback=_default_provider_target())
    return _read_bot_provider_target(str(bot.id or ""))


def _set_provider_target_override(bot_id: str, target: ProviderTarget) -> None:
    """Register an in-memory target override for the bot (no-op for blank ids)."""
    normalized_bot_id = str(bot_id or "").strip()
    if not normalized_bot_id:
        return
    _provider_target_overrides[normalized_bot_id] = target
def _clear_provider_target_override(bot_id: str) -> None:
    """Remove the bot's in-memory target override, if present."""
    normalized_bot_id = str(bot_id or "").strip()
    if not normalized_bot_id:
        return
    _provider_target_overrides.pop(normalized_bot_id, None)


def _apply_provider_target_to_bot(bot: BotInstance, target: ProviderTarget) -> None:
    """Copy the target's fields onto the bot row's denormalized columns."""
    bot.node_id = target.node_id
    bot.transport_kind = target.transport_kind
    bot.runtime_kind = target.runtime_kind
    bot.core_adapter = target.core_adapter


def _local_managed_node() -> ManagedNode:
    """Build the built-in "local" edge node from environment configuration."""
    return ManagedNode(
        node_id="local",
        display_name="Local Node",
        base_url=str(os.getenv("LOCAL_EDGE_BASE_URL", "http://127.0.0.1:8010") or "http://127.0.0.1:8010").strip(),
        enabled=True,
        auth_token=str(os.getenv("EDGE_AUTH_TOKEN", "") or "").strip(),
        metadata={
            "transport_kind": "edge",
            "runtime_kind": "docker",
            "core_adapter": "nanobot",
            # EDGE_WORKSPACE_ROOT wins over the legacy EDGE_BOTS_WORKSPACE_ROOT.
            "workspace_root": str(
                os.getenv("EDGE_WORKSPACE_ROOT", os.getenv("EDGE_BOTS_WORKSPACE_ROOT", "")) or ""
            ).strip(),
            "native_command": str(os.getenv("EDGE_NATIVE_COMMAND", "") or "").strip(),
            "native_workdir": str(os.getenv("EDGE_NATIVE_WORKDIR", "") or "").strip(),
            "native_sandbox_mode": str(os.getenv("EDGE_NATIVE_SANDBOX_MODE", "inherit") or "inherit").strip().lower(),
        },
    )


def _provider_target_from_node(node_id: Optional[str]) -> Optional[ProviderTarget]:
    """Derive a ProviderTarget from a registered node's metadata; None when unknown."""
    normalized = str(node_id or "").strip().lower()
    if not normalized:
        return None
    node = node_registry_service.get_node(normalized)
    if node is None:
        return None
    metadata = dict(node.metadata or {})
    return ProviderTarget(
        node_id=node.node_id,
        transport_kind=str(metadata.get("transport_kind") or "edge").strip().lower() or "edge",
        runtime_kind=str(metadata.get("runtime_kind") or "docker").strip().lower() or "docker",
        core_adapter=str(metadata.get("core_adapter") or "nanobot").strip().lower() or "nanobot",
    )


node_registry_service = NodeRegistryService()
# Register the built-in local node and expose the registry to request handlers.
node_registry_service.register_node(_local_managed_node())
app.state.node_registry_service = node_registry_service


def _node_display_name(node_id: str) -> str:
    """Human-readable name for a node, falling back to the raw id."""
    node = node_registry_service.get_node(node_id)
    if node is not None:
        return str(node.display_name or node.node_id or node_id).strip() or str(node_id or "").strip()
    return str(node_id or "").strip()


def _node_metadata(node_id: str) -> Dict[str, Any]:
    """Copy of a node's metadata dict; {} for unknown nodes."""
    node = node_registry_service.get_node(node_id)
    if node is None:
        return {}
    return dict(node.metadata or {})


def _serialize_provider_target_summary(target: ProviderTarget) -> Dict[str, Any]:
    """Target fields plus a resolved node display name, for API responses."""
    return {
        **provider_target_to_dict(target),
        "node_display_name": _node_display_name(target.node_id),
    }


def _resolve_edge_client(target: ProviderTarget) -> HttpEdgeClient:
    """Build an HTTP edge client for the target's node; 400 when the node is unknown."""
    try:
        node = node_registry_service.require_node(target.node_id)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    return HttpEdgeClient(
        node=node,
        # trust_env=False: ignore proxy env vars for node-to-node traffic.
        http_client_factory=lambda: httpx.Client(timeout=15.0, trust_env=False),
        async_http_client_factory=lambda: httpx.AsyncClient(timeout=15.0, trust_env=False),
    )


def _resolve_edge_state_context(bot_id: str) -> Optional[tuple[HttpEdgeClient, Optional[str], str]]:
    """Return (client, workspace_root, node_id) when the bot lives on an edge node, else None."""
    normalized_bot_id = str(bot_id or "").strip()
    if not normalized_bot_id:
        return None
    with Session(engine) as session:
        bot = session.get(BotInstance, normalized_bot_id)
        if bot is None:
            return None
        target = _resolve_bot_provider_target_for_instance(bot)
    if str(target.transport_kind or "").strip().lower() != "edge":
        return None
    client = _resolve_edge_client(target)
    metadata = _node_metadata(target.node_id)
    workspace_root = str(metadata.get("workspace_root") or "").strip() or None
    return client, workspace_root, target.node_id


def _read_edge_state_data(
    *,
    bot_id: str,
    state_key: str,
    default_payload: Dict[str, Any],
) -> Dict[str, Any]:
    """Read a state document from the bot's edge node.

    Returns a copy of *default_payload* when the bot is not edge-hosted, the
    read fails (logged, rate-limited via log_edge_failure), or the payload has
    no dict "data" member.
    """
    context = _resolve_edge_state_context(bot_id)
    if context is None:
        return dict(default_payload)
    client, workspace_root, node_id = context
    try:
        payload = client.read_state(
            bot_id=bot_id,
            state_key=state_key,
            workspace_root=workspace_root,
        )
    except Exception as exc:
        log_edge_failure(
            logger,
            key=f"edge-state-read:{node_id}:{bot_id}:{state_key}",
            exc=exc,
            message=f"Failed to read edge state for bot_id={bot_id}, state_key={state_key}",
        )
        return dict(default_payload)
    data = payload.get("data")
    if isinstance(data, dict):
        return dict(data)
    return dict(default_payload)
payload = client.read_state( bot_id=bot_id, state_key=state_key, workspace_root=workspace_root, ) except Exception as exc: log_edge_failure( logger, key=f"edge-state-read:{node_id}:{bot_id}:{state_key}", exc=exc, message=f"Failed to read edge state for bot_id={bot_id}, state_key={state_key}", ) return dict(default_payload) data = payload.get("data") if isinstance(data, dict): return dict(data) return dict(default_payload) def _write_edge_state_data( *, bot_id: str, state_key: str, data: Dict[str, Any], ) -> bool: context = _resolve_edge_state_context(bot_id) if context is None: return False client, workspace_root, node_id = context try: client.write_state( bot_id=bot_id, state_key=state_key, data=dict(data or {}), workspace_root=workspace_root, ) except Exception as exc: log_edge_failure( logger, key=f"edge-state-write:{node_id}:{bot_id}:{state_key}", exc=exc, message=f"Failed to write edge state for bot_id={bot_id}, state_key={state_key}", ) raise return True def _resources_json_path(bot_id: str) -> str: return os.path.join(_bot_data_root(bot_id), "resources.json") def _write_bot_resources(bot_id: str, cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> None: normalized = _normalize_resource_limits(cpu_cores, memory_mb, storage_gb) payload = { "cpuCores": normalized["cpu_cores"], "memoryMB": normalized["memory_mb"], "storageGB": normalized["storage_gb"], } if _write_edge_state_data(bot_id=bot_id, state_key="resources", data=payload): return path = _resources_json_path(bot_id) os.makedirs(os.path.dirname(path), exist_ok=True) tmp = f"{path}.tmp" with open(tmp, "w", encoding="utf-8") as f: json.dump(payload, f, ensure_ascii=False, indent=2) os.replace(tmp, path) def _read_legacy_resource_values(bot_id: str, config_data: Optional[Dict[str, Any]] = None) -> tuple[Any, Any, Any]: cpu_raw: Any = None memory_raw: Any = None storage_raw: Any = None path = _resources_json_path(bot_id) if os.path.isfile(path): try: with open(path, "r", encoding="utf-8") as f: data = 
json.load(f) if isinstance(data, dict): cpu_raw = data.get("cpuCores", data.get("cpu_cores")) memory_raw = data.get("memoryMB", data.get("memory_mb")) storage_raw = data.get("storageGB", data.get("storage_gb")) except Exception: pass # Backward compatibility: read old runtime.resources only if new file is missing/incomplete. if cpu_raw is None or memory_raw is None or storage_raw is None: cfg = config_data if isinstance(config_data, dict) else _read_bot_config(bot_id) runtime_cfg = cfg.get("runtime") if isinstance(runtime_cfg, dict): resources_raw = runtime_cfg.get("resources") if isinstance(resources_raw, dict): if cpu_raw is None: cpu_raw = resources_raw.get("cpuCores", resources_raw.get("cpu_cores")) if memory_raw is None: memory_raw = resources_raw.get("memoryMB", resources_raw.get("memory_mb")) if storage_raw is None: storage_raw = resources_raw.get("storageGB", resources_raw.get("storage_gb")) return cpu_raw, memory_raw, storage_raw def _read_bot_resources(bot_id: str, config_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: edge_context = _resolve_edge_state_context(bot_id) cpu_raw: Any = None memory_raw: Any = None storage_raw: Any = None if edge_context is not None: data = _read_edge_state_data( bot_id=bot_id, state_key="resources", default_payload={}, ) cpu_raw = data.get("cpuCores", data.get("cpu_cores")) memory_raw = data.get("memoryMB", data.get("memory_mb")) storage_raw = data.get("storageGB", data.get("storage_gb")) if cpu_raw is None or memory_raw is None or storage_raw is None: legacy_cpu, legacy_memory, legacy_storage = _read_legacy_resource_values(bot_id, config_data=config_data) if cpu_raw is None: cpu_raw = legacy_cpu if memory_raw is None: memory_raw = legacy_memory if storage_raw is None: storage_raw = legacy_storage return _normalize_resource_limits(cpu_raw, memory_raw, storage_raw) cpu_raw, memory_raw, storage_raw = _read_legacy_resource_values(bot_id, config_data=config_data) return _normalize_resource_limits(cpu_raw, memory_raw, 
                                      storage_raw)


def _migrate_bot_resources_store(bot_id: str) -> None:
    """One-time migration of runtime.resources out of config.json.

    Skipped for edge-hosted bots (their state lives on the node). Writes
    resources.json from the legacy values if it does not exist yet, then
    removes the legacy key from config.json.
    """
    edge_context = _resolve_edge_state_context(bot_id)
    if edge_context is not None:
        return
    config_data = _read_bot_config(bot_id)
    runtime_cfg = config_data.get("runtime")
    resources_raw: Dict[str, Any] = {}
    if isinstance(runtime_cfg, dict):
        legacy_raw = runtime_cfg.get("resources")
        if isinstance(legacy_raw, dict):
            resources_raw = legacy_raw
    path = _resources_json_path(bot_id)
    if not os.path.isfile(path):
        _write_bot_resources(
            bot_id,
            resources_raw.get("cpuCores", resources_raw.get("cpu_cores")),
            resources_raw.get("memoryMB", resources_raw.get("memory_mb")),
            resources_raw.get("storageGB", resources_raw.get("storage_gb")),
        )
    if isinstance(runtime_cfg, dict) and "resources" in runtime_cfg:
        runtime_cfg.pop("resources", None)
        # Drop an emptied "runtime" section entirely, then persist the cleanup.
        if not runtime_cfg:
            config_data.pop("runtime", None)
        _write_bot_config(bot_id, config_data)


def _normalize_channel_extra(raw: Any) -> Dict[str, Any]:
    """Coerce a channel's extra_config to a dict ({} for anything else)."""
    if not isinstance(raw, dict):
        return {}
    return raw


def _normalize_allow_from(raw: Any) -> List[str]:
    """Normalize an allow-list to unique non-empty strings; empty means ["*"]."""
    rows: List[str] = []
    if isinstance(raw, list):
        for item in raw:
            text = str(item or "").strip()
            if text and text not in rows:
                rows.append(text)
    if not rows:
        return ["*"]
    return rows


def _read_global_delivery_flags(channels_cfg: Any) -> tuple[bool, bool]:
    """Return (sendProgress, sendToolHints) from the channels config.

    Top-level flags win; the legacy per-dashboard flags are used only when the
    top-level ones are absent.
    """
    if not isinstance(channels_cfg, dict):
        return False, False
    send_progress = channels_cfg.get("sendProgress")
    send_tool_hints = channels_cfg.get("sendToolHints")
    dashboard_cfg = channels_cfg.get("dashboard")
    if isinstance(dashboard_cfg, dict):
        if send_progress is None and "sendProgress" in dashboard_cfg:
            send_progress = dashboard_cfg.get("sendProgress")
        if send_tool_hints is None and "sendToolHints" in dashboard_cfg:
            send_tool_hints = dashboard_cfg.get("sendToolHints")
    return bool(send_progress), bool(send_tool_hints)


def _channel_cfg_to_api_dict(bot_id: str, ctype: str, cfg: Dict[str, Any]) -> Dict[str, Any]:
    """Map one config.json channel entry to the API row shape.

    Per channel type, picks which config keys act as external_app_id /
    app_secret and which land in extra_config; unknown types fall through to a
    generic mapping.
    """
    ctype = str(ctype or "").strip().lower()
    enabled = bool(cfg.get("enabled", True))
    # Clamp to the valid TCP port range; missing/zero falls back to 8080.
    port = max(1, min(int(cfg.get("port", 8080) or 8080), 65535))
    extra: Dict[str, Any] = {}
    external_app_id = ""
    app_secret = ""
    if ctype == "feishu":
        external_app_id = str(cfg.get("appId") or "")
        app_secret = str(cfg.get("appSecret") or "")
        extra = {
            "encryptKey": cfg.get("encryptKey", ""),
            "verificationToken": cfg.get("verificationToken", ""),
            "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])),
        }
    elif ctype == "dingtalk":
        external_app_id = str(cfg.get("clientId") or "")
        app_secret = str(cfg.get("clientSecret") or "")
        extra = {"allowFrom": _normalize_allow_from(cfg.get("allowFrom", []))}
    elif ctype == "telegram":
        app_secret = str(cfg.get("token") or "")
        extra = {
            "proxy": cfg.get("proxy", ""),
            "replyToMessage": bool(cfg.get("replyToMessage", False)),
            "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])),
        }
    elif ctype == "slack":
        external_app_id = str(cfg.get("botToken") or "")
        app_secret = str(cfg.get("appToken") or "")
        extra = {
            "mode": cfg.get("mode", "socket"),
            "replyInThread": bool(cfg.get("replyInThread", True)),
            "groupPolicy": cfg.get("groupPolicy", "mention"),
            "groupAllowFrom": cfg.get("groupAllowFrom", []),
            "reactEmoji": cfg.get("reactEmoji", "eyes"),
        }
    elif ctype == "qq":
        external_app_id = str(cfg.get("appId") or "")
        app_secret = str(cfg.get("secret") or "")
        extra = {"allowFrom": _normalize_allow_from(cfg.get("allowFrom", []))}
    elif ctype == "email":
        # Email keeps all transport settings in extra; no app id/secret pair.
        extra = {
            "consentGranted": bool(cfg.get("consentGranted", False)),
            "imapHost": str(cfg.get("imapHost") or ""),
            "imapPort": int(cfg.get("imapPort") or 993),
            "imapUsername": str(cfg.get("imapUsername") or ""),
            "imapPassword": str(cfg.get("imapPassword") or ""),
            "imapMailbox": str(cfg.get("imapMailbox") or "INBOX"),
            "imapUseSsl": bool(cfg.get("imapUseSsl", True)),
            "smtpHost": str(cfg.get("smtpHost") or ""),
            "smtpPort": int(cfg.get("smtpPort") or 587),
            "smtpUsername": str(cfg.get("smtpUsername") or ""),
            "smtpPassword": str(cfg.get("smtpPassword") or ""),
            "smtpUseTls": bool(cfg.get("smtpUseTls",
                                             True)),
            "smtpUseSsl": bool(cfg.get("smtpUseSsl", False)),
            "fromAddress": str(cfg.get("fromAddress") or ""),
            "autoReplyEnabled": bool(cfg.get("autoReplyEnabled", True)),
            "pollIntervalSeconds": int(cfg.get("pollIntervalSeconds") or 30),
            "markSeen": bool(cfg.get("markSeen", True)),
            "maxBodyChars": int(cfg.get("maxBodyChars") or 12000),
            "subjectPrefix": str(cfg.get("subjectPrefix") or "Re: "),
            "allowFrom": _normalize_allow_from(cfg.get("allowFrom", [])),
        }
    else:
        # Unknown channel type: probe the common credential key names, and pass
        # every remaining key through as extra_config.
        external_app_id = str(
            cfg.get("appId") or cfg.get("clientId") or cfg.get("botToken") or cfg.get("externalAppId") or ""
        )
        app_secret = str(
            cfg.get("appSecret") or cfg.get("clientSecret") or cfg.get("secret") or cfg.get("token") or cfg.get("appToken") or ""
        )
        extra = {k: v for k, v in cfg.items() if k not in {"enabled", "port", "appId", "clientId", "botToken", "externalAppId", "appSecret", "clientSecret", "secret", "token", "appToken"}}
    return {
        "id": ctype,
        "bot_id": bot_id,
        "channel_type": ctype,
        "external_app_id": external_app_id,
        "app_secret": app_secret,
        "internal_port": port,
        "is_active": enabled,
        "extra_config": extra,
        # The dashboard channel is built-in and cannot be edited/removed.
        "locked": ctype == "dashboard",
    }


def _channel_api_to_cfg(row: Dict[str, Any]) -> Dict[str, Any]:
    """Inverse of _channel_cfg_to_api_dict: API row -> config.json entry.

    Per channel type, credentials move back to their native key names; unknown
    types merge extra_config with the generic appId/appSecret/port keys.
    """
    ctype = str(row.get("channel_type") or "").strip().lower()
    enabled = bool(row.get("is_active", True))
    extra = _normalize_channel_extra(row.get("extra_config"))
    external_app_id = str(row.get("external_app_id") or "")
    app_secret = str(row.get("app_secret") or "")
    # Clamp to the valid TCP port range; missing/zero falls back to 8080.
    port = max(1, min(int(row.get("internal_port") or 8080), 65535))
    if ctype == "feishu":
        return {
            "enabled": enabled,
            "appId": external_app_id,
            "appSecret": app_secret,
            "encryptKey": extra.get("encryptKey", ""),
            "verificationToken": extra.get("verificationToken", ""),
            "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
        }
    if ctype == "dingtalk":
        return {
            "enabled": enabled,
            "clientId": external_app_id,
            "clientSecret": app_secret,
            "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
        }
    if ctype == "telegram":
        return {
            "enabled": enabled,
            "token": app_secret,
            "proxy": extra.get("proxy", ""),
            "replyToMessage": bool(extra.get("replyToMessage", False)),
            "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
        }
    if ctype == "slack":
        return {
            "enabled": enabled,
            "mode": extra.get("mode", "socket"),
            "botToken": external_app_id,
            "appToken": app_secret,
            "replyInThread": bool(extra.get("replyInThread", True)),
            "groupPolicy": extra.get("groupPolicy", "mention"),
            "groupAllowFrom": extra.get("groupAllowFrom", []),
            "reactEmoji": extra.get("reactEmoji", "eyes"),
        }
    if ctype == "qq":
        return {
            "enabled": enabled,
            "appId": external_app_id,
            "secret": app_secret,
            "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
        }
    if ctype == "email":
        return {
            "enabled": enabled,
            "consentGranted": bool(extra.get("consentGranted", False)),
            "imapHost": str(extra.get("imapHost") or ""),
            "imapPort": max(1, min(int(extra.get("imapPort") or 993), 65535)),
            "imapUsername": str(extra.get("imapUsername") or ""),
            "imapPassword": str(extra.get("imapPassword") or ""),
            "imapMailbox": str(extra.get("imapMailbox") or "INBOX"),
            "imapUseSsl": bool(extra.get("imapUseSsl", True)),
            "smtpHost": str(extra.get("smtpHost") or ""),
            "smtpPort": max(1, min(int(extra.get("smtpPort") or 587), 65535)),
            "smtpUsername": str(extra.get("smtpUsername") or ""),
            "smtpPassword": str(extra.get("smtpPassword") or ""),
            "smtpUseTls": bool(extra.get("smtpUseTls", True)),
            "smtpUseSsl": bool(extra.get("smtpUseSsl", False)),
            "fromAddress": str(extra.get("fromAddress") or ""),
            "autoReplyEnabled": bool(extra.get("autoReplyEnabled", True)),
            "pollIntervalSeconds": max(5, int(extra.get("pollIntervalSeconds") or 30)),
            "markSeen": bool(extra.get("markSeen", True)),
            "maxBodyChars": max(1, int(extra.get("maxBodyChars") or 12000)),
            "subjectPrefix": str(extra.get("subjectPrefix") or "Re: "),
            "allowFrom": _normalize_allow_from(extra.get("allowFrom", [])),
        }
    merged = dict(extra)
    merged.update(
        {
            "enabled": enabled,
            "appId": external_app_id,
            "appSecret":
                         app_secret,
            "port": port,
        }
    )
    return merged


def _get_bot_channels_from_config(bot: BotInstance) -> List[Dict[str, Any]]:
    """List API channel rows for a bot, always prepending the built-in dashboard row."""
    config_data = _read_bot_config(bot.id)
    channels_cfg = config_data.get("channels")
    if not isinstance(channels_cfg, dict):
        channels_cfg = {}
    send_progress, send_tool_hints = _read_global_delivery_flags(channels_cfg)
    rows: List[Dict[str, Any]] = [
        {
            "id": "dashboard",
            "bot_id": bot.id,
            "channel_type": "dashboard",
            "external_app_id": f"dashboard-{bot.id}",
            "app_secret": "",
            "internal_port": 9000,
            "is_active": True,
            "extra_config": {
                "sendProgress": send_progress,
                "sendToolHints": send_tool_hints,
            },
            "locked": True,
        }
    ]
    for ctype, cfg in channels_cfg.items():
        # Skip the global flag keys and the synthetic dashboard entry.
        if ctype in {"sendProgress", "sendToolHints", "dashboard"}:
            continue
        if not isinstance(cfg, dict):
            continue
        rows.append(_channel_cfg_to_api_dict(bot.id, ctype, cfg))
    return rows


def _normalize_initial_channels(bot_id: str, channels: Optional[List[ChannelConfigRequest]]) -> List[Dict[str, Any]]:
    """Normalize channel rows supplied at bot-creation time.

    Deduplicates by channel type and rejects "dashboard" (it is built-in).
    """
    rows: List[Dict[str, Any]] = []
    seen_types: set[str] = set()
    for c in channels or []:
        ctype = (c.channel_type or "").strip().lower()
        if not ctype or ctype == "dashboard" or ctype in seen_types:
            continue
        seen_types.add(ctype)
        rows.append(
            {
                "id": ctype,
                "bot_id": bot_id,
                "channel_type": ctype,
                "external_app_id": (c.external_app_id or "").strip() or f"{ctype}-{bot_id}",
                "app_secret": (c.app_secret or "").strip(),
                "internal_port": max(1, min(int(c.internal_port or 8080), 65535)),
                "is_active": bool(c.is_active),
                "extra_config": _normalize_channel_extra(c.extra_config),
                "locked": False,
            }
        )
    return rows


def _parse_message_media(bot_id: str, media_raw: Optional[str]) -> List[str]:
    """Parse a message's JSON media column; any parse error yields []."""
    if not media_raw:
        return []
    try:
        parsed = json.loads(media_raw)
        return _normalize_media_list(parsed, bot_id)
    except Exception:
        return []


# Env var names: uppercase, start with letter/underscore, max 128 chars.
_ENV_KEY_RE = re.compile(r"^[A-Z_][A-Z0-9_]{0,127}$")


def _normalize_env_params(raw: Any) -> Dict[str, str]:
    """Normalize an env-var mapping: uppercase valid keys, stringify values.

    Keys failing _ENV_KEY_RE are silently dropped.
    """
    if not isinstance(raw, dict):
        return {}
    rows: Dict[str, str] = {}
    for k, v in raw.items():
        key = str(k or "").strip().upper()
        if not key or not _ENV_KEY_RE.fullmatch(key):
            continue
        rows[key] = str(v or "").strip()
    return rows


def _get_default_system_timezone() -> str:
    """Return the configured default timezone, validated; else Asia/Shanghai."""
    value = str(DEFAULT_BOT_SYSTEM_TIMEZONE or "").strip() or "Asia/Shanghai"
    try:
        # ZoneInfo raises for unknown IANA names — used purely as validation.
        ZoneInfo(value)
        return value
    except Exception:
        return "Asia/Shanghai"


def _normalize_system_timezone(raw: Any) -> str:
    """Validate a user-supplied IANA timezone name.

    Empty input falls back to the default; an invalid name raises ValueError.
    """
    value = str(raw or "").strip()
    if not value:
        return _get_default_system_timezone()
    try:
        ZoneInfo(value)
    except Exception as exc:
        raise ValueError("Invalid system timezone. Use an IANA timezone such as Asia/Shanghai.") from exc
    return value


def _resolve_bot_env_params(bot_id: str, raw: Optional[Dict[str, str]] = None) -> Dict[str, str]:
    """Return the bot's env vars with TZ guaranteed present and valid."""
    env_params = _normalize_env_params(raw if isinstance(raw, dict) else _read_env_store(bot_id))
    try:
        env_params["TZ"] = _normalize_system_timezone(env_params.get("TZ"))
    except ValueError:
        env_params["TZ"] = _get_default_system_timezone()
    return env_params


# MCP server names: alphanumerics plus . _ -, max 64 chars.
_MCP_SERVER_NAME_RE = re.compile(r"^[A-Za-z0-9._-]{1,64}$")


def _normalize_mcp_servers(raw: Any) -> Dict[str, Dict[str, Any]]:
    """Validate and normalize an MCP server map.

    Entries with an invalid name, a non-dict config, or a missing url are
    dropped; transport type defaults to streamableHttp and toolTimeout is
    clamped to [1, 600] seconds.
    """
    if not isinstance(raw, dict):
        return {}
    rows: Dict[str, Dict[str, Any]] = {}
    for server_name, server_cfg in raw.items():
        name = str(server_name or "").strip()
        if not name or not _MCP_SERVER_NAME_RE.fullmatch(name):
            continue
        if not isinstance(server_cfg, dict):
            continue
        url = str(server_cfg.get("url") or "").strip()
        if not url:
            continue
        transport_type = str(server_cfg.get("type") or "streamableHttp").strip()
        if transport_type not in {"streamableHttp", "sse"}:
            transport_type = "streamableHttp"
        headers_raw = server_cfg.get("headers")
        headers: Dict[str, str] = {}
        if isinstance(headers_raw, dict):
            for k, v in headers_raw.items():
                hk = str(k or "").strip()
                if not hk:
                    continue
                headers[hk] = str(v or "").strip()
        timeout_raw = server_cfg.get("toolTimeout", 60)
        try:
            timeout = int(timeout_raw)
        except Exception:
            timeout = 60
        timeout = max(1, min(timeout, 600))
        rows[name] = {
            "type": transport_type,
            "url": url,
            "headers": headers,
            "toolTimeout": timeout,
        }
    return rows


def _merge_mcp_servers_preserving_extras(
    current_raw: Any,
    normalized: Dict[str, Dict[str, Any]],
) -> Dict[str, Dict[str, Any]]:
    """Preserve unknown per-server fields already present in config.json.

    Dashboard only edits a subset of MCP fields (type/url/headers/toolTimeout).
    Some MCP providers may rely on additional keys; dropping them can break startup.
    """
    current_map = current_raw if isinstance(current_raw, dict) else {}
    merged: Dict[str, Dict[str, Any]] = {}
    for name, normalized_cfg in normalized.items():
        base = current_map.get(name)
        base_cfg = dict(base) if isinstance(base, dict) else {}
        # Normalized (dashboard-edited) fields win over the stored ones.
        next_cfg = dict(base_cfg)
        next_cfg.update(normalized_cfg)
        merged[name] = next_cfg
    return merged


def _sanitize_mcp_servers_in_config_data(config_data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """Normalize tools.mcpServers and drop hidden invalid entries safely.

    Returns the sanitized mcpServers map written into config_data["tools"]["mcpServers"].
    """
    if not isinstance(config_data, dict):
        return {}
    tools_cfg = config_data.get("tools")
    if not isinstance(tools_cfg, dict):
        tools_cfg = {}
    current_raw = tools_cfg.get("mcpServers")
    normalized = _normalize_mcp_servers(current_raw)
    merged = _merge_mcp_servers_preserving_extras(current_raw, normalized)
    # Mutates config_data in place as well as returning the map.
    tools_cfg["mcpServers"] = merged
    config_data["tools"] = tools_cfg
    return merged


def _parse_env_params(raw: Any) -> Dict[str, str]:
    """Alias kept for callers that use the parse-style name."""
    return _normalize_env_params(raw)


def _safe_float(raw: Any, default: float) -> float:
    """float(raw), or *default* on any conversion failure."""
    try:
        return float(raw)
    except Exception:
        return default


def _safe_int(raw: Any, default: int) -> int:
    """int(raw), or *default* on any conversion failure."""
    try:
        return int(raw)
    except Exception:
        return default


def _normalize_resource_limits(cpu_cores: Any, memory_mb: Any, storage_gb: Any) -> Dict[str, Any]:
    """Normalize raw resource values into sane clamped limits.

    Negative values revert to defaults (1 core / 1024 MB / 10 GB); an explicit
    0 is preserved and means "unlimited". Otherwise values are clamped to
    cpu [0.1, 16], memory [256, 65536] MB, storage [1, 1024] GB.
    """
    cpu = _safe_float(cpu_cores, 1.0)
    mem = _safe_int(memory_mb, 1024)
    storage = _safe_int(storage_gb, 10)
    if cpu < 0:
        cpu = 1.0
    if mem < 0:
        mem = 1024
    if storage < 0:
        storage = 10
    normalized_cpu = 0.0 if cpu == 0 else min(16.0, max(0.1, cpu))
    normalized_mem = 0 if mem == 0 else min(65536, max(256, mem))
    normalized_storage = 0 if storage == 0 else min(1024, max(1, storage))
    return {
        "cpu_cores": normalized_cpu,
        "memory_mb": normalized_mem,
        "storage_gb": normalized_storage,
    }


def _read_workspace_md(bot_id: str, filename: str, default_value: str) -> str:
    """Read a workspace markdown file, returning *default_value* if missing/unreadable."""
    path = os.path.join(_workspace_root(bot_id), filename)
    if not os.path.isfile(path):
        return default_value
    try:
        with open(path, "r", encoding="utf-8") as f:
            return f.read().strip()
    except Exception:
        return default_value


def _read_bot_runtime_snapshot(bot: BotInstance) -> Dict[str, Any]:
    """Assemble the full runtime snapshot (provider, LLM, resources, prompts) for a bot."""
    config_data = _read_bot_config(bot.id)
    env_params = _resolve_bot_env_params(bot.id)
    target = _resolve_bot_provider_target_for_instance(bot)
    provider_name = ""
    provider_cfg: Dict[str, Any] = {}
    providers_cfg = config_data.get("providers")
    if isinstance(providers_cfg, dict):
        # Only the first configured provider is surfaced in the snapshot.
        for p_name, p_cfg in providers_cfg.items():
            provider_name = str(p_name or "").strip()
            if isinstance(p_cfg,
                          dict):
                provider_cfg = p_cfg
            break
    agents_defaults: Dict[str, Any] = {}
    agents_cfg = config_data.get("agents")
    if isinstance(agents_cfg, dict):
        defaults = agents_cfg.get("defaults")
        if isinstance(defaults, dict):
            agents_defaults = defaults
    channels_cfg = config_data.get("channels")
    send_progress, send_tool_hints = _read_global_delivery_flags(channels_cfg)
    llm_provider = provider_name or "dashscope"
    llm_model = str(agents_defaults.get("model") or "")
    api_key = str(provider_cfg.get("apiKey") or "").strip()
    api_base = str(provider_cfg.get("apiBase") or "").strip()
    api_base_lower = api_base.lower()
    # Xunfei Spark exposes an OpenAI-compatible endpoint; re-label it so the
    # UI shows the real vendor.
    if llm_provider == "openai" and ("spark-api-open.xf-yun.com" in api_base_lower or "xf-yun.com" in api_base_lower):
        llm_provider = "xunfei"
    soul_md = _read_workspace_md(bot.id, "SOUL.md", DEFAULT_SOUL_MD)
    resources = _read_bot_resources(bot.id, config_data=config_data)
    return {
        **provider_target_to_dict(target),
        "llm_provider": llm_provider,
        "llm_model": llm_model,
        "api_key": api_key,
        "api_base": api_base,
        "temperature": _safe_float(agents_defaults.get("temperature"), 0.2),
        "top_p": _safe_float(agents_defaults.get("topP"), 1.0),
        "max_tokens": _safe_int(agents_defaults.get("maxTokens"), 8192),
        "cpu_cores": resources["cpu_cores"],
        "memory_mb": resources["memory_mb"],
        "storage_gb": resources["storage_gb"],
        "system_timezone": env_params.get("TZ") or _get_default_system_timezone(),
        "send_progress": send_progress,
        "send_tool_hints": send_tool_hints,
        "soul_md": soul_md,
        "agents_md": _read_workspace_md(bot.id, "AGENTS.md", DEFAULT_AGENTS_MD),
        "user_md": _read_workspace_md(bot.id, "USER.md", DEFAULT_USER_MD),
        "tools_md": _read_workspace_md(bot.id, "TOOLS.md", DEFAULT_TOOLS_MD),
        "identity_md": _read_workspace_md(bot.id, "IDENTITY.md", DEFAULT_IDENTITY_MD),
        # system_prompt mirrors SOUL.md for backward compatibility.
        "system_prompt": soul_md,
    }


def _serialize_bot(bot: BotInstance) -> Dict[str, Any]:
    """Full detail serialization of a bot (includes prompts and runtime config)."""
    runtime = _read_bot_runtime_snapshot(bot)
    target = _resolve_bot_provider_target_for_instance(bot)
    return {
        "id": bot.id,
        "name": bot.name,
        "enabled": bool(getattr(bot, "enabled", True)),
        "access_password": bot.access_password or "",
        "has_access_password": bool(str(bot.access_password or "").strip()),
        "avatar_model": "base",
        "avatar_skin": "blue_suit",
        "image_tag": bot.image_tag,
        "llm_provider": runtime.get("llm_provider") or "",
        "llm_model": runtime.get("llm_model") or "",
        "system_prompt": runtime.get("system_prompt") or "",
        "api_base": runtime.get("api_base") or "",
        "temperature": _safe_float(runtime.get("temperature"), 0.2),
        "top_p": _safe_float(runtime.get("top_p"), 1.0),
        "max_tokens": _safe_int(runtime.get("max_tokens"), 8192),
        "cpu_cores": _safe_float(runtime.get("cpu_cores"), 1.0),
        "memory_mb": _safe_int(runtime.get("memory_mb"), 1024),
        "storage_gb": _safe_int(runtime.get("storage_gb"), 10),
        "system_timezone": str(runtime.get("system_timezone") or _get_default_system_timezone()),
        "send_progress": bool(runtime.get("send_progress")),
        "send_tool_hints": bool(runtime.get("send_tool_hints")),
        "node_id": target.node_id,
        "node_display_name": _node_display_name(target.node_id),
        "transport_kind": target.transport_kind,
        "runtime_kind": target.runtime_kind,
        "core_adapter": target.core_adapter,
        "soul_md": runtime.get("soul_md") or "",
        "agents_md": runtime.get("agents_md") or "",
        "user_md": runtime.get("user_md") or "",
        "tools_md": runtime.get("tools_md") or "",
        "identity_md": runtime.get("identity_md") or "",
        "workspace_dir": bot.workspace_dir,
        "docker_status": bot.docker_status,
        "current_state": bot.current_state,
        "last_action": bot.last_action,
        "created_at": bot.created_at,
        "updated_at": bot.updated_at,
    }


def _serialize_bot_list_item(bot: BotInstance) -> Dict[str, Any]:
    """Compact serialization used for list views (no prompts/secrets)."""
    target = _resolve_bot_provider_target_for_instance(bot)
    return {
        "id": bot.id,
        "name": bot.name,
        "enabled": bool(getattr(bot, "enabled", True)),
        "has_access_password": bool(str(bot.access_password or "").strip()),
        "image_tag": bot.image_tag,
        "node_id": target.node_id,
        "node_display_name": _node_display_name(target.node_id),
        "transport_kind":
                          target.transport_kind,
        "runtime_kind": target.runtime_kind,
        "core_adapter": target.core_adapter,
        "docker_status": bot.docker_status,
        "current_state": bot.current_state,
        "last_action": bot.last_action,
        "updated_at": bot.updated_at,
    }


def _refresh_bot_runtime_status(app_state: Any, bot: BotInstance) -> str:
    """Refresh bot.docker_status from the runtime provider and return it.

    On provider failure, logs (rate-limited) and returns the last known
    status without mutating the bot. A non-RUNNING status resets
    current_state to IDLE unless it is ERROR.
    """
    current_status = str(bot.docker_status or "STOPPED").upper()
    try:
        status = str(get_runtime_provider(app_state, bot).get_runtime_status(bot_id=str(bot.id or "")) or "STOPPED").upper()
    except Exception as exc:
        log_edge_failure(
            logger,
            key=f"bot-runtime-status:{bot.id}",
            exc=exc,
            message=f"Failed to refresh runtime status for bot_id={bot.id}",
        )
        return current_status
    bot.docker_status = status
    if status != "RUNNING" and str(bot.current_state or "").upper() not in {"ERROR"}:
        bot.current_state = "IDLE"
    return status


# Log line emitted by the bot process once its agent loop is serving requests.
_AGENT_LOOP_READY_MARKER = "Agent loop started"


async def _wait_for_agent_loop_ready(
    bot_id: str,
    timeout_seconds: float = 12.0,
    poll_interval_seconds: float = 0.5,
) -> bool:
    """Poll container logs until the ready marker shows up or the deadline passes."""
    deadline = time.monotonic() + max(1.0, timeout_seconds)
    marker = _AGENT_LOOP_READY_MARKER.lower()
    while time.monotonic() < deadline:
        logs = docker_manager.get_recent_logs(bot_id, tail=200)
        # Case-insensitive match so wrapper/log-format changes do not break detection.
        if any(marker in str(line or "").lower() for line in logs):
            return True
        await asyncio.sleep(max(0.1, poll_interval_seconds))
    return False


async def _record_agent_loop_ready_warning(
    bot_id: str,
    timeout_seconds: float = 12.0,
    poll_interval_seconds: float = 0.5,
) -> None:
    """Background task: record a bot_warning activity event if the agent loop never reports ready.

    Skips the warning when the container already stopped (a different failure
    mode) or the bot row was deleted meanwhile. Never raises — failures are
    only logged.
    """
    try:
        agent_loop_ready = await _wait_for_agent_loop_ready(
            bot_id,
            timeout_seconds=timeout_seconds,
            poll_interval_seconds=poll_interval_seconds,
        )
        if agent_loop_ready:
            return
        if docker_manager.get_bot_status(bot_id) != "RUNNING":
            return
        detail = (
            "Bot container started, but ready marker was not found in logs within "
            f"{int(timeout_seconds)}s. Check bot logs or MCP config if the bot stays unavailable."
        )
        logger.warning("bot_id=%s agent loop ready marker not found within %ss", bot_id, timeout_seconds)
        with Session(engine) as background_session:
            if not background_session.get(BotInstance, bot_id):
                return
            record_activity_event(
                background_session,
                bot_id,
                "bot_warning",
                channel="system",
                detail=detail,
                metadata={
                    "kind": "agent_loop_ready_timeout",
                    "marker": _AGENT_LOOP_READY_MARKER,
                    "timeout_seconds": timeout_seconds,
                },
            )
            background_session.commit()
        _invalidate_bot_detail_cache(bot_id)
    except Exception:
        logger.exception("Failed to record agent loop readiness warning for bot_id=%s", bot_id)


def _sync_workspace_channels(
    session: Session,
    bot_id: str,
    channels_override: Optional[List[Dict[str, Any]]] = None,
    global_delivery_override: Optional[Dict[str, Any]] = None,
    runtime_overrides: Optional[Dict[str, Any]] = None,
) -> None:
    """Write the bot's workspace config from DB state plus optional overrides.

    Builds a full bot_data snapshot, applies runtime/delivery overrides,
    re-normalizes resources, and pushes bot_data + non-dashboard channels to
    the config manager; finally persists resources via _write_bot_resources.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        return
    snapshot = _read_bot_runtime_snapshot(bot)
    bot_data: Dict[str, Any] = {
        "name": bot.name,
        "node_id": snapshot.get("node_id") or _default_provider_target().node_id,
        "transport_kind": snapshot.get("transport_kind") or _default_provider_target().transport_kind,
        "runtime_kind": snapshot.get("runtime_kind") or _default_provider_target().runtime_kind,
        "core_adapter": snapshot.get("core_adapter") or _default_provider_target().core_adapter,
        "system_prompt": snapshot.get("system_prompt") or DEFAULT_SOUL_MD,
        "soul_md": snapshot.get("soul_md") or DEFAULT_SOUL_MD,
        "agents_md": snapshot.get("agents_md") or DEFAULT_AGENTS_MD,
        "user_md": snapshot.get("user_md") or DEFAULT_USER_MD,
        "tools_md": snapshot.get("tools_md") or DEFAULT_TOOLS_MD,
        "identity_md": snapshot.get("identity_md") or DEFAULT_IDENTITY_MD,
        "llm_provider": snapshot.get("llm_provider") or "dashscope",
        "llm_model": snapshot.get("llm_model") or "",
        "api_key": snapshot.get("api_key") or "",
        "api_base": snapshot.get("api_base") or "",
        "temperature": _safe_float(snapshot.get("temperature"), 0.2),
        "top_p": _safe_float(snapshot.get("top_p"), 1.0),
        "max_tokens": _safe_int(snapshot.get("max_tokens"), 8192),
        "cpu_cores": _safe_float(snapshot.get("cpu_cores"), 1.0),
        "memory_mb": _safe_int(snapshot.get("memory_mb"), 1024),
        "storage_gb": _safe_int(snapshot.get("storage_gb"), 10),
        "send_progress": bool(snapshot.get("send_progress")),
        "send_tool_hints": bool(snapshot.get("send_tool_hints")),
    }
    if isinstance(runtime_overrides, dict):
        for key, value in runtime_overrides.items():
            # Keep existing runtime secrets/config when caller sends empty placeholder values.
            if key in {"api_key", "llm_provider", "llm_model"}:
                text = str(value or "").strip()
                if not text:
                    continue
                bot_data[key] = text
                continue
            if key == "api_base":
                # api_base may be intentionally empty (use provider default), so keep explicit value.
                bot_data[key] = str(value or "").strip()
                continue
            bot_data[key] = value
    resources = _normalize_resource_limits(
        bot_data.get("cpu_cores"),
        bot_data.get("memory_mb"),
        bot_data.get("storage_gb"),
    )
    bot_data["cpu_cores"] = resources["cpu_cores"]
    bot_data["memory_mb"] = resources["memory_mb"]
    bot_data["storage_gb"] = resources["storage_gb"]
    send_progress = bool(bot_data.get("send_progress", False))
    send_tool_hints = bool(bot_data.get("send_tool_hints", False))
    if isinstance(global_delivery_override, dict):
        if "sendProgress" in global_delivery_override:
            send_progress = bool(global_delivery_override.get("sendProgress"))
        if "sendToolHints" in global_delivery_override:
            send_tool_hints = bool(global_delivery_override.get("sendToolHints"))
    channels_data = channels_override if channels_override is not None else _get_bot_channels_from_config(bot)
    bot_data["send_progress"] = send_progress
    bot_data["send_tool_hints"] = send_tool_hints
    normalized_channels: List[Dict[str, Any]] = []
    for row in channels_data:
        ctype = str(row.get("channel_type") or "").strip().lower()
        # The dashboard channel is managed internally and never written out.
        if not ctype or ctype == "dashboard":
            continue
        normalized_channels.append(
            {
                "channel_type": ctype,
                "external_app_id": str(row.get("external_app_id") or ""),
"app_secret": str(row.get("app_secret") or ""), "internal_port": max(1, min(int(row.get("internal_port") or 8080), 65535)), "is_active": bool(row.get("is_active", True)), "extra_config": _normalize_channel_extra(row.get("extra_config")), } ) config_manager.update_workspace( bot_id=bot_id, bot_data=bot_data, channels=normalized_channels, ) _write_bot_resources( bot_id, bot_data.get("cpu_cores"), bot_data.get("memory_mb"), bot_data.get("storage_gb"), ) def _set_bot_provider_target(bot_id: str, target: ProviderTarget) -> None: _set_provider_target_override(bot_id, target) def _sync_bot_workspace_via_provider( session: Session, bot: BotInstance, *, target_override: Optional[ProviderTarget] = None, channels_override: Optional[List[Dict[str, Any]]] = None, global_delivery_override: Optional[Dict[str, Any]] = None, runtime_overrides: Optional[Dict[str, Any]] = None, ) -> None: bot_id = str(bot.id or "") previous_override = _provider_target_overrides.get(bot_id) wrote_target = False try: if target_override is not None: _set_bot_provider_target(bot_id, target_override) wrote_target = True get_provision_provider(app.state, bot).sync_bot_workspace( session=session, bot_id=bot_id, channels_override=channels_override, global_delivery_override=global_delivery_override, runtime_overrides=runtime_overrides, ) except Exception: if wrote_target: if previous_override is not None: _set_provider_target_override(bot_id, previous_override) else: _clear_provider_target_override(bot_id) raise def reconcile_image_registry(session: Session): """Only reconcile status for images explicitly registered in DB.""" db_images = session.exec(select(NanobotImage)).all() for img in db_images: if docker_manager.has_image(img.tag): try: docker_img = docker_manager.client.images.get(img.tag) if docker_manager.client else None img.image_id = docker_img.id if docker_img else img.image_id except Exception: pass img.status = "READY" else: img.status = "UNKNOWN" session.add(img) session.commit() def 
_workspace_root(bot_id: str) -> str: return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot", "workspace")) def _bot_data_root(bot_id: str) -> str: return os.path.abspath(os.path.join(BOTS_WORKSPACE_ROOT, bot_id, ".nanobot")) def _skills_root(bot_id: str) -> str: return os.path.join(_workspace_root(bot_id), "skills") def _is_valid_top_level_skill_name(name: str) -> bool: text = str(name or "").strip() if not text: return False if "/" in text or "\\" in text: return False if text in {".", ".."}: return False return True def _read_skill_description(entry_path: str) -> str: candidates: List[str] = [] if os.path.isdir(entry_path): candidates = [ os.path.join(entry_path, "SKILL.md"), os.path.join(entry_path, "skill.md"), os.path.join(entry_path, "README.md"), os.path.join(entry_path, "readme.md"), ] elif entry_path.lower().endswith(".md"): candidates = [entry_path] for candidate in candidates: if not os.path.isfile(candidate): continue try: with open(candidate, "r", encoding="utf-8") as f: for line in f: text = line.strip() if text and not text.startswith("#"): return text[:240] except Exception: continue return "" def _list_workspace_skills(bot_id: str) -> List[Dict[str, Any]]: edge_context = _resolve_edge_state_context(bot_id) if edge_context is not None: client, workspace_root, node_id = edge_context try: payload = client.list_tree( bot_id=bot_id, path="skills", recursive=False, workspace_root=workspace_root, ) except Exception as exc: log_edge_failure( logger, key=f"skills-list:{node_id}:{bot_id}", exc=exc, message=f"Failed to list skills from edge workspace for bot_id={bot_id}", ) return [] rows: List[Dict[str, Any]] = [] for entry in list(payload.get("entries") or []): if not isinstance(entry, dict): continue name = str(entry.get("name") or "").strip() if not name or name.startswith("."): continue if not _is_valid_top_level_skill_name(name): continue entry_type = str(entry.get("type") or "").strip().lower() if entry_type not in {"dir", "file"}: 
continue mtime = str(entry.get("mtime") or "").strip() or (datetime.utcnow().isoformat() + "Z") size = entry.get("size") rows.append( { "id": name, "name": name, "type": entry_type, "path": f"skills/{name}", "size": int(size) if isinstance(size, (int, float)) and entry_type == "file" else None, "mtime": mtime, "description": "", } ) rows.sort(key=lambda row: (row.get("type") != "dir", str(row.get("name") or "").lower())) return rows root = _skills_root(bot_id) if not os.path.isdir(root): return [] rows: List[Dict[str, Any]] = [] names = sorted(os.listdir(root), key=lambda n: (not os.path.isdir(os.path.join(root, n)), n.lower())) for name in names: if not name or name.startswith("."): continue if not _is_valid_top_level_skill_name(name): continue abs_path = os.path.join(root, name) if not os.path.exists(abs_path): continue stat = os.stat(abs_path) rows.append( { "id": name, "name": name, "type": "dir" if os.path.isdir(abs_path) else "file", "path": f"skills/{name}", "size": stat.st_size if os.path.isfile(abs_path) else None, "mtime": datetime.utcfromtimestamp(stat.st_mtime).isoformat() + "Z", "description": _read_skill_description(abs_path), } ) return rows def _skill_market_root() -> str: return os.path.abspath(os.path.join(DATA_ROOT, "skills")) def _parse_json_string_list(raw: Any) -> List[str]: if not raw: return [] try: data = json.loads(str(raw)) except Exception: return [] if not isinstance(data, list): return [] rows: List[str] = [] for item in data: text = str(item or "").strip() if text and text not in rows: rows.append(text) return rows def _is_ignored_skill_zip_top_level(name: str) -> bool: text = str(name or "").strip() if not text: return True lowered = text.lower() if lowered == "__macosx": return True if text.startswith("."): return True return False def _read_description_from_text(raw: str) -> str: for line in str(raw or "").splitlines(): text = line.strip() if text and not text.startswith("#"): return text[:240] return "" def 
_extract_skill_zip_summary(zip_path: str) -> Dict[str, Any]: entry_names: List[str] = [] description = "" with zipfile.ZipFile(zip_path) as archive: members = archive.infolist() file_members = [member for member in members if not member.is_dir()] for member in file_members: raw_name = str(member.filename or "").replace("\\", "/").lstrip("/") if not raw_name: continue first = raw_name.split("/", 1)[0].strip() if _is_ignored_skill_zip_top_level(first): continue if _is_valid_top_level_skill_name(first) and first not in entry_names: entry_names.append(first) candidates = sorted( [ str(member.filename or "").replace("\\", "/").lstrip("/") for member in file_members if str(member.filename or "").replace("\\", "/").rsplit("/", 1)[-1].lower() in {"skill.md", "readme.md"} ], key=lambda value: (value.count("/"), value.lower()), ) for candidate in candidates: try: with archive.open(candidate, "r") as fh: preview = fh.read(4096).decode("utf-8", errors="ignore") description = _read_description_from_text(preview) if description: break except Exception: continue return { "entry_names": entry_names, "description": description, } def _sanitize_skill_market_key(raw: Any) -> str: value = str(raw or "").strip().lower() value = re.sub(r"[^a-z0-9._-]+", "-", value) value = re.sub(r"-{2,}", "-", value).strip("._-") return value[:120] def _sanitize_zip_filename(raw: Any) -> str: filename = os.path.basename(str(raw or "").strip()) if not filename: return "" filename = filename.replace("\\", "/").rsplit("/", 1)[-1] stem, ext = os.path.splitext(filename) safe_stem = re.sub(r"[^A-Za-z0-9._-]+", "-", stem).strip("._-") if not safe_stem: safe_stem = "skill-package" safe_ext = ".zip" if ext.lower() == ".zip" else "" return f"{safe_stem[:180]}{safe_ext}" def _resolve_unique_skill_market_key(session: Session, preferred_key: str, exclude_id: Optional[int] = None) -> str: base_key = _sanitize_skill_market_key(preferred_key) or "skill" candidate = base_key counter = 2 while True: stmt = 
def _resolve_unique_skill_market_zip_filename(
    session: Session,
    filename: str,
    *,
    exclude_filename: Optional[str] = None,
    exclude_id: Optional[int] = None,
) -> str:
    """Return a zip filename unique both on disk and in the DB, suffixing '-N' as needed.

    Raises HTTPException(400) when the sanitized name is not a .zip file.
    `exclude_filename` / `exclude_id` let an update keep its current name/row.
    """
    root = _skill_market_root()
    os.makedirs(root, exist_ok=True)
    safe_name = _sanitize_zip_filename(filename)
    if not safe_name.lower().endswith(".zip"):
        raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
    candidate = safe_name
    stem, ext = os.path.splitext(safe_name)
    counter = 2
    while True:
        # A file on disk only conflicts when it is not the caller's own current file.
        file_conflict = os.path.exists(os.path.join(root, candidate)) and candidate != str(exclude_filename or "").strip()
        rows = session.exec(select(SkillMarketItem).where(SkillMarketItem.zip_filename == candidate)).all()
        db_conflict = next((row for row in rows if exclude_id is None or row.id != exclude_id), None)
        if not file_conflict and not db_conflict:
            return candidate
        candidate = f"{stem}-{counter}{ext}"
        counter += 1


async def _store_skill_market_zip_upload(
    session: Session,
    upload: UploadFile,
    *,
    exclude_filename: Optional[str] = None,
    exclude_id: Optional[int] = None,
) -> Dict[str, Any]:
    """Stream an uploaded skill zip into the market root with size/content validation.

    Spools to a temp file in the destination directory first, then atomically
    renames it into place. Returns stored filename, byte size, top-level entry
    names, and a best-effort description. Raises HTTPException on invalid input.
    """
    root = _skill_market_root()
    os.makedirs(root, exist_ok=True)
    incoming_name = _sanitize_zip_filename(upload.filename or "")
    if not incoming_name.lower().endswith(".zip"):
        raise HTTPException(status_code=400, detail="Only .zip skill package is supported")
    target_filename = _resolve_unique_skill_market_zip_filename(
        session,
        incoming_name,
        exclude_filename=exclude_filename,
        exclude_id=exclude_id,
    )
    max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024
    total_size = 0
    tmp_path: Optional[str] = None
    try:
        # Temp file lives in the target dir so the os.replace() below is atomic.
        with tempfile.NamedTemporaryFile(prefix=".skill_market_", suffix=".zip", dir=root, delete=False) as tmp_zip:
            tmp_path = tmp_zip.name
            while True:
                chunk = await upload.read(1024 * 1024)  # 1 MiB chunks
                if not chunk:
                    break
                total_size += len(chunk)
                if total_size > max_bytes:
                    raise HTTPException(
                        status_code=413,
                        detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)",
                    )
                tmp_zip.write(chunk)
        if total_size == 0:
            raise HTTPException(status_code=400, detail="Zip package is empty")
        summary = _extract_skill_zip_summary(tmp_path)
        if not summary["entry_names"]:
            raise HTTPException(status_code=400, detail="Zip package has no valid skill entries")
        final_path = os.path.join(root, target_filename)
        os.replace(tmp_path, final_path)
        tmp_path = None  # ownership transferred; skip the cleanup in finally
        return {
            "zip_filename": target_filename,
            "zip_size_bytes": total_size,
            "entry_names": summary["entry_names"],
            "description": summary["description"],
        }
    except zipfile.BadZipFile as exc:
        raise HTTPException(status_code=400, detail="Invalid zip file") from exc
    finally:
        await upload.close()
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)


def _serialize_skill_market_item(
    item: SkillMarketItem,
    *,
    install_count: int = 0,
    install_row: Optional[BotSkillInstall] = None,
    workspace_installed: Optional[bool] = None,
    installed_entries: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """Build the API payload for a market item, optionally merged with per-bot install state."""
    zip_path = os.path.join(_skill_market_root(), str(item.zip_filename or ""))
    entry_names = _parse_json_string_list(item.entry_names_json)
    payload = {
        "id": item.id,
        "skill_key": item.skill_key,
        "display_name": item.display_name or item.skill_key,
        "description": item.description or "",
        "zip_filename": item.zip_filename,
        "zip_size_bytes": int(item.zip_size_bytes or 0),
        "entry_names": entry_names,
        "entry_count": len(entry_names),
        "zip_exists": os.path.isfile(zip_path),
        "install_count": int(install_count or 0),
        "created_at": item.created_at.isoformat() + "Z" if item.created_at else None,
        "updated_at": item.updated_at.isoformat() + "Z" if item.updated_at else None,
    }
    if install_row is not None:
        # Explicit overrides from the caller win over the recorded install row.
        resolved_entries = installed_entries if installed_entries is not None else _parse_json_string_list(install_row.installed_entries_json)
        resolved_installed = workspace_installed if workspace_installed is not None else install_row.status == "INSTALLED"
        payload.update(
            {
                "installed": resolved_installed,
                "install_status": install_row.status,
                "installed_at": install_row.installed_at.isoformat() + "Z" if install_row.installed_at else None,
                "installed_entries": resolved_entries,
                "install_error": install_row.last_error,
            }
        )
    return payload
def _install_skill_zip_into_workspace(bot_id: str, zip_path: str) -> Dict[str, Any]:
    """Extract a validated skill zip into the bot's local skills directory.

    Extraction is staged in a temp dir inside the skills root and moved into
    place per top-level entry, with a zip-slip path check on every member.
    Raises HTTPException for edge bots, invalid archives, or name conflicts.
    """
    if _resolve_edge_state_context(bot_id) is not None:
        raise HTTPException(
            status_code=400,
            detail="Edge bot skill install by zip is disabled here. Use edge workspace upload/deploy flow.",
        )
    try:
        archive = zipfile.ZipFile(zip_path)
    except Exception as exc:
        raise HTTPException(status_code=400, detail="Invalid zip file") from exc
    skills_root = _skills_root(bot_id)
    os.makedirs(skills_root, exist_ok=True)
    installed: List[str] = []
    with archive:
        members = archive.infolist()
        file_members = [m for m in members if not m.is_dir()]
        if not file_members:
            raise HTTPException(status_code=400, detail="Zip package has no files")
        # First pass: validate and collect unique top-level entry names.
        top_names: List[str] = []
        for member in file_members:
            raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
            if not raw_name:
                continue
            first = raw_name.split("/", 1)[0].strip()
            if _is_ignored_skill_zip_top_level(first):
                continue
            if not _is_valid_top_level_skill_name(first):
                raise HTTPException(status_code=400, detail=f"Invalid skill entry name in zip: {first}")
            if first not in top_names:
                top_names.append(first)
        if not top_names:
            raise HTTPException(status_code=400, detail="Zip package has no valid skill entries")
        conflicts = [name for name in top_names if os.path.exists(os.path.join(skills_root, name))]
        if conflicts:
            raise HTTPException(status_code=400, detail=f"Skill already exists: {', '.join(conflicts)}")
        # Second pass: extract into a staging dir, then move entries into place.
        with tempfile.TemporaryDirectory(prefix=".skill_upload_", dir=skills_root) as tmp_dir:
            tmp_root = os.path.abspath(tmp_dir)
            for member in members:
                raw_name = str(member.filename or "").replace("\\", "/").lstrip("/")
                if not raw_name:
                    continue
                target = os.path.abspath(os.path.join(tmp_root, raw_name))
                # Zip-slip guard: the resolved member path must stay inside tmp_root.
                if os.path.commonpath([tmp_root, target]) != tmp_root:
                    raise HTTPException(status_code=400, detail=f"Unsafe zip entry path: {raw_name}")
                if member.is_dir():
                    os.makedirs(target, exist_ok=True)
                    continue
                os.makedirs(os.path.dirname(target), exist_ok=True)
                with archive.open(member, "r") as source, open(target, "wb") as dest:
                    shutil.copyfileobj(source, dest)
            for name in top_names:
                src = os.path.join(tmp_root, name)
                dst = os.path.join(skills_root, name)
                if not os.path.exists(src):
                    continue
                shutil.move(src, dst)
                installed.append(name)
    if not installed:
        raise HTTPException(status_code=400, detail="No skill entries installed from zip")
    return {
        "installed": installed,
        "skills": _list_workspace_skills(bot_id),
    }


def _cron_store_path(bot_id: str) -> str:
    """Path of the bot's persisted cron jobs file."""
    return os.path.join(_bot_data_root(bot_id), "cron", "jobs.json")


def _env_store_path(bot_id: str) -> str:
    """Path of the bot's persisted env-parameter file."""
    return os.path.join(_bot_data_root(bot_id), "env.json")


def _sessions_root(bot_id: str) -> str:
    """Directory holding the bot's persisted session .jsonl files."""
    return os.path.join(_workspace_root(bot_id), "sessions")


def _clear_bot_sessions(bot_id: str) -> int:
    """Remove persisted session files for the bot workspace.

    Only top-level .jsonl files are removed; returns the number deleted.
    """
    root = _sessions_root(bot_id)
    if not os.path.isdir(root):
        return 0
    deleted = 0
    for name in os.listdir(root):
        path = os.path.join(root, name)
        if not os.path.isfile(path):
            continue
        if not name.lower().endswith(".jsonl"):
            continue
        try:
            os.remove(path)
            deleted += 1
        except Exception:
            # Best effort: a locked or vanished file should not abort the sweep.
            continue
    return deleted


def _clear_bot_dashboard_direct_session(bot_id: str) -> Dict[str, Any]:
    """Truncate the dashboard:direct session file while preserving the workspace session root."""
    root = _sessions_root(bot_id)
    os.makedirs(root, exist_ok=True)
    path = os.path.join(root, "dashboard_direct.jsonl")
    existed = os.path.exists(path)
    # Opening in "w" mode truncates (or creates) the file.
    with open(path, "w", encoding="utf-8"):
        pass
    return {"path": path, "existed": existed}
def _read_env_store(bot_id: str) -> Dict[str, str]:
    """Load the bot's env params from edge state (remote bots) or the local env.json."""
    if _resolve_edge_state_context(bot_id) is not None:
        data = _read_edge_state_data(bot_id=bot_id, state_key="env", default_payload={})
        return _normalize_env_params(data)
    path = _env_store_path(bot_id)
    if not os.path.isfile(path):
        return {}
    try:
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        return _normalize_env_params(data)
    except Exception:
        # A corrupt store degrades to "no env params" rather than failing the caller.
        return {}


def _write_env_store(bot_id: str, env_params: Dict[str, str]) -> None:
    """Persist normalized env params to edge state or, locally, atomically to env.json."""
    normalized_env = _normalize_env_params(env_params)
    if _write_edge_state_data(bot_id=bot_id, state_key="env", data=normalized_env):
        return
    path = _env_store_path(bot_id)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    # Write-then-rename so readers never observe a partially written file.
    tmp = f"{path}.tmp"
    with open(tmp, "w", encoding="utf-8") as f:
        json.dump(normalized_env, f, ensure_ascii=False, indent=2)
    os.replace(tmp, path)


# --- Provider wiring -------------------------------------------------------
# Local providers run bots on this host; edge providers proxy to remote nodes.
# Providers receive callables (not module globals) to keep them decoupled.
local_provision_provider = LocalProvisionProvider(sync_workspace_func=_sync_workspace_channels)
local_runtime_provider = LocalRuntimeProvider(
    docker_manager=docker_manager,
    on_state_change=docker_callback,
    provision_provider=local_provision_provider,
    read_runtime_snapshot=_read_bot_runtime_snapshot,
    resolve_env_params=_resolve_bot_env_params,
    write_env_store=_write_env_store,
    invalidate_bot_cache=_invalidate_bot_detail_cache,
    record_agent_loop_ready_warning=_record_agent_loop_ready_warning,
    safe_float=_safe_float,
    safe_int=_safe_int,
)
local_workspace_provider = LocalWorkspaceProvider()
edge_provision_provider = EdgeProvisionProvider(
    read_provider_target=_read_bot_provider_target,
    resolve_edge_client=_resolve_edge_client,
    read_runtime_snapshot=_read_bot_runtime_snapshot,
    read_bot_channels=_get_bot_channels_from_config,
    read_node_metadata=_node_metadata,
)
edge_runtime_provider = EdgeRuntimeProvider(
    read_provider_target=_read_bot_provider_target,
    resolve_edge_client=_resolve_edge_client,
    read_runtime_snapshot=_read_bot_runtime_snapshot,
    resolve_env_params=_resolve_bot_env_params,
    read_bot_channels=_get_bot_channels_from_config,
    read_node_metadata=_node_metadata,
)
edge_workspace_provider = EdgeWorkspaceProvider(
    read_provider_target=_read_bot_provider_target,
    resolve_edge_client=_resolve_edge_client,
    read_node_metadata=_node_metadata,
)
local_provider_target = ProviderTarget(
    node_id="local",
    transport_kind="edge",
    runtime_kind="docker",
    core_adapter="nanobot",
)
provider_registry = ProviderRegistry()
provider_registry.register_bundle(
    key=local_provider_target.key,
    runtime_provider=local_runtime_provider,
    workspace_provider=local_workspace_provider,
    provision_provider=local_provision_provider,
)
# NOTE(review): this ProviderTarget is field-for-field identical to
# local_provider_target above, so (assuming .key is derived from the fields)
# this edge bundle appears to overwrite the local bundle just registered —
# confirm register_bundle semantics / the intended target keys.
provider_registry.register_bundle(
    key=ProviderTarget(node_id="local", transport_kind="edge", runtime_kind="docker", core_adapter="nanobot").key,
    runtime_provider=edge_runtime_provider,
    workspace_provider=edge_workspace_provider,
    provision_provider=edge_provision_provider,
)
provider_registry.register_bundle(
    key=ProviderTarget(node_id="local", transport_kind="edge", runtime_kind="native", core_adapter="nanobot").key,
    runtime_provider=edge_runtime_provider,
    workspace_provider=edge_workspace_provider,
    provision_provider=edge_provision_provider,
)
# Defaults and shared objects exposed on app.state for routers/services.
app.state.provider_default_node_id = local_provider_target.node_id
app.state.provider_default_transport_kind = local_provider_target.transport_kind
app.state.provider_default_runtime_kind = local_provider_target.runtime_kind
app.state.provider_default_core_adapter = local_provider_target.core_adapter
app.state.provider_registry = provider_registry
app.state.resolve_bot_provider_target = _resolve_bot_provider_target_for_instance
app.state.resolve_edge_client = _resolve_edge_client
app.state.edge_provision_provider = edge_provision_provider
app.state.edge_runtime_provider = edge_runtime_provider
app.state.edge_workspace_provider = edge_workspace_provider
app.state.provision_provider = local_provision_provider
app.state.runtime_provider = local_runtime_provider
app.state.workspace_provider = local_workspace_provider
def _ensure_provider_target_supported(target: ProviderTarget) -> None:
    """Raise HTTPException(400) when no provider bundle is registered for `target`."""
    key = provider_registry.resolve_bundle_key(target)
    if key is None:
        raise HTTPException(status_code=400, detail=f"Execution target is not supported yet: {target.key}")


def _read_cron_store(bot_id: str) -> Dict[str, Any]:
    """Load the bot's cron store ({"version": int >= 1, "jobs": list}).

    Reads from edge state for remote bots, otherwise from the local jobs.json.
    Any malformed or unreadable payload degrades to an empty store.
    """
    if _resolve_edge_state_context(bot_id) is not None:
        data = _read_edge_state_data(
            bot_id=bot_id,
            state_key="cron",
            default_payload={"version": 1, "jobs": []},
        )
        if not isinstance(data, dict):
            return {"version": 1, "jobs": []}
        jobs = data.get("jobs")
        if not isinstance(jobs, list):
            jobs = []
        try:
            version = int(data.get("version", 1) or 1)
        except Exception:
            version = 1
        return {"version": max(1, version), "jobs": jobs}
    path = _cron_store_path(bot_id)
    if not os.path.isfile(path):
        return {"version": 1, "jobs": []}
    try:
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        if not isinstance(data, dict):
            return {"version": 1, "jobs": []}
        jobs = data.get("jobs")
        if not isinstance(jobs, list):
            data["jobs"] = []
        if "version" not in data:
            data["version"] = 1
        return data
    except Exception:
        return {"version": 1, "jobs": []}


def _write_cron_store(bot_id: str, store: Dict[str, Any]) -> None:
    """Normalize and persist the cron store to edge state or atomically to disk."""
    normalized_store = dict(store if isinstance(store, dict) else {})
    jobs = normalized_store.get("jobs")
    if not isinstance(jobs, list):
        normalized_store["jobs"] = []
    try:
        normalized_store["version"] = max(1, int(normalized_store.get("version", 1) or 1))
    except Exception:
        normalized_store["version"] = 1
    if _write_edge_state_data(bot_id=bot_id, state_key="cron", data=normalized_store):
        return
    path = _cron_store_path(bot_id)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    # Write-then-rename keeps the store readable even if this write crashes.
    tmp = f"{path}.tmp"
    with open(tmp, "w", encoding="utf-8") as f:
        json.dump(normalized_store, f, ensure_ascii=False, indent=2)
    os.replace(tmp, path)


def _resolve_workspace_path(bot_id: str, rel_path: Optional[str] = None) -> tuple[str, str]:
    """Return (workspace_root, absolute_target) for a relative path, rejecting escapes.

    Raises HTTPException(400) when the resolved target falls outside the root.
    """
    root = _workspace_root(bot_id)
    rel = (rel_path or "").strip().replace("\\", "/")
    target = os.path.abspath(os.path.join(root, rel))
    # Path-traversal guard: the target must stay inside the workspace root.
    if os.path.commonpath([root, target]) != root:
        raise HTTPException(status_code=400, detail="invalid workspace path")
    return root, target
"").strip().replace("\\", "/") target = os.path.abspath(os.path.join(root, rel)) if os.path.commonpath([root, target]) != root: raise HTTPException(status_code=400, detail="invalid workspace path") return root, target def _calc_dir_size_bytes(path: str) -> int: total = 0 if not os.path.exists(path): return 0 for root, _, files in os.walk(path): for filename in files: try: file_path = os.path.join(root, filename) if os.path.islink(file_path): continue total += os.path.getsize(file_path) except Exception: continue return max(0, total) def _is_image_attachment_path(path: str) -> bool: lower = str(path or "").strip().lower() return lower.endswith(".png") or lower.endswith(".jpg") or lower.endswith(".jpeg") or lower.endswith(".webp") def _is_video_attachment_path(path: str) -> bool: lower = str(path or "").strip().lower() return ( lower.endswith(".mp4") or lower.endswith(".mov") or lower.endswith(".m4v") or lower.endswith(".webm") or lower.endswith(".mkv") or lower.endswith(".avi") ) def _is_visual_attachment_path(path: str) -> bool: return _is_image_attachment_path(path) or _is_video_attachment_path(path) bot_command_service = BotCommandService( read_runtime_snapshot=_read_bot_runtime_snapshot, normalize_media_list=_normalize_media_list, resolve_workspace_path=_resolve_workspace_path, is_visual_attachment_path=_is_visual_attachment_path, is_video_attachment_path=_is_video_attachment_path, create_usage_request=create_usage_request, record_activity_event=record_activity_event, fail_latest_usage=fail_latest_usage, persist_runtime_packet=_persist_runtime_packet, get_main_loop=lambda app_state: getattr(app_state, "main_loop", None), broadcast_packet=_broadcast_runtime_packet, ) workspace_service = WorkspaceService() runtime_service = RuntimeService( command_service=bot_command_service, resolve_runtime_provider=get_runtime_provider, clear_bot_sessions=_clear_bot_sessions, clear_dashboard_direct_session_file=_clear_bot_dashboard_direct_session, 
# Service wiring: services receive callables rather than importing module
# globals, keeping them testable and free of circular imports.
bot_command_service = BotCommandService(
    read_runtime_snapshot=_read_bot_runtime_snapshot,
    normalize_media_list=_normalize_media_list,
    resolve_workspace_path=_resolve_workspace_path,
    is_visual_attachment_path=_is_visual_attachment_path,
    is_video_attachment_path=_is_video_attachment_path,
    create_usage_request=create_usage_request,
    record_activity_event=record_activity_event,
    fail_latest_usage=fail_latest_usage,
    persist_runtime_packet=_persist_runtime_packet,
    get_main_loop=lambda app_state: getattr(app_state, "main_loop", None),
    broadcast_packet=_broadcast_runtime_packet,
)
workspace_service = WorkspaceService()
runtime_service = RuntimeService(
    command_service=bot_command_service,
    resolve_runtime_provider=get_runtime_provider,
    clear_bot_sessions=_clear_bot_sessions,
    clear_dashboard_direct_session_file=_clear_bot_dashboard_direct_session,
    invalidate_bot_detail_cache=_invalidate_bot_detail_cache,
    invalidate_bot_messages_cache=_invalidate_bot_messages_cache,
    record_activity_event=record_activity_event,
)
app.state.bot_command_service = bot_command_service
app.state.workspace_service = workspace_service
app.state.runtime_service = runtime_service


def _workspace_stat_ctime_iso(stat: os.stat_result) -> str:
    """Creation time of a stat result as a naive-UTC ISO string with 'Z' suffix.

    Prefers st_birthtime (macOS/BSD), then st_ctime; falls back to st_mtime
    when neither is usable.
    """
    ts = getattr(stat, "st_birthtime", None)
    if ts is None:
        ts = getattr(stat, "st_ctime", None)
    try:
        # datetime.utcfromtimestamp() is deprecated (3.12); build the same
        # naive-UTC value from an aware datetime instead.
        return datetime.fromtimestamp(float(ts), tz=timezone.utc).replace(tzinfo=None).isoformat() + "Z"
    except Exception:
        return datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).replace(tzinfo=None).isoformat() + "Z"


def _build_workspace_tree(path: str, root: str, depth: int) -> List[Dict[str, Any]]:
    """Recursive listing of `path` (dirs first, case-insensitive) down to `depth` levels.

    Paths in the result are relative to `root` with forward slashes.
    """
    rows: List[Dict[str, Any]] = []
    try:
        names = sorted(os.listdir(path), key=lambda v: (not os.path.isdir(os.path.join(path, v)), v.lower()))
    except FileNotFoundError:
        return rows
    for name in names:
        if name in {".DS_Store"}:
            continue
        abs_path = os.path.join(path, name)
        rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
        stat = os.stat(abs_path)
        base: Dict[str, Any] = {
            "name": name,
            "path": rel_path,
            "ctime": _workspace_stat_ctime_iso(stat),
            "mtime": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).replace(tzinfo=None).isoformat() + "Z",
        }
        if os.path.isdir(abs_path):
            node = {**base, "type": "dir"}
            if depth > 0:
                node["children"] = _build_workspace_tree(abs_path, root, depth - 1)
            rows.append(node)
            continue
        rows.append(
            {
                **base,
                "type": "file",
                "size": stat.st_size,
                "ext": os.path.splitext(name)[1].lower(),
            }
        )
    return rows


def _list_workspace_dir(path: str, root: str) -> List[Dict[str, Any]]:
    """Flat (non-recursive) listing of `path`, dirs first, paths relative to `root`."""
    rows: List[Dict[str, Any]] = []
    names = sorted(os.listdir(path), key=lambda v: (not os.path.isdir(os.path.join(path, v)), v.lower()))
    for name in names:
        if name in {".DS_Store"}:
            continue
        abs_path = os.path.join(path, name)
        rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
        stat = os.stat(abs_path)
        rows.append(
            {
                "name": name,
                "path": rel_path,
                "type": "dir" if os.path.isdir(abs_path) else "file",
                "size": stat.st_size if os.path.isfile(abs_path) else None,
                "ext": os.path.splitext(name)[1].lower() if os.path.isfile(abs_path) else "",
                "ctime": _workspace_stat_ctime_iso(stat),
                "mtime": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).replace(tzinfo=None).isoformat() + "Z",
            }
        )
    return rows
def _list_workspace_dir_recursive(path: str, root: str) -> List[Dict[str, Any]]:
    """Recursive flat listing of every dir/file under `path` (dirs first, sorted by path)."""
    rows: List[Dict[str, Any]] = []
    for walk_root, dirnames, filenames in os.walk(path):
        dirnames.sort(key=lambda v: v.lower())
        filenames.sort(key=lambda v: v.lower())
        for name in dirnames:
            if name in {".DS_Store"}:
                continue
            abs_path = os.path.join(walk_root, name)
            rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
            stat = os.stat(abs_path)
            rows.append(
                {
                    "name": name,
                    "path": rel_path,
                    "type": "dir",
                    "size": None,
                    "ext": "",
                    "ctime": _workspace_stat_ctime_iso(stat),
                    # utcfromtimestamp() is deprecated; equivalent aware->naive conversion.
                    "mtime": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).replace(tzinfo=None).isoformat() + "Z",
                }
            )
        for name in filenames:
            if name in {".DS_Store"}:
                continue
            abs_path = os.path.join(walk_root, name)
            rel_path = os.path.relpath(abs_path, root).replace("\\", "/")
            stat = os.stat(abs_path)
            rows.append(
                {
                    "name": name,
                    "path": rel_path,
                    "type": "file",
                    "size": stat.st_size,
                    "ext": os.path.splitext(name)[1].lower(),
                    "ctime": _workspace_stat_ctime_iso(stat),
                    "mtime": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).replace(tzinfo=None).isoformat() + "Z",
                }
            )
    rows.sort(key=lambda v: (v.get("type") != "dir", str(v.get("path", "")).lower()))
    return rows


@app.get("/api/images", response_model=List[NanobotImage])
def list_images(session: Session = Depends(get_session)):
    """Return all registered images, served from cache when the cached shape is valid."""
    cached = cache.get_json(_cache_key_images())
    if isinstance(cached, list) and all(isinstance(row, dict) for row in cached):
        return cached
    if isinstance(cached, list):
        # Cached payload has the wrong shape; drop it and rebuild below.
        _invalidate_images_cache()
    reconcile_image_registry(session)
    rows = session.exec(select(NanobotImage)).all()
    payload = [row.model_dump() for row in rows]
    cache.set_json(_cache_key_images(), payload, ttl=60)
    return payload


@app.delete("/api/images/{tag:path}")
def delete_image(tag: str, session: Session = Depends(get_session)):
    """Delete an image registration unless a bot still references it."""
    image = session.get(NanobotImage, tag)
    if not image:
        raise HTTPException(status_code=404, detail="Image not found")
    # Refuse deletion while any bot still uses this image.
    bots_using = session.exec(select(BotInstance).where(BotInstance.image_tag == tag)).all()
    if bots_using:
        raise HTTPException(status_code=400, detail=f"Cannot delete image: {len(bots_using)} bots are using it.")
    session.delete(image)
    session.commit()
    _invalidate_images_cache()
    return {"status": "deleted"}


@app.get("/api/docker-images")
def list_docker_images(repository: str = "nanobot-base"):
    """List images known to the local Docker daemon for one repository."""
    rows = docker_manager.list_images_by_repo(repository)
    return rows


@app.post("/api/images/register")
def register_image(payload: dict, session: Session = Depends(get_session)):
    """Register (or refresh) a DB row for an image that already exists in Docker."""
    tag = (payload.get("tag") or "").strip()
    source_dir = (payload.get("source_dir") or "manual").strip() or "manual"
    if not tag:
        raise HTTPException(status_code=400, detail="tag is required")
    if not docker_manager.has_image(tag):
        raise HTTPException(status_code=404, detail=f"Docker image not found: {tag}")
    # "repo:v1.2" -> "1.2"; tags without a colon are used verbatim as the version.
    version = tag.split(":")[-1].removeprefix("v") if ":" in tag else tag
    try:
        docker_img = docker_manager.client.images.get(tag) if docker_manager.client else None
        image_id = docker_img.id if docker_img else None
    except Exception:
        image_id = None
    row = session.get(NanobotImage, tag)
    if not row:
        row = NanobotImage(
            tag=tag,
            version=version,
            status="READY",
            source_dir=source_dir,
            image_id=image_id,
        )
    else:
        row.version = version
        row.status = "READY"
        row.source_dir = source_dir
        row.image_id = image_id
    session.add(row)
    session.commit()
    session.refresh(row)
    _invalidate_images_cache()
    return row


@app.post("/api/providers/test")
async def test_provider(payload: dict):
    """Probe an LLM provider's /models endpoint with the given key to validate credentials."""
    provider = (payload.get("provider") or "").strip()
    api_key = (payload.get("api_key") or "").strip()
    model = (payload.get("model") or "").strip()
    api_base = (payload.get("api_base") or "").strip()
    if not provider or not api_key:
        raise HTTPException(status_code=400, detail="provider and api_key are required")
    normalized_provider, default_base = _provider_defaults(provider)
    base = (api_base or default_base).rstrip("/")
    if normalized_provider not in {"openrouter", "dashscope", "kimi", "minimax", "openai", "deepseek"}:
        raise HTTPException(status_code=400, detail=f"provider not supported for test: {provider}")
    if not base:
        raise HTTPException(status_code=400, detail=f"api_base is required for provider: {provider}")
    headers = {"Authorization": f"Bearer {api_key}"}
    timeout = httpx.Timeout(20.0, connect=10.0)
    url = f"{base}/models"
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.get(url, headers=headers)
            if resp.status_code >= 400:
                return {
                    "ok": False,
                    "provider": normalized_provider,
                    "status_code": resp.status_code,
                    "detail": resp.text[:500],
                }
            data = resp.json()
            models_raw = data.get("data", []) if isinstance(data, dict) else []
            model_ids: List[str] = []
            for item in models_raw[:20]:
                if isinstance(item, dict) and item.get("id"):
                    model_ids.append(str(item["id"]))
            model_hint = ""
            if model:
                # Substring match: listed ids often carry vendor prefixes.
                model_hint = "model_found" if any(model in m for m in model_ids) else "model_not_listed"
            return {
                "ok": True,
                "provider": normalized_provider,
                "endpoint": url,
                "models_preview": model_ids[:8],
                "model_hint": model_hint,
            }
    except Exception as e:
        # Network-level failures are reported as a soft failure payload, not a 5xx.
        return {
            "ok": False,
            "provider": normalized_provider,
            "endpoint": url,
            "detail": str(e),
        }


def _require_ready_image(session: Session, image_tag: str, *, require_local_image: bool) -> NanobotImage:
    """Return the registered READY image row for `image_tag`, or raise 400.

    With `require_local_image`, the image must also exist in the local Docker daemon.
    """
    normalized_tag = str(image_tag or "").strip()
    if not normalized_tag:
        raise HTTPException(status_code=400, detail="image_tag is required")
    image_row = session.get(NanobotImage, normalized_tag)
    if not image_row:
        raise HTTPException(status_code=400, detail=f"Image not registered in DB: {normalized_tag}")
    if image_row.status != "READY":
        raise HTTPException(status_code=400, detail=f"Image status is not READY: {normalized_tag} ({image_row.status})")
    if require_local_image and not docker_manager.has_image(normalized_tag):
        raise HTTPException(status_code=400, detail=f"Docker image not found locally: {normalized_tag}")
    return image_row
@app.post("/api/bots")
def create_bot(payload: BotCreateRequest, session: Session = Depends(get_session)):
    """Create a bot row, then provision its workspace via the provider stack.

    Offline edge nodes are tolerated: the bot row is kept and sync is marked
    pending. Any other provisioning failure rolls the bot row back (best
    effort) and surfaces a 502.
    """
    normalized_bot_id = str(payload.id or "").strip()
    if not normalized_bot_id:
        raise HTTPException(status_code=400, detail="Bot ID is required")
    if not BOT_ID_PATTERN.fullmatch(normalized_bot_id):
        raise HTTPException(status_code=400, detail="Bot ID can only contain letters, numbers, and underscores")
    if session.get(BotInstance, normalized_bot_id):
        raise HTTPException(status_code=409, detail=f"Bot ID already exists: {normalized_bot_id}")
    normalized_env_params = _normalize_env_params(payload.env_params)
    try:
        normalized_env_params["TZ"] = _normalize_system_timezone(payload.system_timezone)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    provider_target = normalize_provider_target(
        {
            "node_id": payload.node_id,
            "transport_kind": payload.transport_kind,
            "runtime_kind": payload.runtime_kind,
            "core_adapter": payload.core_adapter,
        },
        fallback=_provider_target_from_node(payload.node_id) or _default_provider_target(),
    )
    _ensure_provider_target_supported(provider_target)
    normalized_image_tag = str(payload.image_tag or "").strip()
    if provider_target.runtime_kind == "docker":
        # Docker runtimes require a READY image that is also present locally.
        _require_ready_image(
            session,
            normalized_image_tag,
            require_local_image=True,
        )
    bot = BotInstance(
        id=normalized_bot_id,
        name=payload.name,
        enabled=bool(payload.enabled) if payload.enabled is not None else True,
        access_password=str(payload.access_password or ""),
        image_tag=normalized_image_tag,
        node_id=provider_target.node_id,
        transport_kind=provider_target.transport_kind,
        runtime_kind=provider_target.runtime_kind,
        core_adapter=provider_target.core_adapter,
        workspace_dir=os.path.join(BOTS_WORKSPACE_ROOT, normalized_bot_id),
    )
    session.add(bot)
    session.commit()
    session.refresh(bot)
    resource_limits = _normalize_resource_limits(payload.cpu_cores, payload.memory_mb, payload.storage_gb)
    workspace_synced = True
    sync_error_detail = ""
    try:
        _write_env_store(normalized_bot_id, normalized_env_params)
        _sync_bot_workspace_via_provider(
            session,
            bot,
            target_override=provider_target,
            channels_override=_normalize_initial_channels(normalized_bot_id, payload.channels),
            global_delivery_override={
                "sendProgress": bool(payload.send_progress) if payload.send_progress is not None else False,
                "sendToolHints": bool(payload.send_tool_hints) if payload.send_tool_hints is not None else False,
            },
            runtime_overrides={
                "llm_provider": payload.llm_provider,
                "llm_model": payload.llm_model,
                "api_key": payload.api_key,
                "api_base": payload.api_base or "",
                "temperature": payload.temperature,
                "top_p": payload.top_p,
                "max_tokens": payload.max_tokens,
                "cpu_cores": resource_limits["cpu_cores"],
                "memory_mb": resource_limits["memory_mb"],
                "storage_gb": resource_limits["storage_gb"],
                "node_id": provider_target.node_id,
                "transport_kind": provider_target.transport_kind,
                "runtime_kind": provider_target.runtime_kind,
                "core_adapter": provider_target.core_adapter,
                # system_prompt and soul_md fall back to each other before the default.
                "system_prompt": payload.system_prompt or payload.soul_md or DEFAULT_SOUL_MD,
                "soul_md": payload.soul_md or payload.system_prompt or DEFAULT_SOUL_MD,
                "agents_md": payload.agents_md or DEFAULT_AGENTS_MD,
                "user_md": payload.user_md or DEFAULT_USER_MD,
                "tools_md": payload.tools_md or DEFAULT_TOOLS_MD,
                "identity_md": payload.identity_md or DEFAULT_IDENTITY_MD,
                "send_progress": bool(payload.send_progress) if payload.send_progress is not None else False,
                "send_tool_hints": bool(payload.send_tool_hints) if payload.send_tool_hints is not None else False,
            },
        )
    except Exception as exc:
        if is_expected_edge_offline_error(exc):
            # Keep the bot; workspace sync will happen when the node comes back online.
            workspace_synced = False
            sync_error_detail = summarize_edge_exception(exc)
            logger.info(
                "Create bot pending sync due to offline edge bot_id=%s node=%s detail=%s",
                normalized_bot_id,
                provider_target.node_id,
                sync_error_detail,
            )
        else:
            detail = summarize_edge_exception(exc)
            try:
                # Best-effort rollback of the half-created bot row.
                doomed = session.get(BotInstance, normalized_bot_id)
                if doomed is not None:
                    session.delete(doomed)
                    session.commit()
                _clear_provider_target_override(normalized_bot_id)
            except Exception:
                session.rollback()
            raise HTTPException(status_code=502, detail=f"Failed to initialize bot workspace: {detail}") from exc
    session.refresh(bot)
    record_activity_event(
        session,
        normalized_bot_id,
        "bot_created",
        channel="system",
        detail=f"Bot {normalized_bot_id} created",
        metadata={
            "image_tag": normalized_image_tag,
            "workspace_synced": workspace_synced,
            "sync_error": sync_error_detail if not workspace_synced else "",
        },
    )
    if not workspace_synced:
        record_activity_event(
            session,
            normalized_bot_id,
            "bot_warning",
            channel="system",
            detail="Bot created, but node is offline. Workspace sync is pending.",
            metadata={"sync_error": sync_error_detail, "node_id": provider_target.node_id},
        )
    session.commit()
    _invalidate_bot_detail_cache(normalized_bot_id)
    return _serialize_bot(bot)


@app.get("/api/bots")
def list_bots(request: Request, session: Session = Depends(get_session)):
    """List all bots with refreshed runtime status, cached for 30 seconds."""
    cached = cache.get_json(_cache_key_bots_list())
    if isinstance(cached, list):
        return cached
    bots = session.exec(select(BotInstance)).all()
    dirty = False
    for bot in bots:
        previous_status = str(bot.docker_status or "").upper()
        previous_state = str(bot.current_state or "")
        actual_status = _refresh_bot_runtime_status(request.app.state, bot)
        # Only persist rows whose status/state actually changed.
        if previous_status != actual_status or previous_state != str(bot.current_state or ""):
            session.add(bot)
            dirty = True
    if dirty:
        session.commit()
        for bot in bots:
            session.refresh(bot)
    rows = [_serialize_bot_list_item(bot) for bot in bots]
    cache.set_json(_cache_key_bots_list(), rows, ttl=30)
    return rows


@app.get("/api/bots/{bot_id}")
def get_bot_detail(bot_id: str, request: Request, session: Session = Depends(get_session)):
    """Detail view for a single bot with refreshed runtime status, cached for 30 seconds."""
    cached = cache.get_json(_cache_key_bot_detail(bot_id))
    if isinstance(cached, dict):
        return cached
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    previous_status = str(bot.docker_status or "").upper()
    previous_state = str(bot.current_state or "")
    actual_status = _refresh_bot_runtime_status(request.app.state, bot)
    if previous_status != actual_status or previous_state != str(bot.current_state or ""):
        session.add(bot)
        session.commit()
        session.refresh(bot)
    row = _serialize_bot(bot)
    cache.set_json(_cache_key_bot_detail(bot_id), row, ttl=30)
    return row
bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") previous_status = str(bot.docker_status or "").upper() previous_state = str(bot.current_state or "") actual_status = _refresh_bot_runtime_status(request.app.state, bot) if previous_status != actual_status or previous_state != str(bot.current_state or ""): session.add(bot) session.commit() session.refresh(bot) row = _serialize_bot(bot) cache.set_json(_cache_key_bot_detail(bot_id), row, ttl=30) return row @app.post("/api/bots/{bot_id}/auth/login") def login_bot_page(bot_id: str, payload: BotPageAuthLoginRequest, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") configured = str(bot.access_password or "").strip() if not configured: return {"ok": True, "enabled": False, "bot_id": bot_id} candidate = str(payload.password or "").strip() if not candidate: raise HTTPException(status_code=401, detail="Bot access password required") if candidate != configured: raise HTTPException(status_code=401, detail="Invalid bot access password") return {"ok": True, "enabled": True, "bot_id": bot_id} @app.get("/api/bots/{bot_id}/resources") def get_bot_resources(bot_id: str, request: Request, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") configured = _read_bot_resources(bot_id) try: runtime = get_runtime_provider(request.app.state, bot).get_resource_snapshot(bot_id=bot_id) except Exception as exc: log_edge_failure( logger, key=f"bot-resources:{bot_id}", exc=exc, message=f"Failed to refresh bot resources for bot_id={bot_id}", ) runtime = {"usage": {}, "limits": {}, "docker_status": str(bot.docker_status or "STOPPED").upper()} runtime_status = str(runtime.get("docker_status") or "").upper() previous_status = str(bot.docker_status or "").upper() previous_state = str(bot.current_state or "") if 
runtime_status: bot.docker_status = runtime_status if runtime_status != "RUNNING" and str(bot.current_state or "").upper() not in {"ERROR"}: bot.current_state = "IDLE" if previous_status != str(bot.docker_status or "").upper() or previous_state != str(bot.current_state or ""): session.add(bot) session.commit() session.refresh(bot) target = _resolve_bot_provider_target_for_instance(bot) usage_payload = dict(runtime.get("usage") or {}) workspace_bytes = int(usage_payload.get("container_rw_bytes") or usage_payload.get("workspace_used_bytes") or 0) workspace_root = "" if workspace_bytes <= 0: workspace_root = _workspace_root(bot_id) workspace_bytes = _calc_dir_size_bytes(workspace_root) elif target.transport_kind != "edge": workspace_root = _workspace_root(bot_id) configured_storage_bytes = int(configured.get("storage_gb", 0) or 0) * 1024 * 1024 * 1024 workspace_percent = 0.0 if configured_storage_bytes > 0: workspace_percent = (workspace_bytes / configured_storage_bytes) * 100.0 limits = runtime.get("limits") or {} cpu_limited = (limits.get("cpu_cores") or 0) > 0 memory_limited = (limits.get("memory_bytes") or 0) > 0 storage_limited = bool(limits.get("storage_bytes")) or bool(limits.get("storage_opt_raw")) return { "bot_id": bot_id, "docker_status": runtime.get("docker_status") or bot.docker_status, "configured": configured, "runtime": runtime, "workspace": { "path": workspace_root or None, "usage_bytes": workspace_bytes, "configured_limit_bytes": configured_storage_bytes if configured_storage_bytes > 0 else None, "usage_percent": max(0.0, workspace_percent), }, "enforcement": { "cpu_limited": cpu_limited, "memory_limited": memory_limited, "storage_limited": storage_limited, }, "note": ( "Resource value 0 means unlimited. CPU/Memory limits come from Docker HostConfig and are enforced by cgroup. " "Storage limit depends on Docker storage driver support." 
), "collected_at": datetime.utcnow().isoformat() + "Z", } @app.put("/api/bots/{bot_id}") def update_bot(bot_id: str, payload: BotUpdateRequest, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") update_data = payload.model_dump(exclude_unset=True) env_params = update_data.pop("env_params", None) if isinstance(update_data, dict) else None system_timezone = update_data.pop("system_timezone", None) if isinstance(update_data, dict) else None normalized_system_timezone: Optional[str] = None if system_timezone is not None: try: normalized_system_timezone = _normalize_system_timezone(system_timezone) except ValueError as exc: raise HTTPException(status_code=400, detail=str(exc)) from exc runtime_overrides: Dict[str, Any] = {} update_data.pop("tools_config", None) if isinstance(update_data, dict) else None runtime_fields = { "llm_provider", "llm_model", "api_key", "api_base", "temperature", "top_p", "max_tokens", "cpu_cores", "memory_mb", "storage_gb", "soul_md", "agents_md", "user_md", "tools_md", "identity_md", "send_progress", "send_tool_hints", "system_prompt", } execution_target_fields = { "node_id", "transport_kind", "runtime_kind", "core_adapter", } deploy_only_fields = {"image_tag", *execution_target_fields} if deploy_only_fields & set(update_data.keys()): raise HTTPException( status_code=400, detail=f"Use /api/bots/{bot_id}/deploy for execution target or image changes", ) for field in runtime_fields: if field in update_data: runtime_overrides[field] = update_data.pop(field) next_target: Optional[ProviderTarget] = None # Never allow empty placeholders to overwrite existing runtime model settings. 
for text_field in ("llm_provider", "llm_model", "api_key"): if text_field in runtime_overrides: text = str(runtime_overrides.get(text_field) or "").strip() if not text: runtime_overrides.pop(text_field, None) else: runtime_overrides[text_field] = text if "api_base" in runtime_overrides: runtime_overrides["api_base"] = str(runtime_overrides.get("api_base") or "").strip() if "system_prompt" in runtime_overrides and "soul_md" not in runtime_overrides: runtime_overrides["soul_md"] = runtime_overrides["system_prompt"] if "soul_md" in runtime_overrides and "system_prompt" not in runtime_overrides: runtime_overrides["system_prompt"] = runtime_overrides["soul_md"] if {"cpu_cores", "memory_mb", "storage_gb"} & set(runtime_overrides.keys()): normalized_resources = _normalize_resource_limits( runtime_overrides.get("cpu_cores"), runtime_overrides.get("memory_mb"), runtime_overrides.get("storage_gb"), ) runtime_overrides.update(normalized_resources) db_fields = {"name", "access_password", "enabled"} for key, value in update_data.items(): if key in db_fields: setattr(bot, key, value) previous_env_params: Optional[Dict[str, str]] = None next_env_params: Optional[Dict[str, str]] = None if env_params is not None or normalized_system_timezone is not None: previous_env_params = _resolve_bot_env_params(bot_id) next_env_params = dict(previous_env_params) if env_params is not None: next_env_params = _normalize_env_params(env_params) if normalized_system_timezone is not None: next_env_params["TZ"] = normalized_system_timezone global_delivery_override: Optional[Dict[str, Any]] = None if "send_progress" in runtime_overrides or "send_tool_hints" in runtime_overrides: global_delivery_override = {} if "send_progress" in runtime_overrides: global_delivery_override["sendProgress"] = bool(runtime_overrides.get("send_progress")) if "send_tool_hints" in runtime_overrides: global_delivery_override["sendToolHints"] = bool(runtime_overrides.get("send_tool_hints")) _sync_bot_workspace_via_provider( 
session, bot, target_override=next_target, runtime_overrides=runtime_overrides if runtime_overrides else None, global_delivery_override=global_delivery_override, ) try: if next_env_params is not None: _write_env_store(bot_id, next_env_params) if next_target is not None: _apply_provider_target_to_bot(bot, next_target) session.add(bot) session.commit() except Exception: session.rollback() if previous_env_params is not None: _write_env_store(bot_id, previous_env_params) raise session.refresh(bot) _invalidate_bot_detail_cache(bot_id) return _serialize_bot(bot) @app.post("/api/bots/{bot_id}/deploy") async def deploy_bot(bot_id: str, payload: BotDeployRequest, request: Request, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") actual_status = _refresh_bot_runtime_status(request.app.state, bot) session.add(bot) session.commit() if actual_status == "RUNNING": raise HTTPException(status_code=409, detail="Stop the bot before deploy or migrate") current_target = _resolve_bot_provider_target_for_instance(bot) next_target_base = _provider_target_from_node(payload.node_id) if next_target_base is None: raise HTTPException(status_code=400, detail=f"Managed node not found: {payload.node_id}") next_target = normalize_provider_target( { "node_id": payload.node_id, "runtime_kind": payload.runtime_kind, }, fallback=next_target_base, ) _ensure_provider_target_supported(next_target) existing_image_tag = str(bot.image_tag or "").strip() requested_image_tag = str(payload.image_tag or "").strip() if next_target.runtime_kind == "docker": requested_image_tag = requested_image_tag or existing_image_tag image_changed = requested_image_tag != str(bot.image_tag or "").strip() target_changed = next_target.key != current_target.key if not image_changed and not target_changed: raise HTTPException(status_code=400, detail="No deploy changes detected") if next_target.runtime_kind == "docker": 
_require_ready_image( session, requested_image_tag, require_local_image=True, ) _sync_bot_workspace_via_provider( session, bot, target_override=next_target, runtime_overrides=provider_target_to_dict(next_target), ) previous_image_tag = str(bot.image_tag or "").strip() bot.image_tag = requested_image_tag _apply_provider_target_to_bot(bot, next_target) bot.updated_at = datetime.utcnow() session.add(bot) record_activity_event( session, bot_id, "bot_deployed", channel="system", detail=( f"Bot {bot_id} deployed to {_node_display_name(next_target.node_id)}" if target_changed else f"Bot {bot_id} redeployed with image {requested_image_tag}" ), metadata={ "previous_target": _serialize_provider_target_summary(current_target), "next_target": _serialize_provider_target_summary(next_target), "previous_image_tag": previous_image_tag, "image_tag": requested_image_tag, "auto_start": bool(payload.auto_start), }, ) session.commit() session.refresh(bot) started = False if bool(payload.auto_start): await runtime_service.start_bot(app_state=request.app.state, session=session, bot=bot) session.refresh(bot) started = True _invalidate_bot_detail_cache(bot_id) return { "status": "deployed", "bot": _serialize_bot(bot), "started": started, "image_tag": requested_image_tag, "previous_image_tag": previous_image_tag, "previous_target": _serialize_provider_target_summary(current_target), "next_target": _serialize_provider_target_summary(next_target), } @app.post("/api/bots/{bot_id}/start") async def start_bot(bot_id: str, request: Request, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") return await runtime_service.start_bot(app_state=request.app.state, session=session, bot=bot) @app.post("/api/bots/{bot_id}/stop") def stop_bot(bot_id: str, request: Request, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, 
detail="Bot not found") return runtime_service.stop_bot(app_state=request.app.state, session=session, bot=bot) @app.post("/api/bots/{bot_id}/enable") def enable_bot(bot_id: str, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") bot.enabled = True session.add(bot) record_activity_event(session, bot_id, "bot_enabled", channel="system", detail=f"Bot {bot_id} enabled") session.commit() _invalidate_bot_detail_cache(bot_id) return {"status": "enabled", "enabled": True} @app.post("/api/bots/{bot_id}/disable") def disable_bot(bot_id: str, request: Request, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") try: runtime_service.stop_bot(app_state=request.app.state, session=session, bot=bot) except Exception: pass bot.enabled = False bot.docker_status = "STOPPED" if str(bot.current_state or "").upper() not in {"ERROR"}: bot.current_state = "IDLE" session.add(bot) record_activity_event(session, bot_id, "bot_disabled", channel="system", detail=f"Bot {bot_id} disabled") session.commit() _invalidate_bot_detail_cache(bot_id) return {"status": "disabled", "enabled": False} @app.post("/api/bots/{bot_id}/deactivate") def deactivate_bot(bot_id: str, request: Request, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") try: runtime_service.stop_bot(app_state=request.app.state, session=session, bot=bot) except Exception: pass bot.enabled = False bot.docker_status = "STOPPED" if str(bot.current_state or "").upper() not in {"ERROR"}: bot.current_state = "IDLE" session.add(bot) record_activity_event(session, bot_id, "bot_deactivated", channel="system", detail=f"Bot {bot_id} deactivated") session.commit() _invalidate_bot_detail_cache(bot_id) return {"status": "deactivated"} 
@app.delete("/api/bots/{bot_id}")
def delete_bot(bot_id: str, request: Request, delete_workspace: bool = True, session: Session = Depends(get_session)):
    """Delete a bot, all its dependent rows, and (optionally) its workspace."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    target = _resolve_bot_provider_target_for_instance(bot)
    try:
        runtime_service.stop_bot(app_state=request.app.state, session=session, bot=bot)
    except Exception:
        pass  # best effort: deletion proceeds even if the runtime refuses to stop
    workspace_deleted = not bool(delete_workspace)
    if delete_workspace:
        if target.transport_kind == "edge":
            try:
                workspace_root = str(_node_metadata(target.node_id).get("workspace_root") or "").strip() or None
                purge_result = _resolve_edge_client(target).purge_workspace(
                    bot_id=bot_id,
                    workspace_root=workspace_root,
                )
                workspace_deleted = str(purge_result.get("status") or "").strip().lower() in {"deleted", "not_found"}
            except Exception as exc:
                log_edge_failure(
                    logger,
                    key=f"bot-delete-workspace:{bot_id}",
                    exc=exc,
                    message=f"Failed to purge edge workspace for bot_id={bot_id}",
                )
                workspace_deleted = False
        # Also clear the local workspace directory (no-op when absent).
        # NOTE(review): for edge bots this runs after the remote purge — it
        # presumably removes a local mirror; confirm against workspace layout.
        workspace_root = os.path.join(BOTS_WORKSPACE_ROOT, bot_id)
        if os.path.isdir(workspace_root):
            shutil.rmtree(workspace_root, ignore_errors=True)
            workspace_deleted = True
    # Delete dependent rows model-by-model (same order as before the refactor).
    for model in (BotMessage, TopicItem, TopicTopic, BotRequestUsage, BotActivityEvent, BotSkillInstall):
        for row in session.exec(select(model).where(model.bot_id == bot_id)).all():
            session.delete(row)
    session.delete(bot)
    session.commit()
    _clear_provider_target_override(bot_id)
    _invalidate_bot_detail_cache(bot_id)
    _invalidate_bot_messages_cache(bot_id)
    return {"status": "deleted", "workspace_deleted": workspace_deleted}


@app.get("/api/bots/{bot_id}/channels")
def list_bot_channels(bot_id: str, session: Session = Depends(get_session)):
    """Return the bot's channel rows as stored in its workspace config."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return _get_bot_channels_from_config(bot)


@app.get("/api/platform/skills")
def list_skill_market(session: Session = Depends(get_session)):
    """List all skill-market items with their INSTALLED install counts."""
    items = session.exec(select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)).all()
    installs = session.exec(select(BotSkillInstall)).all()
    install_count_by_skill: Dict[int, int] = {}
    for row in installs:
        skill_id = int(row.skill_market_item_id or 0)
        if skill_id <= 0 or row.status != "INSTALLED":
            continue
        install_count_by_skill[skill_id] = install_count_by_skill.get(skill_id, 0) + 1
    return [
        _serialize_skill_market_item(item, install_count=install_count_by_skill.get(int(item.id or 0), 0))
        for item in items
    ]


@app.post("/api/platform/skills")
async def create_skill_market_item(
    skill_key: str = Form(""),
    display_name: str = Form(""),
    description: str = Form(""),
    file: UploadFile = File(...),
    session: Session = Depends(get_session),
):
    """Store an uploaded skill zip and create its market item row."""
    upload_meta = await _store_skill_market_zip_upload(session, file)
    try:
        preferred_key = skill_key or display_name or os.path.splitext(upload_meta["zip_filename"])[0]
        next_key = _resolve_unique_skill_market_key(session, preferred_key)
        item = SkillMarketItem(
            skill_key=next_key,
            display_name=str(display_name or next_key).strip() or next_key,
            description=str(description or upload_meta["description"] or "").strip(),
            zip_filename=upload_meta["zip_filename"],
            zip_size_bytes=int(upload_meta["zip_size_bytes"] or 0),
            entry_names_json=json.dumps(upload_meta["entry_names"], ensure_ascii=False),
        )
        session.add(item)
        session.commit()
        session.refresh(item)
        return _serialize_skill_market_item(item, install_count=0)
    except Exception:
        # Remove the stored zip so a failed create leaves no orphan file behind.
        target_path = os.path.join(_skill_market_root(), upload_meta["zip_filename"])
        if os.path.exists(target_path):
            os.remove(target_path)
        raise


@app.put("/api/platform/skills/{skill_id}")
async def update_skill_market_item(
    skill_id: int,
    skill_key: str = Form(""),
    display_name: str = Form(""),
    description: str = Form(""),
    file: Optional[UploadFile] = File(None),
    session: Session = Depends(get_session),
):
    """Update a skill-market item's metadata and, optionally, its zip package."""
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")
    old_filename = str(item.zip_filename or "").strip()
    upload_meta: Optional[Dict[str, Any]] = None
    if file is not None:
        upload_meta = await _store_skill_market_zip_upload(
            session,
            file,
            exclude_filename=old_filename or None,
            exclude_id=item.id,
        )
    next_key = _resolve_unique_skill_market_key(
        session,
        skill_key or item.skill_key or display_name or os.path.splitext(upload_meta["zip_filename"] if upload_meta else old_filename)[0],
        exclude_id=item.id,
    )
    item.skill_key = next_key
    item.display_name = str(display_name or item.display_name or next_key).strip() or next_key
    item.description = str(description or (upload_meta["description"] if upload_meta else item.description) or "").strip()
    item.updated_at = datetime.utcnow()
    if upload_meta:
        item.zip_filename = upload_meta["zip_filename"]
        item.zip_size_bytes = int(upload_meta["zip_size_bytes"] or 0)
        item.entry_names_json = json.dumps(upload_meta["entry_names"], ensure_ascii=False)
    session.add(item)
    session.commit()
    session.refresh(item)
    # Drop the superseded zip only after the DB row points at the new one.
    if upload_meta and old_filename and old_filename != upload_meta["zip_filename"]:
        old_path = os.path.join(_skill_market_root(), old_filename)
        if os.path.exists(old_path):
            os.remove(old_path)
    installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)).all()
    install_count = sum(1 for row in installs if row.status == "INSTALLED")
    return _serialize_skill_market_item(item, install_count=install_count)


@app.delete("/api/platform/skills/{skill_id}")
def delete_skill_market_item(skill_id: int, session: Session = Depends(get_session)):
    """Delete a skill-market item, its install rows, and its stored zip."""
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")
    zip_filename = str(item.zip_filename or "").strip()
    installs = session.exec(select(BotSkillInstall).where(BotSkillInstall.skill_market_item_id == skill_id)).all()
    for row in installs:
        session.delete(row)
    session.delete(item)
    session.commit()
    if zip_filename:
        zip_path = os.path.join(_skill_market_root(), zip_filename)
        if os.path.exists(zip_path):
            os.remove(zip_path)
    return {"status": "deleted", "id": skill_id}


@app.get("/api/bots/{bot_id}/skills")
def list_bot_skills(bot_id: str, session: Session = Depends(get_session)):
    """List the skills currently present in the bot's workspace."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return _list_workspace_skills(bot_id)


@app.get("/api/bots/{bot_id}/skill-market")
def list_bot_skill_market(bot_id: str, session: Session = Depends(get_session)):
    """List skill-market items annotated with this bot's install state."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    items = session.exec(select(SkillMarketItem).order_by(SkillMarketItem.display_name, SkillMarketItem.id)).all()
    install_rows = session.exec(select(BotSkillInstall).where(BotSkillInstall.bot_id == bot_id)).all()
    install_lookup = {int(row.skill_market_item_id): row for row in install_rows}
    all_install_rows = session.exec(select(BotSkillInstall)).all()
    install_count_by_skill: Dict[int, int] = {}
    for row in all_install_rows:
        skill_id = int(row.skill_market_item_id or 0)
        if skill_id <= 0 or row.status != "INSTALLED":
            continue
        install_count_by_skill[skill_id] = install_count_by_skill.get(skill_id, 0) + 1
    workspace_skill_names = {str(row.get("name") or "").strip() for row in _list_workspace_skills(bot_id)}
    return [
        _serialize_skill_market_item(
            item,
            install_count=install_count_by_skill.get(int(item.id or 0), 0),
            install_row=install_lookup.get(int(item.id or 0)),
            # workspace_installed is True only if the DB says INSTALLED *and*
            # every recorded entry is still present in the workspace.
            workspace_installed=(
                None
                if install_lookup.get(int(item.id or 0)) is None
                else (
                    install_lookup[int(item.id or 0)].status == "INSTALLED"
                    and all(
                        name in workspace_skill_names
                        for name in _parse_json_string_list(install_lookup[int(item.id or 0)].installed_entries_json)
                    )
                )
            ),
            installed_entries=(
                None
                if install_lookup.get(int(item.id or 0)) is None
                else _parse_json_string_list(install_lookup[int(item.id or 0)].installed_entries_json)
            ),
        )
        for item in items
    ]


@app.post("/api/bots/{bot_id}/skill-market/{skill_id}/install")
def install_bot_skill_from_market(bot_id: str, skill_id: int, session: Session = Depends(get_session)):
    """Install a market skill zip into the bot's workspace, tracking the result.

    On success the install row is marked INSTALLED; on failure it is marked
    FAILED with the error recorded, and the error is re-raised.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    item = session.get(SkillMarketItem, skill_id)
    if not item:
        raise HTTPException(status_code=404, detail="Skill market item not found")
    zip_path = os.path.join(_skill_market_root(), str(item.zip_filename or ""))
    if not os.path.isfile(zip_path):
        raise HTTPException(status_code=404, detail="Skill zip package not found")
    install_row = session.exec(
        select(BotSkillInstall).where(
            BotSkillInstall.bot_id == bot_id,
            BotSkillInstall.skill_market_item_id == skill_id,
        )
    ).first()
    try:
        install_result = _install_skill_zip_into_workspace(bot_id, zip_path)
        now = datetime.utcnow()
        if not install_row:
            install_row = BotSkillInstall(
                bot_id=bot_id,
                skill_market_item_id=skill_id,
            )
        install_row.installed_entries_json = json.dumps(install_result["installed"], ensure_ascii=False)
        install_row.source_zip_filename = str(item.zip_filename or "")
        install_row.status = "INSTALLED"
        install_row.last_error = None
        install_row.installed_at = now
        install_row.updated_at = now
        session.add(install_row)
        session.commit()
        session.refresh(install_row)
        return {
            "status": "installed",
            "bot_id": bot_id,
            "skill_market_item_id": skill_id,
            "installed": install_result["installed"],
            "skills": install_result["skills"],
            "market_item": _serialize_skill_market_item(item, install_count=0, install_row=install_row),
        }
    except HTTPException as exc:
        now = datetime.utcnow()
        if not install_row:
            install_row = BotSkillInstall(
                bot_id=bot_id,
                skill_market_item_id=skill_id,
                installed_at=now,
            )
        install_row.source_zip_filename = str(item.zip_filename or "")
        install_row.status = "FAILED"
        install_row.last_error = str(exc.detail or "Install failed")
        install_row.updated_at = now
        session.add(install_row)
        session.commit()
        raise
    except Exception as exc:
        now = datetime.utcnow()
        if not install_row:
            install_row = BotSkillInstall(
                bot_id=bot_id,
                skill_market_item_id=skill_id,
                installed_at=now,
            )
        install_row.source_zip_filename = str(item.zip_filename or "")
        install_row.status = "FAILED"
        # Bug fix: was `str(exc or "Install failed")` — exception instances are
        # always truthy, so the fallback never applied and an exception with an
        # empty message stored an empty last_error.
        install_row.last_error = (str(exc) or "Install failed")[:1000]
        install_row.updated_at = now
        session.add(install_row)
        session.commit()
        raise HTTPException(status_code=500, detail="Skill install failed unexpectedly") from exc


@app.get("/api/bots/{bot_id}/tools-config")
def get_bot_tools_config(bot_id: str, session: Session = Depends(get_session)):
    """Report that tools config is no longer managed by the dashboard."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return {
        "bot_id": bot_id,
        "tools_config": {},
        "managed_by_dashboard": False,
        "hint": "Tools config is disabled in dashboard. Configure tool-related env vars manually.",
    }


@app.put("/api/bots/{bot_id}/tools-config")
def update_bot_tools_config(bot_id: str, payload: BotToolsConfigUpdateRequest, session: Session = Depends(get_session)):
    """Reject tools-config updates; this surface was deliberately retired."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    raise HTTPException(
        status_code=400,
        detail="Tools config is no longer managed by dashboard. Please set required env vars manually.",
    )


@app.get("/api/bots/{bot_id}/mcp-config")
def get_bot_mcp_config(bot_id: str, session: Session = Depends(get_session)):
    """Return the bot's normalized MCP server map from its workspace config."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    config_data = _read_bot_config(bot_id)
    tools_cfg = config_data.get("tools") if isinstance(config_data, dict) else {}
    if not isinstance(tools_cfg, dict):
        tools_cfg = {}
    mcp_servers = _normalize_mcp_servers(tools_cfg.get("mcpServers"))
    return {
        "bot_id": bot_id,
        "mcp_servers": mcp_servers,
        "locked_servers": [],
        "restart_required": True,
    }


@app.put("/api/bots/{bot_id}/mcp-config")
def update_bot_mcp_config(bot_id: str, payload: BotMcpConfigUpdateRequest, session: Session = Depends(get_session)):
    """Merge the submitted MCP servers into the bot config, preserving extras."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    config_data = _read_bot_config(bot_id)
    if not isinstance(config_data, dict):
        config_data = {}
    tools_cfg = config_data.get("tools")
    if not isinstance(tools_cfg, dict):
        tools_cfg = {}
    normalized_mcp_servers = _normalize_mcp_servers(payload.mcp_servers or {})
    current_mcp_servers = tools_cfg.get("mcpServers")
    merged_mcp_servers = _merge_mcp_servers_preserving_extras(current_mcp_servers, normalized_mcp_servers)
    tools_cfg["mcpServers"] = merged_mcp_servers
    config_data["tools"] = tools_cfg
    sanitized_after_save = _sanitize_mcp_servers_in_config_data(config_data)
    _write_bot_config(bot_id, config_data)
    _invalidate_bot_detail_cache(bot_id)
    return {
        "status": "updated",
        "bot_id": bot_id,
        "mcp_servers": _normalize_mcp_servers(sanitized_after_save),
        "locked_servers": [],
        "restart_required": True,
    }


@app.get("/api/bots/{bot_id}/env-params")
def get_bot_env_params(bot_id: str, session: Session = Depends(get_session)):
    """Return the bot's stored environment parameters."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return {
        "bot_id": bot_id,
        "env_params": _read_env_store(bot_id),
    }
@app.put("/api/bots/{bot_id}/env-params") def update_bot_env_params(bot_id: str, payload: BotEnvParamsUpdateRequest, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") normalized = _normalize_env_params(payload.env_params) _write_env_store(bot_id, normalized) _invalidate_bot_detail_cache(bot_id) return { "status": "updated", "bot_id": bot_id, "env_params": normalized, "restart_required": True, } @app.post("/api/bots/{bot_id}/skills/upload") async def upload_bot_skill_zip(bot_id: str, file: UploadFile = File(...), session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") tmp_zip_path: Optional[str] = None try: with tempfile.NamedTemporaryFile(prefix=".skill_upload_", suffix=".zip", delete=False) as tmp_zip: tmp_zip_path = tmp_zip.name filename = str(file.filename or "").strip() if not filename.lower().endswith(".zip"): raise HTTPException(status_code=400, detail="Only .zip skill package is supported") max_bytes = get_platform_settings_snapshot().upload_max_mb * 1024 * 1024 total_size = 0 while True: chunk = await file.read(1024 * 1024) if not chunk: break total_size += len(chunk) if total_size > max_bytes: raise HTTPException( status_code=413, detail=f"Zip package too large (max {max_bytes // (1024 * 1024)}MB)", ) tmp_zip.write(chunk) if total_size == 0: raise HTTPException(status_code=400, detail="Zip package is empty") finally: await file.close() try: install_result = _install_skill_zip_into_workspace(bot_id, tmp_zip_path) finally: if tmp_zip_path and os.path.exists(tmp_zip_path): os.remove(tmp_zip_path) return { "status": "installed", "bot_id": bot_id, "installed": install_result["installed"], "skills": install_result["skills"], } @app.delete("/api/bots/{bot_id}/skills/{skill_name}") def delete_bot_skill(bot_id: str, skill_name: str, session: Session = 
Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") if _resolve_edge_state_context(bot_id) is not None: raise HTTPException( status_code=400, detail="Edge bot skill delete is disabled here. Use edge workspace file management.", ) name = str(skill_name or "").strip() if not _is_valid_top_level_skill_name(name): raise HTTPException(status_code=400, detail="Invalid skill name") root = _skills_root(bot_id) target = os.path.abspath(os.path.join(root, name)) if os.path.commonpath([os.path.abspath(root), target]) != os.path.abspath(root): raise HTTPException(status_code=400, detail="Invalid skill path") if not os.path.exists(target): raise HTTPException(status_code=404, detail="Skill not found in workspace") if os.path.isdir(target): shutil.rmtree(target, ignore_errors=False) else: os.remove(target) return {"status": "deleted", "bot_id": bot_id, "skill": name} @app.post("/api/bots/{bot_id}/channels") def create_bot_channel(bot_id: str, payload: ChannelConfigRequest, session: Session = Depends(get_session)): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") ctype = (payload.channel_type or "").strip().lower() if not ctype: raise HTTPException(status_code=400, detail="channel_type is required") if ctype == "dashboard": raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be created manually") current_rows = _get_bot_channels_from_config(bot) if any(str(row.get("channel_type") or "").lower() == ctype for row in current_rows): raise HTTPException(status_code=400, detail=f"Channel already exists: {ctype}") new_row = { "id": ctype, "bot_id": bot_id, "channel_type": ctype, "external_app_id": (payload.external_app_id or "").strip() or f"{ctype}-{bot_id}", "app_secret": (payload.app_secret or "").strip(), "internal_port": max(1, min(int(payload.internal_port or 8080), 65535)), "is_active": 
bool(payload.is_active), "extra_config": _normalize_channel_extra(payload.extra_config), "locked": False, } config_data = _read_bot_config(bot_id) channels_cfg = config_data.get("channels") if not isinstance(channels_cfg, dict): channels_cfg = {} config_data["channels"] = channels_cfg channels_cfg[ctype] = _channel_api_to_cfg(new_row) _write_bot_config(bot_id, config_data) _sync_bot_workspace_via_provider(session, bot) _invalidate_bot_detail_cache(bot_id) return new_row @app.put("/api/bots/{bot_id}/channels/{channel_id}") def update_bot_channel( bot_id: str, channel_id: str, payload: ChannelConfigUpdateRequest, session: Session = Depends(get_session), ): bot = session.get(BotInstance, bot_id) if not bot: raise HTTPException(status_code=404, detail="Bot not found") channel_key = str(channel_id or "").strip().lower() rows = _get_bot_channels_from_config(bot) row = next((r for r in rows if str(r.get("id") or "").lower() == channel_key), None) if not row: raise HTTPException(status_code=404, detail="Channel not found") if str(row.get("channel_type") or "").strip().lower() == "dashboard" or bool(row.get("locked")): raise HTTPException(status_code=400, detail="dashboard channel is built-in and cannot be modified") update_data = payload.model_dump(exclude_unset=True) existing_type = str(row.get("channel_type") or "").strip().lower() new_type = existing_type if "channel_type" in update_data and update_data["channel_type"] is not None: new_type = str(update_data["channel_type"]).strip().lower() if not new_type: raise HTTPException(status_code=400, detail="channel_type cannot be empty") if existing_type == "dashboard" and new_type != "dashboard": raise HTTPException(status_code=400, detail="dashboard channel type cannot be changed") if new_type != existing_type and any(str(r.get("channel_type") or "").lower() == new_type for r in rows): raise HTTPException(status_code=400, detail=f"Channel already exists: {new_type}") if "external_app_id" in update_data and 
update_data["external_app_id"] is not None: row["external_app_id"] = str(update_data["external_app_id"]).strip() if "app_secret" in update_data and update_data["app_secret"] is not None: row["app_secret"] = str(update_data["app_secret"]).strip() if "internal_port" in update_data and update_data["internal_port"] is not None: row["internal_port"] = max(1, min(int(update_data["internal_port"]), 65535)) if "is_active" in update_data and update_data["is_active"] is not None: next_active = bool(update_data["is_active"]) if existing_type == "dashboard" and not next_active: raise HTTPException(status_code=400, detail="dashboard channel must remain enabled") row["is_active"] = next_active if "extra_config" in update_data: row["extra_config"] = _normalize_channel_extra(update_data.get("extra_config")) row["channel_type"] = new_type row["id"] = new_type row["locked"] = new_type == "dashboard" config_data = _read_bot_config(bot_id) channels_cfg = config_data.get("channels") if not isinstance(channels_cfg, dict): channels_cfg = {} config_data["channels"] = channels_cfg current_send_progress, current_send_tool_hints = _read_global_delivery_flags(channels_cfg) if new_type == "dashboard": extra = _normalize_channel_extra(row.get("extra_config")) channels_cfg["sendProgress"] = bool(extra.get("sendProgress", current_send_progress)) channels_cfg["sendToolHints"] = bool(extra.get("sendToolHints", current_send_tool_hints)) else: channels_cfg["sendProgress"] = current_send_progress channels_cfg["sendToolHints"] = current_send_tool_hints channels_cfg.pop("dashboard", None) if existing_type != "dashboard" and existing_type in channels_cfg and existing_type != new_type: channels_cfg.pop(existing_type, None) if new_type != "dashboard": channels_cfg[new_type] = _channel_api_to_cfg(row) _write_bot_config(bot_id, config_data) session.commit() _sync_bot_workspace_via_provider(session, bot) _invalidate_bot_detail_cache(bot_id) return row @app.delete("/api/bots/{bot_id}/channels/{channel_id}") 
def delete_bot_channel(bot_id: str, channel_id: str, session: Session = Depends(get_session)):
    """Remove a channel from the bot's on-disk channel config.

    The built-in ``dashboard`` channel cannot be deleted. On success the
    workspace is re-synced via the provider and the bot-detail cache cleared.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    channel_key = str(channel_id or "").strip().lower()
    rows = _get_bot_channels_from_config(bot)
    row = next((r for r in rows if str(r.get("id") or "").lower() == channel_key), None)
    if not row:
        raise HTTPException(status_code=404, detail="Channel not found")
    if str(row.get("channel_type") or "").lower() == "dashboard":
        raise HTTPException(status_code=400, detail="dashboard channel cannot be deleted")
    config_data = _read_bot_config(bot_id)
    channels_cfg = config_data.get("channels")
    if not isinstance(channels_cfg, dict):
        channels_cfg = {}
    config_data["channels"] = channels_cfg
    # Channels are stored keyed by their (lower-cased) type.
    channels_cfg.pop(str(row.get("channel_type") or "").lower(), None)
    _write_bot_config(bot_id, config_data)
    session.commit()
    _sync_bot_workspace_via_provider(session, bot)
    _invalidate_bot_detail_cache(bot_id)
    return {"status": "deleted"}


@app.post("/api/bots/{bot_id}/command")
def send_command(
    bot_id: str,
    payload: CommandRequest,
    request: Request,
    session: Session = Depends(get_session),
):
    """Forward a command payload to the bot's runtime via the runtime service."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return runtime_service.send_command(
        app_state=request.app.state,
        session=session,
        bot_id=bot_id,
        bot=bot,
        payload=payload,
    )


@app.get("/api/bots/{bot_id}/messages")
def list_bot_messages(bot_id: str, limit: int = 200, session: Session = Depends(get_session)):
    """Return the most recent ``limit`` messages in chronological order.

    Results are cached for 30 seconds per (bot, limit) pair; ``limit`` is
    clamped to [1, 500].
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    safe_limit = max(1, min(int(limit), 500))
    cached = cache.get_json(_cache_key_bot_messages(bot_id, safe_limit))
    if isinstance(cached, list):
        return cached
    # Fetch newest-first, then reverse so the payload is oldest-first.
    rows = session.exec(
        select(BotMessage)
        .where(BotMessage.bot_id == bot_id)
        .order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
        .limit(safe_limit)
    ).all()
    ordered = list(reversed(rows))
    payload = [_serialize_bot_message_row(bot_id, row) for row in ordered]
    cache.set_json(_cache_key_bot_messages(bot_id, safe_limit), payload, ttl=30)
    return payload


@app.get("/api/bots/{bot_id}/messages/page")
def list_bot_messages_page(
    bot_id: str,
    limit: Optional[int] = None,
    before_id: Optional[int] = None,
    session: Session = Depends(get_session),
):
    """Cursor-paginated message history (newest page first, items oldest-first).

    ``before_id`` is an exclusive upper bound on the message id; the returned
    ``next_before_id`` is the id to pass for the next (older) page. One extra
    row is fetched to compute ``has_more``. Cached for 30 seconds per cursor.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    configured_limit = get_chat_pull_page_size()
    safe_limit = max(1, min(int(limit if limit is not None else configured_limit), 500))
    safe_before_id = int(before_id) if isinstance(before_id, int) and before_id > 0 else None
    cache_key = _cache_key_bot_messages_page(bot_id, safe_limit, safe_before_id)
    cached = cache.get_json(cache_key)
    if isinstance(cached, dict) and isinstance(cached.get("items"), list):
        return cached
    stmt = (
        select(BotMessage)
        .where(BotMessage.bot_id == bot_id)
        .order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
        .limit(safe_limit + 1)  # +1 row to detect whether an older page exists
    )
    if safe_before_id is not None:
        stmt = stmt.where(BotMessage.id < safe_before_id)
    rows = session.exec(stmt).all()
    has_more = len(rows) > safe_limit
    if has_more:
        rows = rows[:safe_limit]
    ordered = list(reversed(rows))
    items = [_serialize_bot_message_row(bot_id, row) for row in ordered]
    # rows is still newest-first, so rows[-1] is the oldest id in this page.
    next_before_id = rows[-1].id if rows else None
    payload = {
        "items": items,
        "has_more": bool(has_more),
        "next_before_id": next_before_id,
        "limit": safe_limit,
    }
    cache.set_json(cache_key, payload, ttl=30)
    return payload


@app.get("/api/bots/{bot_id}/messages/by-date")
def list_bot_messages_by_date(
    bot_id: str,
    date: str,
    tz_offset_minutes: Optional[int] = None,
    limit: Optional[int] = None,
    session: Session = Depends(get_session),
):
    """Jump-to-date: return a window of messages anchored on a local calendar day.

    Resolves the caller's local day to a UTC range, anchors on the first
    message inside that range, or — when the day is empty — on whichever
    neighbouring message (before/after the day) is closer in time. Returns the
    anchor plus a small window of surrounding messages and has-more flags for
    both directions.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    utc_start, utc_end = _resolve_local_day_range(date, tz_offset_minutes)
    configured_limit = max(60, get_chat_pull_page_size())
    # Window size clamped to [12, 240]; roughly 1/4 (capped at 18) of it goes
    # before the anchor, the rest after.
    safe_limit = max(12, min(int(limit if limit is not None else configured_limit), 240))
    before_limit = max(3, min(18, safe_limit // 4))
    after_limit = max(0, safe_limit - before_limit - 1)
    exact_anchor = session.exec(
        select(BotMessage)
        .where(
            BotMessage.bot_id == bot_id,
            BotMessage.created_at >= utc_start,
            BotMessage.created_at < utc_end,
        )
        .order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
        .limit(1)
    ).first()
    anchor = exact_anchor
    matched_exact_date = exact_anchor is not None
    if anchor is None:
        # Day is empty: pick the temporally-closest neighbour on either side.
        next_row = session.exec(
            select(BotMessage)
            .where(BotMessage.bot_id == bot_id, BotMessage.created_at >= utc_end)
            .order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
            .limit(1)
        ).first()
        prev_row = session.exec(
            select(BotMessage)
            .where(BotMessage.bot_id == bot_id, BotMessage.created_at < utc_start)
            .order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
            .limit(1)
        ).first()
        if next_row and prev_row:
            gap_after = next_row.created_at - utc_end
            gap_before = utc_start - prev_row.created_at
            anchor = next_row if gap_after <= gap_before else prev_row
        else:
            anchor = next_row or prev_row
    if anchor is None or anchor.id is None:
        # No messages at all for this bot.
        return {
            "items": [],
            "anchor_id": None,
            "resolved_ts": None,
            "matched_exact_date": False,
            "has_more_before": False,
            "has_more_after": False,
        }
    before_rows = session.exec(
        select(BotMessage)
        .where(BotMessage.bot_id == bot_id, BotMessage.id < anchor.id)
        .order_by(BotMessage.created_at.desc(), BotMessage.id.desc())
        .limit(before_limit)
    ).all()
    after_rows = session.exec(
        select(BotMessage)
        .where(BotMessage.bot_id == bot_id, BotMessage.id > anchor.id)
        .order_by(BotMessage.created_at.asc(), BotMessage.id.asc())
        .limit(after_limit)
    ).all()
    ordered = list(reversed(before_rows)) + [anchor] + after_rows
    first_row = ordered[0] if ordered else None
    last_row = ordered[-1] if ordered else None
    # Cheap existence checks (id-only, LIMIT 1) for older/newer messages
    # beyond the returned window.
    has_more_before = False
    if first_row is not None and first_row.id is not None:
        has_more_before = session.exec(
            select(BotMessage.id)
            .where(BotMessage.bot_id == bot_id, BotMessage.id < first_row.id)
            .order_by(BotMessage.id.desc())
            .limit(1)
        ).first() is not None
    has_more_after = False
    if last_row is not None and last_row.id is not None:
        has_more_after = session.exec(
            select(BotMessage.id)
            .where(BotMessage.bot_id == bot_id, BotMessage.id > last_row.id)
            .order_by(BotMessage.id.asc())
            .limit(1)
        ).first() is not None
    return {
        "items": [_serialize_bot_message_row(bot_id, row) for row in ordered],
        "anchor_id": anchor.id,
        "resolved_ts": int(anchor.created_at.timestamp() * 1000),  # epoch ms
        "matched_exact_date": matched_exact_date,
        "has_more_before": has_more_before,
        "has_more_after": has_more_after,
    }


@app.put("/api/bots/{bot_id}/messages/{message_id}/feedback")
def update_bot_message_feedback(
    bot_id: str,
    message_id: int,
    payload: MessageFeedbackRequest,
    session: Session = Depends(get_session),
):
    """Set, change, or clear thumbs-up/down feedback on an assistant message.

    Accepts ``up``/``down`` to set feedback, and ``""``/``none``/``null`` to
    clear it. Only assistant-role messages may carry feedback.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    row = session.get(BotMessage, message_id)
    if not row or row.bot_id != bot_id:
        raise HTTPException(status_code=404, detail="Message not found")
    if row.role != "assistant":
        raise HTTPException(status_code=400, detail="Only assistant messages support feedback")
    raw = str(payload.feedback or "").strip().lower()
    if raw in {"", "none", "null"}:
        row.feedback = None
        row.feedback_at = None
    elif raw in {"up", "down"}:
        row.feedback = raw
        # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12+;
        # consider datetime.now(timezone.utc) — verify DB/serialization impact.
        row.feedback_at = datetime.utcnow()
    else:
        raise HTTPException(status_code=400, detail="feedback must be 'up' or 'down'")
    session.add(row)
    session.commit()
    _invalidate_bot_messages_cache(bot_id)
    return {
        "status": "updated",
        "bot_id": bot_id,
        "message_id": row.id,
        "feedback": row.feedback,
        "feedback_at": row.feedback_at.isoformat() if row.feedback_at else None,
    }


@app.delete("/api/bots/{bot_id}/messages")
def clear_bot_messages(bot_id: str, session: Session = Depends(get_session)):
    """Delete the bot's message history (delegated to the runtime service)."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return runtime_service.clear_messages(app_state=app.state, session=session, bot=bot)


@app.post("/api/bots/{bot_id}/sessions/dashboard-direct/clear")
def clear_bot_dashboard_direct_session(bot_id: str, session: Session = Depends(get_session)):
    """Reset the bot's dashboard-direct conversation session."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return runtime_service.clear_dashboard_direct_session(app_state=app.state, session=session, bot=bot)


@app.get("/api/bots/{bot_id}/logs")
def get_bot_logs(bot_id: str, tail: int = 300, request: Request = None, session: Session = Depends(get_session)):
    """Return the last ``tail`` runtime log lines for the bot."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return runtime_service.get_logs(app_state=request.app.state, bot=bot, tail=tail)


@app.get("/api/bots/{bot_id}/workspace/tree")
def get_workspace_tree(
    bot_id: str,
    path: Optional[str] = None,
    recursive: bool = False,
    request: Request = None,
    session: Session = Depends(get_session),
):
    """List the bot workspace directory tree (optionally recursive)."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return workspace_service.list_tree(app_state=request.app.state, bot=bot, path=path, recursive=recursive)


@app.get("/api/bots/{bot_id}/workspace/file")
def read_workspace_file(
    bot_id: str,
    path: str,
    max_bytes: int = 200000,
    request: Request = None,
    session: Session = Depends(get_session),
):
    """Read a workspace file's content, capped at ``max_bytes``."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return workspace_service.read_file(app_state=request.app.state, bot=bot, path=path, max_bytes=max_bytes)


@app.put("/api/bots/{bot_id}/workspace/file")
def update_workspace_file(
    bot_id: str,
    path: str,
    payload: WorkspaceFileUpdateRequest,
    request: Request = None,
    session: Session = Depends(get_session),
):
    """Overwrite a markdown workspace file with the supplied content."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return workspace_service.write_markdown(
        app_state=request.app.state,
        bot=bot,
        path=path,
        content=str(payload.content or ""),
    )


def _serve_workspace_file(
    bot_id: str,
    path: str,
    download: bool,
    request: Request,
    session: Session,
    *,
    public: bool = False,
    redirect_html_to_raw: bool = False,
):
    """Shared implementation behind all workspace download/raw endpoints.

    ``public`` marks unauthenticated access; ``redirect_html_to_raw`` makes
    the service redirect HTML files to the raw route (details delegated to
    ``workspace_service.serve_file``).
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return workspace_service.serve_file(
        app_state=request.app.state,
        bot=bot,
        path=path,
        download=download,
        request=request,
        public=public,
        redirect_html_to_raw=redirect_html_to_raw,
    )


@app.get("/api/bots/{bot_id}/cron/jobs")
def list_cron_jobs(bot_id: str, include_disabled: bool = True, session: Session = Depends(get_session)):
    """List cron jobs from the bot's cron store, soonest next-run first.

    Jobs without a scheduled next run sort last (sentinel 2**62 ms).
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    store = _read_cron_store(bot_id)
    rows = []
    for row in store.get("jobs", []):
        if not isinstance(row, dict):
            continue  # skip malformed entries defensively
        enabled = bool(row.get("enabled", True))
        if not include_disabled and not enabled:
            continue
        rows.append(row)
    rows.sort(key=lambda v: int(((v.get("state") or {}).get("nextRunAtMs")) or 2**62))
    return {"bot_id": bot_id, "version": int(store.get("version", 1) or 1), "jobs": rows}


@app.post("/api/bots/{bot_id}/cron/jobs/{job_id}/stop")
def stop_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)):
    """Disable (but keep) a cron job and persist the updated store."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    store = _read_cron_store(bot_id)
    jobs = store.get("jobs", [])
    if not isinstance(jobs, list):
        jobs = []
    found = None
    for row in jobs:
        if isinstance(row, dict) and str(row.get("id")) == job_id:
            found = row
            break
    if not found:
        raise HTTPException(status_code=404, detail="Cron job not found")
    found["enabled"] = False
    # NOTE(review): naive utcnow(); epoch-ms value is still correct on UTC hosts
    # only — consider datetime.now(timezone.utc).
    found["updatedAtMs"] = int(datetime.utcnow().timestamp() * 1000)
    _write_cron_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": jobs})
    return {"status": "stopped", "job_id": job_id}
@app.delete("/api/bots/{bot_id}/cron/jobs/{job_id}")
def delete_cron_job(bot_id: str, job_id: str, session: Session = Depends(get_session)):
    """Remove a cron job from the bot's cron store entirely."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    store = _read_cron_store(bot_id)
    jobs = store.get("jobs", [])
    if not isinstance(jobs, list):
        jobs = []
    kept = [row for row in jobs if not (isinstance(row, dict) and str(row.get("id")) == job_id)]
    # Nothing filtered out means the id did not exist.
    if len(kept) == len(jobs):
        raise HTTPException(status_code=404, detail="Cron job not found")
    _write_cron_store(bot_id, {"version": int(store.get("version", 1) or 1), "jobs": kept})
    return {"status": "deleted", "job_id": job_id}


@app.get("/api/bots/{bot_id}/workspace/download")
def download_workspace_file(
    bot_id: str,
    path: str,
    download: bool = False,
    request: Request = None,
    session: Session = Depends(get_session),
):
    """Authenticated download endpoint; HTML files are redirected to raw."""
    return _serve_workspace_file(
        bot_id=bot_id,
        path=path,
        download=download,
        request=request,
        session=session,
        public=False,
        redirect_html_to_raw=True,
    )


@app.get("/public/bots/{bot_id}/workspace/download")
def public_download_workspace_file(
    bot_id: str,
    path: str,
    download: bool = False,
    request: Request = None,
    session: Session = Depends(get_session),
):
    """Public (unauthenticated) variant of the workspace download endpoint."""
    return _serve_workspace_file(
        bot_id=bot_id,
        path=path,
        download=download,
        request=request,
        session=session,
        public=True,
        redirect_html_to_raw=True,
    )


@app.get("/api/bots/{bot_id}/workspace/raw/{path:path}")
def raw_workspace_file(
    bot_id: str,
    path: str,
    download: bool = False,
    request: Request = None,
    session: Session = Depends(get_session),
):
    """Authenticated raw file endpoint (no HTML-to-raw redirect)."""
    return _serve_workspace_file(
        bot_id=bot_id,
        path=path,
        download=download,
        request=request,
        session=session,
        public=False,
        redirect_html_to_raw=False,
    )


@app.get("/public/bots/{bot_id}/workspace/raw/{path:path}")
def public_raw_workspace_file(
    bot_id: str,
    path: str,
    download: bool = False,
    request: Request = None,
    session: Session = Depends(get_session),
):
    """Public (unauthenticated) variant of the raw file endpoint."""
    return _serve_workspace_file(
        bot_id=bot_id,
        path=path,
        download=download,
        request=request,
        session=session,
        public=True,
        redirect_html_to_raw=False,
    )


@app.post("/api/bots/{bot_id}/workspace/upload")
async def upload_workspace_files(
    bot_id: str,
    files: List[UploadFile] = File(...),
    path: Optional[str] = None,
    request: Request = None,
    session: Session = Depends(get_session),
):
    """Upload one or more files into the bot workspace (optional subpath)."""
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    return await workspace_service.upload_files(app_state=request.app.state, bot=bot, files=files, path=path)


@app.post("/api/bots/{bot_id}/speech/transcribe")
async def transcribe_bot_speech(
    bot_id: str,
    file: UploadFile = File(...),
    language: Optional[str] = Form(None),
    session: Session = Depends(get_session),
):
    """Transcribe an uploaded audio clip with the Whisper speech service.

    The upload is streamed to a temp file under ``DATA_ROOT`` (1 MiB chunks),
    transcribed off the event loop via ``asyncio.to_thread``, and the temp
    file is always removed. Speech-service errors are mapped to HTTP statuses:
    disabled/empty/failed -> 400, over max duration -> 413, unexpected -> 500.
    """
    bot = session.get(BotInstance, bot_id)
    if not bot:
        raise HTTPException(status_code=404, detail="Bot not found")
    speech_settings = get_speech_runtime_settings()
    if not speech_settings["enabled"]:
        raise HTTPException(status_code=400, detail="Speech recognition is disabled")
    if not file:
        raise HTTPException(status_code=400, detail="no audio file uploaded")
    # Sanitize the client filename; only its extension is used (for the
    # temp-file suffix), defaulting to .webm when missing or implausible.
    original_name = str(file.filename or "audio.webm").strip() or "audio.webm"
    safe_name = os.path.basename(original_name).replace("\\", "_").replace("/", "_")
    ext = os.path.splitext(safe_name)[1].strip().lower() or ".webm"
    if len(ext) > 12:
        ext = ".webm"
    tmp_path = ""
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=ext, prefix=".speech_", dir=DATA_ROOT) as tmp:
            tmp_path = tmp.name
            while True:
                chunk = await file.read(1024 * 1024)
                if not chunk:
                    break
                tmp.write(chunk)
        if not tmp_path or not os.path.exists(tmp_path) or os.path.getsize(tmp_path) <= 0:
            raise HTTPException(status_code=400, detail="audio payload is empty")
        resolved_language = str(language or "").strip() or speech_settings["default_language"]
        # Whisper transcription is CPU/GPU-bound; run it off the event loop.
        result = await asyncio.to_thread(speech_service.transcribe_file, tmp_path, resolved_language)
        text = str(result.get("text") or "").strip()
        if not text:
            raise HTTPException(status_code=400, detail="No speech detected")
        return {
            "bot_id": bot_id,
            "text": text,
            "duration_seconds": result.get("duration_seconds"),
            "max_audio_seconds": speech_settings["max_audio_seconds"],
            "model": speech_settings["model"],
            "device": speech_settings["device"],
            "language": result.get("language") or resolved_language,
        }
    except SpeechDisabledError as exc:
        logger.warning(
            "speech transcribe disabled bot_id=%s file=%s language=%s detail=%s",
            bot_id,
            safe_name,
            language,
            exc,
        )
        raise HTTPException(status_code=400, detail=str(exc))
    except SpeechDurationError:
        logger.warning(
            "speech transcribe too long bot_id=%s file=%s language=%s max_seconds=%s",
            bot_id,
            safe_name,
            language,
            speech_settings["max_audio_seconds"],
        )
        raise HTTPException(status_code=413, detail=f"Audio duration exceeds {speech_settings['max_audio_seconds']} seconds")
    except SpeechServiceError as exc:
        logger.exception(
            "speech transcribe failed bot_id=%s file=%s language=%s",
            bot_id,
            safe_name,
            language,
        )
        raise HTTPException(status_code=400, detail=str(exc))
    except HTTPException:
        # Re-raise our own HTTP errors untouched.
        raise
    except Exception as exc:
        logger.exception(
            "speech transcribe unexpected error bot_id=%s file=%s language=%s",
            bot_id,
            safe_name,
            language,
        )
        raise HTTPException(status_code=500, detail=f"speech transcription failed: {exc}")
    finally:
        # Best-effort cleanup of the upload handle and the temp file.
        try:
            await file.close()
        except Exception:
            pass
        if tmp_path and os.path.exists(tmp_path):
            try:
                os.remove(tmp_path)
            except Exception:
                pass


@app.websocket("/ws/monitor/{bot_id}")
async def websocket_endpoint(websocket: WebSocket, bot_id: str):
    """Live monitor socket for a bot.

    Verifies the bot exists, registers the socket with the connection
    manager, ensures a runtime monitor is running, then blocks reading
    client frames until disconnect. Always deregisters on exit.
    """
    with Session(engine) as session:
        bot = session.get(BotInstance, bot_id)
        if not bot:
            await websocket.close(code=4404, reason="Bot not found")
            return
    connected = False
    try:
        await manager.connect(bot_id, websocket)
        connected = True
    except Exception as exc:
        logger.warning("websocket connect failed bot_id=%s detail=%s", bot_id, exc)
        try:
            await websocket.close(code=1011, reason="WebSocket accept failed")
        except Exception:
            pass
        return
    runtime_service.ensure_monitor(app_state=websocket.app.state, bot=bot)
    try:
        # Keep the socket open; inbound frames are read and discarded.
        while True:
            await websocket.receive_text()
    except WebSocketDisconnect:
        pass
    except RuntimeError as exc:
        # Client may drop before handshake settles; treat as benign disconnect.
        msg = str(exc or "").lower()
        if "need to call \"accept\" first" not in msg and "not connected" not in msg:
            logger.exception("websocket runtime error bot_id=%s", bot_id)
    except Exception:
        logger.exception("websocket unexpected error bot_id=%s", bot_id)
    finally:
        if connected:
            manager.disconnect(bot_id, websocket)


def _main_server_options() -> tuple[str, int, bool, str, bool]:
    """Read uvicorn launch options from APP_* environment variables.

    Returns:
        (host, port, reload_flag, log_level, access_log) with the port
        clamped to [1, 65535] and boolean flags parsed from the usual
        truthy strings ("1", "true", "yes", "on").
    """
    host = str(os.getenv("APP_HOST", "0.0.0.0") or "0.0.0.0").strip() or "0.0.0.0"
    try:
        port = int(os.getenv("APP_PORT", "8000"))
    except Exception:
        port = 8000
    port = max(1, min(port, 65535))
    reload_flag = str(os.getenv("APP_RELOAD", "true")).strip().lower() in {"1", "true", "yes", "on"}
    log_level = str(os.getenv("APP_LOG_LEVEL", "warning") or "warning").strip().lower() or "warning"
    access_log = str(os.getenv("APP_ACCESS_LOG", "false")).strip().lower() in {"1", "true", "yes", "on"}
    return host, port, reload_flag, log_level, access_log


if __name__ == "__main__":
    import uvicorn

    host, port, reload_flag, log_level, access_log = _main_server_options()
    # uvicorn's reloader requires an import string ("module:app"), not an
    # app object, so build it from this file's name.
    app_module = f"{os.path.splitext(os.path.basename(__file__))[0]}:app"
    if reload_flag:
        uvicorn.run(
            app_module,
            host=host,
            port=port,
            reload=True,
            log_level=log_level,
            access_log=access_log,
        )
    else:
        uvicorn.run(
            app,
            host=host,
            port=port,
            log_level=log_level,
            access_log=access_log,
        )