Merge branch 'main' of https://git.unissense.tech/mula/dashboard-nanobot
commit
f6cf5edc1c
|
|
@ -11,6 +11,17 @@ import docker
|
|||
|
||||
|
||||
class BotDockerManager:
|
||||
_RUNTIME_BOOTSTRAP_LABEL_KEY = "dashboard.runtime_bootstrap"
|
||||
_RUNTIME_BOOTSTRAP_LABEL_VALUE = "env-json-v1"
|
||||
_DASHBOARD_READY_LOG_MARKERS = (
|
||||
"nanobot.channels.dashboard:start",
|
||||
"dashboard channel 代理已上线",
|
||||
)
|
||||
_DASHBOARD_FAILURE_LOG_MARKERS = (
|
||||
"failed to start channel dashboard",
|
||||
"dashboard channel not available",
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
host_data_root: str,
|
||||
|
|
@ -180,6 +191,162 @@ class BotDockerManager:
|
|||
|
||||
return str(network_settings.get("IPAddress") or "").strip()
|
||||
|
||||
@classmethod
|
||||
def _container_uses_expected_bootstrap(cls, container: Any) -> bool:
|
||||
attrs = getattr(container, "attrs", {}) or {}
|
||||
config = attrs.get("Config") or {}
|
||||
labels = config.get("Labels") or {}
|
||||
return str(labels.get(cls._RUNTIME_BOOTSTRAP_LABEL_KEY) or "").strip() == cls._RUNTIME_BOOTSTRAP_LABEL_VALUE
|
||||
|
||||
@staticmethod
|
||||
def _runtime_bootstrap_entrypoint() -> List[str]:
|
||||
bootstrap_code = "\n".join(
|
||||
[
|
||||
"import json",
|
||||
"import os",
|
||||
"import pathlib",
|
||||
"import re",
|
||||
"",
|
||||
"path = pathlib.Path('/root/.nanobot/env.json')",
|
||||
"pattern = re.compile(r'^[A-Z_][A-Z0-9_]{0,127}$')",
|
||||
"data = {}",
|
||||
"if path.is_file():",
|
||||
" try:",
|
||||
" data = json.loads(path.read_text(encoding='utf-8'))",
|
||||
" except Exception:",
|
||||
" data = {}",
|
||||
"if not isinstance(data, dict):",
|
||||
" data = {}",
|
||||
"for raw_key, raw_value in data.items():",
|
||||
" key = str(raw_key or '').strip().upper()",
|
||||
" if not pattern.fullmatch(key):",
|
||||
" continue",
|
||||
" os.environ[key] = str(raw_value or '').strip()",
|
||||
"os.execvp('nanobot', ['nanobot', 'gateway'])",
|
||||
]
|
||||
)
|
||||
return [
|
||||
"python",
|
||||
"-c",
|
||||
bootstrap_code,
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def _container_has_mount(container: Any, source: str, destination: str) -> bool:
|
||||
attrs = getattr(container, "attrs", {}) or {}
|
||||
mounts = attrs.get("Mounts") or []
|
||||
expected_source = os.path.normpath(source)
|
||||
expected_destination = str(destination or "").strip()
|
||||
for mount in mounts:
|
||||
if not isinstance(mount, dict):
|
||||
continue
|
||||
current_source = os.path.normpath(str(mount.get("Source") or ""))
|
||||
current_destination = str(mount.get("Destination") or "").strip()
|
||||
if current_source != expected_source or current_destination != expected_destination:
|
||||
continue
|
||||
if mount.get("RW") is False:
|
||||
continue
|
||||
return True
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def _desired_memory_bytes(memory_mb: int) -> int:
|
||||
return int(memory_mb) * 1024 * 1024 if int(memory_mb or 0) > 0 else 0
|
||||
|
||||
@staticmethod
|
||||
def _desired_storage_bytes(storage_gb: int) -> Optional[int]:
|
||||
storage = int(storage_gb or 0)
|
||||
if storage <= 0:
|
||||
return None
|
||||
return storage * 1024 * 1024 * 1024
|
||||
|
||||
@staticmethod
|
||||
def _get_container_cpu_cores(container: Any) -> float:
|
||||
attrs = getattr(container, "attrs", {}) or {}
|
||||
host_cfg = attrs.get("HostConfig") or {}
|
||||
nano_cpus = int(host_cfg.get("NanoCpus") or 0)
|
||||
if nano_cpus > 0:
|
||||
return nano_cpus / 1_000_000_000
|
||||
cpu_quota = int(host_cfg.get("CpuQuota") or 0)
|
||||
cpu_period = int(host_cfg.get("CpuPeriod") or 0)
|
||||
if cpu_quota > 0 and cpu_period > 0:
|
||||
return cpu_quota / cpu_period
|
||||
return 0.0
|
||||
|
||||
@staticmethod
|
||||
def _normalize_image_id(raw: Any) -> str:
|
||||
text = str(raw or "").strip().lower()
|
||||
if text.startswith("sha256:"):
|
||||
return text[7:]
|
||||
return text
|
||||
|
||||
@classmethod
|
||||
def _get_container_image_id(cls, container: Any) -> str:
|
||||
attrs = getattr(container, "attrs", {}) or {}
|
||||
image_id = attrs.get("Image")
|
||||
if image_id:
|
||||
return cls._normalize_image_id(image_id)
|
||||
image = getattr(container, "image", None)
|
||||
return cls._normalize_image_id(getattr(image, "id", ""))
|
||||
|
||||
def _resolve_image_id(self, image_ref: str) -> str:
|
||||
if not self.client:
|
||||
return ""
|
||||
try:
|
||||
image = self.client.images.get(image_ref)
|
||||
except Exception as e:
|
||||
print(f"[DockerManager] failed to resolve image id for {image_ref}: {e}")
|
||||
return ""
|
||||
return self._normalize_image_id(getattr(image, "id", ""))
|
||||
|
||||
def _container_storage_matches(self, actual_storage_bytes: Optional[int], desired_storage_gb: int) -> bool:
|
||||
expected_storage_bytes = self._desired_storage_bytes(desired_storage_gb)
|
||||
if expected_storage_bytes is None:
|
||||
return actual_storage_bytes in {None, 0}
|
||||
if actual_storage_bytes == expected_storage_bytes:
|
||||
return True
|
||||
return actual_storage_bytes is None and self._storage_limit_supported is not True
|
||||
|
||||
def _container_matches_runtime(
    self,
    container: Any,
    *,
    image_id: str,
    cpu_cores: float,
    memory_mb: int,
    storage_gb: int,
    bot_workspace: str,
    network_name: str,
) -> bool:
    """Return True when *container* already matches every desired runtime property.

    Guard-style checks in order: image id, bootstrap label, network
    attachment, workspace mount, memory limit, CPU limit, storage limit.
    Any mismatch means the caller should recreate the container.
    """
    attrs = getattr(container, "attrs", {}) or {}
    host_cfg = attrs.get("HostConfig") or {}

    # Image: both ids must be known and equal after normalization.
    wanted_image = self._normalize_image_id(image_id)
    current_image = self._get_container_image_id(container)
    if not wanted_image or not current_image or current_image != wanted_image:
        return False

    # Entry point, network attachment and workspace bind mount.
    if not self._container_uses_expected_bootstrap(container):
        return False
    if not self._container_uses_network(container, network_name):
        return False
    if not self._container_has_mount(container, bot_workspace, "/root/.nanobot"):
        return False

    # Memory must match exactly (0 means "no limit" on both sides).
    if int(host_cfg.get("Memory") or 0) != self._desired_memory_bytes(memory_mb):
        return False

    # CPU is a float comparison; tolerate tiny rounding differences.
    if abs(self._get_container_cpu_cores(container) - float(cpu_cores or 0)) > 0.01:
        return False

    # Storage limit from HostConfig.StorageOpt["size"], parsed back into bytes.
    storage_opt = host_cfg.get("StorageOpt") or {}
    current_storage = self._parse_size_to_bytes(storage_opt.get("size"))
    return self._container_storage_matches(current_storage, storage_gb)
|
||||
|
||||
def _run_container_with_storage_fallback(
|
||||
self,
|
||||
bot_id: str,
|
||||
|
|
@ -234,6 +401,10 @@ class BotDockerManager:
|
|||
if not self.has_image(image):
|
||||
print(f"❌ 错误: 镜像不存在: {image}")
|
||||
return False
|
||||
desired_image_id = self._resolve_image_id(image)
|
||||
if not desired_image_id:
|
||||
print(f"❌ 错误: 无法解析镜像 ID: {image}")
|
||||
return False
|
||||
|
||||
bot_workspace = os.path.join(self.host_data_root, bot_id, ".nanobot")
|
||||
container_name = f"worker_{bot_id}"
|
||||
|
|
@ -246,7 +417,10 @@ class BotDockerManager:
|
|||
"detach": True,
|
||||
"stdin_open": True,
|
||||
"tty": True,
|
||||
"environment": env_vars or {},
|
||||
"entrypoint": self._runtime_bootstrap_entrypoint(),
|
||||
"labels": {
|
||||
self._RUNTIME_BOOTSTRAP_LABEL_KEY: self._RUNTIME_BOOTSTRAP_LABEL_VALUE,
|
||||
},
|
||||
"volumes": {
|
||||
bot_workspace: {"bind": "/root/.nanobot", "mode": "rw"},
|
||||
},
|
||||
|
|
@ -261,16 +435,36 @@ class BotDockerManager:
|
|||
try:
|
||||
container = self.client.containers.get(container_name)
|
||||
container.reload()
|
||||
if container.status == "running" and self._container_uses_network(container, target_network):
|
||||
runtime_matches = self._container_matches_runtime(
|
||||
container,
|
||||
image_id=desired_image_id,
|
||||
cpu_cores=cpu,
|
||||
memory_mb=memory,
|
||||
storage_gb=storage,
|
||||
bot_workspace=bot_workspace,
|
||||
network_name=target_network,
|
||||
)
|
||||
if container.status in {"running", "restarting"} and runtime_matches:
|
||||
if on_state_change:
|
||||
self.ensure_monitor(bot_id, on_state_change)
|
||||
return True
|
||||
if container.status == "running":
|
||||
print(
|
||||
f"[DockerManager] recreating {container_name} to switch network "
|
||||
f"from current attachment to '{target_network}'"
|
||||
)
|
||||
container.remove(force=True)
|
||||
if container.status in {"running", "restarting"}:
|
||||
if not self._container_uses_network(container, target_network):
|
||||
print(
|
||||
f"[DockerManager] recreating {container_name} to switch network "
|
||||
f"from current attachment to '{target_network}'"
|
||||
)
|
||||
else:
|
||||
print(f"[DockerManager] recreating {container_name} because container config no longer matches desired runtime")
|
||||
container.remove(force=True)
|
||||
elif runtime_matches:
|
||||
container.start()
|
||||
if on_state_change:
|
||||
self.ensure_monitor(bot_id, on_state_change)
|
||||
return True
|
||||
else:
|
||||
print(f"[DockerManager] recreating {container_name} because container config no longer matches desired runtime")
|
||||
container.remove(force=True)
|
||||
except docker.errors.NotFound:
|
||||
pass
|
||||
|
||||
|
|
@ -322,14 +516,17 @@ class BotDockerManager:
|
|||
print(f"[DockerManager] Error ensuring monitor for {bot_id}: {e}")
|
||||
return False
|
||||
|
||||
def stop_bot(self, bot_id: str) -> bool:
|
||||
def stop_bot(self, bot_id: str, remove: bool = False) -> bool:
|
||||
if not self.client:
|
||||
return False
|
||||
container_name = f"worker_{bot_id}"
|
||||
try:
|
||||
container = self.client.containers.get(container_name)
|
||||
container.stop(timeout=5)
|
||||
container.remove()
|
||||
container.reload()
|
||||
if str(container.status or "").strip().lower() in {"running", "restarting", "paused"}:
|
||||
container.stop(timeout=5)
|
||||
if remove:
|
||||
container.remove()
|
||||
self.active_monitors.pop(bot_id, None)
|
||||
return True
|
||||
except docker.errors.NotFound:
|
||||
|
|
@ -347,6 +544,11 @@ class BotDockerManager:
|
|||
media_paths = [str(v).strip().replace("\\", "/") for v in (media or []) if str(v).strip()]
|
||||
self._last_delivery_error.pop(bot_id, None)
|
||||
|
||||
if not self._wait_for_dashboard_ready(bot_id):
|
||||
if bot_id not in self._last_delivery_error:
|
||||
self._last_delivery_error[bot_id] = "Dashboard channel is not ready"
|
||||
return False
|
||||
|
||||
# Primary path on Docker Desktop/Mac: execute curl inside container namespace.
|
||||
for attempt in range(3):
|
||||
if self._send_command_via_exec(bot_id, command, media_paths):
|
||||
|
|
@ -365,6 +567,45 @@ class BotDockerManager:
|
|||
def get_last_delivery_error(self, bot_id: str) -> str:
|
||||
return str(self._last_delivery_error.get(bot_id, "") or "").strip()
|
||||
|
||||
@classmethod
|
||||
def _log_indicates_dashboard_ready(cls, line: str) -> bool:
|
||||
lowered = str(line or "").strip().lower()
|
||||
return any(marker in lowered for marker in cls._DASHBOARD_READY_LOG_MARKERS)
|
||||
|
||||
@classmethod
|
||||
def _log_indicates_dashboard_failure(cls, line: str) -> bool:
|
||||
lowered = str(line or "").strip().lower()
|
||||
return any(marker in lowered for marker in cls._DASHBOARD_FAILURE_LOG_MARKERS)
|
||||
|
||||
def _wait_for_dashboard_ready(
    self,
    bot_id: str,
    timeout_seconds: float = 15.0,
    poll_interval_seconds: float = 0.5,
) -> bool:
    """Poll container logs until the dashboard channel reports ready.

    Returns False (recording a human-readable reason in
    ``self._last_delivery_error``) when the container is not RUNNING, a
    failure marker appears in the logs, or the timeout elapses. The timeout
    is clamped to at least 1 second and the poll interval to at least 0.1s.
    """
    effective_timeout = max(1.0, timeout_seconds)
    deadline = time.monotonic() + effective_timeout
    while time.monotonic() < deadline:
        status = self.get_bot_status(bot_id)
        if status != "RUNNING":
            self._last_delivery_error[bot_id] = f"Container status is {status.lower()}"
            return False

        for line in self.get_recent_logs(bot_id, tail=200):
            # Within each line, a failure marker wins over a ready marker.
            if self._log_indicates_dashboard_failure(line):
                detail = str(line or "").strip()
                self._last_delivery_error[bot_id] = detail[:300] if detail else "Dashboard channel failed to start"
                return False
            if self._log_indicates_dashboard_ready(line):
                return True

        time.sleep(max(0.1, poll_interval_seconds))

    self._last_delivery_error[bot_id] = (
        f"Dashboard channel was not ready within {int(effective_timeout)}s"
    )
    return False
|
||||
|
||||
def get_bot_status(self, bot_id: str) -> str:
|
||||
"""Return normalized runtime status from Docker: RUNNING or STOPPED."""
|
||||
if not self.client:
|
||||
|
|
|
|||
|
|
@ -125,7 +125,7 @@ def deactivate_bot_instance(session: Session, bot_id: str) -> Dict[str, Any]:
|
|||
|
||||
def delete_bot_instance(session: Session, bot_id: str, delete_workspace: bool = True) -> Dict[str, Any]:
|
||||
bot = _get_bot_or_404(session, bot_id)
|
||||
docker_manager.stop_bot(bot_id)
|
||||
docker_manager.stop_bot(bot_id, remove=True)
|
||||
|
||||
messages = session.exec(select(BotMessage).where(BotMessage.bot_id == bot_id)).all()
|
||||
for row in messages:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,352 @@
|
|||
import sys
|
||||
import tempfile
|
||||
import types
|
||||
import unittest
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
# Install a stub `docker` module BEFORE importing the module under test, so the
# tests run without the real docker SDK installed. Only the pieces that
# core.docker_manager touches (docker.errors.ImageNotFound / NotFound) are stubbed.
docker_stub = types.ModuleType("docker")
docker_stub.errors = types.SimpleNamespace(
    ImageNotFound=type("ImageNotFound", (Exception,), {}),
    NotFound=type("NotFound", (Exception,), {}),
)
# setdefault: keep the real SDK if it happens to be installed already.
sys.modules.setdefault("docker", docker_stub)

from core.docker_manager import BotDockerManager
|
||||
|
||||
|
||||
class BotDockerManagerTests(unittest.TestCase):
    """Unit tests for BotDockerManager using a fully mocked Docker client."""

    def setUp(self) -> None:
        # Fresh temp directory per test; used as the manager's host_data_root.
        self._tmpdir = tempfile.TemporaryDirectory()

    def tearDown(self) -> None:
        self._tmpdir.cleanup()

    def _make_manager(self) -> BotDockerManager:
        """Build a manager instance without running __init__ (no Docker I/O)."""
        manager = BotDockerManager.__new__(BotDockerManager)
        manager.client = MagicMock()
        manager.host_data_root = self._tmpdir.name
        manager.base_image = "nanobot-base"
        manager.network_name = ""
        manager.active_monitors = {}
        manager._last_delivery_error = {}
        manager._storage_limit_supported = True
        manager._storage_limit_warning_emitted = False
        return manager

    @staticmethod
    def _build_container(
        *,
        status: str,
        image: str,
        image_id: str | None = None,
        nano_cpus: int,
        memory_bytes: int,
        storage_opt_size: str,
        source_mount: str,
        network_name: str,
        bootstrap_label: str | None = "env-json-v1",
    ) -> MagicMock:
        """Build a MagicMock container whose .attrs mimic the Docker API shape.

        bootstrap_label=None simulates a container created before the
        env-json bootstrap entrypoint existed (no marker label).
        """
        actual_image_id = image_id or image
        container = MagicMock()
        container.status = status
        container.reload = MagicMock()
        container.start = MagicMock()
        container.stop = MagicMock()
        container.remove = MagicMock()
        container.image = types.SimpleNamespace(id=actual_image_id)
        container.attrs = {
            "Image": actual_image_id,
            "Config": {
                "Image": image,
                "Labels": (
                    {"dashboard.runtime_bootstrap": bootstrap_label}
                    if bootstrap_label is not None
                    else {}
                ),
            },
            "HostConfig": {
                "NanoCpus": nano_cpus,
                "Memory": memory_bytes,
                "StorageOpt": {"size": storage_opt_size},
            },
            "Mounts": [
                {
                    "Source": source_mount,
                    "Destination": "/root/.nanobot",
                    "RW": True,
                }
            ],
            "NetworkSettings": {
                "Networks": {network_name: {"IPAddress": "172.18.0.2"}},
            },
        }
        return container

    def test_stop_bot_keeps_container_by_default(self) -> None:
        """stop_bot without remove=True stops the container but keeps it."""
        manager = self._make_manager()
        container = MagicMock()
        container.status = "running"
        container.reload = MagicMock()
        container.stop = MagicMock()
        container.remove = MagicMock()
        manager.client.containers.get.return_value = container

        result = manager.stop_bot("demo")

        self.assertTrue(result)
        container.stop.assert_called_once_with(timeout=5)
        container.remove.assert_not_called()

    def test_stop_bot_remove_true_deletes_container(self) -> None:
        """stop_bot(remove=True) on an exited container removes it without stopping."""
        manager = self._make_manager()
        container = MagicMock()
        container.status = "exited"
        container.reload = MagicMock()
        container.stop = MagicMock()
        container.remove = MagicMock()
        manager.client.containers.get.return_value = container

        result = manager.stop_bot("demo", remove=True)

        self.assertTrue(result)
        container.stop.assert_not_called()
        container.remove.assert_called_once_with()

    def test_start_bot_reuses_compatible_stopped_container(self) -> None:
        """A stopped container matching the desired runtime is simply started."""
        manager = self._make_manager()
        image_tag = "nanobot-base:v1"
        image_id = "sha256:img-v1"
        workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
        container = self._build_container(
            status="exited",
            image=image_tag,
            image_id=image_id,
            nano_cpus=1_000_000_000,
            memory_bytes=1024 * 1024 * 1024,
            storage_opt_size="10G",
            source_mount=workspace_mount,
            network_name="bridge",
        )
        manager.client.images.get.return_value = types.SimpleNamespace(id=image_id)
        manager.client.containers.get.return_value = container

        result = manager.start_bot(
            "demo",
            image_tag=image_tag,
            env_vars={"TZ": "UTC", "API_KEY": "updated-secret"},
            cpu_cores=1.0,
            memory_mb=1024,
            storage_gb=10,
        )

        self.assertTrue(result)
        container.start.assert_called_once_with()
        container.remove.assert_not_called()
        manager.client.containers.run.assert_not_called()

    def test_start_bot_recreates_incompatible_stopped_container(self) -> None:
        """A stopped container with a different image is removed and recreated."""
        manager = self._make_manager()
        image_tag = "nanobot-base:v1"
        desired_image_id = "sha256:img-v1"
        workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
        container = self._build_container(
            status="exited",
            image="nanobot-base:old",
            image_id="sha256:img-old",
            nano_cpus=1_000_000_000,
            memory_bytes=1024 * 1024 * 1024,
            storage_opt_size="10G",
            source_mount=workspace_mount,
            network_name="bridge",
        )
        manager.client.images.get.return_value = types.SimpleNamespace(id=desired_image_id)
        manager.client.containers.get.return_value = container
        manager._run_container_with_storage_fallback = MagicMock(return_value=MagicMock())

        result = manager.start_bot(
            "demo",
            image_tag=image_tag,
            env_vars={"TZ": "Asia/Shanghai"},
            cpu_cores=1.0,
            memory_mb=1024,
            storage_gb=10,
        )

        self.assertTrue(result)
        container.start.assert_not_called()
        container.remove.assert_called_once_with(force=True)
        manager._run_container_with_storage_fallback.assert_called_once()

    def test_start_bot_recreates_stopped_container_when_image_id_changes_under_same_tag(self) -> None:
        """Same tag but a rebuilt image id must still trigger recreation."""
        manager = self._make_manager()
        image_tag = "nanobot-base:v1"
        workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
        container = self._build_container(
            status="exited",
            image=image_tag,
            image_id="sha256:img-old",
            nano_cpus=1_000_000_000,
            memory_bytes=1024 * 1024 * 1024,
            storage_opt_size="10G",
            source_mount=workspace_mount,
            network_name="bridge",
        )
        manager.client.images.get.return_value = types.SimpleNamespace(id="sha256:img-new")
        manager.client.containers.get.return_value = container
        manager._run_container_with_storage_fallback = MagicMock(return_value=MagicMock())

        result = manager.start_bot(
            "demo",
            image_tag=image_tag,
            env_vars={"TZ": "Asia/Shanghai"},
            cpu_cores=1.0,
            memory_mb=1024,
            storage_gb=10,
        )

        self.assertTrue(result)
        container.start.assert_not_called()
        container.remove.assert_called_once_with(force=True)
        manager._run_container_with_storage_fallback.assert_called_once()

    def test_start_bot_recreates_container_without_new_entrypoint(self) -> None:
        """A container created before the bootstrap label existed is recreated."""
        manager = self._make_manager()
        image_tag = "nanobot-base:v1"
        image_id = "sha256:img-v1"
        workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
        container = self._build_container(
            status="exited",
            image=image_tag,
            image_id=image_id,
            nano_cpus=1_000_000_000,
            memory_bytes=1024 * 1024 * 1024,
            storage_opt_size="10G",
            source_mount=workspace_mount,
            network_name="bridge",
            bootstrap_label=None,
        )
        manager.client.images.get.return_value = types.SimpleNamespace(id=image_id)
        manager.client.containers.get.return_value = container
        manager._run_container_with_storage_fallback = MagicMock(return_value=MagicMock())

        result = manager.start_bot(
            "demo",
            image_tag=image_tag,
            env_vars={"TZ": "Asia/Shanghai"},
            cpu_cores=1.0,
            memory_mb=1024,
            storage_gb=10,
        )

        self.assertTrue(result)
        container.start.assert_not_called()
        container.remove.assert_called_once_with(force=True)
        manager._run_container_with_storage_fallback.assert_called_once()

    def test_start_bot_recreates_running_container_when_image_id_changes_under_same_tag(self) -> None:
        """Even a RUNNING container is recreated when its image id is stale."""
        manager = self._make_manager()
        image_tag = "nanobot-base:v1"
        workspace_mount = f"{self._tmpdir.name}/demo/.nanobot"
        container = self._build_container(
            status="running",
            image=image_tag,
            image_id="sha256:img-old",
            nano_cpus=1_000_000_000,
            memory_bytes=1024 * 1024 * 1024,
            storage_opt_size="10G",
            source_mount=workspace_mount,
            network_name="bridge",
        )
        manager.client.images.get.return_value = types.SimpleNamespace(id="sha256:img-new")
        manager.client.containers.get.return_value = container
        manager._run_container_with_storage_fallback = MagicMock(return_value=MagicMock())

        result = manager.start_bot(
            "demo",
            image_tag=image_tag,
            env_vars={"TZ": "Asia/Shanghai"},
            cpu_cores=1.0,
            memory_mb=1024,
            storage_gb=10,
        )

        self.assertTrue(result)
        container.remove.assert_called_once_with(force=True)
        manager._run_container_with_storage_fallback.assert_called_once()

    def test_send_command_waits_for_dashboard_ready(self) -> None:
        """send_command gates delivery on the dashboard-ready wait."""
        manager = self._make_manager()
        manager._wait_for_dashboard_ready = MagicMock(return_value=True)
        manager._send_command_via_exec = MagicMock(return_value=True)

        result = manager.send_command("demo", "hello")

        self.assertTrue(result)
        manager._wait_for_dashboard_ready.assert_called_once_with("demo")
        manager._send_command_via_exec.assert_called_once_with("demo", "hello", [])

    def test_send_command_returns_false_when_dashboard_never_becomes_ready(self) -> None:
        """When the wait times out, no delivery path is attempted and the error is kept."""
        manager = self._make_manager()

        def _wait_timeout(bot_id: str) -> bool:
            # Mimic the real method: record a reason, then report failure.
            manager._last_delivery_error[bot_id] = "Dashboard channel was not ready within 15s"
            return False

        manager._wait_for_dashboard_ready = MagicMock(side_effect=_wait_timeout)
        manager._send_command_via_exec = MagicMock()
        manager._send_command_via_host_http = MagicMock()

        result = manager.send_command("demo", "hello")

        self.assertFalse(result)
        manager._send_command_via_exec.assert_not_called()
        manager._send_command_via_host_http.assert_not_called()
        self.assertEqual(
            manager.get_last_delivery_error("demo"),
            "Dashboard channel was not ready within 15s",
        )

    def test_wait_for_dashboard_ready_returns_true_after_start_log(self) -> None:
        """The wait loop keeps polling until a ready marker appears in the logs."""
        manager = self._make_manager()
        manager.get_bot_status = MagicMock(return_value="RUNNING")
        manager.get_recent_logs = MagicMock(
            side_effect=[
                ["Agent loop started"],
                ["2026-04-25 | INFO | nanobot.channels.dashboard:start:66 - ready"],
            ]
        )

        with patch("core.docker_manager.time.sleep", return_value=None):
            result = manager._wait_for_dashboard_ready(
                "demo",
                timeout_seconds=2.0,
                poll_interval_seconds=0.1,
            )

        self.assertTrue(result)

    def test_wait_for_dashboard_ready_sets_timeout_error(self) -> None:
        """When the deadline passes without a marker, a timeout error is recorded."""
        manager = self._make_manager()
        manager.get_bot_status = MagicMock(return_value="RUNNING")
        manager.get_recent_logs = MagicMock(return_value=["Agent loop started"])

        # Fake clock: last value (1.2) crosses the 1.0s deadline.
        time_values = iter([0.0, 0.2, 0.4, 1.2])

        with (
            patch("core.docker_manager.time.monotonic", side_effect=lambda: next(time_values)),
            patch("core.docker_manager.time.sleep", return_value=None),
        ):
            result = manager._wait_for_dashboard_ready(
                "demo",
                timeout_seconds=1.0,
                poll_interval_seconds=0.1,
            )

        self.assertFalse(result)
        self.assertEqual(
            manager.get_last_delivery_error("demo"),
            "Dashboard channel was not ready within 1s",
        )
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -1,30 +1,86 @@
|
|||
FROM python:3.12-slim
|
||||
FROM python:3.12-slim AS builder
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
ENV LANG=C.UTF-8
|
||||
ENV LC_ALL=C.UTF-8
|
||||
ENV PYTHONIOENCODING=utf-8
|
||||
ENV PATH=/opt/venv/bin:$PATH
|
||||
|
||||
# 1. 替换 Debian 源为国内镜像
|
||||
RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
|
||||
sed -i 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources
|
||||
|
||||
# 2. 安装基础依赖
|
||||
# 2. 仅在构建阶段安装编译依赖
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
curl \
|
||||
gcc \
|
||||
libpq-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# 3. 安装 aiohttp 和基础 python 工具
|
||||
RUN python -m pip install --no-cache-dir -i https://mirrors.aliyun.com/pypi/simple/ --upgrade \
|
||||
pip setuptools wheel aiohttp
|
||||
RUN python -m venv /opt/venv
|
||||
|
||||
WORKDIR /app
|
||||
# 这一步会把您修改好的 nanobot/channels/dashboard.py 一起拷进去
|
||||
COPY . /app
|
||||
COPY pyproject.toml README.md LICENSE THIRD_PARTY_NOTICES.md ./
|
||||
|
||||
# 4. 安装 nanobot(包含 WeCom 渠道依赖)
|
||||
RUN pip install --no-cache-dir -i https://mirrors.aliyun.com/pypi/simple/ ".[wecom]"
|
||||
# 3. 先安装第三方依赖。该层只依赖 pyproject.toml,源码改动不会触发整套依赖重装。
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
python -m pip install -i https://mirrors.aliyun.com/pypi/simple/ --upgrade \
|
||||
--no-compile pip setuptools wheel aiohttp hatchling && \
|
||||
python -c 'import tomllib; data=tomllib.load(open("pyproject.toml","rb")); deps=list(data["project"].get("dependencies", [])); deps.extend(data["project"].get("optional-dependencies", {}).get("wecom", [])); print("\n".join(deps))' > /tmp/requirements.txt && \
|
||||
pip install --no-compile -i https://mirrors.aliyun.com/pypi/simple/ -r /tmp/requirements.txt && \
|
||||
rm -f /tmp/requirements.txt
|
||||
|
||||
COPY nanobot/ nanobot/
|
||||
COPY bridge/ bridge/
|
||||
|
||||
# 4. 源码层只安装 nanobot 本体,不重复解析/下载第三方依赖。
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip install --no-compile --no-deps --no-build-isolation . && \
|
||||
find /opt/venv -type d -name __pycache__ -prune -exec rm -rf {} + && \
|
||||
find /opt/venv -name '*.pyc' -delete
|
||||
|
||||
|
||||
FROM python:3.12-slim
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
ENV LANG=C.UTF-8
|
||||
ENV LC_ALL=C.UTF-8
|
||||
ENV PYTHONIOENCODING=utf-8
|
||||
ENV PATH=/opt/venv/bin:$PATH
|
||||
ARG INSTALL_EXTRA_CLI=false
|
||||
|
||||
# 1. 替换 Debian 源为国内镜像
|
||||
RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
|
||||
sed -i 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources
|
||||
|
||||
# 2. 安装基础运行时依赖
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
bubblewrap \
|
||||
ca-certificates \
|
||||
curl \
|
||||
git \
|
||||
openssh-client \
|
||||
tmux \
|
||||
&& git config --global --add url."https://github.com/".insteadOf ssh://git@github.com/ \
|
||||
&& git config --global --add url."https://github.com/".insteadOf git@github.com: \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# 3. Node.js 与 GitHub CLI 只在需要相关工具时安装,默认跳过以避免访问外部 apt 源。
|
||||
RUN if [ "$INSTALL_EXTRA_CLI" = "true" ]; then \
|
||||
apt-get update && apt-get install -y --no-install-recommends gnupg \
|
||||
&& mkdir -p /etc/apt/keyrings /etc/apt/sources.list.d \
|
||||
&& curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
|
||||
&& echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_22.x nodistro main" > /etc/apt/sources.list.d/nodesource.list \
|
||||
&& curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg > /etc/apt/keyrings/githubcli-archive-keyring.gpg \
|
||||
&& chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
|
||||
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" > /etc/apt/sources.list.d/github-cli.list \
|
||||
&& apt-get update && apt-get install -y --no-install-recommends \
|
||||
gh \
|
||||
nodejs \
|
||||
&& apt-get purge -y --auto-remove gnupg \
|
||||
&& rm -rf /var/lib/apt/lists/*; \
|
||||
else \
|
||||
echo "Skipping optional Node.js and GitHub CLI"; \
|
||||
fi
|
||||
|
||||
# 4. 仅复制已安装好的运行环境,避免把源码目录打进最终镜像
|
||||
COPY --from=builder /opt/venv /opt/venv
|
||||
|
||||
WORKDIR /root
|
||||
# 官方 gateway 模式,现在它会自动加载您的 DashboardChannel
|
||||
|
|
|
|||
|
|
@ -21,8 +21,6 @@ build_image() {
|
|||
echo ">> [2/3] 拷贝 dashboard.py 到 channels 目录 ..."
|
||||
if [ -d "${BASE_DIR}/${dir_name}/nanobot/channels" ]; then
|
||||
cp "${BASE_DIR}/dashboard.py" "${BASE_DIR}/${dir_name}/nanobot/channels/"
|
||||
elif [ -d "${BASE_DIR}/${dir_name}/channels" ]; then
|
||||
cp "${BASE_DIR}/dashboard.py" "${BASE_DIR}/${dir_name}/channels/"
|
||||
else
|
||||
# 兜底创建 nanobot/channels/
|
||||
mkdir -p "${BASE_DIR}/${dir_name}/nanobot/channels/"
|
||||
|
|
@ -32,7 +30,11 @@ build_image() {
|
|||
# 3. 执行 Docker build
|
||||
echo ">> [3/3] 开始打包 Docker 镜像: ${image_name} ..."
|
||||
cd "${BASE_DIR}/${dir_name}"
|
||||
docker build -f Dashboard.Dockerfile -t "${image_name}" .
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
--build-arg INSTALL_EXTRA_CLI="${INSTALL_EXTRA_CLI:-false}" \
|
||||
-f Dashboard.Dockerfile \
|
||||
-t "${image_name}" \
|
||||
.
|
||||
|
||||
echo "=================================================="
|
||||
echo "✅ 构建完成: ${image_name}"
|
||||
|
|
|
|||
|
|
@ -114,8 +114,9 @@ class DashboardChannel(BaseChannel):
|
|||
"""处理来自面板的指令入站"""
|
||||
try:
|
||||
data = await request.json()
|
||||
user_message = data.get("message", "").strip()
|
||||
media = [str(v).strip().replace("\\", "/") for v in (data.get("media") or []) if str(v).strip()]
|
||||
user_message = str(data.get("message") or "").strip()
|
||||
raw_media = data.get("media") or []
|
||||
media = [str(v).strip().replace("\\", "/") for v in raw_media if str(v).strip()] if isinstance(raw_media, list) else []
|
||||
|
||||
if not user_message and not media:
|
||||
return web.json_response({"status": "error", "reason": "empty message and media"}, status=400)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,552 @@
|
|||
"""WeCom (Enterprise WeChat) channel implementation using wecom_aibot_sdk."""
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
import hashlib
|
||||
import importlib.util
|
||||
import os
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from loguru import logger
|
||||
|
||||
from nanobot.bus.events import OutboundMessage
|
||||
from nanobot.bus.queue import MessageBus
|
||||
from nanobot.channels.base import BaseChannel
|
||||
from nanobot.config.paths import get_media_dir, get_workspace_path
|
||||
from nanobot.config.schema import Base
|
||||
from pydantic import Field
|
||||
|
||||
# True when the optional wecom_aibot_sdk package is importable; start() refuses to run without it.
WECOM_AVAILABLE = importlib.util.find_spec("wecom_aibot_sdk") is not None

# Upload safety limits (matching QQ channel defaults)
# Applied to both inbound downloads and outbound uploads.
WECOM_UPLOAD_MAX_BYTES = 1024 * 1024 * 200  # 200MB
|
||||
# Replace unsafe characters with "_", keep Chinese and common safe punctuation.
|
||||
_SAFE_NAME_RE = re.compile(r"[^\w.\-()\[\]()【】\u4e00-\u9fff]+", re.UNICODE)
|
||||
|
||||
|
||||
def _sanitize_filename(name: str) -> str:
|
||||
"""Sanitize filename to avoid traversal and problematic chars."""
|
||||
name = (name or "").strip()
|
||||
name = Path(name).name
|
||||
name = _SAFE_NAME_RE.sub("_", name).strip("._ ")
|
||||
return name
|
||||
|
||||
|
||||
_IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp"}
|
||||
_VIDEO_EXTS = {".mp4", ".avi", ".mov"}
|
||||
_AUDIO_EXTS = {".amr", ".mp3", ".wav", ".ogg"}
|
||||
|
||||
|
||||
def _guess_wecom_media_type(filename: str) -> str:
|
||||
"""Classify file extension as WeCom media_type string."""
|
||||
ext = Path(filename).suffix.lower()
|
||||
if ext in _IMAGE_EXTS:
|
||||
return "image"
|
||||
if ext in _VIDEO_EXTS:
|
||||
return "video"
|
||||
if ext in _AUDIO_EXTS:
|
||||
return "voice"
|
||||
return "file"
|
||||
|
||||
class WecomConfig(Base):
    """WeCom (Enterprise WeChat) AI Bot channel configuration."""

    # Master switch: when False the channel is not used.
    enabled: bool = False
    # Bot credentials issued by the WeCom AI Bot platform (both required to start).
    bot_id: str = ""
    secret: str = ""
    # Allow-list of sender ids; presumably an empty list means "allow everyone" — TODO confirm against channel gating logic.
    allow_from: list[str] = Field(default_factory=list)
    # Greeting sent when a user opens a chat with the bot; empty string disables the welcome reply.
    welcome_message: str = ""
||||
|
||||
|
||||
# Message type display mapping: placeholder text shown for non-text payloads.
MSG_TYPE_MAP = dict(
    image="[image]",
    voice="[voice]",
    file="[file]",
    mixed="[mixed content]",
)
|
||||
|
||||
|
||||
class WecomChannel(BaseChannel):
    """
    WeCom (Enterprise WeChat) channel using WebSocket long connection.

    Uses WebSocket to receive events - no public IP or webhook required.

    Requires:
    - Bot ID and Secret from WeCom AI Bot platform
    """

    name = "wecom"
    display_name = "WeCom"

    @classmethod
    def default_config(cls) -> dict[str, Any]:
        """Return the default channel configuration as a plain (aliased) dict."""
        return WecomConfig().model_dump(by_alias=True)

    def __init__(self, config: Any, bus: MessageBus):
        if isinstance(config, dict):
            config = WecomConfig.model_validate(config)
        super().__init__(config, bus)
        self.config: WecomConfig = config
        self._client: Any = None
        # Inbound de-duplication cache; insertion-ordered so the oldest ids are trimmed first.
        self._processed_message_ids: OrderedDict[str, None] = OrderedDict()
        self._loop: asyncio.AbstractEventLoop | None = None
        # Bound to wecom_aibot_sdk.generate_req_id once start() runs.
        self._generate_req_id = None
        # Store frame headers for each chat to enable replies
        self._chat_frames: dict[str, Any] = {}

    async def start(self) -> None:
        """Start the WeCom bot with WebSocket long connection."""
        if not WECOM_AVAILABLE:
            logger.error("WeCom SDK not installed. Run: pip install nanobot-ai[wecom]")
            return

        if not self.config.bot_id or not self.config.secret:
            logger.error("WeCom bot_id and secret not configured")
            return

        # Deferred import so the module loads even without the optional SDK installed.
        from wecom_aibot_sdk import WSClient, generate_req_id

        self._running = True
        self._loop = asyncio.get_running_loop()
        self._generate_req_id = generate_req_id

        # Create WebSocket client
        self._client = WSClient({
            "bot_id": self.config.bot_id,
            "secret": self.config.secret,
            "reconnect_interval": 1000,
            "max_reconnect_attempts": -1,  # Infinite reconnect
            "heartbeat_interval": 30000,
        })

        # Register event handlers
        self._client.on("connected", self._on_connected)
        self._client.on("authenticated", self._on_authenticated)
        self._client.on("disconnected", self._on_disconnected)
        self._client.on("error", self._on_error)
        self._client.on("message.text", self._on_text_message)
        self._client.on("message.image", self._on_image_message)
        self._client.on("message.voice", self._on_voice_message)
        self._client.on("message.file", self._on_file_message)
        self._client.on("message.mixed", self._on_mixed_message)
        self._client.on("event.enter_chat", self._on_enter_chat)

        logger.info("WeCom bot starting with WebSocket long connection")
        logger.info("No public IP required - using WebSocket to receive events")

        # Connect
        await self._client.connect_async()

        # Keep running until stopped (stop() flips the flag).
        while self._running:
            await asyncio.sleep(1)

    async def stop(self) -> None:
        """Stop the WeCom bot."""
        self._running = False
        if self._client:
            await self._client.disconnect()
        logger.info("WeCom bot stopped")

    async def _on_connected(self, frame: Any) -> None:
        """Handle WebSocket connected event."""
        logger.info("WeCom WebSocket connected")

    async def _on_authenticated(self, frame: Any) -> None:
        """Handle authentication success event."""
        logger.info("WeCom authenticated successfully")

    async def _on_disconnected(self, frame: Any) -> None:
        """Handle WebSocket disconnected event."""
        reason = frame.body if hasattr(frame, 'body') else str(frame)
        logger.warning("WeCom WebSocket disconnected: {}", reason)

    async def _on_error(self, frame: Any) -> None:
        """Handle error event."""
        logger.error("WeCom error: {}", frame)

    async def _on_text_message(self, frame: Any) -> None:
        """Handle text message."""
        await self._process_message(frame, "text")

    async def _on_image_message(self, frame: Any) -> None:
        """Handle image message."""
        await self._process_message(frame, "image")

    async def _on_voice_message(self, frame: Any) -> None:
        """Handle voice message."""
        await self._process_message(frame, "voice")

    async def _on_file_message(self, frame: Any) -> None:
        """Handle file message."""
        await self._process_message(frame, "file")

    async def _on_mixed_message(self, frame: Any) -> None:
        """Handle mixed content message."""
        await self._process_message(frame, "mixed")

    async def _on_enter_chat(self, frame: Any) -> None:
        """Handle enter_chat event (user opens chat with bot)."""
        try:
            # Extract body from WsFrame dataclass or dict
            if hasattr(frame, 'body'):
                body = frame.body or {}
            elif isinstance(frame, dict):
                body = frame.get("body", frame)
            else:
                body = {}

            chat_id = body.get("chatid", "") if isinstance(body, dict) else ""

            # Only greet when a welcome message is configured.
            if chat_id and self.config.welcome_message:
                await self._client.reply_welcome(frame, {
                    "msgtype": "text",
                    "text": {"content": self.config.welcome_message},
                })
        except Exception as e:
            logger.error("Error handling enter_chat: {}", e)

    async def _process_message(self, frame: Any, msg_type: str) -> None:
        """Process incoming message and forward to bus."""
        try:
            # Extract body from WsFrame dataclass or dict
            if hasattr(frame, 'body'):
                body = frame.body or {}
            elif isinstance(frame, dict):
                body = frame.get("body", frame)
            else:
                body = {}

            # Ensure body is a dict
            if not isinstance(body, dict):
                logger.warning("Invalid body type: {}", type(body))
                return

            # Extract message info; fall back to chatid+sendertime when msgid is absent.
            msg_id = body.get("msgid", "")
            if not msg_id:
                msg_id = f"{body.get('chatid', '')}_{body.get('sendertime', '')}"

            # Deduplication check
            if msg_id in self._processed_message_ids:
                return
            self._processed_message_ids[msg_id] = None

            # Trim cache (oldest entries first)
            while len(self._processed_message_ids) > 1000:
                self._processed_message_ids.popitem(last=False)

            # Extract sender info from "from" field (SDK format)
            from_info = body.get("from", {})
            sender_id = from_info.get("userid", "unknown") if isinstance(from_info, dict) else "unknown"

            # For single chat, chatid is the sender's userid
            # For group chat, chatid is provided in body
            chat_type = body.get("chattype", "single")
            chat_id = body.get("chatid", sender_id)

            content_parts = []
            media_paths: list[str] = []

            if msg_type == "text":
                text = body.get("text", {}).get("content", "")
                if text:
                    content_parts.append(text)

            elif msg_type == "image":
                image_info = body.get("image", {})
                file_url = image_info.get("url", "")
                aes_key = image_info.get("aeskey", "")

                if file_url and aes_key:
                    file_path = await self._download_and_save_media(file_url, aes_key, "image")
                    if file_path:
                        # BUGFIX: the placeholder must interpolate the saved
                        # filename (it was previously a dead variable).
                        filename = os.path.basename(file_path)
                        content_parts.append(f"[image: {filename}]")
                        media_paths.append(file_path)
                    else:
                        content_parts.append("[image: download failed]")
                else:
                    content_parts.append("[image: download failed]")

            elif msg_type == "voice":
                voice_info = body.get("voice", {})
                # Voice message already contains transcribed content from WeCom
                voice_content = voice_info.get("content", "")
                if voice_content:
                    content_parts.append(f"[voice] {voice_content}")
                else:
                    content_parts.append("[voice]")

            elif msg_type == "file":
                file_info = body.get("file", {})
                file_url = file_info.get("url", "")
                aes_key = file_info.get("aeskey", "")
                file_name = file_info.get("name", "unknown")

                if file_url and aes_key:
                    file_path = await self._download_and_save_media(file_url, aes_key, "file", file_name)
                    if file_path:
                        content_parts.append(f"[file: {file_name}]")
                        media_paths.append(file_path)
                    else:
                        content_parts.append(f"[file: {file_name}: download failed]")
                else:
                    content_parts.append(f"[file: {file_name}: download failed]")

            elif msg_type == "mixed":
                # Mixed content contains multiple message items
                msg_items = body.get("mixed", {}).get("msg_item", [])
                for item in msg_items:
                    item_type = item.get("msgtype", "")
                    if item_type == "text":
                        text = item.get("text", {}).get("content", "")
                        if text:
                            content_parts.append(text)
                    elif item_type == "image":
                        file_url = item.get("image", {}).get("url", "")
                        aes_key = item.get("image", {}).get("aeskey", "")
                        if file_url and aes_key:
                            file_path = await self._download_and_save_media(file_url, aes_key, "image")
                            if file_path:
                                # BUGFIX: interpolate the filename (same dead-variable
                                # bug as the single-image branch above).
                                filename = os.path.basename(file_path)
                                content_parts.append(f"[image: {filename}]")
                                media_paths.append(file_path)
                        # NOTE(review): unlike the single-image branch, a failed
                        # download here adds no placeholder — confirm intended.
                    else:
                        content_parts.append(MSG_TYPE_MAP.get(item_type, f"[{item_type}]"))

            else:
                content_parts.append(MSG_TYPE_MAP.get(msg_type, f"[{msg_type}]"))

            content = "\n".join(content_parts) if content_parts else ""

            if not content:
                return

            # Store frame for this chat to enable replies
            self._chat_frames[chat_id] = frame

            # Forward to message bus
            await self._handle_message(
                sender_id=sender_id,
                chat_id=chat_id,
                content=content,
                media=media_paths or None,
                metadata={
                    "message_id": msg_id,
                    "msg_type": msg_type,
                    "chat_type": chat_type,
                }
            )

        except Exception as e:
            logger.error("Error processing WeCom message: {}", e)

    async def _download_and_save_media(
        self,
        file_url: str,
        aes_key: str,
        media_type: str,
        filename: str | None = None,
    ) -> str | None:
        """
        Download and decrypt media from WeCom.

        Returns:
            file_path or None if download failed
        """
        try:
            data, fname = await self._client.download_file(file_url, aes_key)

            if not data:
                logger.warning("Failed to download media from WeCom")
                return None

            if len(data) > WECOM_UPLOAD_MAX_BYTES:
                logger.warning(
                    "WeCom inbound media too large: {} bytes (max {})",
                    len(data),
                    WECOM_UPLOAD_MAX_BYTES,
                )
                return None

            media_dir = get_media_dir("wecom")
            candidate = _sanitize_filename(filename or fname or "")
            if not candidate:
                # BUGFIX: previously used builtin hash(), which is salted per
                # process (PYTHONHASHSEED) and yielded a different fallback name
                # on every run; md5 of the URL is stable. Also guards against
                # _sanitize_filename() returning an empty string, which would
                # have made file_path point at the media directory itself.
                digest = hashlib.md5(file_url.encode("utf-8")).hexdigest()[:10]
                candidate = f"{media_type}_{digest}"
            filename = candidate

            file_path = media_dir / filename
            await asyncio.to_thread(file_path.write_bytes, data)
            logger.debug("Downloaded {} to {}", media_type, file_path)
            return str(file_path)

        except Exception as e:
            logger.error("Error downloading media: {}", e)
            return None

    async def _upload_media_ws(
        self, client: Any, file_path: str,
    ) -> "tuple[str, str] | tuple[None, None]":
        """Upload a local file to WeCom via WebSocket 3-step protocol (base64).

        Uses the WeCom WebSocket upload commands directly via
        ``client._ws_manager.send_reply()``:

        ``aibot_upload_media_init`` → upload_id
        ``aibot_upload_media_chunk`` × N (≤512 KB raw per chunk, base64)
        ``aibot_upload_media_finish`` → media_id

        Returns (media_id, media_type) on success, (None, None) on failure.
        """
        from wecom_aibot_sdk.utils import generate_req_id as _gen_req_id

        try:
            fname = os.path.basename(file_path)
            media_type = _guess_wecom_media_type(fname)

            # Read file size and data in a thread to avoid blocking the event loop
            def _read_file():
                file_size = os.path.getsize(file_path)
                if file_size > WECOM_UPLOAD_MAX_BYTES:
                    raise ValueError(
                        f"File too large: {file_size} bytes (max {WECOM_UPLOAD_MAX_BYTES})"
                    )
                with open(file_path, "rb") as f:
                    return file_size, f.read()

            file_size, data = await asyncio.to_thread(_read_file)
            # MD5 is used for file integrity only, not cryptographic security
            md5_hash = hashlib.md5(data).hexdigest()

            CHUNK_SIZE = 512 * 1024  # 512 KB raw (before base64)
            mv = memoryview(data)
            chunk_list = [bytes(mv[i : i + CHUNK_SIZE]) for i in range(0, file_size, CHUNK_SIZE)]
            n_chunks = len(chunk_list)
            # Release the large buffer early; only the chunks remain referenced.
            del mv, data

            # Step 1: init
            req_id = _gen_req_id("upload_init")
            resp = await client._ws_manager.send_reply(req_id, {
                "type": media_type,
                "filename": fname,
                "total_size": file_size,
                "total_chunks": n_chunks,
                "md5": md5_hash,
            }, "aibot_upload_media_init")
            if resp.errcode != 0:
                logger.warning("WeCom upload init failed ({}): {}", resp.errcode, resp.errmsg)
                return None, None
            upload_id = resp.body.get("upload_id") if resp.body else None
            if not upload_id:
                logger.warning("WeCom upload init: no upload_id in response")
                return None, None

            # Step 2: send chunks
            for i, chunk in enumerate(chunk_list):
                req_id = _gen_req_id("upload_chunk")
                resp = await client._ws_manager.send_reply(req_id, {
                    "upload_id": upload_id,
                    "chunk_index": i,
                    "base64_data": base64.b64encode(chunk).decode(),
                }, "aibot_upload_media_chunk")
                if resp.errcode != 0:
                    logger.warning("WeCom upload chunk {} failed ({}): {}", i, resp.errcode, resp.errmsg)
                    return None, None

            # Step 3: finish
            req_id = _gen_req_id("upload_finish")
            resp = await client._ws_manager.send_reply(req_id, {
                "upload_id": upload_id,
            }, "aibot_upload_media_finish")
            if resp.errcode != 0:
                logger.warning("WeCom upload finish failed ({}): {}", resp.errcode, resp.errmsg)
                return None, None

            media_id = resp.body.get("media_id") if resp.body else None
            if not media_id:
                logger.warning("WeCom upload finish: no media_id in response body={}", resp.body)
                return None, None

            suffix = "..." if len(media_id) > 16 else ""
            logger.debug("WeCom uploaded {} ({}) → media_id={}", fname, media_type, media_id[:16] + suffix)
            return media_id, media_type

        except ValueError as e:
            # Oversize file raised by _read_file — skipped, not an error.
            logger.warning("WeCom upload skipped for {}: {}", file_path, e)
            return None, None
        except Exception as e:
            logger.error("WeCom _upload_media_ws error for {}: {}", file_path, e)
            return None, None

    async def send(self, msg: OutboundMessage) -> None:
        """Send a message through WeCom."""
        if not self._client:
            logger.warning("WeCom client not initialized")
            return

        try:
            content = (msg.content or "").strip()
            is_progress = bool(msg.metadata.get("_progress"))

            # Get the stored frame for this chat
            frame = self._chat_frames.get(msg.chat_id)

            # Send media files via WebSocket upload
            for file_path in msg.media or []:
                upload_path = file_path
                # Resolve relative paths against the workspace.
                if not os.path.isfile(upload_path) and not os.path.isabs(file_path):
                    upload_path = str(get_workspace_path() / file_path)
                if not os.path.isfile(upload_path):
                    logger.warning("WeCom media file not found: {}", file_path)
                    continue
                media_id, media_type = await self._upload_media_ws(self._client, upload_path)
                if media_id:
                    if frame:
                        await self._client.reply(frame, {
                            "msgtype": media_type,
                            media_type: {"media_id": media_id},
                        })
                    else:
                        await self._client.send_message(msg.chat_id, {
                            "msgtype": media_type,
                            media_type: {"media_id": media_id},
                        })
                    logger.debug("WeCom sent {} → {}", media_type, msg.chat_id)
                else:
                    content += f"\n[file upload failed: {os.path.basename(file_path)}]"

            if not content:
                return

            if frame:
                # Both progress and final messages must use reply_stream (cmd="aibot_respond_msg").
                # The plain reply() uses cmd="reply" which does not support "text" msgtype
                # and causes errcode=40008 from WeCom API.
                stream_id = self._generate_req_id("stream")
                await self._client.reply_stream(
                    frame,
                    stream_id,
                    content,
                    finish=not is_progress,
                )
                logger.debug(
                    "WeCom {} sent to {}",
                    "progress" if is_progress else "message",
                    msg.chat_id,
                )
            else:
                # No frame (e.g. cron push): proactive send only supports markdown
                await self._client.send_message(msg.chat_id, {
                    "msgtype": "markdown",
                    "markdown": {"content": content},
                })
                logger.info("WeCom proactive send to {}", msg.chat_id)

        except Exception:
            logger.exception("Error sending WeCom message to chat_id={}", msg.chat_id)
|
||||
|
|
@ -120,6 +120,7 @@ export function BotRouteAccessGate({
|
|||
const customEvent = event as CustomEvent<{ botId?: string }>;
|
||||
const invalidBotId = String(customEvent.detail?.botId || '').trim();
|
||||
if (!invalidBotId || invalidBotId !== normalizedBotId) return;
|
||||
if (!unlocked) return;
|
||||
setUnlocked(false);
|
||||
setAuthRefreshNonce((value) => value + 1);
|
||||
setPassword('');
|
||||
|
|
@ -128,7 +129,7 @@ export function BotRouteAccessGate({
|
|||
};
|
||||
window.addEventListener(BOT_AUTH_INVALID_EVENT, handleBotAuthInvalid as EventListener);
|
||||
return () => window.removeEventListener(BOT_AUTH_INVALID_EVENT, handleBotAuthInvalid as EventListener);
|
||||
}, [copy.errorExpired, normalizedBotId, passwordEnabled]);
|
||||
}, [copy.errorExpired, normalizedBotId, passwordEnabled, unlocked]);
|
||||
|
||||
const unlockBot = async () => {
|
||||
const entered = String(password || '').trim();
|
||||
|
|
|
|||
|
|
@ -82,6 +82,7 @@ export function PanelLoginGate({
|
|||
useEffect(() => {
|
||||
if (typeof window === 'undefined' || bypass) return undefined;
|
||||
const handlePanelAuthInvalid = () => {
|
||||
if (!authenticated) return;
|
||||
setRequired(true);
|
||||
setAuthenticated(false);
|
||||
setChecking(false);
|
||||
|
|
@ -94,7 +95,7 @@ export function PanelLoginGate({
|
|||
};
|
||||
window.addEventListener(PANEL_AUTH_INVALID_EVENT, handlePanelAuthInvalid);
|
||||
return () => window.removeEventListener(PANEL_AUTH_INVALID_EVENT, handlePanelAuthInvalid);
|
||||
}, [bypass, isZh]);
|
||||
}, [authenticated, bypass, isZh]);
|
||||
|
||||
const onSubmit = async () => {
|
||||
const next = String(password || '').trim();
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ export const channelsEn = {
|
|||
wizardSectionTitle: 'Channel Configuration',
|
||||
wizardSectionDesc: 'Manage custom channels.',
|
||||
globalDeliveryTitle: 'Global Delivery',
|
||||
globalDeliveryDesc: '`sendProgress` / `sendToolHints` are global switches and apply to all channels.',
|
||||
openManager: 'Manage Channels',
|
||||
defaultChannel: 'Default Channel',
|
||||
customChannel: 'Custom Channel',
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ export const channelsZhCn = {
|
|||
wizardSectionTitle: '渠道配置',
|
||||
wizardSectionDesc: '管理自定义渠道。',
|
||||
globalDeliveryTitle: '全局消息投递',
|
||||
globalDeliveryDesc: 'sendProgress / sendToolHints 为全局开关,对所有渠道统一生效。',
|
||||
openManager: '管理渠道',
|
||||
defaultChannel: '默认渠道',
|
||||
customChannel: '自定义渠道',
|
||||
|
|
|
|||
|
|
@ -444,7 +444,6 @@ export function ChannelConfigModal({
|
|||
<div className="ops-config-modal">
|
||||
<div className="card">
|
||||
<div className="section-mini-title">{labels.globalDeliveryTitle}</div>
|
||||
<div className="field-label">{labels.globalDeliveryDesc}</div>
|
||||
<div className="wizard-dashboard-switches" style={{ marginTop: 8 }}>
|
||||
<label className="field-label">
|
||||
<input
|
||||
|
|
|
|||
|
|
@ -159,7 +159,6 @@ export function BotWizardChannelModal({
|
|||
<h3>{lc.wizardSectionTitle}</h3>
|
||||
<div className="card">
|
||||
<div className="section-mini-title">{lc.globalDeliveryTitle}</div>
|
||||
<div className="field-label">{lc.globalDeliveryDesc}</div>
|
||||
<div className="bot-wizard-switches" style={{ marginTop: 8 }}>
|
||||
<label className="field-label">
|
||||
<input type="checkbox" checked={sendProgress} onChange={(e) => onUpdateGlobalDeliveryFlag('sendProgress', e.target.checked)} style={{ marginRight: 6 }} />
|
||||
|
|
|
|||
Loading…
Reference in New Issue