# syntax=docker/dockerfile:1
FROM python:3.12-slim

# Runtime environment: unbuffered stdio, UTF-8 locale/IO, and an importable
# patch directory. PYTHONPATH prepends /opt/dashboard-patches so the
# sitecustomize.py shim written below is picked up at interpreter startup;
# the ${PYTHONPATH:+:${PYTHONPATH}} expansion keeps any pre-existing value.
ENV PYTHONUNBUFFERED=1 \
    LANG=C.UTF-8 \
    LC_ALL=C.UTF-8 \
    PYTHONIOENCODING=utf-8 \
    PYTHONPATH=/opt/dashboard-patches${PYTHONPATH:+:${PYTHONPATH}}
# 1. Switch the Debian apt sources to a domestic (Aliyun) mirror.
#    python:3.12-slim (bookworm) keeps its sources in debian.sources.
RUN sed -i \
        -e 's/deb.debian.org/mirrors.aliyun.com/g' \
        -e 's/security.debian.org/mirrors.aliyun.com/g' \
        /etc/apt/sources.list.d/debian.sources
# 2. Install base system dependencies.
#    curl      - HTTP downloads / probes
#    gcc       - C compiler for Python packages built from source
#    libpq-dev - PostgreSQL client headers (e.g. psycopg2 source builds)
# NOTE(review): the exact consumers of gcc/libpq-dev are not visible in this
# file — confirm the project's dependencies still need them.
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    gcc \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*
# 3. Upgrade packaging tooling and install aiohttp, using the Aliyun PyPI mirror.
RUN python -m pip install --no-cache-dir -i https://mirrors.aliyun.com/pypi/simple/ --upgrade \
    pip setuptools wheel aiohttp
# 3.1 LiteLLM compatibility patch for DashScope coder/code models.
# DashScope coder models require `tool_calls[*].function.arguments` to be a JSON string.
# Some upstream stacks may replay historical tool calls as dicts / Python-literal strings.
# We patch LiteLLM entrypoints at runtime so old history can still be forwarded safely.
# The shim is written as sitecustomize.py: CPython imports `sitecustomize`
# automatically at startup, and the PYTHONPATH entry above makes this copy win.
RUN mkdir -p /opt/dashboard-patches && cat > /opt/dashboard-patches/sitecustomize.py <<'PY'
import ast
import copy
import json
import os
import sys
from typing import Any
def _log(message: str) -> None:
|
|
if str(os.getenv("DASHBOARD_LITELLM_PATCH_VERBOSE") or "").strip().lower() not in {"1", "true", "yes", "on"}:
|
|
return
|
|
print(f"[dashboard-litellm-patch] {message}", file=sys.stderr, flush=True)
|
|
|
|
|
|
def _coerce_json_arguments(value: Any) -> str:
|
|
if value is None:
|
|
return "{}"
|
|
if isinstance(value, str):
|
|
text = value.strip()
|
|
if not text:
|
|
return "{}"
|
|
try:
|
|
json.loads(text)
|
|
return text
|
|
except Exception:
|
|
pass
|
|
try:
|
|
parsed = ast.literal_eval(text)
|
|
except Exception:
|
|
parsed = None
|
|
else:
|
|
try:
|
|
return json.dumps(parsed, ensure_ascii=False)
|
|
except Exception:
|
|
pass
|
|
return json.dumps({"raw": text}, ensure_ascii=False)
|
|
try:
|
|
return json.dumps(value, ensure_ascii=False)
|
|
except Exception:
|
|
return json.dumps({"raw": str(value)}, ensure_ascii=False)
|
|
|
|
|
|
def _sanitize_openai_messages(messages: Any) -> tuple[Any, int]:
    """Return ``(cloned_messages, fix_count)`` with tool/function arguments coerced.

    Walks a list of OpenAI-style chat messages and rewrites every
    ``tool_calls[*].function.arguments`` and ``function_call.arguments`` value
    through ``_coerce_json_arguments``. The input list is deep-copied before
    mutation; non-list input is returned untouched with a count of 0.
    """
    if not isinstance(messages, list):
        return messages, 0

    try:
        sanitized = copy.deepcopy(messages)
    except Exception:
        # Best effort: shallow copy (nested dicts then remain shared with the caller).
        sanitized = list(messages)

    fixes = 0

    def _fix(container: Any) -> None:
        # Coerce container["arguments"] in place; counts only real changes.
        nonlocal fixes
        if not isinstance(container, dict):
            return
        original = container.get("arguments")
        coerced = _coerce_json_arguments(original)
        if original != coerced:
            container["arguments"] = coerced
            fixes += 1

    for entry in sanitized:
        if not isinstance(entry, dict):
            continue
        calls = entry.get("tool_calls")
        if isinstance(calls, list):
            for call in calls:
                if isinstance(call, dict):
                    _fix(call.get("function"))
        _fix(entry.get("function_call"))

    return sanitized, fixes
def _patch_litellm() -> None:
    """Monkey-patch LiteLLM completion entrypoints to sanitize replayed tool calls.

    Wraps ``completion`` / ``completion_with_retries`` (sync) and
    ``acompletion`` (async) so that the keyword argument ``messages`` is run
    through ``_sanitize_openai_messages`` before the real call. If litellm is
    not installed, the patch is skipped silently (logged when verbose).
    """
    try:
        import litellm  # type: ignore
    except Exception as exc:
        _log(f"litellm import skipped: {exc}")
        return

    def _wrap_sync(fn):
        # The `_dashboard_litellm_patch` marker makes wrapping idempotent:
        # an already-wrapped (or non-callable) target is returned unchanged.
        if not callable(fn) or getattr(fn, "_dashboard_litellm_patch", False):
            return fn

        def wrapper(*args, **kwargs):
            # NOTE(review): only the keyword form `messages=` is sanitized;
            # a positionally-passed messages list bypasses the patch — confirm
            # callers always use the keyword.
            messages = kwargs.get("messages")
            normalized_messages, changed = _sanitize_openai_messages(messages)
            if changed:
                kwargs["messages"] = normalized_messages
                _log(f"sanitized {changed} tool/function argument payload(s) before sync completion")
            return fn(*args, **kwargs)

        setattr(wrapper, "_dashboard_litellm_patch", True)
        return wrapper

    def _wrap_async(fn):
        # Async twin of _wrap_sync with the same idempotence guard.
        if not callable(fn) or getattr(fn, "_dashboard_litellm_patch", False):
            return fn

        async def wrapper(*args, **kwargs):
            messages = kwargs.get("messages")
            normalized_messages, changed = _sanitize_openai_messages(messages)
            if changed:
                kwargs["messages"] = normalized_messages
                _log(f"sanitized {changed} tool/function argument payload(s) before async completion")
            return await fn(*args, **kwargs)

        setattr(wrapper, "_dashboard_litellm_patch", True)
        return wrapper

    # Patch the top-level package bindings (hasattr-guarded so missing
    # entrypoints in older/newer litellm versions are tolerated).
    for attr in ("completion", "completion_with_retries"):
        if hasattr(litellm, attr):
            setattr(litellm, attr, _wrap_sync(getattr(litellm, attr)))

    for attr in ("acompletion",):
        if hasattr(litellm, attr):
            setattr(litellm, attr, _wrap_async(getattr(litellm, attr)))

    # Also patch litellm.main: the package re-exports these functions, so
    # litellm.main holds separate module-level bindings that callers may use
    # directly. The marker flag keeps double-wrapping from happening.
    try:
        import litellm.main as litellm_main  # type: ignore
    except Exception:
        litellm_main = None

    if litellm_main is not None:
        for attr in ("completion", "completion_with_retries"):
            if hasattr(litellm_main, attr):
                setattr(litellm_main, attr, _wrap_sync(getattr(litellm_main, attr)))
        for attr in ("acompletion",):
            if hasattr(litellm_main, attr):
                setattr(litellm_main, attr, _wrap_async(getattr(litellm_main, attr)))

    _log("LiteLLM monkey patch installed")
# Module level: sitecustomize is imported automatically at interpreter startup
# (PYTHONPATH includes /opt/dashboard-patches), so the patch installs before
# any application code can import litellm.
_patch_litellm()
PY
WORKDIR /app

# Copies the whole build context, including the locally modified
# nanobot/channels/dashboard.py.
COPY . /app

# 4. Install nanobot from the copied source, via the Aliyun PyPI mirror.
RUN pip install --no-cache-dir -i https://mirrors.aliyun.com/pypi/simple/ .

WORKDIR /root
# NOTE(review): the image runs as root with WORKDIR /root — consider a
# dedicated non-root user; confirm nanobot does not need root at runtime.
# Official gateway mode; it auto-loads the custom DashboardChannel.
CMD ["nanobot", "gateway"]