Compare commits

..

No commits in common. "codex/offline" and "main" have entirely different histories.

10 changed files with 251 additions and 1681 deletions

View File

@ -1,62 +0,0 @@
# Bot Images 使用说明
这个目录用于构建 `nanobot-base` 相关镜像。
## 1. 直接构建镜像
```bash
./bot-images/build.sh
```
执行后脚本会提示你选择:
- `1` 从远程拉取最新版本再构建
- `2` 从本地已有 `nanobot-base-*` 目录中选择一个构建
默认行为只构建 Docker 镜像,不导出压缩产物。
## 2. 构建后同时导出镜像产物
如果需要把构建好的镜像发给没有源码的客户,可以加:
```bash
./bot-images/build.sh --save-artifact
```
执行完成后,会在你运行命令时的当前目录生成一个压缩包,例如:
```text
nanobot-base-v0.1.5.tar.gz
```
这个压缩包本质上是 `docker save | gzip` 的产物,可以直接拿到客户机器上导入。
## 3. 指定导出目录
如果不想导出到当前目录,可以指定目录:
```bash
./bot-images/build.sh --artifact-dir /path/to/output
```
执行完成后,镜像压缩包会输出到指定目录。
## 4. 客户侧如何导入
客户机器上拿到压缩包后可执行:
```bash
gunzip -c nanobot-base-v0.1.5.tar.gz | docker load
```
导入完成后,可用下面命令确认:
```bash
docker images | grep nanobot-base
```
## 5. 说明
- `--artifact-dir` 会自动包含 `--save-artifact` 的效果。
- 如果只是本机构建测试,不需要加导出参数。
- 如果后续要给多个客户复用,推荐保留导出的 `.tar.gz`,这样不需要客户再拿源码构建。

View File

@ -2,71 +2,12 @@
set -e
# Directory containing this script (stable regardless of the caller's cwd).
BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Directory the user invoked the script from; default artifact export target.
CALLER_DIR="$(pwd)"
# CLI flags, populated by parse_args.
SAVE_ARTIFACT=false
ARTIFACT_DIR=""
usage() {
  # Print command-line help for this build script to stdout.
cat <<EOF
Usage: $(basename "$0") [--save-artifact] [--artifact-dir PATH]
Options:
--save-artifact Build image and export a compressed docker image archive.
Default export location is the current shell directory.
--artifact-dir Custom directory for exported archive. Implies --save-artifact.
-h, --help Show this help message.
EOF
}
parse_args() {
  # Parse CLI flags into the SAVE_ARTIFACT / ARTIFACT_DIR globals.
  # Unknown flags print usage and abort; --help prints usage and exits 0.
  local flag
  while [ $# -gt 0 ]; do
    flag="$1"
    shift
    case "$flag" in
      --save-artifact)
        SAVE_ARTIFACT=true
        ;;
      --artifact-dir)
        # --artifact-dir implies --save-artifact and consumes one value.
        if [ $# -lt 1 ]; then
          echo "缺少 --artifact-dir 的路径参数。"
          exit 1
        fi
        SAVE_ARTIFACT=true
        ARTIFACT_DIR="$1"
        shift
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        echo "未知参数: $flag"
        usage
        exit 1
        ;;
    esac
  done
}
export_image_artifact() {
  # Export a built image as a gzip-compressed `docker save` archive.
  # $1 - image reference (name:tag), $2 - version used in the archive name.
  # Writes to ARTIFACT_DIR when set, otherwise the caller's directory.
  local image_name=$1
  local version=$2
  local output_dir="${ARTIFACT_DIR:-$CALLER_DIR}"
  local archive_name="nanobot-base-${version}.tar.gz"
  local archive_path="${output_dir}/${archive_name}"
  mkdir -p "${output_dir}"
  echo ">> [额外步骤] 导出镜像产物到: ${archive_path}"
  docker save "${image_name}" | gzip > "${archive_path}"
  # BUGFIX: the script runs with `set -e` but not `set -o pipefail`, so a
  # failed `docker save` used to be masked by gzip's exit 0, leaving a
  # truncated archive and a misleading success message. Check both stages.
  local rc=("${PIPESTATUS[@]}")
  if [ "${rc[0]}" -ne 0 ] || [ "${rc[1]}" -ne 0 ]; then
    rm -f -- "${archive_path}"
    echo "导出镜像产物失败: ${image_name}" >&2
    exit 1
  fi
  echo "✅ 已导出镜像产物: ${archive_path}"
}
# 执行拷贝和打包的核心函数
build_image() {
local dir_name=$1
local version=$2
local image_name="nanobot-base:${version}"
local previous_dir="${PWD}"
echo "=================================================="
echo "准备构建镜像: ${image_name}"
@ -98,16 +39,8 @@ build_image() {
echo "=================================================="
echo "✅ 构建完成: ${image_name}"
echo "=================================================="
cd "${previous_dir}"
if [ "${SAVE_ARTIFACT}" = "true" ]; then
export_image_artifact "${image_name}" "${version}"
fi
}
# Parse CLI flags, then interactively ask which source to build from.
parse_args "$@"
echo "请选择操作模式:"
echo "1) 从 Git 拉取最新代码并打包 (会覆盖已有同名目录)"
echo "2) 扫描本地已有的目录并打包"

View File

@ -1,163 +1,156 @@
# Dashboard Nanobot Docker compose file (Full mode)
# Notes:
# 1. This file deploys the whole stack: frontend + backend + PostgreSQL + Redis.
# 2. PostgreSQL and Redis are started by this compose file; use it when the customer has no external middleware.
# 3. Customers usually only need to adjust ports, database passwords and workspace paths in `.env`.
# 4. To change host mount paths, edit the `volumes` entries below.
# NOTE(review): every stanza below appears twice (plain copy + commented copy)
# because this text is a flattened diff rendering; a real compose file must
# keep exactly one `services:` and one `networks:` mapping.
services:
  postgres:
    image: ${POSTGRES_IMAGE:-postgres:16-alpine}
    container_name: dashboard-nanobot-postgres
    restart: unless-stopped
    environment:
      TZ: ${TZ:-Asia/Shanghai}
      POSTGRES_USER: ${POSTGRES_SUPERUSER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_SUPERPASSWORD:?POSTGRES_SUPERPASSWORD is required}
      POSTGRES_DB: ${POSTGRES_BOOTSTRAP_DB:-postgres}
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
    expose:
      - "5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U \"$${POSTGRES_USER}\" -d \"$${POSTGRES_DB}\""]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 20s
    logging:
      driver: json-file
      options:
        max-size: "20m"
        max-file: "3"
services: # services started by this compose file
  postgres: # PostgreSQL database service
    image: ${POSTGRES_IMAGE:-postgres:16-alpine} # PostgreSQL image reference
    container_name: dashboard-nanobot-postgres # fixed container name
    restart: unless-stopped # restart automatically after abnormal exit
    environment: # PostgreSQL startup settings
      TZ: ${TZ:-Asia/Shanghai} # container timezone
      POSTGRES_USER: ${POSTGRES_SUPERUSER:-postgres} # PostgreSQL superuser
      POSTGRES_PASSWORD: ${POSTGRES_SUPERPASSWORD:?POSTGRES_SUPERPASSWORD is required} # superuser password (required)
      POSTGRES_DB: ${POSTGRES_BOOTSTRAP_DB:-postgres} # database created on first start
    volumes: # database storage mounts
      - ./data/postgres:/var/lib/postgresql/data # host data dir mapped into the container
    expose: # reachable on the internal network only
      - "5432" # default PostgreSQL port
    healthcheck: # PostgreSQL health check
      test: ["CMD-SHELL", "pg_isready -U \"$${POSTGRES_USER}\" -d \"$${POSTGRES_DB}\""] # probe database readiness
      interval: 10s # check every 10 seconds
      timeout: 5s # each probe times out after 5 seconds
      retries: 10 # unhealthy after 10 consecutive failures
      start_period: 20s # 20-second grace period after start
    logging: # container log policy
      driver: json-file # default Docker json-file log driver
      options: # log rotation settings
        max-size: "20m" # at most 20 MB per log file
        max-file: "3" # keep at most 3 log files
  redis:
    image: ${REDIS_IMAGE:-redis:7-alpine}
    container_name: dashboard-nanobot-redis
    restart: unless-stopped
    environment:
      TZ: ${TZ:-Asia/Shanghai}
    command: ["redis-server", "--appendonly", "yes", "--save", "60", "1000"]
    volumes:
      - ./data/redis:/data
    expose:
      - "6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 10s
    logging:
      driver: json-file
      options:
        max-size: "20m"
        max-file: "3"
  redis: # Redis cache service
    image: ${REDIS_IMAGE:-redis:7-alpine} # Redis image reference
    container_name: dashboard-nanobot-redis # fixed container name
    restart: unless-stopped # restart automatically after abnormal exit
    environment: # Redis runtime environment
      TZ: ${TZ:-Asia/Shanghai} # container timezone
    command: ["redis-server", "--appendonly", "yes", "--save", "60", "1000"] # enable AOF persistence plus snapshotting
    volumes: # Redis data mounts
      - ./data/redis:/data # host Redis data dir mapped into the container
    expose: # reachable on the internal network only
      - "6379" # default Redis port
    healthcheck: # Redis health check
      test: ["CMD", "redis-cli", "ping"] # probe whether Redis responds
      interval: 10s # check every 10 seconds
      timeout: 5s # each probe times out after 5 seconds
      retries: 10 # unhealthy after 10 consecutive failures
      start_period: 10s # 10-second grace period after start
    logging: # container log policy
      driver: json-file # default Docker json-file log driver
      options: # log rotation settings
        max-size: "20m" # at most 20 MB per log file
        max-file: "3" # keep at most 3 log files
  backend:
    build:
      context: .
      dockerfile: backend/Dockerfile
      args:
        PYTHON_BASE_IMAGE: ${PYTHON_BASE_IMAGE:-python:3.12-slim}
        PIP_INDEX_URL: ${PIP_INDEX_URL:-https://pypi.org/simple}
        PIP_TRUSTED_HOST: ${PIP_TRUSTED_HOST:-}
    image: dashboard-nanobot/backend:${BACKEND_IMAGE_TAG:-latest}
    container_name: dashboard-nanobot-backend
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      TZ: ${TZ:-Asia/Shanghai}
      APP_HOST: 0.0.0.0
      APP_PORT: 8000
      APP_RELOAD: "false"
      DATABASE_ECHO: "false"
      DATABASE_POOL_SIZE: ${DATABASE_POOL_SIZE:-20}
      DATABASE_MAX_OVERFLOW: ${DATABASE_MAX_OVERFLOW:-40}
      DATABASE_POOL_TIMEOUT: ${DATABASE_POOL_TIMEOUT:-30}
      DATABASE_POOL_RECYCLE: ${DATABASE_POOL_RECYCLE:-1800}
      DATA_ROOT: /app/data
      BOTS_WORKSPACE_ROOT: ${HOST_BOTS_WORKSPACE_ROOT}
      DOCKER_NETWORK_NAME: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network}
      DATABASE_URL: postgresql+psycopg://${POSTGRES_APP_USER}:${POSTGRES_APP_PASSWORD}@postgres:5432/${POSTGRES_APP_DB}
      REDIS_ENABLED: ${REDIS_ENABLED:-true}
      REDIS_URL: redis://redis:6379/${REDIS_DB:-8}
      REDIS_PREFIX: ${REDIS_PREFIX:-dashboard_nanobot}
      REDIS_DEFAULT_TTL: ${REDIS_DEFAULT_TTL:-60}
      DEFAULT_BOT_SYSTEM_TIMEZONE: ${DEFAULT_BOT_SYSTEM_TIMEZONE:-Asia/Shanghai}
      PANEL_ACCESS_PASSWORD: ${PANEL_ACCESS_PASSWORD:-}
      WORKSPACE_PREVIEW_SIGNING_SECRET: ${WORKSPACE_PREVIEW_SIGNING_SECRET:-}
      WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS: ${WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS:-3600}
      CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS:-}
      STT_ENABLED: ${STT_ENABLED:-true}
      STT_MODEL: ${STT_MODEL:-ggml-small-q8_0.bin}
      STT_MODEL_DIR: ${STT_MODEL_DIR:-/app/data/model}
      STT_DEVICE: ${STT_DEVICE:-cpu}
      STT_MAX_AUDIO_SECONDS: ${STT_MAX_AUDIO_SECONDS:-20}
      STT_DEFAULT_LANGUAGE: ${STT_DEFAULT_LANGUAGE:-zh}
      STT_FORCE_SIMPLIFIED: ${STT_FORCE_SIMPLIFIED:-true}
      STT_AUDIO_PREPROCESS: ${STT_AUDIO_PREPROCESS:-true}
      STT_AUDIO_FILTER: ${STT_AUDIO_FILTER:-highpass=f=120,lowpass=f=7600,afftdn=nf=-20}
      STT_INITIAL_PROMPT: ${STT_INITIAL_PROMPT:-以下内容可能包含简体中文和英文术语。请优先输出简体中文,英文单词、缩写、品牌名和数字保持原文,不要翻译。}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./data:/app/data
      - ${HOST_BOTS_WORKSPACE_ROOT}:${HOST_BOTS_WORKSPACE_ROOT}
    expose:
      - "8000"
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/api/health', timeout=3).read()"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 20s
    logging:
      driver: json-file
      options:
        max-size: "20m"
        max-file: "3"
  backend: # backend API service
    build: # build info kept for source builds; offline installs usually use the image directly
      context: . # build context is the project root
      dockerfile: backend/Dockerfile # backend Dockerfile path
      args: # build arguments
        PYTHON_BASE_IMAGE: ${PYTHON_BASE_IMAGE:-python:3.12-slim} # Python base image
        PIP_INDEX_URL: ${PIP_INDEX_URL:-https://pypi.org/simple} # pip index URL
        PIP_TRUSTED_HOST: ${PIP_TRUSTED_HOST:-} # pip trusted host
    image: dashboard-nanobot/backend:${BACKEND_IMAGE_TAG:-latest} # backend runtime image name
    container_name: dashboard-nanobot-backend # fixed container name
    restart: unless-stopped # restart automatically after abnormal exit
    depends_on: # wait for middleware to become healthy first
      postgres: # depends on PostgreSQL
        condition: service_healthy # PostgreSQL must be healthy
      redis: # depends on Redis
        condition: service_healthy # Redis must be healthy
    environment: # backend environment variables
      TZ: ${TZ:-Asia/Shanghai} # container timezone
      APP_HOST: 0.0.0.0 # listen address; keep 0.0.0.0 inside the container
      APP_PORT: 8000 # listen port; must match expose and the healthcheck
      APP_RELOAD: "false" # hot reload disabled in production
      DATABASE_ECHO: "false" # SQL echo disabled in production
      DATABASE_POOL_SIZE: ${DATABASE_POOL_SIZE:-20} # base connection pool size
      DATABASE_MAX_OVERFLOW: ${DATABASE_MAX_OVERFLOW:-40} # extra overflow connections
      DATABASE_POOL_TIMEOUT: ${DATABASE_POOL_TIMEOUT:-30} # connection acquire timeout (seconds)
      DATABASE_POOL_RECYCLE: ${DATABASE_POOL_RECYCLE:-1800} # connection recycle period (seconds)
      DATA_ROOT: /app/data # in-container data directory
      BOTS_WORKSPACE_ROOT: ${HOST_BOTS_WORKSPACE_ROOT} # bot workspace; same path inside and outside the container
      DOCKER_NETWORK_NAME: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network} # Docker network used by bot containers
      DATABASE_URL: postgresql+psycopg://${POSTGRES_APP_USER}:${POSTGRES_APP_PASSWORD}@postgres:5432/${POSTGRES_APP_DB} # Full mode always uses the bundled PostgreSQL
      REDIS_ENABLED: ${REDIS_ENABLED:-true} # Redis enabled by default in Full mode
      REDIS_URL: redis://redis:6379/${REDIS_DB:-8} # Full mode always uses the bundled Redis
      REDIS_PREFIX: ${REDIS_PREFIX:-dashboard_nanobot} # Redis key prefix
      REDIS_DEFAULT_TTL: ${REDIS_DEFAULT_TTL:-60} # default Redis TTL (seconds)
      DEFAULT_BOT_SYSTEM_TIMEZONE: ${DEFAULT_BOT_SYSTEM_TIMEZONE:-Asia/Shanghai} # default bot timezone
      PANEL_ACCESS_PASSWORD: ${PANEL_ACCESS_PASSWORD:-} # panel access password
      WORKSPACE_PREVIEW_SIGNING_SECRET: ${WORKSPACE_PREVIEW_SIGNING_SECRET:-} # preview signing secret
      WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS: ${WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS:-3600} # preview token TTL (seconds)
      CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS:-} # CORS allow-list for the frontend
      STT_ENABLED: ${STT_ENABLED:-true} # speech-to-text toggle
      STT_MODEL: ${STT_MODEL:-ggml-small-q8_0.bin} # STT model filename
      STT_MODEL_DIR: ${STT_MODEL_DIR:-/app/data/model} # STT model directory
      STT_DEVICE: ${STT_DEVICE:-cpu} # STT device
      STT_MAX_AUDIO_SECONDS: ${STT_MAX_AUDIO_SECONDS:-20} # max audio length per request (seconds)
      STT_DEFAULT_LANGUAGE: ${STT_DEFAULT_LANGUAGE:-zh} # default STT language
      STT_FORCE_SIMPLIFIED: ${STT_FORCE_SIMPLIFIED:-true} # force Simplified Chinese output
      STT_AUDIO_PREPROCESS: ${STT_AUDIO_PREPROCESS:-true} # preprocess audio before STT
      STT_AUDIO_FILTER: ${STT_AUDIO_FILTER:-highpass=f=120,lowpass=f=7600,afftdn=nf=-20} # audio filter chain
      STT_INITIAL_PROMPT: ${STT_INITIAL_PROMPT:-以下内容可能包含简体中文和英文术语。请优先输出简体中文,英文单词、缩写、品牌名和数字保持原文,不要翻译。} # STT initial prompt
    volumes: # host/container mounts
      - /var/run/docker.sock:/var/run/docker.sock # required: the backend manages bot containers
      - ./data:/app/data # project data directory; keep it inside the bundle dir
      - ${HOST_BOTS_WORKSPACE_ROOT}:${HOST_BOTS_WORKSPACE_ROOT} # bot workspace mount; path decided on site
    expose: # internal network only; not published on the host
      - "8000" # backend port
    healthcheck: # used by nginx's depends_on condition
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/api/health', timeout=3).read()"] # probe the backend health endpoint
      interval: 15s # check every 15 seconds
      timeout: 5s # each probe times out after 5 seconds
      retries: 5 # unhealthy after 5 consecutive failures
      start_period: 20s # 20-second grace period after start
    logging: # container log policy
      driver: json-file # default Docker json-file log driver
      options: # log rotation settings
        max-size: "20m" # at most 20 MB per log file
        max-file: "3" # keep at most 3 log files
  nginx:
    build:
      context: ./frontend
      dockerfile: Dockerfile
      args:
        NODE_BASE_IMAGE: ${NODE_BASE_IMAGE:-node:22-alpine}
        NGINX_BASE_IMAGE: ${NGINX_BASE_IMAGE:-nginx:alpine}
        NPM_REGISTRY: ${NPM_REGISTRY:-https://registry.npmjs.org/}
        VITE_API_BASE: /api
        VITE_WS_BASE: /ws/monitor
    image: dashboard-nanobot/nginx:${FRONTEND_IMAGE_TAG:-latest}
    container_name: dashboard-nanobot-nginx
    restart: unless-stopped
    environment:
      TZ: ${TZ:-Asia/Shanghai}
      UPLOAD_MAX_MB: ${UPLOAD_MAX_MB:-100}
    depends_on:
      backend:
        condition: service_healthy
    ports:
      - "${NGINX_PORT}:80"
    healthcheck:
      test: ["CMD", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1/"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 10s
    logging:
      driver: json-file
      options:
        max-size: "20m"
        max-file: "3"
  nginx: # frontend Nginx service
    build: # build info kept; offline installs usually use the imported image
      context: ./frontend # frontend build context
      dockerfile: Dockerfile # frontend Dockerfile path
      args: # frontend build arguments
        NODE_BASE_IMAGE: ${NODE_BASE_IMAGE:-node:22-alpine} # Node base image
        NGINX_BASE_IMAGE: ${NGINX_BASE_IMAGE:-nginx:alpine} # Nginx base image
        NPM_REGISTRY: ${NPM_REGISTRY:-https://registry.npmjs.org/} # npm registry
        VITE_API_BASE: /api # frontend API prefix
        VITE_WS_BASE: /ws/monitor # frontend WebSocket prefix
    image: dashboard-nanobot/nginx:${FRONTEND_IMAGE_TAG:-latest} # frontend runtime image name
    container_name: dashboard-nanobot-nginx # fixed container name
    restart: unless-stopped # restart automatically after abnormal exit
    environment: # frontend environment variables
      TZ: ${TZ:-Asia/Shanghai} # container timezone
      UPLOAD_MAX_MB: ${UPLOAD_MAX_MB:-100} # upload size limit passed to the Nginx config
    depends_on: # wait for the backend to be healthy first
      backend: # depends on the backend container
        condition: service_healthy # backend must be healthy
    ports: # ports published on the host
      - "${NGINX_PORT}:80" # host port mapped to container port 80
    healthcheck: # Nginx health check
      test: ["CMD", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1/"] # probe the landing page
      interval: 15s # check every 15 seconds
      timeout: 5s # each probe times out after 5 seconds
      retries: 5 # unhealthy after 5 consecutive failures
      start_period: 10s # 10-second grace period after start
    logging: # container log policy
      driver: json-file # default Docker json-file log driver
      options: # log rotation settings
        max-size: "20m" # at most 20 MB per log file
        max-file: "3" # keep at most 3 log files
networks: # custom network configuration
  default: # default network
    name: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network} # network name; customers rarely change it
    driver: bridge # bridge network driver
    ipam: # IP address management
      config: # subnet list
        - subnet: ${DOCKER_NETWORK_SUBNET:-172.20.0.0/16} # custom subnet; change on conflicts
networks:
  default:
    name: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network}
    driver: bridge
    ipam:
      config:
        - subnet: ${DOCKER_NETWORK_SUBNET:-172.20.0.0/16}

View File

@ -1,108 +1,101 @@
# Dashboard Nanobot Docker compose file (Prod mode)
# Notes:
# 1. This file deploys "frontend + backend" only.
# 2. PostgreSQL and Redis are provided externally by the customer; this file does not start them.
# 3. Customers usually adjust ports, the database URL, the Redis URL and workspace paths in `.env`.
# 4. To change host mount paths, edit the `volumes` entries below.
# NOTE(review): every stanza below appears twice (plain copy + commented copy)
# because this text is a flattened diff rendering; a real compose file must
# keep exactly one `services:` and one `networks:` mapping.
services:
  backend:
    build:
      context: .
      dockerfile: backend/Dockerfile
      args:
        PYTHON_BASE_IMAGE: ${PYTHON_BASE_IMAGE:-python:3.12-slim}
        PIP_INDEX_URL: ${PIP_INDEX_URL:-https://pypi.org/simple}
        PIP_TRUSTED_HOST: ${PIP_TRUSTED_HOST:-}
    image: dashboard-nanobot/backend:${BACKEND_IMAGE_TAG:-latest}
    container_name: dashboard-nanobot-backend
    restart: unless-stopped
    environment:
      APP_HOST: 0.0.0.0
      APP_PORT: 8002
      APP_RELOAD: "false"
      DATABASE_ECHO: "false"
      DATABASE_POOL_SIZE: ${DATABASE_POOL_SIZE:-20}
      DATABASE_MAX_OVERFLOW: ${DATABASE_MAX_OVERFLOW:-40}
      DATABASE_POOL_TIMEOUT: ${DATABASE_POOL_TIMEOUT:-30}
      DATABASE_POOL_RECYCLE: ${DATABASE_POOL_RECYCLE:-1800}
      DATA_ROOT: /app/data
      BOTS_WORKSPACE_ROOT: ${HOST_BOTS_WORKSPACE_ROOT}
      DOCKER_NETWORK_NAME: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network}
      DATABASE_URL: ${DATABASE_URL:-}
      REDIS_ENABLED: ${REDIS_ENABLED:-false}
      REDIS_URL: ${REDIS_URL:-}
      REDIS_PREFIX: ${REDIS_PREFIX:-dashboard_nanobot}
      REDIS_DEFAULT_TTL: ${REDIS_DEFAULT_TTL:-60}
      DEFAULT_BOT_SYSTEM_TIMEZONE: ${DEFAULT_BOT_SYSTEM_TIMEZONE:-Asia/Shanghai}
      PANEL_ACCESS_PASSWORD: ${PANEL_ACCESS_PASSWORD:-}
      WORKSPACE_PREVIEW_SIGNING_SECRET: ${WORKSPACE_PREVIEW_SIGNING_SECRET:-}
      WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS: ${WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS:-3600}
      CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS:-}
      STT_ENABLED: ${STT_ENABLED:-true}
      STT_MODEL: ${STT_MODEL:-ggml-small-q8_0.bin}
      STT_MODEL_DIR: ${STT_MODEL_DIR:-/app/data/model}
      STT_DEVICE: ${STT_DEVICE:-cpu}
      STT_MAX_AUDIO_SECONDS: ${STT_MAX_AUDIO_SECONDS:-20}
      STT_DEFAULT_LANGUAGE: ${STT_DEFAULT_LANGUAGE:-zh}
      STT_FORCE_SIMPLIFIED: ${STT_FORCE_SIMPLIFIED:-true}
      STT_AUDIO_PREPROCESS: ${STT_AUDIO_PREPROCESS:-true}
      STT_AUDIO_FILTER: ${STT_AUDIO_FILTER:-highpass=f=120,lowpass=f=7600,afftdn=nf=-20}
      STT_INITIAL_PROMPT: ${STT_INITIAL_PROMPT:-以下内容可能包含简体中文和英文术语。请优先输出简体中文,英文单词、缩写、品牌名和数字保持原文,不要翻译。}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./data:/app/data
      - ${HOST_BOTS_WORKSPACE_ROOT}:${HOST_BOTS_WORKSPACE_ROOT}
    expose:
      - "8002"
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8002/api/health', timeout=3).read()"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 20s
    logging:
      driver: json-file
      options:
        max-size: "20m"
        max-file: "3"
services: # services started by this compose file
  backend: # backend API service
    build: # build info kept for source builds; offline installs usually use the image directly
      context: . # build context is the project root
      dockerfile: backend/Dockerfile # backend Dockerfile path
      args: # build arguments
        PYTHON_BASE_IMAGE: ${PYTHON_BASE_IMAGE:-python:3.12-slim} # Python base image
        PIP_INDEX_URL: ${PIP_INDEX_URL:-https://pypi.org/simple} # pip index URL
        PIP_TRUSTED_HOST: ${PIP_TRUSTED_HOST:-} # pip trusted host
    image: dashboard-nanobot/backend:${BACKEND_IMAGE_TAG:-latest} # backend runtime image name
    container_name: dashboard-nanobot-backend # fixed container name
    restart: unless-stopped # restart after abnormal exit; stays stopped after a manual stop
    environment: # backend environment variables
      APP_HOST: 0.0.0.0 # listen address; keep 0.0.0.0 inside the container
      APP_PORT: 8002 # listen port; must match expose and the healthcheck
      APP_RELOAD: "false" # hot reload disabled in production
      DATABASE_ECHO: "false" # SQL echo disabled in production
      DATABASE_POOL_SIZE: ${DATABASE_POOL_SIZE:-20} # base connection pool size
      DATABASE_MAX_OVERFLOW: ${DATABASE_MAX_OVERFLOW:-40} # extra overflow connections
      DATABASE_POOL_TIMEOUT: ${DATABASE_POOL_TIMEOUT:-30} # connection acquire timeout (seconds)
      DATABASE_POOL_RECYCLE: ${DATABASE_POOL_RECYCLE:-1800} # connection recycle period (seconds)
      DATA_ROOT: /app/data # in-container data directory
      BOTS_WORKSPACE_ROOT: ${HOST_BOTS_WORKSPACE_ROOT} # bot workspace; same path inside and outside the container
      DOCKER_NETWORK_NAME: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network} # Docker network used by bot containers
      DATABASE_URL: ${DATABASE_URL:-} # external PostgreSQL URL; required in Prod mode
      REDIS_ENABLED: ${REDIS_ENABLED:-false} # Redis toggle
      REDIS_URL: ${REDIS_URL:-} # external Redis URL
      REDIS_PREFIX: ${REDIS_PREFIX:-dashboard_nanobot} # Redis key prefix
      REDIS_DEFAULT_TTL: ${REDIS_DEFAULT_TTL:-60} # default Redis TTL (seconds)
      DEFAULT_BOT_SYSTEM_TIMEZONE: ${DEFAULT_BOT_SYSTEM_TIMEZONE:-Asia/Shanghai} # default bot timezone
      PANEL_ACCESS_PASSWORD: ${PANEL_ACCESS_PASSWORD:-} # panel access password
      WORKSPACE_PREVIEW_SIGNING_SECRET: ${WORKSPACE_PREVIEW_SIGNING_SECRET:-} # preview signing secret
      WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS: ${WORKSPACE_PREVIEW_TOKEN_TTL_SECONDS:-3600} # preview token TTL (seconds)
      CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS:-} # CORS allow-list for the frontend
      STT_ENABLED: ${STT_ENABLED:-true} # speech-to-text toggle
      STT_MODEL: ${STT_MODEL:-ggml-small-q8_0.bin} # STT model filename
      STT_MODEL_DIR: ${STT_MODEL_DIR:-/app/data/model} # STT model directory
      STT_DEVICE: ${STT_DEVICE:-cpu} # STT device
      STT_MAX_AUDIO_SECONDS: ${STT_MAX_AUDIO_SECONDS:-20} # max audio length per request (seconds)
      STT_DEFAULT_LANGUAGE: ${STT_DEFAULT_LANGUAGE:-zh} # default STT language
      STT_FORCE_SIMPLIFIED: ${STT_FORCE_SIMPLIFIED:-true} # force Simplified Chinese output
      STT_AUDIO_PREPROCESS: ${STT_AUDIO_PREPROCESS:-true} # preprocess audio before STT
      STT_AUDIO_FILTER: ${STT_AUDIO_FILTER:-highpass=f=120,lowpass=f=7600,afftdn=nf=-20} # audio filter chain
      STT_INITIAL_PROMPT: ${STT_INITIAL_PROMPT:-以下内容可能包含简体中文和英文术语。请优先输出简体中文,英文单词、缩写、品牌名和数字保持原文,不要翻译。} # STT initial prompt
    volumes: # host/container mounts
      - /var/run/docker.sock:/var/run/docker.sock # required: the backend manages bot containers
      - ./data:/app/data # project data directory; keep it inside the bundle dir
      - ${HOST_BOTS_WORKSPACE_ROOT}:${HOST_BOTS_WORKSPACE_ROOT} # bot workspace mount; path decided on site
    expose: # internal network only; not published on the host
      - "8002" # backend port
    healthcheck: # used by nginx's depends_on condition
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8002/api/health', timeout=3).read()"] # probe the backend health endpoint
      interval: 15s # check every 15 seconds
      timeout: 5s # each probe times out after 5 seconds
      retries: 5 # unhealthy after 5 consecutive failures
      start_period: 20s # 20-second grace period after start
    logging: # container log policy
      driver: json-file # default Docker json-file log driver
      options: # log rotation settings
        max-size: "20m" # at most 20 MB per log file
        max-file: "3" # keep at most 3 log files
  nginx:
    build:
      context: ./frontend
      dockerfile: Dockerfile
      args:
        NODE_BASE_IMAGE: ${NODE_BASE_IMAGE:-node:22-alpine}
        NGINX_BASE_IMAGE: ${NGINX_BASE_IMAGE:-nginx:alpine}
        NPM_REGISTRY: ${NPM_REGISTRY:-https://registry.npmjs.org/}
        VITE_API_BASE: /api
        VITE_WS_BASE: /ws/monitor
    image: dashboard-nanobot/nginx:${FRONTEND_IMAGE_TAG:-latest}
    container_name: dashboard-nanobot-nginx
    restart: unless-stopped
    environment:
      UPLOAD_MAX_MB: ${UPLOAD_MAX_MB:-100}
    depends_on:
      backend:
        condition: service_healthy
    ports:
      - "${NGINX_PORT}:80"
    healthcheck:
      test: ["CMD", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1/"]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 10s
    logging:
      driver: json-file
      options:
        max-size: "20m"
        max-file: "3"
  nginx: # frontend Nginx service
    build: # build info kept; offline installs usually use the imported image
      context: ./frontend # frontend build context
      dockerfile: Dockerfile # frontend Dockerfile path
      args: # frontend build arguments
        NODE_BASE_IMAGE: ${NODE_BASE_IMAGE:-node:22-alpine} # Node base image
        NGINX_BASE_IMAGE: ${NGINX_BASE_IMAGE:-nginx:alpine} # Nginx base image
        NPM_REGISTRY: ${NPM_REGISTRY:-https://registry.npmjs.org/} # npm registry
        VITE_API_BASE: /api # frontend API prefix
        VITE_WS_BASE: /ws/monitor # frontend WebSocket prefix
    image: dashboard-nanobot/nginx:${FRONTEND_IMAGE_TAG:-latest} # frontend runtime image name
    container_name: dashboard-nanobot-nginx # fixed container name
    restart: unless-stopped # restart automatically after abnormal exit
    environment: # frontend environment variables
      UPLOAD_MAX_MB: ${UPLOAD_MAX_MB:-100} # upload size limit passed to the Nginx config
    depends_on: # wait for the backend to be healthy first
      backend: # depends on the backend container
        condition: service_healthy # backend must be healthy
    ports: # ports published on the host
      - "${NGINX_PORT}:80" # host port mapped to container port 80
    healthcheck: # Nginx health check
      test: ["CMD", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1/"] # probe the landing page
      interval: 15s # check every 15 seconds
      timeout: 5s # each probe times out after 5 seconds
      retries: 5 # unhealthy after 5 consecutive failures
      start_period: 10s # 10-second grace period after start
    logging: # container log policy
      driver: json-file # default Docker json-file log driver
      options: # log rotation settings
        max-size: "20m" # at most 20 MB per log file
        max-file: "3" # keep at most 3 log files
networks: # custom network configuration
  default: # default network
    name: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network} # network name; customers rarely change it
    driver: bridge # bridge network driver
    ipam: # IP address management
      config: # subnet list
        - subnet: ${DOCKER_NETWORK_SUBNET:-172.20.0.0/16} # custom subnet; change on conflicts
networks:
  default:
    name: ${DOCKER_NETWORK_NAME:-dashboard-nanobot-network}
    driver: bridge
    ipam:
      config:
        - subnet: ${DOCKER_NETWORK_SUBNET:-172.20.0.0/16}

View File

@ -1,184 +0,0 @@
# Offline 打包与部署说明
这套离线方案只放在 `offline/` 目录,不改项目原有部署脚本。
## 仓库内文件
- `offline/export-offline-bundle.sh`
- `offline/deploy-prod-offline.sh`
- `offline/deploy-full-offline.sh`
- `offline/init-full-db-offline.sh`
- `bot-images/build.sh`
- `bot-images/README.md`
## 1. 如何打包
完整模式:
```bash
./offline/export-offline-bundle.sh --mode full
```
生产模式:
```bash
./offline/export-offline-bundle.sh --mode prod
```
可选参数:
```bash
./offline/export-offline-bundle.sh --mode full --env-file .env.full --output-dir offline-dist
```
说明:
- `full` 会导出 `backend`、`nginx`、`postgres`、`redis` 镜像。
- `prod` 只导出 `backend`、`nginx` 镜像,数据库和 Redis 由客户自己提供。
- 导出产物默认放在 `offline-dist/`
## 2. 打包后会生成什么
每次导出会得到一个目录和一个压缩包,例如:
```text
offline-dist/dashboard-nanobot-full-offline-YYYYMMDD_HHMMSS/
offline-dist/dashboard-nanobot-full-offline-YYYYMMDD_HHMMSS.tar.gz
```
产物根目录主要包含:
- `docker-compose.yml`
- `.env`
- `import-images.sh`
- `init-db.sh`
- `start.sh`
- `stop.sh`
- `README.txt`
其中导出的 `docker-compose.yml` 会带中文注释,方便客户直接按注释修改端口、挂载路径、数据库与 Redis 配置。
此外还会带上:
- `offline/` 内部离线脚本
- `sql/` 数据库初始化 SQL
- `data/templates/`
- `data/skills/`
- `data/model/`
也就是说,发给客户的是一套部署产物,不需要把源码仓库一起发过去。
## 3. 客户如何部署
客户拿到压缩包后按下面做:
```bash
tar -xzf dashboard-nanobot-full-offline-YYYYMMDD_HHMMSS.tar.gz
cd dashboard-nanobot-full-offline-YYYYMMDD_HHMMSS
./import-images.sh
```
如果你还另外给了客户 `nanobot-base-v0.1.5.tar.gz` 这类 Bot 基础镜像包,也需要先导入:
```bash
gunzip -c nanobot-base-v0.1.5.tar.gz | docker load
```
然后修改:
- `.env`
- 如果要改挂载路径,再改 `docker-compose.yml`
再初始化数据库:
```bash
./init-db.sh
```
最后启动:
```bash
./start.sh
```
停止:
```bash
./stop.sh
```
## 4. 客户主要改哪些配置
`.env` 里通常需要改:
- `PUBLIC_HOST`
- `NGINX_PORT`
- `HOST_BOTS_WORKSPACE_ROOT`
- `DOCKER_NETWORK_SUBNET`
- `PANEL_ACCESS_PASSWORD`
`prod` 模式额外常改:
- `DATABASE_URL`
- `REDIS_ENABLED`
- `REDIS_URL`
`full` 模式额外常改:
- `POSTGRES_SUPERPASSWORD`
- `POSTGRES_APP_PASSWORD`
## 5. 挂载路径怎么改
默认挂载关系在导出产物里的 `docker-compose.yml`
- `./data:/app/data`
- `${HOST_BOTS_WORKSPACE_ROOT}:${HOST_BOTS_WORKSPACE_ROOT}`
- `/var/run/docker.sock:/var/run/docker.sock`
如果客户现场要换宿主机路径,直接改产物里的:
- `.env` 中的 `HOST_BOTS_WORKSPACE_ROOT`
- `docker-compose.yml` 中的 volume 挂载项
建议:
- `HOST_BOTS_WORKSPACE_ROOT` 必须是宿主机绝对路径
- 保留 `/var/run/docker.sock:/var/run/docker.sock`
- `./data` 最好保留在产物目录下,方便整体交付
## 6. 数据库说明
`prod` 模式:
- 客户需要提前准备 PostgreSQL
- 推荐直接执行:
- `./init-db.sh`
- 这个脚本会自动使用 `.env` 里的 `DATABASE_URL` 执行 `sql/create-tables.sql``sql/init-data.sql`
- 如果客户想手工执行,也可以执行:
- `sql/create-tables.sql`
- `sql/init-data.sql`
`full` 模式:
- 产物里包含 PostgreSQL 和 Redis 镜像
- `./init-db.sh` 可以手工初始化数据库
- `start.sh` 启动完整栈时也会自动初始化数据库
## 7. Bot 基础镜像说明
- 离线部署包里的 `./import-images.sh` 只负责导入当前离线包自带的业务镜像。
- 如果客户现场还需要运行依赖 `nanobot-base` 的 Bot 容器,则还需要额外导入单独提供的 `nanobot-base-*.tar.gz`
- 导入命令:
```bash
gunzip -c nanobot-base-v0.1.5.tar.gz | docker load
```
- 如果没有导入这类镜像,Dashboard 主服务可以启动,但相关 Bot 运行时容器可能会因为缺少基础镜像而启动失败。
## 8. 看哪份文档
- 研发/打包同学看本文件:`offline/README.md`
- 客户部署时看导出产物根目录里的:`README.txt`
- 如果要单独构建和导出 `nanobot-base` 镜像,看:`bot-images/README.md`

View File

@ -1,177 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Resolve bundle paths relative to the repository root (one level above this script).
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
ENV_FILE="${1:-$ROOT_DIR/.env}"  # optional first argument overrides the env file
COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
DATA_DIR="$ROOT_DIR/data"
INIT_DB_SCRIPT="$ROOT_DIR/offline/init-full-db-offline.sh"
AGENT_TEMPLATES_FILE="$DATA_DIR/templates/agent_md_templates.json"
TOPIC_PRESETS_FILE="$DATA_DIR/templates/topic_presets.json"
SKILLS_DIR="$DATA_DIR/skills"
# Fall back to the full-mode file names when the defaults are absent.
if [[ ! -f "$ENV_FILE" && -f "$ROOT_DIR/.env.full" ]]; then
  ENV_FILE="$ROOT_DIR/.env.full"
fi
if [[ ! -f "$COMPOSE_FILE" && -f "$ROOT_DIR/docker-compose.full.yml" ]]; then
  COMPOSE_FILE="$ROOT_DIR/docker-compose.full.yml"
fi
require_file() {
  # Abort the script unless $1 is an existing regular file.
  # $2 is an optional extra hint printed after the error message.
  local target="$1"
  local note="${2:-}"
  [[ -f "$target" ]] && return 0
  echo "Missing file: $target"
  if [[ -n "$note" ]]; then
    echo "$note"
  fi
  exit 1
}
require_dir() {
  # Abort the script unless $1 is an existing directory.
  # $2 is an optional extra hint printed after the error message.
  local target="$1"
  local note="${2:-}"
  [[ -d "$target" ]] && return 0
  echo "Missing directory: $target"
  if [[ -n "$note" ]]; then
    echo "$note"
  fi
  exit 1
}
require_env() {
  # Exit with an error when the variable named by $1 is unset or empty.
  local var_name="$1"
  if [[ -z "${!var_name:-}" ]]; then
    echo "Missing required env: $var_name"
    exit 1
  fi
}
read_env_value() {
  # Look up key $1 in the file named by $ENV_FILE and print its value to
  # stdout (CR, `export ` prefix, and one level of quotes are stripped).
  # Returns 1 when the key is not found.
  local wanted="$1"
  local raw="" rhs=""
  while IFS= read -r raw || [[ -n "$raw" ]]; do
    raw="${raw%$'\r'}"                             # tolerate CRLF files
    [[ -z "${raw//[[:space:]]/}" ]] && continue    # skip blank lines
    [[ "${raw#\#}" != "$raw" ]] && continue        # skip comment lines
    [[ "${raw#export }" != "$raw" ]] && raw="${raw#export }"
    [[ "$raw" == "$wanted="* ]] || continue
    rhs="${raw#*=}"
    # Strip one matching pair of double or single quotes around the value.
    if [[ "$rhs" =~ ^\"(.*)\"$ ]]; then
      rhs="${BASH_REMATCH[1]}"
    elif [[ "$rhs" =~ ^\'(.*)\'$ ]]; then
      rhs="${BASH_REMATCH[1]}"
    fi
    printf '%s' "$rhs"
    return 0
  done < "$ENV_FILE"
  return 1
}
load_env_var() {
  # Populate the variable named by $1 from $ENV_FILE, falling back to the
  # optional default in $2 when the key is missing or empty.
  local target="$1"
  local fallback="${2:-}"
  local resolved
  resolved="$(read_env_value "$target" || true)"
  printf -v "$target" '%s' "${resolved:-$fallback}"
}
wait_for_health() {
  # Poll a container until `docker inspect` reports it "healthy" (or plain
  # "running" for containers without a healthcheck), checking every 2 s.
  # $1 - container name, $2 - timeout in seconds. Exits 1 on timeout after
  # dumping the container's recent logs for diagnosis.
  local container_name="$1"
  local timeout_seconds="$2"
  local elapsed=0
  local status=""
  while (( elapsed < timeout_seconds )); do
    # Prefer the health status when a healthcheck exists, else the raw state.
    status="$(
      docker inspect --format '{{if .State.Health}}{{.State.Health.Status}}{{else}}{{.State.Status}}{{end}}' "$container_name" 2>/dev/null || true
    )"
    if [[ "$status" == "healthy" || "$status" == "running" ]]; then
      echo "[deploy-full-offline] $container_name is $status"
      return 0
    fi
    sleep 2
    elapsed=$((elapsed + 2))
  done
  echo "[deploy-full-offline] timed out waiting for $container_name (last status: ${status:-unknown})"
  # Best-effort log dump to help diagnose why the container never came up.
  docker logs --tail 80 "$container_name" 2>/dev/null || true
  exit 1
}
# Validate that the bundle contains everything the full stack needs.
require_file "$ENV_FILE" "Expected bundle config file such as .env"
require_file "$COMPOSE_FILE"
require_file "$INIT_DB_SCRIPT"
require_file "$AGENT_TEMPLATES_FILE"
require_file "$TOPIC_PRESETS_FILE"
require_dir "$SKILLS_DIR"
# Load the required settings from the env file (with defaults where sensible).
load_env_var HOST_BOTS_WORKSPACE_ROOT
load_env_var POSTGRES_SUPERUSER postgres
load_env_var POSTGRES_SUPERPASSWORD
load_env_var POSTGRES_BOOTSTRAP_DB postgres
load_env_var POSTGRES_APP_DB
load_env_var POSTGRES_APP_USER
load_env_var POSTGRES_APP_PASSWORD
load_env_var NGINX_PORT 8080
load_env_var PUBLIC_HOST ""
require_env HOST_BOTS_WORKSPACE_ROOT
require_env POSTGRES_SUPERUSER
require_env POSTGRES_SUPERPASSWORD
require_env POSTGRES_BOOTSTRAP_DB
require_env POSTGRES_APP_DB
require_env POSTGRES_APP_USER
require_env POSTGRES_APP_PASSWORD
require_env NGINX_PORT
# The bot workspace is mounted with the same path inside the container,
# so it must be an absolute host path.
case "$HOST_BOTS_WORKSPACE_ROOT" in
  /*) ;;
  *)
    echo "HOST_BOTS_WORKSPACE_ROOT must be an absolute host path: $HOST_BOTS_WORKSPACE_ROOT"
    exit 1
    ;;
esac
# The backend manages bot containers through the host docker socket.
if [[ ! -S /var/run/docker.sock ]]; then
  echo "Missing required mount source: /var/run/docker.sock"
  exit 1
fi
echo "[deploy-full-offline] using env: $ENV_FILE"
# Pre-create every host directory referenced by the compose volumes.
mkdir -p \
  "$DATA_DIR" \
  "$DATA_DIR/postgres" \
  "$DATA_DIR/redis" \
  "$DATA_DIR/model" \
  "$HOST_BOTS_WORKSPACE_ROOT"
# Validate compose interpolation before starting anything.
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" config -q
echo "[deploy-full-offline] starting postgres and redis"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" up -d postgres redis
wait_for_health "dashboard-nanobot-postgres" 120
wait_for_health "dashboard-nanobot-redis" 60
echo "[deploy-full-offline] initializing application database"
"$INIT_DB_SCRIPT" "$ENV_FILE"
echo "[deploy-full-offline] starting backend and nginx"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" up -d backend nginx
wait_for_health "dashboard-nanobot-backend" 180
wait_for_health "dashboard-nanobot-nginx" 120
echo "[deploy-full-offline] service status"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" ps
if [[ -n "$PUBLIC_HOST" ]]; then
  echo "[deploy-full-offline] open: http://${PUBLIC_HOST}:${NGINX_PORT}"
fi
echo "[deploy-full-offline] done"

View File

@ -1,142 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Resolve bundle paths relative to the repository root (one level above this script).
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
ENV_FILE="${1:-$ROOT_DIR/.env}"  # optional first argument overrides the env file
DATA_DIR="$ROOT_DIR/data"
COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
AGENT_TEMPLATES_FILE="$DATA_DIR/templates/agent_md_templates.json"
TOPIC_PRESETS_FILE="$DATA_DIR/templates/topic_presets.json"
SKILLS_DIR="$DATA_DIR/skills"
# Fall back to the prod-mode file names when the defaults are absent.
if [[ ! -f "$ENV_FILE" && -f "$ROOT_DIR/.env.prod" ]]; then
  ENV_FILE="$ROOT_DIR/.env.prod"
fi
if [[ ! -f "$COMPOSE_FILE" && -f "$ROOT_DIR/docker-compose.prod.yml" ]]; then
  COMPOSE_FILE="$ROOT_DIR/docker-compose.prod.yml"
fi
require_file() {
  # Abort unless $1 is an existing regular file; $2 is an optional hint line.
  local path="$1"
  local hint="${2:-}"
  if [[ -f "$path" ]]; then
    return 0
  fi
  echo "Missing file: $path"
  [[ -n "$hint" ]] && echo "$hint"
  exit 1
}
require_dir() {
  # Abort unless $1 is an existing directory; $2 is an optional hint line.
  local path="$1"
  local hint="${2:-}"
  if [[ -d "$path" ]]; then
    return 0
  fi
  echo "Missing directory: $path"
  [[ -n "$hint" ]] && echo "$hint"
  exit 1
}
require_env() {
  # Exit with an error when the variable named by $1 is unset or empty.
  local name="$1"
  [[ -n "${!name:-}" ]] || { echo "Missing required env: $name"; exit 1; }
}
read_env_value() {
  # Print the value of key $1 from $ENV_FILE (CR, `export ` prefix and one
  # level of quotes are stripped). Returns 1 when the key is not found.
  local key="$1"
  local line=""
  local value=""
  while IFS= read -r line || [[ -n "$line" ]]; do
    line="${line%$'\r'}"                           # tolerate CRLF files
    [[ -z "${line//[[:space:]]/}" ]] && continue   # skip blank lines
    [[ "${line#\#}" != "$line" ]] && continue      # skip comment lines
    [[ "${line#export }" != "$line" ]] && line="${line#export }"
    [[ "$line" == "$key="* ]] || continue
    value="${line#*=}"
    # Strip one matching pair of double or single quotes around the value.
    if [[ "$value" =~ ^\"(.*)\"$ ]]; then
      value="${BASH_REMATCH[1]}"
    elif [[ "$value" =~ ^\'(.*)\'$ ]]; then
      value="${BASH_REMATCH[1]}"
    fi
    printf '%s' "$value"
    return 0
  done < "$ENV_FILE"
  return 1
}
load_env_var() {
  # Assign the variable named by $1 from $ENV_FILE, falling back to the
  # optional default in $2 when the key is missing or empty.
  local name="$1"
  local default_value="${2:-}"
  local value=""
  value="$(read_env_value "$name" || true)"
  if [[ -z "$value" ]]; then
    value="$default_value"
  fi
  printf -v "$name" '%s' "$value"
}
is_truthy() {
  # Return 0 when $1 is a common "true" spelling: 1/true/yes/on (any case).
  local v="${1:-}"
  v="${v,,}"  # bash lowercase expansion instead of piping through tr
  case "$v" in
    1|true|yes|on) return 0 ;;
    *) return 1 ;;
  esac
}
# Validate bundle contents before touching docker.
require_file "$ENV_FILE" "Expected bundle config file such as .env"
require_file "$COMPOSE_FILE"
require_file "$AGENT_TEMPLATES_FILE"
require_file "$TOPIC_PRESETS_FILE"
require_dir "$SKILLS_DIR"
# Load the required settings from the env file (with defaults where sensible).
load_env_var HOST_BOTS_WORKSPACE_ROOT
load_env_var DATABASE_URL
load_env_var NGINX_PORT 8080
load_env_var REDIS_ENABLED false
load_env_var REDIS_URL
load_env_var PUBLIC_HOST ""
require_env HOST_BOTS_WORKSPACE_ROOT
require_env DATABASE_URL
require_env NGINX_PORT
# The bot workspace is mounted with the same path inside the container,
# so it must be an absolute host path.
case "$HOST_BOTS_WORKSPACE_ROOT" in
  /*) ;;
  *)
    echo "HOST_BOTS_WORKSPACE_ROOT must be an absolute host path: $HOST_BOTS_WORKSPACE_ROOT"
    exit 1
    ;;
esac
# The backend manages bot containers through the host docker socket.
if [[ ! -S /var/run/docker.sock ]]; then
  echo "Missing required mount source: /var/run/docker.sock"
  exit 1
fi
# Only PostgreSQL-style connection URLs are supported by this script.
if [[ "$DATABASE_URL" != postgresql* ]]; then
  echo "Unsupported DATABASE_URL for deploy-prod-offline.sh: $DATABASE_URL"
  exit 1
fi
# When Redis is enabled an external REDIS_URL must also be provided.
if is_truthy "$REDIS_ENABLED" && [[ -z "$REDIS_URL" ]]; then
  echo "Missing required env: REDIS_URL"
  exit 1
fi
echo "[deploy-prod-offline] using env: $ENV_FILE"
# Pre-create host directories referenced by the compose volumes.
mkdir -p "$DATA_DIR" "$DATA_DIR/model" "$HOST_BOTS_WORKSPACE_ROOT"
echo "[deploy-prod-offline] expecting external PostgreSQL to be pre-initialized with sql/create-tables.sql and sql/init-data.sql, or by running ./init-db.sh"
# Validate compose interpolation, then start the whole stack.
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" config -q
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" up -d
echo "[deploy-prod-offline] service status"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" ps
if [[ -n "$PUBLIC_HOST" ]]; then
  echo "[deploy-prod-offline] open: http://${PUBLIC_HOST}:${NGINX_PORT}"
fi
echo "[deploy-prod-offline] done"

View File

@ -1,459 +0,0 @@
#!/usr/bin/env bash
# Export a self-contained offline deployment bundle (docker images, compose
# file, env file, SQL, helper scripts) for "full" or "prod" mode.
set -euo pipefail
# Repo root is one directory above this script.
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
MODE="full"
ENV_FILE=""
OUTPUT_DIR="$ROOT_DIR/offline-dist"
# Timestamp versions the bundle directory and archive names.
VERSION="$(date +"%Y%m%d_%H%M%S")"
# Print command-line help for this exporter to stdout.
usage() {
  local prog
  prog="$(basename "$0")"
  cat <<EOF
Usage: ${prog} [--mode full|prod] [--env-file path] [--output-dir path]
Options:
  --mode        Export deployment bundle for full or prod mode. Default: full
  --env-file    Compose env file used for image tags and build args.
  --output-dir  Output directory for offline bundles. Default: offline-dist
  -h, --help    Show this help message.
EOF
}
# Parse CLI flags into the MODE / ENV_FILE / OUTPUT_DIR globals.
# Unknown flags print usage and abort; -h/--help prints usage and exits 0.
parse_args() {
  while (( $# > 0 )); do
    case "$1" in
      --mode)
        [[ $# -ge 2 ]] || { echo "Missing value for --mode"; exit 1; }
        MODE="$2"
        shift 2
        ;;
      --env-file)
        [[ $# -ge 2 ]] || { echo "Missing value for --env-file"; exit 1; }
        ENV_FILE="$2"
        shift 2
        ;;
      --output-dir)
        [[ $# -ge 2 ]] || { echo "Missing value for --output-dir"; exit 1; }
        OUTPUT_DIR="$2"
        shift 2
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        echo "Unexpected argument: $1"
        usage
        exit 1
        ;;
    esac
  done
}
# Abort with a message unless the argument is an existing regular file.
require_file() {
  if [[ ! -f "$1" ]]; then
    echo "Missing file: $1"
    exit 1
  fi
}
# Abort with a message unless the argument is an existing directory.
require_dir() {
  if [[ ! -d "$1" ]]; then
    echo "Missing directory: $1"
    exit 1
  fi
}
# Print the raw value of KEY from the env-style file ENV_PATH.
# Skips blank and comment lines, tolerates CRLF endings and a leading
# "export ", and strips one pair of matching surrounding quotes.
# Returns non-zero when the key is not present.
read_env_value() {
  local env_path="$1"
  local key="$2"
  local raw=""
  local payload=""
  while IFS= read -r raw || [[ -n "$raw" ]]; do
    raw="${raw%$'\r'}"                        # tolerate Windows line endings
    [[ "$raw" =~ ^[[:space:]]*$ ]] && continue  # blank line
    [[ "$raw" == \#* ]] && continue             # comment line
    raw="${raw#export }"                        # allow "export KEY=..."
    [[ "$raw" == "$key="* ]] || continue
    payload="${raw#*=}"
    # Unwrap a single level of matching double or single quotes.
    if [[ "$payload" == \"*\" ]]; then
      payload="${payload:1:${#payload}-2}"
    elif [[ "$payload" == \'*\' ]]; then
      payload="${payload:1:${#payload}-2}"
    fi
    printf '%s' "$payload"
    return 0
  done < "$env_path"
  return 1
}
# Print KEY's value from the globally selected ENV_FILE, or DEFAULT when
# the key is missing or empty.
load_env_value() {
  local key="$1"
  local fallback="${2:-}"
  local resolved=""
  resolved="$(read_env_value "$ENV_FILE" "$key" || true)"
  printf '%s' "${resolved:-$fallback}"
}
# Guarantee IMAGE_REF exists in the local docker image store,
# pulling it from the registry when it is absent.
ensure_image_available() {
  local image_ref="$1"
  if ! docker image inspect "$image_ref" >/dev/null 2>&1; then
    echo "[export] local image not found, pulling: $image_ref"
    docker pull "$image_ref"
  fi
}
# Mirror a repo-relative path (file or directory) into the same relative
# location under BUNDLE_DIR, creating parent directories as needed.
copy_into_bundle() {
  local rel="$1"
  local target="$BUNDLE_DIR/$rel"
  mkdir -p "$(dirname "$target")"
  if [[ -d "$ROOT_DIR/$rel" ]]; then
    cp -R "$ROOT_DIR/$rel" "$target"
    return
  fi
  cp "$ROOT_DIR/$rel" "$target"
}
# Ship the mode-specific compose file under the canonical bundle name
# "docker-compose.yml" so customer-side scripts need no mode switch.
write_bundle_compose() {
  cp "$ROOT_DIR/docker-compose.${MODE}.yml" "$BUNDLE_DIR/docker-compose.yml"
}
# Collect the SQL files the customer needs into bundle/sql.
# Full mode additionally ships the postgres bootstrap/app-privilege SQL
# used to initialize the bundled database container.
copy_sql_bundle() {
  local sql_src="$ROOT_DIR/scripts/sql"
  local sql_dst="$BUNDLE_DIR/sql"
  mkdir -p "$sql_dst"
  cp "$sql_src/create-tables.sql" "$sql_src/init-data.sql" "$sql_dst/"
  if [[ "$MODE" == "full" ]]; then
    cp "$sql_src/init-postgres-bootstrap.sql" "$sql_src/init-postgres-app.sql" "$sql_dst/"
  fi
}
# Set KEY=VALUE in an env file in place: rewrite every non-comment line
# that already starts with "KEY=", or append the pair when absent.
# Comment lines are passed through untouched.
#
# Fix: the value and key are handed to awk through ENVIRON rather than
# "-v". awk processes escape sequences (\n, \t, \v, ...) in -v
# assignments, which silently corrupted values containing backslashes;
# ENVIRON delivers the bytes verbatim. The key match also uses index()
# instead of interpolating the key into a regex, so keys are always
# matched literally.
upsert_env_file() {
  local file="$1"
  local key="$2"
  local value="$3"
  local tmp_file=""
  tmp_file="$(mktemp)"
  UPSERT_KEY="$key" UPSERT_VALUE="$value" awk '
  BEGIN {
    key = ENVIRON["UPSERT_KEY"]
    value = ENVIRON["UPSERT_VALUE"]
    updated = 0
  }
  {
    if ($0 ~ "^[[:space:]]*#") {
      print
      next
    }
    if (index($0, key "=") == 1) {
      print key "=" value
      updated = 1
      next
    }
    print
  }
  END {
    if (!updated) {
      print key "=" value
    }
  }
  ' "$file" > "$tmp_file"
  mv "$tmp_file" "$file"
}
# Assemble the bundle's .env: start from the mode's example file, append
# the PUBLIC_HOST helper field, then pin image tags and deployment
# settings, sourcing current values from the build-side env file.
prepare_bundle_env() {
  local target="$BUNDLE_DIR/${ROOT_ENV_FILE}"
  cp "$ROOT_DIR/.env.$MODE.example" "$target"
  printf '%s\n' \
    "" \
    "# Offline bundle helper field." \
    "# Used only for README / start script output." \
    "PUBLIC_HOST=127.0.0.1" >> "$target"
  upsert_env_file "$target" "BACKEND_IMAGE_TAG" "$BACKEND_IMAGE_TAG"
  upsert_env_file "$target" "FRONTEND_IMAGE_TAG" "$FRONTEND_IMAGE_TAG"
  upsert_env_file "$target" "NGINX_PORT" "$(load_env_value NGINX_PORT 8080)"
  upsert_env_file "$target" "HOST_BOTS_WORKSPACE_ROOT" "$(load_env_value HOST_BOTS_WORKSPACE_ROOT /opt/dashboard-nanobot/workspace/bots)"
  upsert_env_file "$target" "DOCKER_NETWORK_NAME" "$(load_env_value DOCKER_NETWORK_NAME dashboard-nanobot-network)"
  upsert_env_file "$target" "DOCKER_NETWORK_SUBNET" "$(load_env_value DOCKER_NETWORK_SUBNET 172.20.0.0/16)"
  upsert_env_file "$target" "PANEL_ACCESS_PASSWORD" "$(load_env_value PANEL_ACCESS_PASSWORD change_me_panel_password)"
  if [[ "$MODE" != "prod" ]]; then
    # Full mode ships its own postgres/redis containers and credentials.
    upsert_env_file "$target" "POSTGRES_IMAGE" "$POSTGRES_IMAGE"
    upsert_env_file "$target" "REDIS_IMAGE" "$REDIS_IMAGE"
    upsert_env_file "$target" "POSTGRES_SUPERPASSWORD" "$(load_env_value POSTGRES_SUPERPASSWORD change_me_pg_super_password)"
    upsert_env_file "$target" "POSTGRES_APP_PASSWORD" "$(load_env_value POSTGRES_APP_PASSWORD change_me_nanobot_password)"
  else
    # Prod mode points at the customer's external PostgreSQL/Redis.
    upsert_env_file "$target" "DATABASE_URL" "$(load_env_value DATABASE_URL postgresql+psycopg://postgres:change_me_db_password@127.0.0.1:5432/nanobot)"
    upsert_env_file "$target" "REDIS_ENABLED" "$(load_env_value REDIS_ENABLED true)"
    upsert_env_file "$target" "REDIS_URL" "$(load_env_value REDIS_URL redis://127.0.0.1:6379/8)"
  fi
}
# Generate the four customer-facing helper scripts at the bundle root.
# Inside the heredocs, "\$" defers expansion to the generated script's
# runtime, while bare ${...} (IMAGE_ARCHIVE, ROOT_ENV_FILE, MODE) is baked
# in at export time. Do not add comments inside the heredocs: every line
# between "cat" and "EOF" becomes part of the generated file.
write_root_helper_scripts() {
# import-images.sh: load the bundled image archive into docker.
cat > "$BUNDLE_DIR/import-images.sh" <<EOF
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="\$(cd "\$(dirname "\$0")" && pwd)"
IMAGE_ARCHIVE="\$SCRIPT_DIR/${IMAGE_ARCHIVE}"
if [[ ! -f "\$IMAGE_ARCHIVE" ]]; then
echo "Missing image archive: \$IMAGE_ARCHIVE"
exit 1
fi
echo "[import-images] loading images from ${IMAGE_ARCHIVE}"
gunzip -c "\$IMAGE_ARCHIVE" | docker load
echo "[import-images] done"
EOF
# init-db.sh: dispatch to the mode-specific DB initializer. The
# "${MODE}" test is resolved at export time, so only one branch of the
# generated if is ever taken on the customer host.
cat > "$BUNDLE_DIR/init-db.sh" <<EOF
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="\$(cd "\$(dirname "\$0")" && pwd)"
ENV_FILE="\$SCRIPT_DIR/${ROOT_ENV_FILE}"
if [[ ! -f "\$ENV_FILE" ]]; then
echo "Missing env file: \$ENV_FILE"
exit 1
fi
if [[ "${MODE}" == "prod" ]]; then
if [[ ! -f "\$SCRIPT_DIR/offline/init-prod-db-offline.sh" ]]; then
echo "Missing script: \$SCRIPT_DIR/offline/init-prod-db-offline.sh"
exit 1
fi
"\$SCRIPT_DIR/offline/init-prod-db-offline.sh" "\$ENV_FILE"
else
if [[ ! -f "\$SCRIPT_DIR/offline/init-full-db-offline.sh" ]]; then
echo "Missing script: \$SCRIPT_DIR/offline/init-full-db-offline.sh"
exit 1
fi
"\$SCRIPT_DIR/offline/init-full-db-offline.sh" "\$ENV_FILE"
fi
EOF
# start.sh: delegate to the bundled mode-specific deploy script.
cat > "$BUNDLE_DIR/start.sh" <<EOF
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="\$(cd "\$(dirname "\$0")" && pwd)"
ENV_FILE="\$SCRIPT_DIR/${ROOT_ENV_FILE}"
if [[ ! -f "\$ENV_FILE" ]]; then
echo "Missing env file: \$ENV_FILE"
exit 1
fi
"\$SCRIPT_DIR/offline/deploy-${MODE}-offline.sh" "\$ENV_FILE"
EOF
# stop.sh: tear the compose stack down with the same env file.
cat > "$BUNDLE_DIR/stop.sh" <<EOF
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="\$(cd "\$(dirname "\$0")" && pwd)"
ENV_FILE="\$SCRIPT_DIR/${ROOT_ENV_FILE}"
if [[ ! -f "\$ENV_FILE" ]]; then
echo "Missing env file: \$ENV_FILE"
exit 1
fi
docker compose --env-file "\$ENV_FILE" -f "\$SCRIPT_DIR/docker-compose.yml" down
EOF
# All generated helpers must be executable out of the box.
chmod +x \
"$BUNDLE_DIR/import-images.sh" \
"$BUNDLE_DIR/init-db.sh" \
"$BUNDLE_DIR/start.sh" \
"$BUNDLE_DIR/stop.sh"
}
# Generate the customer README. The first heredoc writes the common
# section; a mode-specific section is appended next, and a shared footer
# closes the file. Heredoc bodies are emitted verbatim — keep comments
# outside them.
write_bundle_readme() {
# Common header and quick-start steps (identical for both modes).
cat > "$BUNDLE_DIR/README.txt" <<EOF
Dashboard Nanobot Offline Bundle
Mode: ${MODE}
Version: ${VERSION}
This directory is ready to send directly to the customer.
Included:
- ${IMAGE_ARCHIVE}
- ${ROOT_ENV_FILE}
- docker-compose.yml
- import-images.sh
- init-db.sh
- start.sh
- stop.sh
- offline/
- sql/
- data/templates/
- data/skills/
- data/model/
Customer Quick Start:
1. Extract this bundle on the target server.
2. Import images:
./import-images.sh
3. If a separate bot base image archive was also provided, import it before startup:
gunzip -c nanobot-base-<version>.tar.gz | docker load
4. Edit config:
${ROOT_ENV_FILE}
5. Initialize database:
./init-db.sh
6. Start service:
./start.sh
7. Stop service:
./stop.sh
Fields customer usually needs to edit:
- PUBLIC_HOST
- NGINX_PORT
- HOST_BOTS_WORKSPACE_ROOT
- DOCKER_NETWORK_SUBNET
- PANEL_ACCESS_PASSWORD
EOF
if [[ "$MODE" == "prod" ]]; then
# Prod-specific fields and external-database notes.
cat >> "$BUNDLE_DIR/README.txt" <<EOF
- DATABASE_URL
- REDIS_ENABLED
- REDIS_URL
Prod mode note:
- Customer must prepare external PostgreSQL in advance.
- Customer can run ./init-db.sh to initialize external PostgreSQL automatically.
- If they prefer manual import, run sql/create-tables.sql and sql/init-data.sql before startup.
EOF
else
# Full-mode fields and bundled-database notes.
cat >> "$BUNDLE_DIR/README.txt" <<EOF
- POSTGRES_SUPERPASSWORD
- POSTGRES_APP_PASSWORD
Full mode note:
- PostgreSQL and Redis are already included in this bundle.
- ./init-db.sh can initialize the application database manually if needed.
- start.sh will also trigger database initialization during full startup flow.
EOF
fi
# Shared footer: access URL and mount documentation.
cat >> "$BUNDLE_DIR/README.txt" <<EOF
Access URL:
- http://<PUBLIC_HOST>:<NGINX_PORT>
Mounts used by this deployment:
- ./data -> /app/data
- HOST_BOTS_WORKSPACE_ROOT -> same path inside backend container
- /var/run/docker.sock -> /var/run/docker.sock
Mount note:
- Customer can edit docker-compose.yml directly if they want to change host mount paths.
- If customer also received a separate nanobot-base image archive and does not import it, Bot-related runtime containers may fail to start.
EOF
}
# --- Main export flow ---
parse_args "$@"
# Only "full" and "prod" bundles are supported.
case "$MODE" in
full|prod)
;;
*)
echo "Unsupported mode: $MODE"
exit 1
;;
esac
# Default env file: prefer a real .env.<mode>, fall back to the example.
if [[ -z "$ENV_FILE" ]]; then
if [[ -f "$ROOT_DIR/.env.$MODE" ]]; then
ENV_FILE="$ROOT_DIR/.env.$MODE"
else
ENV_FILE="$ROOT_DIR/.env.$MODE.example"
fi
fi
# Derived bundle paths and names.
COMPOSE_FILE="$ROOT_DIR/docker-compose.$MODE.yml"
BUNDLE_NAME="dashboard-nanobot-${MODE}-offline-${VERSION}"
BUNDLE_DIR="$OUTPUT_DIR/$BUNDLE_NAME"
ARCHIVE_FILE="$OUTPUT_DIR/${BUNDLE_NAME}.tar.gz"
IMAGE_ARCHIVE="docker-images-${MODE}.tar.gz"
ROOT_ENV_FILE=".env"
# Verify all inputs exist before doing any work.
require_file "$ENV_FILE"
require_file "$COMPOSE_FILE"
require_file "$ROOT_DIR/offline/deploy-${MODE}-offline.sh"
require_dir "$ROOT_DIR/data/templates"
require_dir "$ROOT_DIR/data/skills"
require_dir "$ROOT_DIR/data/model"
# Image references to export; full mode also bundles postgres/redis.
BACKEND_IMAGE_TAG="$(load_env_value BACKEND_IMAGE_TAG latest)"
FRONTEND_IMAGE_TAG="$(load_env_value FRONTEND_IMAGE_TAG latest)"
BACKEND_IMAGE="dashboard-nanobot/backend:${BACKEND_IMAGE_TAG}"
FRONTEND_IMAGE="dashboard-nanobot/nginx:${FRONTEND_IMAGE_TAG}"
IMAGE_REFS=("$BACKEND_IMAGE" "$FRONTEND_IMAGE")
if [[ "$MODE" == "full" ]]; then
POSTGRES_IMAGE="$(load_env_value POSTGRES_IMAGE postgres:16-alpine)"
REDIS_IMAGE="$(load_env_value REDIS_IMAGE redis:7-alpine)"
IMAGE_REFS+=("$POSTGRES_IMAGE" "$REDIS_IMAGE")
fi
# Start from a clean bundle directory each run.
mkdir -p "$OUTPUT_DIR"
rm -rf "$BUNDLE_DIR"
mkdir -p "$BUNDLE_DIR"
echo "=== Export Dashboard Nanobot Offline Bundle ==="
echo "[export] mode: $MODE"
echo "[export] env file: $ENV_FILE"
echo "[export] bundle dir: $BUNDLE_DIR"
echo "[1/5] validating compose file"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" config -q
echo "[2/5] building backend and nginx images"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" build backend nginx
if [[ "$MODE" == "full" ]]; then
echo "[3/5] ensuring dependency images are available"
ensure_image_available "$POSTGRES_IMAGE"
ensure_image_available "$REDIS_IMAGE"
else
echo "[3/5] prod mode uses external PostgreSQL/Redis"
fi
echo "[4/5] exporting docker images"
# One combined, gzip-compressed archive with every required image.
docker save "${IMAGE_REFS[@]}" | gzip > "$BUNDLE_DIR/$IMAGE_ARCHIVE"
echo "[5/5] collecting deployment files"
copy_into_bundle "offline/deploy-$MODE-offline.sh"
copy_into_bundle "data/templates"
copy_into_bundle "data/skills"
copy_into_bundle "data/model"
if [[ "$MODE" == "prod" ]]; then
copy_into_bundle "offline/init-prod-db-offline.sh"
fi
if [[ "$MODE" == "full" ]]; then
copy_into_bundle "offline/init-full-db-offline.sh"
fi
copy_sql_bundle
write_bundle_compose
prepare_bundle_env
write_root_helper_scripts
write_bundle_readme
# Final deliverable: a single tarball of the bundle directory.
tar -C "$OUTPUT_DIR" -czf "$ARCHIVE_FILE" "$BUNDLE_NAME"
echo "[done] archive: $ARCHIVE_FILE"
echo "[done] images:"
printf ' - %s\n' "${IMAGE_REFS[@]}"

View File

@ -1,207 +0,0 @@
#!/usr/bin/env bash
# Initialize the bundled (full-mode) PostgreSQL: role/db bootstrap,
# schema, and seed data, all executed inside the compose container.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
# Env file may be passed as $1; defaults to the bundle-root .env.
ENV_FILE="${1:-$ROOT_DIR/.env}"
COMPOSE_FILE="$ROOT_DIR/docker-compose.yml"
BOOTSTRAP_SQL="$ROOT_DIR/sql/init-postgres-bootstrap.sql"
APP_SQL="$ROOT_DIR/sql/init-postgres-app.sql"
SCHEMA_SQL="$ROOT_DIR/sql/create-tables.sql"
SEED_SQL="$ROOT_DIR/sql/init-data.sql"
# Fallbacks below let this script also run from the source repo layout
# (.env.full, docker-compose.full.yml, scripts/sql/...) instead of the
# flattened offline-bundle layout.
if [[ ! -f "$ENV_FILE" && -f "$ROOT_DIR/.env.full" ]]; then
ENV_FILE="$ROOT_DIR/.env.full"
fi
if [[ ! -f "$COMPOSE_FILE" && -f "$ROOT_DIR/docker-compose.full.yml" ]]; then
COMPOSE_FILE="$ROOT_DIR/docker-compose.full.yml"
fi
if [[ ! -f "$BOOTSTRAP_SQL" && -f "$ROOT_DIR/scripts/sql/init-postgres-bootstrap.sql" ]]; then
BOOTSTRAP_SQL="$ROOT_DIR/scripts/sql/init-postgres-bootstrap.sql"
fi
if [[ ! -f "$APP_SQL" && -f "$ROOT_DIR/scripts/sql/init-postgres-app.sql" ]]; then
APP_SQL="$ROOT_DIR/scripts/sql/init-postgres-app.sql"
fi
if [[ ! -f "$SCHEMA_SQL" && -f "$ROOT_DIR/scripts/sql/create-tables.sql" ]]; then
SCHEMA_SQL="$ROOT_DIR/scripts/sql/create-tables.sql"
fi
if [[ ! -f "$SEED_SQL" && -f "$ROOT_DIR/scripts/sql/init-data.sql" ]]; then
SEED_SQL="$ROOT_DIR/scripts/sql/init-data.sql"
fi
# Fail fast when PATH is not a regular file. The optional HINT line
# tells the operator what was expected there.
require_file() {
  local path="$1"
  local hint="${2:-}"
  if [[ ! -f "$path" ]]; then
    echo "Missing file: $path"
    if [[ -n "$hint" ]]; then
      echo "$hint"
    fi
    exit 1
  fi
}
# Abort unless the variable named by $1 is set and non-empty.
require_env() {
  local name="$1"
  if [[ -z "${!name:-}" ]]; then
    echo "Missing required env: $name"
    exit 1
  fi
}
# Print KEY's raw value from the global ENV_FILE. Handles CRLF endings,
# "export " prefixes, comment/blank lines, and unwraps one pair of
# matching surrounding quotes. Returns non-zero when the key is absent.
read_env_value() {
  local key="$1"
  local raw=""
  local payload=""
  while IFS= read -r raw || [[ -n "$raw" ]]; do
    raw="${raw%$'\r'}"
    [[ "$raw" =~ ^[[:space:]]*$ ]] && continue
    [[ "$raw" == \#* ]] && continue
    raw="${raw#export }"
    [[ "$raw" == "$key="* ]] || continue
    payload="${raw#*=}"
    if [[ "$payload" == \"*\" ]]; then
      payload="${payload:1:${#payload}-2}"
    elif [[ "$payload" == \'*\' ]]; then
      payload="${payload:1:${#payload}-2}"
    fi
    printf '%s' "$payload"
    return 0
  done < "$ENV_FILE"
  return 1
}
# Look NAME up in ENV_FILE (default DEFAULT when absent/empty) and write
# the result straight into the variable of the same name.
load_env_var() {
  local name="$1"
  local fallback="${2:-}"
  local resolved=""
  resolved="$(read_env_value "$name" || true)"
  printf -v "$name" '%s' "${resolved:-$fallback}"
}
# Succeed for case-insensitive "enabled" spellings: 1, true, yes, on.
is_truthy() {
  local flag="${1:-}"
  flag="${flag,,}"
  [[ "$flag" == 1 || "$flag" == true || "$flag" == yes || "$flag" == on ]]
}
# Poll pg_isready inside the compose "postgres" service every 2 seconds
# until it answers, or give up after TIMEOUT seconds (default 120),
# dumping recent container logs before exiting non-zero.
wait_for_postgres() {
  local timeout_seconds="${1:-120}"
  local attempts=$(( (timeout_seconds + 1) / 2 ))  # one probe per 2s slot
  local try
  for (( try = 0; try < attempts; try++ )); do
    if docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" exec -T \
      -e PGPASSWORD="$POSTGRES_SUPERPASSWORD" \
      postgres \
      pg_isready -U "$POSTGRES_SUPERUSER" -d "$POSTGRES_BOOTSTRAP_DB" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  echo "[init-full-db-offline] timed out waiting for postgres"
  docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" logs --tail 100 postgres || true
  exit 1
}
# --- Validate inputs and load database settings. ---
require_file "$ENV_FILE" "Expected bundle config file such as .env"
require_file "$COMPOSE_FILE"
require_file "$BOOTSTRAP_SQL"
require_file "$APP_SQL"
require_file "$SCHEMA_SQL"
require_file "$SEED_SQL"
load_env_var POSTGRES_SUPERUSER postgres
load_env_var POSTGRES_SUPERPASSWORD
load_env_var POSTGRES_BOOTSTRAP_DB postgres
load_env_var POSTGRES_APP_DB
load_env_var POSTGRES_APP_USER
load_env_var POSTGRES_APP_PASSWORD
load_env_var UPLOAD_MAX_MB 100
load_env_var STT_ENABLED true
require_env POSTGRES_SUPERUSER
require_env POSTGRES_SUPERPASSWORD
require_env POSTGRES_BOOTSTRAP_DB
require_env POSTGRES_APP_DB
require_env POSTGRES_APP_USER
require_env POSTGRES_APP_PASSWORD
# Start only the postgres service and wait until it accepts connections.
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" up -d postgres >/dev/null
wait_for_postgres 120
# Step 1: create the application role/database (bootstrap SQL runs as the
# superuser against the bootstrap database; psql -v passes identifiers in).
echo "[init-full-db-offline] ensuring role/database exist"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" exec -T \
-e PGPASSWORD="$POSTGRES_SUPERPASSWORD" \
postgres \
psql \
-v ON_ERROR_STOP=1 \
-v app_db="$POSTGRES_APP_DB" \
-v app_user="$POSTGRES_APP_USER" \
-v app_password="$POSTGRES_APP_PASSWORD" \
-U "$POSTGRES_SUPERUSER" \
-d "$POSTGRES_BOOTSTRAP_DB" \
-f - < "$BOOTSTRAP_SQL"
# Step 2: grant schema privileges to the application role.
echo "[init-full-db-offline] ensuring schema privileges in $POSTGRES_APP_DB"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" exec -T \
-e PGPASSWORD="$POSTGRES_SUPERPASSWORD" \
postgres \
psql \
-v ON_ERROR_STOP=1 \
-v app_user="$POSTGRES_APP_USER" \
-U "$POSTGRES_SUPERUSER" \
-d "$POSTGRES_APP_DB" \
-f - < "$APP_SQL"
# Step 3: create application tables.
echo "[init-full-db-offline] applying application schema"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" exec -T \
-e PGPASSWORD="$POSTGRES_SUPERPASSWORD" \
postgres \
psql \
-v ON_ERROR_STOP=1 \
-U "$POSTGRES_SUPERUSER" \
-d "$POSTGRES_APP_DB" \
-f - < "$SCHEMA_SQL"
# Seed-data parameters, rendered as JSON fragments consumed by the
# init-data.sql psql variables below.
PAGE_SIZE_JSON="10"
CHAT_PULL_PAGE_SIZE_JSON="60"
AUTH_TOKEN_TTL_HOURS_JSON="24"
AUTH_TOKEN_MAX_ACTIVE_JSON="2"
UPLOAD_MAX_MB_JSON="$UPLOAD_MAX_MB"
ALLOWED_ATTACHMENT_EXTENSIONS_JSON="[]"
WORKSPACE_DOWNLOAD_EXTENSIONS_JSON='[".pdf", ".doc", ".docx", ".xls", ".xlsx", ".xlsm", ".ppt", ".pptx", ".odt", ".ods", ".odp", ".wps"]'
if is_truthy "$STT_ENABLED"; then
SPEECH_ENABLED_JSON="true"
else
SPEECH_ENABLED_JSON="false"
fi
ACTIVITY_EVENT_RETENTION_DAYS_JSON="7"
# Step 4: load the initial data with the parameters above.
echo "[init-full-db-offline] applying initial data"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" exec -T \
-e PGPASSWORD="$POSTGRES_SUPERPASSWORD" \
postgres \
psql \
-v ON_ERROR_STOP=1 \
-v page_size_json="$PAGE_SIZE_JSON" \
-v chat_pull_page_size_json="$CHAT_PULL_PAGE_SIZE_JSON" \
-v auth_token_ttl_hours_json="$AUTH_TOKEN_TTL_HOURS_JSON" \
-v auth_token_max_active_json="$AUTH_TOKEN_MAX_ACTIVE_JSON" \
-v upload_max_mb_json="$UPLOAD_MAX_MB_JSON" \
-v allowed_attachment_extensions_json="$ALLOWED_ATTACHMENT_EXTENSIONS_JSON" \
-v workspace_download_extensions_json="$WORKSPACE_DOWNLOAD_EXTENSIONS_JSON" \
-v speech_enabled_json="$SPEECH_ENABLED_JSON" \
-v activity_event_retention_days_json="$ACTIVITY_EVENT_RETENTION_DAYS_JSON" \
-U "$POSTGRES_SUPERUSER" \
-d "$POSTGRES_APP_DB" \
-f - < "$SEED_SQL"
echo "[init-full-db-offline] done"

View File

@ -1,118 +0,0 @@
#!/usr/bin/env bash
# Initialize an external (prod-mode) PostgreSQL via the host's psql:
# apply the application schema, then the seed data.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
# Env file may be passed as $1; defaults to the bundle-root .env.
ENV_FILE="${1:-$ROOT_DIR/.env}"
SCHEMA_SQL="$ROOT_DIR/sql/create-tables.sql"
SEED_SQL="$ROOT_DIR/sql/init-data.sql"
# Fall back to the source-repo env file name when the bundle .env is absent.
if [[ ! -f "$ENV_FILE" && -f "$ROOT_DIR/.env.prod" ]]; then
ENV_FILE="$ROOT_DIR/.env.prod"
fi
# Ensure PATH is an existing regular file; otherwise print the error
# (plus an optional HINT) and terminate.
require_file() {
  local path="$1"
  local hint="${2:-}"
  if [[ -f "$path" ]]; then
    return 0
  fi
  echo "Missing file: $path"
  if [[ -n "$hint" ]]; then
    echo "$hint"
  fi
  exit 1
}
# Terminate unless the variable named by $1 holds a non-empty value.
require_env() {
  local var_name="$1"
  if [[ -n "${!var_name:-}" ]]; then
    return 0
  fi
  echo "Missing required env: $var_name"
  exit 1
}
# Emit KEY's raw value from the global ENV_FILE on stdout.
# Ignores blank/comment lines, tolerates CRLF and "export " prefixes, and
# removes one pair of matching surrounding quotes. Non-zero when missing.
read_env_value() {
  local key="$1"
  local entry=""
  local val=""
  while IFS= read -r entry || [[ -n "$entry" ]]; do
    entry="${entry%$'\r'}"
    [[ -n "${entry//[[:space:]]/}" ]] || continue
    [[ "$entry" != \#* ]] || continue
    entry="${entry#export }"
    [[ "$entry" == "$key="* ]] || continue
    val="${entry#*=}"
    case "$val" in
      \"*\") val="${val#\"}"; val="${val%\"}" ;;
      \'*\') val="${val#\'}"; val="${val%\'}" ;;
    esac
    printf '%s' "$val"
    return 0
  done < "$ENV_FILE"
  return 1
}
# Resolve VAR_NAME from ENV_FILE (or fall back to the given default) and
# assign the result into the identically named shell variable.
load_env_var() {
  local var_name="$1"
  local fallback="${2:-}"
  local found=""
  found="$(read_env_value "$var_name" || true)"
  printf -v "$var_name" '%s' "${found:-$fallback}"
}
# True for the enabled spellings 1/true/yes/on, case-insensitively.
is_truthy() {
  case "$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')" in
    1|true|yes|on) return 0 ;;
  esac
  return 1
}
# --- Validate inputs. ---
require_file "$ENV_FILE" "Expected bundle config file such as .env"
require_file "$SCHEMA_SQL"
require_file "$SEED_SQL"
# Prod mode talks to an external PostgreSQL from the host, so the host
# needs the psql client installed.
if ! command -v psql >/dev/null 2>&1; then
echo "Missing command: psql"
echo "Please install PostgreSQL client tools on the target host, then rerun ./init-db.sh"
exit 1
fi
load_env_var DATABASE_URL
load_env_var UPLOAD_MAX_MB 100
load_env_var STT_ENABLED true
require_env DATABASE_URL
# Seed-data parameters, rendered as JSON fragments consumed by the
# init-data.sql psql variables below.
PAGE_SIZE_JSON="10"
CHAT_PULL_PAGE_SIZE_JSON="60"
AUTH_TOKEN_TTL_HOURS_JSON="24"
AUTH_TOKEN_MAX_ACTIVE_JSON="2"
UPLOAD_MAX_MB_JSON="$UPLOAD_MAX_MB"
ALLOWED_ATTACHMENT_EXTENSIONS_JSON="[]"
WORKSPACE_DOWNLOAD_EXTENSIONS_JSON='[".pdf", ".doc", ".docx", ".xls", ".xlsx", ".xlsm", ".ppt", ".pptx", ".odt", ".ods", ".odp", ".wps"]'
if is_truthy "$STT_ENABLED"; then
SPEECH_ENABLED_JSON="true"
else
SPEECH_ENABLED_JSON="false"
fi
ACTIVITY_EVENT_RETENTION_DAYS_JSON="7"
# Apply the table schema first, then the parameterized seed data.
echo "[init-prod-db-offline] applying schema with DATABASE_URL"
psql "$DATABASE_URL" -v ON_ERROR_STOP=1 -f "$SCHEMA_SQL"
echo "[init-prod-db-offline] applying initial data with DATABASE_URL"
psql \
"$DATABASE_URL" \
-v ON_ERROR_STOP=1 \
-v page_size_json="$PAGE_SIZE_JSON" \
-v chat_pull_page_size_json="$CHAT_PULL_PAGE_SIZE_JSON" \
-v auth_token_ttl_hours_json="$AUTH_TOKEN_TTL_HOURS_JSON" \
-v auth_token_max_active_json="$AUTH_TOKEN_MAX_ACTIVE_JSON" \
-v upload_max_mb_json="$UPLOAD_MAX_MB_JSON" \
-v allowed_attachment_extensions_json="$ALLOWED_ATTACHMENT_EXTENSIONS_JSON" \
-v workspace_download_extensions_json="$WORKSPACE_DOWNLOAD_EXTENSIONS_JSON" \
-v speech_enabled_json="$SPEECH_ENABLED_JSON" \
-v activity_event_retention_days_json="$ACTIVITY_EVENT_RETENTION_DAYS_JSON" \
-f "$SEED_SQL"
echo "[init-prod-db-offline] done"