main
mula.liu 2025-12-27 19:26:35 +08:00
parent 10ed4222c5
commit 72d1fd5ef2
53 changed files with 142 additions and 5062 deletions

View File

@ -59,7 +59,18 @@
"Bash(awk:*)",
"Bash(git log:*)",
"WebFetch(domain:ssd-api.jpl.nasa.gov)",
"Bash(PYTHONPATH=/Users/jiliu/WorkSpace/cosmo/backend timeout 30 ./venv/bin/python:*)"
"Bash(PYTHONPATH=/Users/jiliu/WorkSpace/cosmo/backend timeout 30 ./venv/bin/python:*)",
"Bash(xargs basename:*)",
"Bash(activate_multisystem_stars.py )",
"Bash(create_orbits_table.sql )",
"Bash(fetch_interstellar_data.py )",
"Bash(inspect_sun.py )",
"Bash(migrate_data.py )",
"Bash(migrate_interstellar_data.py )",
"Bash(populate_primary_stars.py )",
"Bash(recreate_resources_table.py )",
"Bash(reset_positions.py )",
"Bash(test_pluto.py )"
],
"deny": [],
"ask": []

View File

@ -0,0 +1,130 @@
## 缺失的表定义
DATABASE_SCHEMA.md 文档中缺少以下表的定义,但代码中已经实现:
### 1. celestial_events - 天体事件表
```sql
CREATE TABLE celestial_events (
id SERIAL PRIMARY KEY,
body_id VARCHAR(50) NOT NULL REFERENCES celestial_bodies(id) ON DELETE CASCADE,
title VARCHAR(200) NOT NULL,
event_type VARCHAR(50) NOT NULL,
event_time TIMESTAMP NOT NULL,
description TEXT,
details JSONB,
source VARCHAR(50) DEFAULT 'nasa_sbdb',
created_at TIMESTAMP DEFAULT NOW(),
CONSTRAINT chk_event_type CHECK (event_type IN (
'approach', 'opposition', 'conjunction', 'eclipse',
'perihelion', 'aphelion', 'closest_approach'
))
);
-- 索引
CREATE INDEX idx_celestial_events_body_id ON celestial_events(body_id);
CREATE INDEX idx_celestial_events_event_time ON celestial_events(event_time);
CREATE INDEX idx_celestial_events_event_type ON celestial_events(event_type);
-- 注释
COMMENT ON TABLE celestial_events IS '天体事件表(接近、冲、合、食等天文事件)';
COMMENT ON COLUMN celestial_events.event_type IS '事件类型: approach(接近), opposition(冲), conjunction(合), eclipse(食), perihelion(近日点), aphelion(远日点), closest_approach(最接近)';
COMMENT ON COLUMN celestial_events.details IS 'JSON格式的事件详细信息';
COMMENT ON COLUMN celestial_events.source IS '数据来源: nasa_sbdb, calculated, skyfield_calculation';
```
### 2. user_follows - 用户关注天体表
```sql
CREATE TABLE user_follows (
user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
body_id VARCHAR(50) NOT NULL REFERENCES celestial_bodies(id) ON DELETE CASCADE,
created_at TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (user_id, body_id)
);
-- 索引
CREATE INDEX idx_user_follows_user_id ON user_follows(user_id);
CREATE INDEX idx_user_follows_body_id ON user_follows(body_id);
-- 注释
COMMENT ON TABLE user_follows IS '用户关注天体关联表';
COMMENT ON COLUMN user_follows.user_id IS '用户ID, 外键关联users表';
COMMENT ON COLUMN user_follows.body_id IS '天体ID, 外键关联celestial_bodies表';
```
### 3. positions 表的 source 约束需要更新
文档中:
```sql
CONSTRAINT chk_source CHECK (source IN (
'nasa_horizons', 'calculated', 'user_defined', 'imported'
))
```
实际代码中应该是:
```sql
CONSTRAINT chk_source CHECK (source IN (
'nasa_horizons', 'nasa_horizons_cron', 'calculated', 'user_defined', 'imported'
))
```
### 4. scheduled_jobs 表结构需要更新
文档中使用了 ENUM 类型,但实际代码使用 VARCHAR
```sql
CREATE TABLE scheduled_jobs (
id SERIAL PRIMARY KEY,
name VARCHAR(200) NOT NULL,
cron_expression VARCHAR(100) NOT NULL,
python_code TEXT,
is_active BOOLEAN DEFAULT true,
last_run_at TIMESTAMP,
last_run_status VARCHAR(50),
next_run_at TIMESTAMP,
description TEXT,
created_at TIMESTAMP DEFAULT NOW(),
updated_at TIMESTAMP DEFAULT NOW(),
job_type VARCHAR(50) DEFAULT 'custom',
predefined_function VARCHAR(200),
function_params JSONB
);
```
### 5. system_settings 表主键
文档中有 id 字段,但实际代码中 key 是主键:
```sql
CREATE TABLE system_settings (
key VARCHAR(100) PRIMARY KEY, -- 主键,不是 id
value TEXT NOT NULL,
value_type VARCHAR(20) NOT NULL DEFAULT 'string',
category VARCHAR(50) NOT NULL DEFAULT 'general',
label VARCHAR(200) NOT NULL,
description TEXT,
is_public BOOLEAN DEFAULT FALSE,
created_at TIMESTAMP DEFAULT NOW(),
updated_at TIMESTAMP DEFAULT NOW(),
CONSTRAINT chk_value_type CHECK (value_type IN (
'string', 'int', 'float', 'bool', 'json'
))
);
```
### 6. role_menus 表主键
文档中有 id 字段,但实际代码使用复合主键:
```sql
CREATE TABLE role_menus (
role_id INTEGER NOT NULL REFERENCES roles(id) ON DELETE CASCADE,
menu_id INTEGER NOT NULL REFERENCES menus(id) ON DELETE CASCADE,
PRIMARY KEY (role_id, menu_id) -- 复合主键,没有 id 字段
);
```

View File

@ -1,226 +0,0 @@
#!/usr/bin/env python3
"""
补全多恒星系统数据并启用恒星和行星
参考比邻星系统Alpha Centauri的数据结构
"""
import asyncio
import asyncpg
import json
import logging
from datetime import datetime
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# 数据库连接配置
DB_CONFIG = {
"host": "localhost",
"port": 5432,
"user": "postgres",
"password": "postgres",
"database": "cosmo_db"
}
# 已知的多恒星系统数据(来自天文学资料)
MULTI_STAR_SYSTEMS = {
# Alpha Centauri System (比邻星系统) - system_id = 479
479: {
"stars": [
{
"id": "star-479-primary",
"name": "Alpha Centauri A",
"name_zh": "南门二A",
"description": "该恒星系主序星,光谱类型: G2V, 表面温度: 5790K",
"extra_data": {
"spectral_type": "G2V",
"mass_solar": 1.1,
"radius_solar": 1.22,
"temperature_k": 5790
}
},
{
"id": "star-479-secondary",
"name": "Alpha Centauri B",
"name_zh": "南门二B",
"description": "与南门二A相互绕转的明亮双星是该系统的主体。",
"extra_data": {
"spectral_type": "K1V",
"mass_solar": 0.93,
"radius_solar": 0.86,
"temperature_k": 5260
}
},
{
"id": "star-479-tertiary",
"name": "Proxima Centauri",
"name_zh": "比邻星",
"description": "一颗质量小、光度弱的红矮星距离南门二A/B约0.2光年,围绕它们公转。",
"extra_data": {
"spectral_type": "M5.5V",
"mass_solar": 0.12,
"radius_solar": 0.14,
"temperature_k": 2900
}
}
]
}
}
async def check_existing_data(conn):
    """Print a read-only snapshot of star systems and the bodies in system 479.

    Used before any mutation so the operator can eyeball the current state.
    System 479 is the Alpha Centauri system; ids 2-5 are spot checks.

    Args:
        conn: an open asyncpg connection.
    """
    logger.info("=== 检查现有数据 ===")
    # Star systems of interest.
    rows = await conn.fetch("""
        SELECT id, name, name_zh, host_star_name, planet_count
        FROM star_systems
        WHERE id IN (479, 2, 3, 4, 5)
        ORDER BY id
    """)
    print("\n恒星系统:")
    for row in rows:
        print(f" ID={row['id']}: {row['name_zh'] or row['name']} (主恒星: {row['host_star_name']}, 行星数: {row['planet_count']})")
    # All bodies belonging to the Alpha Centauri system (system_id = 479).
    rows = await conn.fetch("""
        SELECT id, name, name_zh, type, is_active
        FROM celestial_bodies
        WHERE system_id = 479
        ORDER BY type, name
    """)
    print("\n比邻星系统(479)的天体:")
    for row in rows:
        print(f" {row['type']:15} | {row['name']:30} | Active: {row['is_active']}")
async def add_missing_stars(conn):
    """Insert the stars from MULTI_STAR_SYSTEMS that are not yet in the DB.

    Rows already present (matched by primary key) are left untouched, so the
    function is safe to re-run.

    Args:
        conn: an open asyncpg connection.
    """
    logger.info("\n=== 添加缺失的恒星 ===")
    for system_id, system_data in MULTI_STAR_SYSTEMS.items():
        logger.info(f"\n处理恒星系统 ID={system_id}")
        for star in system_data["stars"]:
            # Skip stars that were inserted by a previous run.
            existing = await conn.fetchrow(
                "SELECT id FROM celestial_bodies WHERE id = $1",
                star["id"]
            )
            if existing:
                logger.info(f" ✓ 恒星已存在: {star['name']} ({star['id']})")
            else:
                # Insert the new star; extra_data is serialized to JSONB.
                await conn.execute("""
                    INSERT INTO celestial_bodies
                    (id, name, name_zh, type, system_id, description, is_active, extra_data, created_at, updated_at)
                    VALUES
                    ($1, $2, $3, 'star', $4, $5, TRUE, $6::jsonb, NOW(), NOW())
                """,
                    star["id"],
                    star["name"],
                    star["name_zh"],
                    system_id,
                    star["description"],
                    json.dumps(star["extra_data"])
                )
                logger.info(f" ✅ 添加恒星: {star['name_zh']} ({star['id']})")
    logger.info("\n恒星数据补全完成!")
async def activate_stars_and_planets(conn):
    """Activate every star and planet outside the solar system.

    Sets is_active = TRUE for all 'star' and 'planet' rows with
    system_id > 1 (system 1 is presumably the solar system -- confirm)
    and logs each affected body.

    Args:
        conn: an open asyncpg connection.
    """
    logger.info("\n=== 启用恒星和行星 ===")
    # fetch() on UPDATE ... RETURNING yields the affected rows for logging.
    stars = await conn.fetch("""
        UPDATE celestial_bodies
        SET is_active = TRUE, updated_at = NOW()
        WHERE type = 'star' AND system_id > 1
        RETURNING id, name, name_zh
    """)
    logger.info(f"\n启用了 {len(stars)} 颗恒星:")
    for star in stars:
        logger.info(f"{star['name_zh'] or star['name']} ({star['id']})")
    # Same treatment for planets outside the solar system.
    planets = await conn.fetch("""
        UPDATE celestial_bodies
        SET is_active = TRUE, updated_at = NOW()
        WHERE type = 'planet' AND system_id > 1
        RETURNING id, name, name_zh
    """)
    logger.info(f"\n启用了 {len(planets)} 颗行星:")
    for planet in planets:
        logger.info(f"{planet['name_zh'] or planet['name']} ({planet['id']})")
    logger.info("\n启用完成!")
async def verify_results(conn):
    """Print per-system counts of stars, planets and active bodies.

    Args:
        conn: an open asyncpg connection.
    """
    logger.info("\n=== 验证结果 ===")
    # One row per system; conditional COUNTs avoid three separate queries.
    # LEFT JOIN keeps systems that currently have no bodies (counts are 0).
    rows = await conn.fetch("""
        SELECT
            s.id,
            s.name,
            s.name_zh,
            COUNT(CASE WHEN cb.type = 'star' THEN 1 END) as star_count,
            COUNT(CASE WHEN cb.type = 'planet' THEN 1 END) as planet_count,
            COUNT(CASE WHEN cb.is_active = TRUE THEN 1 END) as active_count
        FROM star_systems s
        LEFT JOIN celestial_bodies cb ON s.id = cb.system_id
        WHERE s.id IN (479, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        GROUP BY s.id, s.name, s.name_zh
        ORDER BY s.id
    """)
    print("\n各恒星系统统计:")
    print(f"{'系统ID':<8} {'名称':<30} {'恒星数':<8} {'行星数':<8} {'启用数':<8}")
    print("-" * 80)
    for row in rows:
        print(f"{row['id']:<8} {(row['name_zh'] or row['name']):<30} {row['star_count']:<8} {row['planet_count']:<8} {row['active_count']:<8}")
async def main():
    """Run the full workflow: inspect, insert missing stars, activate, verify.

    A single connection is shared by all steps and always closed on exit.
    """
    print("=" * 80)
    print("多恒星系统数据补全和启用脚本")
    print("=" * 80)
    conn = await asyncpg.connect(**DB_CONFIG)
    try:
        # 1. Snapshot the current state.
        await check_existing_data(conn)
        # 2. Insert stars missing from MULTI_STAR_SYSTEMS.
        await add_missing_stars(conn)
        # 3. Activate stars and planets outside the solar system.
        await activate_stars_and_planets(conn)
        # 4. Print the verification summary.
        await verify_results(conn)
        print("\n" + "=" * 80)
        print("✅ 所有操作完成!")
        print("=" * 80)
    finally:
        await conn.close()
if __name__ == "__main__":
    asyncio.run(main())

View File

@ -1,487 +0,0 @@
#!/usr/bin/env python3
"""
补全高价值双星/多星系统数据
包含8-10个科学价值最高的多恒星系统
"""
import asyncio
import asyncpg
import json
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# 数据库连接配置
DB_CONFIG = {
"host": "localhost",
"port": 5432,
"user": "postgres",
"password": "postgres",
"database": "cosmo_db"
}
# 高价值多恒星系统数据(基于天文学资料)
MULTI_STAR_SYSTEMS = {
# 1. Alpha Centauri (比邻星系统) - 已完成,保留用于验证
479: {
"stars": [
{
"id": "star-479-primary",
"name": "Alpha Centauri A",
"name_zh": "南门二A",
"description": "该恒星系主序星,光谱类型: G2V, 表面温度: 5790K",
"extra_data": {
"spectral_type": "G2V",
"mass_solar": 1.1,
"radius_solar": 1.22,
"temperature_k": 5790
}
},
{
"id": "star-479-secondary",
"name": "Alpha Centauri B",
"name_zh": "南门二B",
"description": "与南门二A相互绕转的明亮双星是该系统的主体。",
"extra_data": {
"spectral_type": "K1V",
"mass_solar": 0.93,
"radius_solar": 0.86,
"temperature_k": 5260
}
},
{
"id": "star-479-tertiary",
"name": "Proxima Centauri",
"name_zh": "比邻星",
"description": "一颗质量小、光度弱的红矮星距离南门二A/B约0.2光年,围绕它们公转。",
"extra_data": {
"spectral_type": "M5.5V",
"mass_solar": 0.12,
"radius_solar": 0.14,
"temperature_k": 2900
}
}
]
},
# 2. 55 Cancri (巨蟹座55) - 双星系统
11: {
"stars": [
{
"id": "star-11-primary",
"name": "55 Cancri A",
"name_zh": "巨蟹座55A",
"description": "类太阳黄矮星拥有5颗已确认行星包括著名的超级地球55 Cnc e。",
"extra_data": {
"spectral_type": "G8V",
"mass_solar": 0.95,
"radius_solar": 0.94,
"temperature_k": 5196
}
},
{
"id": "star-11-secondary",
"name": "55 Cancri B",
"name_zh": "巨蟹座55B",
"description": "红矮星伴星距离A星约1065 AU轨道周期约1000年。",
"extra_data": {
"spectral_type": "M4V",
"mass_solar": 0.13,
"radius_solar": 0.30,
"temperature_k": 3200,
"separation_au": 1065,
"orbital_period_years": 1000
}
}
]
},
# 3. 16 Cygni (天鹅座16) - 双星系统
5: {
"stars": [
{
"id": "star-5-primary",
"name": "16 Cygni A",
"name_zh": "天鹅座16A",
"description": "类太阳黄矮星,该双星系统的主星。",
"extra_data": {
"spectral_type": "G1.5V",
"mass_solar": 1.11,
"radius_solar": 1.24,
"temperature_k": 5825
}
},
{
"id": "star-5-secondary",
"name": "16 Cygni B",
"name_zh": "天鹅座16B",
"description": "类太阳黄矮星拥有一颗高偏心率轨道的行星16 Cyg B b展示了双星引力对行星轨道的影响。",
"extra_data": {
"spectral_type": "G2.5V",
"mass_solar": 1.07,
"radius_solar": 1.14,
"temperature_k": 5750,
"separation_au": 850,
"orbital_period_years": 18200
}
}
]
},
# 4. Epsilon Indi (天园增四) - 三体系统 (1恒星 + 2棕矮星)
40: {
"stars": [
{
"id": "star-40-primary",
"name": "Epsilon Indi A",
"name_zh": "天园增四A",
"description": "橙矮星第五近的恒星系统伴有两颗棕矮星Ba和Bb",
"extra_data": {
"spectral_type": "K5V",
"mass_solar": 0.76,
"radius_solar": 0.73,
"temperature_k": 4630
}
},
{
"id": "star-40-secondary",
"name": "Epsilon Indi Ba",
"name_zh": "天园增四Ba",
"description": "T1V型棕矮星距离A星约1460 AU与Bb组成棕矮星双星系统。",
"extra_data": {
"spectral_type": "T1V",
"mass_jupiter": 47,
"radius_jupiter": 0.91,
"temperature_k": 1300,
"separation_from_A_au": 1460,
"is_brown_dwarf": True
}
},
{
"id": "star-40-tertiary",
"name": "Epsilon Indi Bb",
"name_zh": "天园增四Bb",
"description": "T6V型棕矮星与Ba互绕周期约15年是最近的棕矮星双星系统。",
"extra_data": {
"spectral_type": "T6V",
"mass_jupiter": 28,
"radius_jupiter": 0.80,
"temperature_k": 880,
"orbital_period_years": 15,
"is_brown_dwarf": True
}
}
]
},
# 5. Gamma Cephei (仙王座γ) - 双星系统
49: {
"stars": [
{
"id": "star-49-primary",
"name": "Gamma Cephei A",
"name_zh": "仙王座γA",
"description": "亚巨星最早被怀疑有行星的恒星之一1988年拥有一颗类木行星。",
"extra_data": {
"spectral_type": "K1IV",
"mass_solar": 1.59,
"radius_solar": 4.9,
"temperature_k": 4800
}
},
{
"id": "star-49-secondary",
"name": "Gamma Cephei B",
"name_zh": "仙王座γB",
"description": "红矮星伴星距离A星约20 AU轨道周期约66年形成紧密双星系统。",
"extra_data": {
"spectral_type": "M4V",
"mass_solar": 0.4,
"radius_solar": 0.40,
"temperature_k": 3200,
"separation_au": 20,
"orbital_period_years": 66
}
}
]
},
# 6. Upsilon Andromedae (仙女座υ) - 双星系统
572: {
"stars": [
{
"id": "star-572-primary",
"name": "Upsilon Andromedae A",
"name_zh": "仙女座υA",
"description": "黄白主序星第一个被发现有多颗行星的主序星1999年拥有4颗已确认行星。",
"extra_data": {
"spectral_type": "F8V",
"mass_solar": 1.27,
"radius_solar": 1.63,
"temperature_k": 6212
}
},
{
"id": "star-572-secondary",
"name": "Upsilon Andromedae B",
"name_zh": "仙女座υB",
"description": "红矮星伴星距离A星约750 AU。",
"extra_data": {
"spectral_type": "M4.5V",
"mass_solar": 0.25,
"radius_solar": 0.28,
"temperature_k": 3100,
"separation_au": 750
}
}
]
},
# 7. HD 41004 - 双星系统两个独立的system_id需要合并
347: {
"stars": [
{
"id": "star-347-primary",
"name": "HD 41004 A",
"name_zh": "HD 41004 A",
"description": "橙矮星拥有一颗类木行星HD 41004 A b。",
"extra_data": {
"spectral_type": "K1V",
"mass_solar": 0.70,
"radius_solar": 0.67,
"temperature_k": 5000
}
},
{
"id": "star-347-secondary",
"name": "HD 41004 B",
"name_zh": "HD 41004 B",
"description": "红矮星伴星距离A星约23 AU可能拥有棕矮星伴星。",
"extra_data": {
"spectral_type": "M2V",
"mass_solar": 0.40,
"radius_solar": 0.39,
"temperature_k": 3400,
"separation_au": 23
}
}
]
},
# 8. GJ 86 (格利泽86) - 双星系统(橙矮星 + 白矮星)
128: {
"stars": [
{
"id": "star-128-primary",
"name": "GJ 86 A",
"name_zh": "格利泽86A",
"description": "橙矮星拥有一颗类木行星GJ 86 b伴星是罕见的白矮星。",
"extra_data": {
"spectral_type": "K1V",
"mass_solar": 0.79,
"radius_solar": 0.77,
"temperature_k": 5100
}
},
{
"id": "star-128-secondary",
"name": "GJ 86 B",
"name_zh": "格利泽86B",
"description": "白矮星伴星距离A星约21 AU是研究恒星演化对行星影响的重要案例。",
"extra_data": {
"spectral_type": "DA (白矮星)",
"mass_solar": 0.55,
"radius_solar": 0.01,
"temperature_k": 8000,
"separation_au": 21,
"is_white_dwarf": True
}
}
]
},
# 9. HD 196885 - 双星系统
267: {
"stars": [
{
"id": "star-267-primary",
"name": "HD 196885 A",
"name_zh": "HD 196885 A",
"description": "黄白主序星拥有一颗行星HD 196885 A b。",
"extra_data": {
"spectral_type": "F8V",
"mass_solar": 1.33,
"radius_solar": 1.68,
"temperature_k": 6172
}
},
{
"id": "star-267-secondary",
"name": "HD 196885 B",
"name_zh": "HD 196885 B",
"description": "红矮星伴星距离A星约25 AU。",
"extra_data": {
"spectral_type": "M",
"mass_solar": 0.45,
"radius_solar": 0.43,
"temperature_k": 3500,
"separation_au": 25
}
}
]
}
}
async def add_missing_stars(conn):
    """Insert the stars defined in MULTI_STAR_SYSTEMS that are missing.

    Systems absent from star_systems are skipped with a warning; stars
    already present (matched by primary key) are counted as skipped, so
    the function is safe to re-run.

    Args:
        conn: an open asyncpg connection.
    """
    logger.info("=" * 80)
    logger.info("开始补全多恒星系统数据")
    logger.info("=" * 80)
    added_count = 0
    skipped_count = 0
    for system_id, system_data in MULTI_STAR_SYSTEMS.items():
        # Guard against stale system ids in the hand-maintained data set.
        system = await conn.fetchrow(
            "SELECT id, name, name_zh FROM star_systems WHERE id = $1",
            system_id
        )
        if not system:
            logger.warning(f"\n⚠️ 系统ID={system_id}不存在,跳过")
            continue
        logger.info(f"\n{'='*80}")
        logger.info(f"处理恒星系统: {system['name_zh'] or system['name']} (ID={system_id})")
        logger.info(f"{'='*80}")
        for star in system_data["stars"]:
            # Skip stars inserted by a previous run.
            existing = await conn.fetchrow(
                "SELECT id FROM celestial_bodies WHERE id = $1",
                star["id"]
            )
            if existing:
                logger.info(f" ✓ 恒星已存在: {star['name_zh']} ({star['id']})")
                skipped_count += 1
            else:
                # Insert the new star; extra_data is serialized to JSONB.
                await conn.execute("""
                    INSERT INTO celestial_bodies
                    (id, name, name_zh, type, system_id, description, is_active, extra_data, created_at, updated_at)
                    VALUES
                    ($1, $2, $3, 'star', $4, $5, TRUE, $6::jsonb, NOW(), NOW())
                """,
                    star["id"],
                    star["name"],
                    star["name_zh"],
                    system_id,
                    star["description"],
                    json.dumps(star["extra_data"])
                )
                logger.info(f" ✅ 添加恒星: {star['name_zh']} ({star['id']})")
                added_count += 1
    logger.info(f"\n{'='*80}")
    logger.info(f"恒星数据补全完成!")
    logger.info(f" 新增: {added_count}颗")
    logger.info(f" 跳过: {skipped_count}颗(已存在)")
    logger.info(f"{'='*80}")
async def verify_results(conn):
    """Print a summary table plus per-system star details.

    First query: per-system counts of stars/planets/active bodies plus an
    aggregated "name (id)" list of stars. Then, for each system, list each
    star's spectral type and mass read from extra_data.

    Args:
        conn: an open asyncpg connection.
    """
    logger.info("\n" + "=" * 80)
    logger.info("验证多星系统数据")
    logger.info("=" * 80)
    system_ids = list(MULTI_STAR_SYSTEMS.keys())
    # Conditional COUNTs give all three tallies in one pass; LEFT JOIN keeps
    # systems with no bodies.
    rows = await conn.fetch("""
        SELECT
            s.id,
            s.name,
            s.name_zh,
            COUNT(CASE WHEN cb.type = 'star' THEN 1 END) as star_count,
            COUNT(CASE WHEN cb.type = 'planet' THEN 1 END) as planet_count,
            COUNT(CASE WHEN cb.is_active = TRUE THEN 1 END) as active_count,
            string_agg(
                CASE WHEN cb.type = 'star' THEN cb.name_zh || ' (' || cb.id || ')' END,
                ', '
                ORDER BY cb.id
            ) as star_names
        FROM star_systems s
        LEFT JOIN celestial_bodies cb ON s.id = cb.system_id
        WHERE s.id = ANY($1)
        GROUP BY s.id, s.name, s.name_zh
        ORDER BY s.id
    """, system_ids)
    print(f"\n{'系统ID':<8} {'系统名称':<30} {'恒星数':<8} {'行星数':<8} {'启用数':<8}")
    print("=" * 100)
    for row in rows:
        system_name = row['name_zh'] or row['name']
        print(f"{row['id']:<8} {system_name:<30} {row['star_count']:<8} {row['planet_count']:<8} {row['active_count']:<8}")
    # Detailed star listing per system.
    print(f"\n{'='*100}")
    print("各系统恒星详情:")
    print(f"{'='*100}")
    for row in rows:
        system_name = row['name_zh'] or row['name']
        stars = await conn.fetch("""
            SELECT id, name, name_zh, extra_data
            FROM celestial_bodies
            WHERE system_id = $1 AND type = 'star'
            ORDER BY id
        """, row['id'])
        print(f"\n{system_name} (ID={row['id']}):")
        for star in stars:
            # extra_data may arrive as a JSON string, a dict, or NULL.
            extra = star['extra_data']
            if isinstance(extra, str):
                extra = json.loads(extra) if extra else {}
            elif extra is None:
                extra = {}
            spectral = extra.get('spectral_type', 'N/A')
            mass = extra.get('mass_solar', extra.get('mass_jupiter'))
            mass_unit = 'M☉' if 'mass_solar' in extra else ('MJ' if 'mass_jupiter' in extra else '')
            # BUG FIX: the old f-string `{mass}{mass_unit if mass else 'N/A'}`
            # printed "NoneN/A" when no mass was recorded (and would print a
            # bare "N/A" suffix for a 0 mass). Build the value explicitly.
            mass_str = f"{mass}{mass_unit}" if mass is not None else 'N/A'
            print(f"{star['name_zh']:<25} | 光谱: {spectral:<10} | 质量: {mass_str}")
async def main():
    """Entry point: insert missing stars, then print the verification report.

    Errors are logged with a traceback instead of propagating (best-effort
    script), and the connection is always closed.
    """
    print("\n" + "=" * 80)
    print("多恒星系统数据补全脚本 v2.0")
    print("将补全8-10个高价值双星/多星系统")
    print("=" * 80)
    conn = await asyncpg.connect(**DB_CONFIG)
    try:
        # 1. Insert missing stars.
        await add_missing_stars(conn)
        # 2. Verify the results.
        await verify_results(conn)
        print("\n" + "=" * 80)
        print("✅ 所有操作完成!")
        print("=" * 80)
    except Exception as e:
        # Report and fall through to cleanup rather than crash.
        logger.error(f"❌ 发生错误: {e}")
        import traceback
        traceback.print_exc()
    finally:
        await conn.close()
if __name__ == "__main__":
    asyncio.run(main())

View File

@ -1,4 +0,0 @@
-- Add danmaku_ttl setting (default 24 hours = 86400 seconds)
-- Idempotent: the WHERE NOT EXISTS guard makes re-runs a no-op.
-- value is stored as text; value_type 'int' tells the application how to parse it.
INSERT INTO system_settings (key, value, value_type, category, label, description, is_public)
SELECT 'danmaku_ttl', '86400', 'int', 'platform', '弹幕保留时间', '用户发送的弹幕在系统中保留的时间(秒)', true
WHERE NOT EXISTS (SELECT 1 FROM system_settings WHERE key = 'danmaku_ttl');

View File

@ -1,33 +0,0 @@
-- Add Scheduled Job for Fetching Close Approach Events
-- This uses the predefined task: fetch_close_approach_events
--
-- Parameters:
--   - days_ahead: 30          (look 30 days ahead for events)
--   - dist_max: "30"          (30 AU, roughly Neptune's orbital radius)
--   - approach_body: "Earth"  (bodies approaching Earth)
--   - limit: 200              (return at most 200 events)
--   - clean_old_events: true  (purge expired events)
--
-- Cron expression: '0 2 * * 0' (every Sunday at 02:00 UTC)
--
-- Note: the task auto-creates missing celestial body records (asteroids/comets).
INSERT INTO "public"."scheduled_jobs"
("name", "job_type", "predefined_function", "function_params", "cron_expression", "description", "is_active")
VALUES
(
    '每周天体事件拉取 (Close Approaches)',
    'predefined',
    'fetch_close_approach_events',
    '{
        "days_ahead": 30,
        "dist_max": "30",
        "approach_body": "Earth",
        "limit": 200,
        "clean_old_events": true
    }'::jsonb,
    '0 2 * * 0',
    '每周日UTC 02:00从NASA SBDB拉取未来30天内距离地球30AU以内海王星轨道范围的小行星/彗星接近事件',
    true
)
-- NOTE(review): ON CONFLICT without a conflict target only suppresses errors
-- raised by some unique constraint; if "name" has no unique constraint,
-- re-running this script inserts a duplicate row -- confirm the schema.
ON CONFLICT DO NOTHING;

View File

@ -1,55 +0,0 @@
-- Add Celestial Events Menu
-- Adds the celestial-events menu under the Data Management parent menu,
-- then grants it to both the admin and user roles.
-- Idempotent: the menu is only created when absent, and the role grants
-- use ON CONFLICT DO NOTHING.
-- First check if menu already exists
DO $$
DECLARE
    menu_exists BOOLEAN;
BEGIN
    SELECT EXISTS(SELECT 1 FROM menus WHERE name = 'celestial_events') INTO menu_exists;
    IF NOT menu_exists THEN
        INSERT INTO "public"."menus"
        ("name", "title", "icon", "path", "component", "parent_id", "sort_order", "is_active")
        VALUES
        (
            'celestial_events',
            '天体事件',
            'CalendarOutlined',
            '/admin/celestial-events',
            NULL,
            2, -- parent_id = 2 (Data Management; assumes that fixed id -- confirm)
            4, -- sort_order = 4 (after the NASA data download entry)
            true
        );
    END IF;
END $$;
-- Get the menu ID for role assignment
DO $$
DECLARE
    menu_id_var INTEGER;
    admin_role_id INTEGER;
    user_role_id INTEGER;
BEGIN
    -- Get the celestial_events menu ID
    SELECT id INTO menu_id_var FROM menus WHERE name = 'celestial_events';
    -- Get role IDs
    SELECT id INTO admin_role_id FROM roles WHERE name = 'admin';
    SELECT id INTO user_role_id FROM roles WHERE name = 'user';
    -- Assign menu to admin role
    IF menu_id_var IS NOT NULL AND admin_role_id IS NOT NULL THEN
        INSERT INTO role_menus (role_id, menu_id)
        VALUES (admin_role_id, menu_id_var)
        ON CONFLICT DO NOTHING;
    END IF;
    -- Assign menu to user role (users can view events)
    IF menu_id_var IS NOT NULL AND user_role_id IS NOT NULL THEN
        INSERT INTO role_menus (role_id, menu_id)
        VALUES (user_role_id, menu_id_var)
        ON CONFLICT DO NOTHING;
    END IF;
END $$;

View File

@ -1,15 +0,0 @@
-- Migration: add 'nasa_horizons_cron' to the positions.source CHECK constraint
-- Date: 2025-12-11
-- CHECK constraints cannot be modified in place, so drop and recreate.
-- 1. Drop the old constraint (IF EXISTS keeps this re-runnable)
ALTER TABLE positions DROP CONSTRAINT IF EXISTS chk_source;
-- 2. Recreate it with the new value included
ALTER TABLE positions ADD CONSTRAINT chk_source
CHECK (source IN ('nasa_horizons', 'nasa_horizons_cron', 'calculated', 'user_defined', 'imported'));
-- 3. Verify: show the constraint definition as stored in the catalog
SELECT conname, pg_get_constraintdef(oid)
FROM pg_constraint
WHERE conrelid = 'positions'::regclass
AND conname = 'chk_source';

View File

@ -1,63 +0,0 @@
-- Add calculate_planetary_events task to the scheduled tasks
-- This task will calculate planetary events (conjunctions, oppositions) using Skyfield
-- Upsert keyed on the task name, so re-running refreshes parameters/schedule.
-- NOTE(review): ON CONFLICT (name) requires a unique constraint on tasks.name;
-- parameters are cast to ::json here while other migrations use ::jsonb --
-- confirm the tasks table column types.
-- Example 1: Calculate events for all major planets (365 days ahead)
INSERT INTO tasks (name, description, category, parameters, status, schedule_config)
VALUES (
    'calculate_planetary_events',
    '计算太阳系主要天体的合、冲等事件(每周执行一次)',
    'data_sync',
    '{
        "days_ahead": 365,
        "clean_old_events": true,
        "calculate_close_approaches": false
    }'::json,
    'active',
    '{
        "type": "cron",
        "cron": "0 2 * * 0"
    }'::json
)
ON CONFLICT (name) DO UPDATE SET
    parameters = EXCLUDED.parameters,
    schedule_config = EXCLUDED.schedule_config;
-- Example 2 (kept as a deliberate, commented usage example):
-- Calculate events for inner planets only (30 days ahead, with close approaches)
-- INSERT INTO tasks (name, description, category, parameters, status, schedule_config)
-- VALUES (
--     'calculate_inner_planetary_events',
--     '计算内行星事件(包括近距离接近)',
--     'data_sync',
--     '{
--         "body_ids": ["199", "299", "399", "499"],
--         "days_ahead": 30,
--         "clean_old_events": true,
--         "calculate_close_approaches": true,
--         "threshold_degrees": 5.0
--     }'::json,
--     'active',
--     '{
--         "type": "cron",
--         "cron": "0 3 * * *"
--     }'::json
-- )
-- ON CONFLICT (name) DO NOTHING;
-- Query to check the task was added
SELECT id, name, description, status, parameters, schedule_config
FROM tasks
WHERE name = 'calculate_planetary_events';
-- Query to view calculated events
-- SELECT
--     ce.id,
--     ce.title,
--     ce.event_type,
--     ce.event_time,
--     cb.name as body_name,
--     ce.details,
--     ce.created_at
-- FROM celestial_events ce
-- JOIN celestial_bodies cb ON ce.body_id = cb.id
-- WHERE ce.source = 'skyfield_calculation'
-- ORDER BY ce.event_time;

View File

@ -1,114 +0,0 @@
-- This script adds a new top-level menu "Platform Management"
-- with two sub-menus "User Management" and "Platform Parameters Management".
-- These menus will be assigned to the 'admin' role.
-- Idempotent: each menu is looked up by (name, parent_id) first and only
-- inserted (and granted to admin) when missing, so re-runs are no-ops.
-- Start Transaction for atomicity
BEGIN;
-- 1. Find the ID of the 'admin' role
-- Assuming 'admin' role name exists and is unique.
DO $$
DECLARE
    admin_role_id INTEGER;
    platform_management_menu_id INTEGER;
    user_management_menu_id INTEGER;
    platform_parameters_menu_id INTEGER;
BEGIN
    SELECT id INTO admin_role_id FROM roles WHERE name = 'admin';
    IF admin_role_id IS NULL THEN
        RAISE EXCEPTION 'Admin role not found. Please ensure the admin role exists.';
    END IF;
    -- 2. Insert the top-level menu: "Platform Management"
    -- Check if it already exists to prevent duplicates on re-run
    SELECT id INTO platform_management_menu_id FROM menus WHERE name = 'platform_management' AND parent_id IS NULL;
    IF platform_management_menu_id IS NULL THEN
        INSERT INTO menus (name, title, icon, path, component, sort_order, is_active, description, created_at, updated_at)
        VALUES (
            'platform_management',
            '平台管理',
            'settings', -- Using a generic settings icon for platform management
            NULL, -- It's a parent menu, no direct path
            NULL,
            3, -- Assuming sort_order 1 & 2 are for Dashboard & Data Management
            TRUE,
            '管理用户和系统参数',
            NOW(),
            NOW()
        ) RETURNING id INTO platform_management_menu_id;
        RAISE NOTICE 'Inserted Platform Management menu with ID: %', platform_management_menu_id;
        -- Assign to admin role
        -- NOTE(review): these inserts write a created_at column into role_menus,
        -- but the schema document in this same commit shows only
        -- role_menus(role_id, menu_id) -- confirm the column exists.
        INSERT INTO role_menus (role_id, menu_id, created_at)
        VALUES (admin_role_id, platform_management_menu_id, NOW());
        RAISE NOTICE 'Assigned Platform Management to admin role.';
    ELSE
        RAISE NOTICE 'Platform Management menu already exists with ID: %', platform_management_menu_id;
    END IF;
    -- 3. Insert sub-menu: "User Management"
    -- Check if it already exists
    SELECT id INTO user_management_menu_id FROM menus WHERE name = 'user_management' AND parent_id = platform_management_menu_id;
    IF user_management_menu_id IS NULL THEN
        INSERT INTO menus (parent_id, name, title, icon, path, component, sort_order, is_active, description, created_at, updated_at)
        VALUES (
            platform_management_menu_id,
            'user_management',
            '用户管理',
            'users', -- Icon for user management
            '/admin/users', -- Admin users page path
            'admin/Users', -- React component path
            1,
            TRUE,
            '管理系统用户账号',
            NOW(),
            NOW()
        ) RETURNING id INTO user_management_menu_id;
        RAISE NOTICE 'Inserted User Management menu with ID: %', user_management_menu_id;
        -- Assign to admin role
        INSERT INTO role_menus (role_id, menu_id, created_at)
        VALUES (admin_role_id, user_management_menu_id, NOW());
        RAISE NOTICE 'Assigned User Management to admin role.';
    ELSE
        RAISE NOTICE 'User Management menu already exists with ID: %', user_management_menu_id;
    END IF;
    -- 4. Insert sub-menu: "Platform Parameters Management"
    -- Check if it already exists
    SELECT id INTO platform_parameters_menu_id FROM menus WHERE name = 'platform_parameters_management' AND parent_id = platform_management_menu_id;
    IF platform_parameters_menu_id IS NULL THEN
        INSERT INTO menus (parent_id, name, title, icon, path, component, sort_order, is_active, description, created_at, updated_at)
        VALUES (
            platform_management_menu_id,
            'platform_parameters_management',
            '平台参数管理',
            'sliders', -- Icon for parameters/settings
            '/admin/settings', -- Admin settings page path
            'admin/Settings', -- React component path
            2,
            TRUE,
            '管理系统通用配置参数',
            NOW(),
            NOW()
        ) RETURNING id INTO platform_parameters_menu_id;
        RAISE NOTICE 'Inserted Platform Parameters Management menu with ID: %', platform_parameters_menu_id;
        -- Assign to admin role
        INSERT INTO role_menus (role_id, menu_id, created_at)
        VALUES (admin_role_id, platform_parameters_menu_id, NOW());
        RAISE NOTICE 'Assigned Platform Parameters Management to admin role.';
    ELSE
        RAISE NOTICE 'Platform Parameters Management menu already exists with ID: %', platform_parameters_menu_id;
    END IF;
END $$;
-- Commit the transaction
COMMIT;

View File

@ -1,77 +0,0 @@
"""
Add Pluto to celestial bodies database
"""
import asyncio
from sqlalchemy.dialects.postgresql import insert as pg_insert
from app.database import get_db
from app.models.db.celestial_body import CelestialBody
from app.models.db.resource import Resource
async def add_pluto():
    """Add Pluto (id "999") and its texture resource to the database.

    Both inserts are upserts (INSERT ... ON CONFLICT DO UPDATE), so the
    script can be re-run safely. The texture resource is only registered
    when upload/texture/2k_pluto.jpg exists on disk.
    """
    # get_db() is an async generator yielding sessions; exactly one session
    # is consumed -- the `break` in the finally block stops the loop.
    async for session in get_db():
        try:
            print("📍 Adding Pluto to celestial_bodies table...")
            # Upsert the celestial body row, keyed on the primary key `id`.
            stmt = pg_insert(CelestialBody).values(
                id="999",
                name="Pluto",
                name_zh="冥王星",
                type="planet",
                description="冥王星,曾经的第九大行星,现为矮行星"
            )
            stmt = stmt.on_conflict_do_update(
                index_elements=['id'],
                set_={
                    'name': "Pluto",
                    'name_zh': "冥王星",
                    'type': "planet",
                    'description': "冥王星,曾经的第九大行星,现为矮行星"
                }
            )
            await session.execute(stmt)
            await session.commit()
            print("✅ Pluto added successfully!")
            # Register the texture resource only if the file is present.
            import os
            texture_path = "upload/texture/2k_pluto.jpg"
            if os.path.exists(texture_path):
                print(f"\n📸 Found Pluto texture: {texture_path}")
                file_size = os.path.getsize(texture_path)
                print("📦 Adding Pluto texture to resources table...")
                stmt = pg_insert(Resource).values(
                    body_id="999",
                    resource_type="texture",
                    file_path="texture/2k_pluto.jpg",
                    file_size=file_size,
                    mime_type="image/jpeg",
                    extra_data=None
                )
                # NOTE(review): assumes a unique index on
                # (body_id, resource_type, file_path) -- confirm in the schema.
                stmt = stmt.on_conflict_do_update(
                    index_elements=['body_id', 'resource_type', 'file_path'],
                    set_={
                        'file_size': file_size,
                        'mime_type': "image/jpeg",
                    }
                )
                await session.execute(stmt)
                await session.commit()
                print(f"✅ Pluto texture resource added ({file_size} bytes)")
            else:
                print(f"\n⚠️ Pluto texture not found at {texture_path}")
                print(" Please add a 2k_pluto.jpg file to upload/texture/ directory")
        except Exception as e:
            print(f"❌ Error adding Pluto: {e}")
            await session.rollback()
            raise
        finally:
            # Consume only the first yielded session.
            break
if __name__ == "__main__":
    asyncio.run(add_pluto())

View File

@ -1,55 +0,0 @@
-- Migration: Add Predefined Task Support to scheduled_jobs
-- Date: 2025-12-11
-- Purpose: Transition from dynamic code execution to predefined task system
--
-- Re-runnable: the ENUM creation, the column additions and the check
-- constraint are all guarded so a second execution is a no-op.

-- 1. Create job_type ENUM type (skip if it already exists)
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'jobtype') THEN
        CREATE TYPE jobtype AS ENUM ('predefined', 'custom_code');
    END IF;
END $$;

-- 2. Add new columns (IF NOT EXISTS keeps re-runs safe)
ALTER TABLE scheduled_jobs
ADD COLUMN IF NOT EXISTS job_type jobtype DEFAULT 'custom_code',
ADD COLUMN IF NOT EXISTS predefined_function VARCHAR(100),
ADD COLUMN IF NOT EXISTS function_params JSONB DEFAULT '{}'::jsonb;

-- 3. Update existing rows to custom_code type (preserve backward compatibility)
UPDATE scheduled_jobs
SET job_type = 'custom_code'
WHERE job_type IS NULL;

-- 4. Make job_type NOT NULL after setting defaults
ALTER TABLE scheduled_jobs
ALTER COLUMN job_type SET NOT NULL;

-- 5. Set default for job_type to 'predefined' for new records
ALTER TABLE scheduled_jobs
ALTER COLUMN job_type SET DEFAULT 'predefined';

-- 6. Add check constraint: each job type must carry its payload.
-- FIX: drop it first -- ADD CONSTRAINT has no IF NOT EXISTS, so without the
-- DROP a re-run fails with "constraint already exists". This matches the
-- companion Python migration, which already drops before adding.
ALTER TABLE scheduled_jobs
DROP CONSTRAINT IF EXISTS chk_job_type_fields;
ALTER TABLE scheduled_jobs
ADD CONSTRAINT chk_job_type_fields
CHECK (
    (job_type = 'predefined' AND predefined_function IS NOT NULL)
    OR
    (job_type = 'custom_code' AND python_code IS NOT NULL)
);

-- 7. Document the new columns
COMMENT ON COLUMN scheduled_jobs.job_type IS 'Job type: predefined or custom_code';
COMMENT ON COLUMN scheduled_jobs.predefined_function IS 'Predefined function name (required if job_type=predefined)';
COMMENT ON COLUMN scheduled_jobs.function_params IS 'JSON parameters for predefined function';
COMMENT ON COLUMN scheduled_jobs.python_code IS 'Dynamic Python code (only for custom_code type)';

-- 8. Verify the changes
SELECT
    column_name,
    data_type,
    is_nullable,
    column_default
FROM information_schema.columns
WHERE table_name = 'scheduled_jobs'
ORDER BY ordinal_position;

View File

@ -1,93 +0,0 @@
"""
Simple migration to add predefined task columns
"""
import asyncio
import sys
from pathlib import Path
# Add backend to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from sqlalchemy import text
from app.database import engine
async def run_simple_migration():
    """Add predefined-task columns to the scheduled_jobs table.

    Mirrors the SQL migration: ensures the jobtype ENUM exists, adds the
    job_type / predefined_function / function_params columns, switches the
    default job_type to 'predefined', and installs a CHECK constraint tying
    each job type to its required payload. Finally prints the resulting
    table structure.

    Re-runnable: "already exists" errors are reported and skipped.
    """

    async def _step(sql: str, ok_msg: str, label: str) -> None:
        """Run one DDL statement in its OWN transaction.

        FIX: the old code ran every statement inside a single engine.begin()
        transaction; on PostgreSQL the first failed ALTER aborts the whole
        transaction, so every later step failed with "current transaction is
        aborted" and the per-step try/except was useless.
        """
        try:
            async with engine.begin() as conn:
                await conn.execute(text(sql))
            print(ok_msg)
        except Exception as e:
            print(f"⚠️ {label}: {e}")

    print("🔄 Adding new columns to scheduled_jobs table...")
    # 0. Ensure the jobtype ENUM exists -- the job_type column below depends
    #    on it (the SQL version of this migration creates it the same way).
    await _step(
        """
        DO $$
        BEGIN
            IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'jobtype') THEN
                CREATE TYPE jobtype AS ENUM ('predefined', 'custom_code');
            END IF;
        END $$
        """,
        "✅ Ensured jobtype enum exists",
        "jobtype enum",
    )
    # 1. job_type column (existing rows get 'custom_code' for compatibility).
    await _step(
        """
        ALTER TABLE scheduled_jobs
        ADD COLUMN job_type jobtype DEFAULT 'custom_code'::jobtype NOT NULL
        """,
        "✅ Added job_type column",
        "job_type column",
    )
    # 2. predefined_function column.
    await _step(
        """
        ALTER TABLE scheduled_jobs
        ADD COLUMN predefined_function VARCHAR(100)
        """,
        "✅ Added predefined_function column",
        "predefined_function column",
    )
    # 3. function_params column.
    await _step(
        """
        ALTER TABLE scheduled_jobs
        ADD COLUMN function_params JSONB DEFAULT '{}'::jsonb
        """,
        "✅ Added function_params column",
        "function_params column",
    )
    # 4. New records default to the predefined task type.
    await _step(
        """
        ALTER TABLE scheduled_jobs
        ALTER COLUMN job_type SET DEFAULT 'predefined'::jobtype
        """,
        "✅ Set default job_type to 'predefined'",
        "Setting default",
    )
    # 5. Check constraint: drop first so re-runs do not fail.
    await _step(
        """
        ALTER TABLE scheduled_jobs
        DROP CONSTRAINT IF EXISTS chk_job_type_fields
        """,
        "✅ Dropped old check constraint (if any)",
        "Dropping constraint",
    )
    await _step(
        """
        ALTER TABLE scheduled_jobs
        ADD CONSTRAINT chk_job_type_fields
        CHECK (
            (job_type = 'predefined' AND predefined_function IS NOT NULL)
            OR
            (job_type = 'custom_code' AND python_code IS NOT NULL)
        )
        """,
        "✅ Added check constraint",
        "Check constraint",
    )
    # Final report: list the resulting columns.
    print("\n📋 Final table structure:")
    async with engine.begin() as conn:
        result = await conn.execute(text("""
            SELECT column_name, data_type, is_nullable
            FROM information_schema.columns
            WHERE table_name = 'scheduled_jobs'
            ORDER BY ordinal_position
        """))
        rows = result.fetchall()
    for row in rows:
        print(f" - {row[0]}: {row[1]} (nullable: {row[2]})")
if __name__ == "__main__":
    asyncio.run(run_simple_migration())

View File

@ -1,80 +0,0 @@
-- 1. Rebuild the scheduled-jobs table (adds python_code for dynamic logic).
-- WARNING: DROP ... CASCADE destroys any existing jobs and dependents.
DROP TABLE IF EXISTS "public"."scheduled_jobs" CASCADE;
CREATE TABLE "public"."scheduled_jobs" (
"id" SERIAL PRIMARY KEY,
"name" VARCHAR(100) NOT NULL, -- job name
"cron_expression" VARCHAR(50) NOT NULL, -- CRON schedule expression
"python_code" TEXT, -- [core] executable Python job body
"is_active" BOOLEAN DEFAULT TRUE, -- enabled / disabled flag
"last_run_at" TIMESTAMP, -- last execution time
"last_run_status" VARCHAR(20), -- last execution result
"next_run_at" TIMESTAMP, -- next scheduled execution time
"description" TEXT, -- human-readable description
"created_at" TIMESTAMP DEFAULT NOW(),
"updated_at" TIMESTAMP DEFAULT NOW()
);
-- Index for the scheduler's "fetch active jobs" query.
CREATE INDEX "idx_scheduled_jobs_active" ON "public"."scheduled_jobs" ("is_active");
-- Table / column comments (stored strings left as-is).
COMMENT ON TABLE "public"."scheduled_jobs" IS '定时任务调度配置表支持动态Python代码';
COMMENT ON COLUMN "public"."scheduled_jobs"."python_code" IS '直接执行的Python代码体上下文中可使用 db, logger 等变量';
-- Seed job: daily full position sync at UTC midnight.
-- NOTE(review): the embedded Python's indentation appears to have been
-- stripped in this copy of the file; it must be restored before this
-- code can actually execute.
INSERT INTO "public"."scheduled_jobs"
("name", "cron_expression", "description", "is_active", "python_code")
VALUES
(
'每日全量位置同步',
'0 0 * * *',
'每天UTC 0点同步所有活跃天体的最新位置数据',
true,
'# 这是一个动态任务示例
# 可用变量: db (AsyncSession), logger (Logger)
from app.services.db_service import celestial_body_service, position_service
from app.services.horizons import horizons_service
from datetime import datetime
logger.info("开始执行每日位置同步...")
# 获取所有活跃天体
bodies = await celestial_body_service.get_all_bodies(db)
active_bodies = [b for b in bodies if b.is_active]
count = 0
now = datetime.utcnow()
for body in active_bodies:
try:
# 获取当天位置
positions = await horizons_service.get_body_positions(
body_id=body.id,
start_time=now,
end_time=now
)
if positions:
# 这里的 save_positions 需要自己实现或确保 db_service 中有对应方法支持 list
# 假设我们循环 save_position 或者 db_service 已有批量接口
# 为简单起见,这里演示循环调用
for p in positions:
await position_service.save_position(
body_id=body.id,
time=p.time,
x=p.x,
y=p.y,
z=p.z,
source="nasa_horizons_cron",
session=db
)
count += 1
except Exception as e:
logger.error(f"同步 {body.name} 失败: {e}")
logger.info(f"同步完成,共更新 {count} 个天体")
# 脚本最后一行表达式的值会被作为 result 存储
f"Synced {count} bodies"
'
);

View File

@ -1,24 +0,0 @@
-- Add short_name column to celestial_bodies.
-- It stores the abbreviated planet names used by the NASA SBDB API.

ALTER TABLE celestial_bodies
ADD COLUMN IF NOT EXISTS short_name VARCHAR(50);

COMMENT ON COLUMN celestial_bodies.short_name IS 'NASA SBDB API short name (e.g., Juptr for Jupiter)';

-- Populate short_name for the 8 major planets in one statement.
UPDATE celestial_bodies AS cb
SET short_name = v.short_name
FROM (VALUES
    ('199', 'Mercury', 'Merc'),
    ('299', 'Venus',   'Venus'),
    ('399', 'Earth',   'Earth'),
    ('499', 'Mars',    'Mars'),
    ('599', 'Jupiter', 'Juptr'),
    ('699', 'Saturn',  'Satrn'),
    ('799', 'Uranus',  'Urnus'),
    ('899', 'Neptune', 'Neptn')
) AS v(id, name, short_name)
WHERE cb.id = v.id
  AND cb.name = v.name;

-- Verify the updates
SELECT id, name, name_zh, short_name
FROM celestial_bodies
WHERE short_name IS NOT NULL
ORDER BY CAST(id AS INTEGER);

View File

@ -1,54 +0,0 @@
-- Add a "Star Systems" management menu entry,
-- placed before the celestial-bodies menu (sort_order 1 vs 2).

-- First bump the celestial-bodies menu from sort_order 1 to 2
UPDATE menus SET sort_order = 2 WHERE id = 3 AND name = 'celestial_bodies';

-- Insert the star-systems menu with sort_order = 1 (before celestial bodies).
-- NOTE(review): ON CONFLICT DO NOTHING has no conflict target, so it only
-- protects against reruns if menus carries a unique constraint matching
-- these values (e.g. on name) — confirm, otherwise duplicates accumulate.
INSERT INTO menus (
parent_id,
name,
title,
icon,
path,
component,
sort_order,
is_active,
description
) VALUES (
2, -- parent_id: the "data management" parent menu
'star_systems',
'恒星系统管理',
'StarOutlined',
'/admin/star-systems',
'StarSystems',
1, -- sort_order: before celestial-bodies management (2)
true,
'管理太阳系和系外恒星系统'
) ON CONFLICT DO NOTHING;

-- Look up the new menu's id and grant it to the admin role.
DO $$
DECLARE
menu_id INT;
admin_role_id INT;
BEGIN
-- id of the menu inserted above
SELECT id INTO menu_id FROM menus WHERE name = 'star_systems';
-- admin role id (usually 1)
SELECT id INTO admin_role_id FROM roles WHERE name = 'admin' LIMIT 1;
-- grant the menu to the admin role
IF menu_id IS NOT NULL AND admin_role_id IS NOT NULL THEN
INSERT INTO role_menus (role_id, menu_id)
VALUES (admin_role_id, menu_id)
ON CONFLICT DO NOTHING;
END IF;
END $$;

-- Verify: list sibling menus under the same parent.
SELECT id, name, title, path, parent_id, sort_order
FROM menus
WHERE parent_id = 2
ORDER BY sort_order, id;

View File

@ -1,50 +0,0 @@
-- Add a "System Settings" menu under Platform Management.
-- Run after the base menu data exists: the parent is resolved by name
-- ('platform_management'), not by a hard-coded id.
INSERT INTO menus (name, title, path, icon, parent_id, sort_order, is_active, created_at, updated_at)
VALUES (
'system_settings',
'系统参数',
'/admin/system-settings',
'settings',
(SELECT id FROM menus WHERE name = 'platform_management'),
1,
true,
NOW(),
NOW()
)
ON CONFLICT (name) DO UPDATE
SET
title = EXCLUDED.title,
path = EXCLUDED.path,
icon = EXCLUDED.icon,
parent_id = EXCLUDED.parent_id,
sort_order = EXCLUDED.sort_order,
updated_at = NOW();

-- Grant access to the admin role (guarded against duplicates).
INSERT INTO role_menus (role_id, menu_id)
SELECT
r.id,
m.id
FROM
roles r,
menus m
WHERE
r.name = 'admin'
AND m.name = 'system_settings'
ON CONFLICT (role_id, menu_id) DO NOTHING;

-- Verify the menu was added (shows the resolved parent title).
SELECT
m.id,
m.name,
m.title,
m.path,
m.icon,
parent.title as parent_menu,
m.sort_order
FROM menus m
LEFT JOIN menus parent ON m.parent_id = parent.id
WHERE m.name = 'system_settings';

View File

@ -1,13 +0,0 @@
-- Insert the "System Tasks" menu under platform_management if missing.
-- The NOT EXISTS guard makes the script safe to re-run.
INSERT INTO menus (name, title, icon, path, component, parent_id, sort_order, is_active)
SELECT 'system_tasks', 'System Tasks', 'schedule', '/admin/tasks', 'admin/Tasks', m.id, 30, true
FROM menus m
WHERE m.name = 'platform_management'
AND NOT EXISTS (SELECT 1 FROM menus WHERE name = 'system_tasks' AND parent_id = m.id);

-- Grant the new menu to the admin role (also guarded for idempotency).
INSERT INTO role_menus (role_id, menu_id, created_at)
SELECT r.id, m.id, NOW()
FROM roles r, menus m
WHERE r.name = 'admin' AND m.name = 'system_tasks'
AND NOT EXISTS (SELECT 1 FROM role_menus rm WHERE rm.role_id = r.id AND rm.menu_id = m.id);

View File

@ -1,19 +0,0 @@
-- Create tasks table for background job management.
-- Idempotent: table AND indexes use IF NOT EXISTS so the script can be
-- re-run (the original CREATE INDEX statements failed on a second run).
CREATE TABLE IF NOT EXISTS tasks (
    id SERIAL PRIMARY KEY,
    task_type VARCHAR(50) NOT NULL, -- e.g., 'nasa_download'
    status VARCHAR(20) NOT NULL DEFAULT 'pending', -- pending, running, completed, failed, cancelled
    description VARCHAR(255),
    params JSONB, -- input parameters (body_ids, dates)
    result JSONB, -- output results
    progress INTEGER DEFAULT 0, -- 0 to 100
    error_message TEXT,
    created_by INTEGER, -- user id who initiated the task
    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    started_at TIMESTAMP WITH TIME ZONE,
    completed_at TIMESTAMP WITH TIME ZONE
);
CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status);
CREATE INDEX IF NOT EXISTS idx_tasks_created_at ON tasks(created_at DESC);

View File

@ -1,27 +0,0 @@
-- Add a unique constraint on positions(body_id, time)
-- so that INSERT ... ON CONFLICT can target this table.

-- 1. Remove existing duplicates first, keeping the newest row
--    (latest created_at) for each (body_id, time) pair.
WITH duplicates AS (
    SELECT id,
           ROW_NUMBER() OVER (
               PARTITION BY body_id, time
               ORDER BY created_at DESC
           ) as rn
    FROM positions
)
DELETE FROM positions
WHERE id IN (
    SELECT id FROM duplicates WHERE rn > 1
);

-- 2. Add the unique constraint.
--    DROP IF EXISTS first keeps the script idempotent
--    (ALTER TABLE ... ADD CONSTRAINT has no IF NOT EXISTS form).
ALTER TABLE positions
DROP CONSTRAINT IF EXISTS positions_body_time_unique;
ALTER TABLE positions
ADD CONSTRAINT positions_body_time_unique
UNIQUE (body_id, time);

-- 3. Verify the constraint was created
SELECT constraint_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = 'positions'
  AND constraint_type = 'UNIQUE';

View File

@ -1,48 +0,0 @@
-- Add unique constraint to celestial_events table to prevent duplicate events
-- This ensures that the same event (same body, type, and time) cannot be inserted twice

-- Step 1: Remove duplicate events (keep the earliest created_at).
-- Grouping truncates event_time to minute precision so near-identical
-- timestamps from repeated imports collapse into one event.
WITH duplicates AS (
    SELECT
        id,
        ROW_NUMBER() OVER (
            PARTITION BY body_id, event_type, DATE_TRUNC('minute', event_time)
            ORDER BY created_at ASC
        ) AS rn
    FROM celestial_events
)
DELETE FROM celestial_events
WHERE id IN (
    SELECT id FROM duplicates WHERE rn > 1
);

-- Step 2: Add unique constraint
-- Note: We truncate to minute precision for event_time to handle slight variations
-- Create a unique index instead of constraint to allow custom handling
-- (an expression like DATE_TRUNC cannot appear in a table constraint anyway).
CREATE UNIQUE INDEX IF NOT EXISTS idx_celestial_events_unique
ON celestial_events (
    body_id,
    event_type,
    DATE_TRUNC('minute', event_time)
);

-- Note: For the exact timestamp constraint, use this instead:
-- CREATE UNIQUE INDEX IF NOT EXISTS idx_celestial_events_unique_exact
-- ON celestial_events (body_id, event_type, event_time);

-- Verify the constraint was added
SELECT
    indexname,
    indexdef
FROM pg_indexes
WHERE tablename = 'celestial_events' AND indexname = 'idx_celestial_events_unique';

-- Check for remaining duplicates (should return zero rows)
SELECT
    body_id,
    event_type,
    DATE_TRUNC('minute', event_time) as event_time_minute,
    COUNT(*) as count
FROM celestial_events
GROUP BY body_id, event_type, DATE_TRUNC('minute', event_time)
HAVING COUNT(*) > 1;

View File

@ -1,74 +0,0 @@
-- Add new menus: profile, my celestial bodies, and change password.
-- All three are open to regular users as well as admins, so they share
-- identical conflict handling and are inserted in one multi-row upsert.
-- Note: change-password is reached via the user dropdown rather than the
-- sidebar, but it is still recorded here so permissions can manage it.
INSERT INTO menus (name, title, path, icon, parent_id, sort_order, is_active, roles)
VALUES
    ('user-profile',        '个人信息', '/admin/user-profile',        'users',    NULL, 15, true, ARRAY['user', 'admin']::varchar[]),
    ('my-celestial-bodies', '我的天体', '/admin/my-celestial-bodies', 'planet',   NULL, 16, true, ARRAY['user', 'admin']::varchar[]),
    ('change-password',     '修改密码', '/admin/change-password',     'settings', NULL, 17, true, ARRAY['user', 'admin']::varchar[])
ON CONFLICT (name) DO UPDATE SET
    title = EXCLUDED.title,
    path = EXCLUDED.path,
    icon = EXCLUDED.icon,
    parent_id = EXCLUDED.parent_id,
    sort_order = EXCLUDED.sort_order,
    roles = EXCLUDED.roles;

-- Push the existing settings menu below the new entries (optional reorder).
UPDATE menus SET sort_order = 18 WHERE name = 'settings' AND sort_order < 18;

-- Show the resulting active menu list.
SELECT id, name, title, path, icon, parent_id, sort_order, is_active, roles
FROM menus
WHERE is_active = true
ORDER BY sort_order;

View File

@ -1,214 +0,0 @@
#!/usr/bin/env python3
"""
配置验证脚本 - 检查 PostgreSQL Redis 配置是否正确
Usage:
python scripts/check_config.py
"""
import asyncio
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from app.config import settings
import asyncpg
import redis.asyncio as redis
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
async def check_postgresql():
    """Check PostgreSQL connectivity and print basic server statistics.

    Returns:
        bool: True when a connection using app.config settings succeeds,
        False otherwise.
    """
    print("\n" + "=" * 60)
    print("检查 PostgreSQL 配置")
    print("=" * 60)
    try:
        # Echo the connection parameters in use (password omitted).
        print(f"主机: {settings.database_host}")
        print(f"端口: {settings.database_port}")
        print(f"数据库: {settings.database_name}")
        print(f"用户: {settings.database_user}")
        print(f"连接池大小: {settings.database_pool_size}")
        # Open a one-off connection (not taken from the app's pool).
        conn = await asyncpg.connect(
            host=settings.database_host,
            port=settings.database_port,
            user=settings.database_user,
            password=settings.database_password,
            database=settings.database_name,
        )
        # Server version string, e.g. "PostgreSQL 15.2 ..., compiled ...";
        # keep only the part before the first comma.
        version = await conn.fetchval("SELECT version()")
        print(f"\n✓ PostgreSQL 连接成功")
        print(f"版本: {version.split(',')[0]}")
        # Human-readable on-disk size of the configured database.
        db_size = await conn.fetchval(
            "SELECT pg_size_pretty(pg_database_size($1))",
            settings.database_name
        )
        print(f"数据库大小: {db_size}")
        # Number of tables in the public schema.
        table_count = await conn.fetchval("""
            SELECT COUNT(*)
            FROM information_schema.tables
            WHERE table_schema = 'public'
        """)
        print(f"数据表数量: {table_count}")
        await conn.close()
        return True
    except Exception as e:
        # Any failure (auth, network, missing database) ends up here.
        print(f"\n✗ PostgreSQL 连接失败: {e}")
        print("\n请检查:")
        print(" 1. PostgreSQL 是否正在运行")
        print(" 2. 数据库是否已创建 (运行: python scripts/create_db.py)")
        print(" 3. .env 文件中的账号密码是否正确")
        return False
async def check_redis():
    """Check Redis connectivity and print server info.

    Redis is optional: on failure the app falls back to an in-memory
    cache, so this returns False without being treated as fatal.
    """
    print("\n" + "=" * 60)
    print("检查 Redis 配置")
    print("=" * 60)
    try:
        # Echo the connection parameters (mask the password).
        print(f"主机: {settings.redis_host}")
        print(f"端口: {settings.redis_port}")
        print(f"数据库: {settings.redis_db}")
        print(f"密码: {'(无)' if not settings.redis_password else '******'}")
        print(f"最大连接数: {settings.redis_max_connections}")
        client = redis.from_url(
            settings.redis_url,
            encoding="utf-8",
            decode_responses=True,
        )
        # PING verifies both the TCP connection and authentication.
        await client.ping()
        print(f"\n✓ Redis 连接成功")
        # INFO dump: version, memory, clients, uptime.
        info = await client.info()
        print(f"版本: {info.get('redis_version')}")
        print(f"使用内存: {info.get('used_memory_human')}")
        print(f"已连接客户端: {info.get('connected_clients')}")
        print(f"运行天数: {info.get('uptime_in_days')}")
        await client.close()
        return True
    except Exception as e:
        print(f"\n⚠ Redis 连接失败: {e}")
        print("\n说明:")
        print(" Redis 是可选的缓存服务")
        print(" 如果 Redis 不可用,应用会自动降级为内存缓存")
        print(" 不影响核心功能,但会失去跨进程缓存能力")
        print("\n如需启用 Redis:")
        print(" - macOS: brew install redis && brew services start redis")
        print(" - Ubuntu: sudo apt install redis && sudo systemctl start redis")
        return False
def check_env_file():
"""检查 .env 文件"""
print("\n" + "=" * 60)
print("检查配置文件")
print("=" * 60)
env_path = Path(__file__).parent.parent / ".env"
if env_path.exists():
print(f"✓ .env 文件存在: {env_path}")
print(f"文件大小: {env_path.stat().st_size} bytes")
return True
else:
print(f"✗ .env 文件不存在")
print(f"请从 .env.example 创建: cp .env.example .env")
return False
def check_upload_dir():
    """Ensure the configured upload directory exists, creating it if missing.

    Returns:
        bool: always True — a missing directory is created on the spot.
    """
    print("\n" + "=" * 60)
    print("检查上传目录")
    print("=" * 60)
    # Resolve relative to the project root (parent of the scripts dir).
    upload_dir = Path(__file__).parent.parent / settings.upload_dir
    if upload_dir.exists():
        print(f"✓ 上传目录存在: {upload_dir}")
        return True
    else:
        print(f"⚠ 上传目录不存在: {upload_dir}")
        print(f"自动创建...")
        upload_dir.mkdir(parents=True, exist_ok=True)
        print(f"✓ 上传目录创建成功")
        return True
async def main():
    """Run all configuration checks and print a summary.

    Returns:
        int: exit code — 0 when all required checks (.env file, upload
        dir, PostgreSQL) pass, 1 otherwise. Redis is optional and does
        not affect the exit code.
    """
    print("\n" + "=" * 60)
    print(" Cosmo 配置验证工具")
    print("=" * 60)
    results = []
    # 1. Configuration file
    results.append(("配置文件", check_env_file()))
    # 2. Upload directory
    results.append(("上传目录", check_upload_dir()))
    # 3. PostgreSQL (required)
    results.append(("PostgreSQL", await check_postgresql()))
    # 4. Redis (optional)
    results.append(("Redis", await check_redis()))
    # Summary table.
    print("\n" + "=" * 60)
    print(" 配置检查总结")
    print("=" * 60)
    for name, status in results:
        # NOTE(review): the pass/fail glyphs appear to have been stripped
        # from this copy of the file (both literals are empty) — restore
        # the original status markers before relying on this output.
        status_str = "" if status else ""
        print(f"{status_str} {name}")
    # Only the first three entries are mandatory; Redis may fail freely.
    required_services = [results[0], results[1], results[2]]  # config file, upload dir, PostgreSQL
    all_required_ok = all(status for _, status in required_services)
    if all_required_ok:
        print("\n" + "=" * 60)
        print(" ✓ 所有必需服务配置正确!")
        print("=" * 60)
        print("\n可以启动服务:")
        print(" python -m uvicorn app.main:app --reload")
        print("\n或者:")
        print(" python app/main.py")
        return 0
    else:
        print("\n" + "=" * 60)
        print(" ✗ 部分必需服务配置有问题")
        print("=" * 60)
        print("\n请先解决上述问题,然后重新运行此脚本")
        return 1

if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)

View File

@ -1,68 +0,0 @@
"""
Check database status: bodies, positions, resources
"""
import asyncio
import os
import sys
from datetime import datetime
# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app.database import get_db
from app.models.db.celestial_body import CelestialBody
from app.models.db.position import Position
from app.models.db.resource import Resource
from sqlalchemy import select, func
async def check_status():
    """Print a quick inventory of the database: bodies, positions, resources."""
    print("🔍 Checking database status...")
    # get_db() is an async-generator dependency; take one session and
    # break out of the loop when done.
    async for session in get_db():
        try:
            # 1. Celestial bodies: total row count.
            stmt = select(func.count(CelestialBody.id))
            result = await session.execute(stmt)
            body_count = result.scalar()
            print(f"✅ Celestial Bodies: {body_count}")
            # 2. Positions: total, then per key body.
            stmt = select(func.count(Position.id))
            result = await session.execute(stmt)
            position_count = result.scalar()
            print(f"✅ Total Positions: {position_count}")
            # Sun ('10') and Earth ('399') — NASA Horizons body ids.
            for body_id in ['10', '399']:
                stmt = select(func.count(Position.id)).where(Position.body_id == body_id)
                result = await session.execute(stmt)
                count = result.scalar()
                print(f" - Positions for {body_id}: {count}")
                if count > 0:
                    # Most recent stored position timestamp for this body.
                    stmt = select(func.max(Position.time)).where(Position.body_id == body_id)
                    result = await session.execute(stmt)
                    latest_date = result.scalar()
                    print(f" Latest date: {latest_date}")
            # 3. Resources: total, then the Sun's individual files.
            stmt = select(func.count(Resource.id))
            result = await session.execute(stmt)
            resource_count = result.scalar()
            print(f"✅ Total Resources: {resource_count}")
            stmt = select(Resource).where(Resource.body_id == '10')
            result = await session.execute(stmt)
            resources = result.scalars().all()
            print(f" - Resources for Sun (10): {len(resources)}")
            for r in resources:
                print(f" - {r.resource_type}: {r.file_path}")
        finally:
            # Exit after the first yielded session.
            # NOTE(review): `break` inside `finally` is legal but subtle
            # (and deprecated in newer Python) — confirm intent.
            break

if __name__ == "__main__":
    asyncio.run(check_status())

View File

@ -1,63 +0,0 @@
"""
Check probe data in database
"""
import asyncio
import sys
import os
# Add backend to path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from sqlalchemy import create_engine, text
from app.config import settings
def check_probes():
    """Dump all celestial bodies, then probes only, with position counts.

    Connects synchronously: the async database URL is converted by
    stripping the '+asyncpg' driver suffix.
    """
    engine = create_engine(settings.database_url.replace('+asyncpg', ''))
    # NOTE(review): the engine is never disposed — acceptable for a
    # one-shot script, but engine.dispose() at the end would be tidier.
    with engine.connect() as conn:
        # Every body with how many position rows it has.
        result = conn.execute(text("""
            SELECT
                cb.id,
                cb.name,
                cb.name_zh,
                cb.type,
                cb.is_active,
                COUNT(p.id) as position_count
            FROM celestial_bodies cb
            LEFT JOIN positions p ON cb.id = p.body_id
            GROUP BY cb.id, cb.name, cb.name_zh, cb.type, cb.is_active
            ORDER BY cb.type, cb.name
        """))
        print("All Celestial Bodies:")
        print("=" * 100)
        for row in result:
            print(f"ID: {row.id:15s} | Name: {row.name:20s} | Type: {row.type:15s} | Active: {str(row.is_active):5s} | Positions: {row.position_count}")
        print("\n" + "=" * 100)
        print("\nProbes only:")
        print("=" * 100)
        # Same aggregation restricted to probe-type bodies.
        result = conn.execute(text("""
            SELECT
                cb.id,
                cb.name,
                cb.name_zh,
                cb.is_active,
                COUNT(p.id) as position_count
            FROM celestial_bodies cb
            LEFT JOIN positions p ON cb.id = p.body_id
            WHERE cb.type = 'probe'
            GROUP BY cb.id, cb.name, cb.name_zh, cb.is_active
            ORDER BY cb.name
        """))
        for row in result:
            print(f"ID: {row.id:15s} | Name: {row.name:20s} ({row.name_zh}) | Active: {str(row.is_active):5s} | Positions: {row.position_count}")

if __name__ == "__main__":
    check_probes()

View File

@ -1,2 +0,0 @@
-- Ad-hoc inspection: dump all rows of the roles table ordered by id.
SELECT * FROM roles ORDER BY id;

View File

@ -1,64 +0,0 @@
"""
Check the current state of scheduled_jobs table
"""
import asyncio
import sys
from pathlib import Path
# Add backend to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from sqlalchemy import text
from app.database import engine
async def check_table():
    """Inspect the scheduled_jobs table: existence, columns, and enum type."""
    async with engine.begin() as conn:
        # Does the table exist at all?
        result = await conn.execute(text("""
            SELECT EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_name = 'scheduled_jobs'
            )
        """))
        exists = result.scalar()
        if not exists:
            print("❌ Table 'scheduled_jobs' does not exist yet")
            print("💡 You need to run: alembic upgrade head")
            return
        # Dump the column layout.
        result = await conn.execute(text("""
            SELECT column_name, data_type, is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = 'scheduled_jobs'
            ORDER BY ordinal_position
        """))
        rows = result.fetchall()
        print("✅ Table 'scheduled_jobs' exists")
        print("\n📋 Current table structure:")
        for row in rows:
            default = row[3] if row[3] else 'NULL'
            print(f" - {row[0]}: {row[1]} (nullable: {row[2]}, default: {default})")
        # The jobtype ENUM is required by the predefined-task migration.
        result = await conn.execute(text("""
            SELECT EXISTS (
                SELECT FROM pg_type
                WHERE typname = 'jobtype'
            )
        """))
        enum_exists = result.scalar()
        if enum_exists:
            print("\n✅ ENUM type 'jobtype' exists")
        else:
            print("\n❌ ENUM type 'jobtype' does NOT exist")

if __name__ == "__main__":
    asyncio.run(check_table())

View File

@ -1,50 +0,0 @@
import asyncio
import os
import sys
from datetime import datetime
# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app.database import get_db
from app.models.db import Position
from sqlalchemy import select, func
async def check_sun_data(target_time=None):
    """Report which active celestial bodies lack a position row at a time.

    Args:
        target_time: datetime to check. Defaults to 2025-12-04 00:00:00 —
            the previously hard-coded probe time — so existing callers
            keep the old behavior.
    """
    if target_time is None:
        target_time = datetime(2025, 12, 4, 0, 0, 0)
    async for session in get_db():
        try:
            print(f"Checking data for all bodies at {target_time}...")
            # Local import kept from the original script layout.
            from app.models.db.celestial_body import CelestialBody
            # NOTE(review): `is_active != False` also excludes NULL rows
            # (SQL three-valued logic) — confirm that is the intended filter.
            stmt = select(CelestialBody.id, CelestialBody.name, CelestialBody.type).where(CelestialBody.is_active != False)
            result = await session.execute(stmt)
            all_bodies = result.all()
            print(f"Total active bodies: {len(all_bodies)}")
            # Count stored positions at the exact target timestamp per body.
            missing_bodies = []
            for body_id, body_name, body_type in all_bodies:
                stmt = select(func.count(Position.id)).where(
                    Position.body_id == body_id,
                    Position.time == target_time
                )
                result = await session.execute(stmt)
                count = result.scalar()
                if count == 0:
                    missing_bodies.append(f"{body_name} ({body_id}) [{body_type}]")
            if missing_bodies:
                print(f"❌ Missing data for {len(missing_bodies)} bodies:")
                for b in missing_bodies:
                    print(f" - {b}")
            else:
                print("✅ All active bodies have data for this time!")
        finally:
            # Take only the first session yielded by the dependency.
            break

if __name__ == "__main__":
    asyncio.run(check_sun_data())

View File

@ -1,78 +0,0 @@
-- Clean up duplicate celestial events
-- This script removes duplicate events and adds a unique index to prevent future duplicates
-- The whole operation runs in one transaction so a failure leaves the data untouched.
BEGIN;

-- Step 1: Show current duplicate count (minute-level grouping).
SELECT
    'Duplicate events before cleanup' as status,
    COUNT(*) as total_duplicates
FROM (
    SELECT
        body_id,
        event_type,
        DATE_TRUNC('minute', event_time) as event_time_minute,
        COUNT(*) as cnt
    FROM celestial_events
    GROUP BY body_id, event_type, DATE_TRUNC('minute', event_time)
    HAVING COUNT(*) > 1
) duplicates;

-- Step 2: Remove duplicate events (keep the earliest created_at).
-- RETURNING id echoes every deleted row id for the operator's log.
WITH duplicates AS (
    SELECT
        id,
        ROW_NUMBER() OVER (
            PARTITION BY body_id, event_type, DATE_TRUNC('minute', event_time)
            ORDER BY created_at ASC
        ) AS rn
    FROM celestial_events
)
DELETE FROM celestial_events
WHERE id IN (
    SELECT id FROM duplicates WHERE rn > 1
)
RETURNING id;

-- Step 3: Add unique index to prevent future duplicates.
-- The expression index enforces minute-granularity uniqueness going forward.
CREATE UNIQUE INDEX IF NOT EXISTS idx_celestial_events_unique
ON celestial_events (
    body_id,
    event_type,
    DATE_TRUNC('minute', event_time)
);

-- Step 4: Verify no duplicates remain (expected: 0).
SELECT
    'Duplicate events after cleanup' as status,
    COUNT(*) as total_duplicates
FROM (
    SELECT
        body_id,
        event_type,
        DATE_TRUNC('minute', event_time) as event_time_minute,
        COUNT(*) as cnt
    FROM celestial_events
    GROUP BY body_id, event_type, DATE_TRUNC('minute', event_time)
    HAVING COUNT(*) > 1
) duplicates;

-- Step 5: Show summary statistics per data source.
SELECT
    source,
    COUNT(*) as total_events,
    COUNT(DISTINCT body_id) as unique_bodies,
    MIN(event_time) as earliest_event,
    MAX(event_time) as latest_event
FROM celestial_events
GROUP BY source
ORDER BY source;

COMMIT;

-- Verify the index was created
SELECT
    indexname,
    indexdef
FROM pg_indexes
WHERE tablename = 'celestial_events' AND indexname = 'idx_celestial_events_unique';

View File

@ -1,42 +0,0 @@
-- Clean up duplicate rows in the database.

-- 1. Deduplicate the positions table:
--    keep the newest row (latest created_at) per (body_id, time) pair.
WITH duplicates AS (
    SELECT id,
           ROW_NUMBER() OVER (
               PARTITION BY body_id, time
               ORDER BY created_at DESC
           ) as rn
    FROM positions
)
DELETE FROM positions
WHERE id IN (
    SELECT id FROM duplicates WHERE rn > 1
);

-- 2. Deduplicate the nasa_cache table:
--    keep the newest row per cache_key.
WITH duplicates AS (
    SELECT id,
           ROW_NUMBER() OVER (
               PARTITION BY cache_key
               ORDER BY created_at DESC
           ) as rn
    FROM nasa_cache
)
DELETE FROM nasa_cache
WHERE id IN (
    SELECT id FROM duplicates WHERE rn > 1
);

-- 3. Verify the cleanup: both duplicate counts should now be 0.
SELECT 'Positions duplicates check' as check_name,
       COUNT(*) - COUNT(DISTINCT (body_id, time)) as duplicate_count
FROM positions
UNION ALL
SELECT 'NASA cache duplicates check' as check_name,
       COUNT(*) - COUNT(DISTINCT cache_key) as duplicate_count
FROM nasa_cache;

View File

@ -1,88 +0,0 @@
-- ============================================================
-- Create orbits table for storing precomputed orbital paths
-- ============================================================
-- Purpose: Store complete orbital trajectories for planets and dwarf planets
-- This eliminates the need to query NASA Horizons API for orbit visualization
--
-- Usage:
-- psql -U your_user -d cosmo < create_orbits_table.sql
-- OR execute in your SQL client/tool
--
-- Version: 1.0
-- Created: 2025-11-29
-- ============================================================

-- Create orbits table (one row per body; reruns are safe).
CREATE TABLE IF NOT EXISTS orbits (
    id SERIAL PRIMARY KEY,
    body_id TEXT NOT NULL,
    points JSONB NOT NULL, -- Array of orbital points: [{"x": 1.0, "y": 0.0, "z": 0.0}, ...]
    -- NOTE(review): array order defines the path direction; coordinates
    -- are presumably heliocentric (x,y,z in AU) — confirm with the renderer.
    num_points INTEGER NOT NULL, -- Number of points in the orbit
    period_days FLOAT, -- Orbital period in days
    color VARCHAR(20), -- Orbit line color (hex format: #RRGGBB)
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    CONSTRAINT orbits_body_id_unique UNIQUE(body_id),
    -- Deleting a body also deletes its precomputed orbit.
    CONSTRAINT orbits_body_id_fkey FOREIGN KEY (body_id) REFERENCES celestial_bodies(id) ON DELETE CASCADE
);

-- Create index on body_id for fast lookups
CREATE INDEX IF NOT EXISTS idx_orbits_body_id ON orbits(body_id);
-- Create index on updated_at for tracking data freshness
CREATE INDEX IF NOT EXISTS idx_orbits_updated_at ON orbits(updated_at);

-- Add comments to table
COMMENT ON TABLE orbits IS 'Precomputed orbital paths for celestial bodies';
COMMENT ON COLUMN orbits.body_id IS 'Foreign key to celestial_bodies.id';
COMMENT ON COLUMN orbits.points IS 'Array of 3D points (x,y,z in AU) defining the orbital path';
COMMENT ON COLUMN orbits.num_points IS 'Total number of points in the orbit';
COMMENT ON COLUMN orbits.period_days IS 'Orbital period in Earth days';
COMMENT ON COLUMN orbits.color IS 'Hex color code for rendering the orbit line';

-- ============================================================
-- Sample data for testing (optional - can be removed)
-- ============================================================
-- Uncomment below to insert sample orbit for Earth
/*
INSERT INTO orbits (body_id, points, num_points, period_days, color)
VALUES (
    '399', -- Earth
    '[
        {"x": 1.0, "y": 0.0, "z": 0.0},
        {"x": 0.707, "y": 0.707, "z": 0.0},
        {"x": 0.0, "y": 1.0, "z": 0.0},
        {"x": -0.707, "y": 0.707, "z": 0.0},
        {"x": -1.0, "y": 0.0, "z": 0.0},
        {"x": -0.707, "y": -0.707, "z": 0.0},
        {"x": 0.0, "y": -1.0, "z": 0.0},
        {"x": 0.707, "y": -0.707, "z": 0.0}
    ]'::jsonb,
    8,
    365.25,
    '#4A90E2'
)
ON CONFLICT (body_id) DO UPDATE
SET
    points = EXCLUDED.points,
    num_points = EXCLUDED.num_points,
    period_days = EXCLUDED.period_days,
    color = EXCLUDED.color,
    updated_at = NOW();
*/

-- ============================================================
-- Verification queries (execute separately if needed)
-- ============================================================
-- Check if table was created successfully
-- SELECT schemaname, tablename, tableowner FROM pg_tables WHERE tablename = 'orbits';
-- Check indexes
-- SELECT indexname, indexdef FROM pg_indexes WHERE tablename = 'orbits';
-- Show table structure
-- SELECT column_name, data_type, is_nullable, column_default
-- FROM information_schema.columns
-- WHERE table_name = 'orbits'
-- ORDER BY ordinal_position;

View File

@ -1,177 +0,0 @@
"""
Fetch Interstellar Data (Nearby Stars & Exoplanets)
Phase 3: Interstellar Expansion
This script fetches data from the NASA Exoplanet Archive using astroquery.
It retrieves the nearest stars (within 100pc) and their planetary system details.
The data is stored in the `static_data` table with category 'interstellar'.
"""
import asyncio
import os
import sys
import math
from sqlalchemy import select, text, func
from sqlalchemy.dialects.postgresql import insert
# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app.database import get_db
from app.models.db.static_data import StaticData
# Try to import astroquery/astropy, handle if missing
try:
from astroquery.ipac.nexsci.nasa_exoplanet_archive import NasaExoplanetArchive
from astropy.coordinates import SkyCoord
from astropy import units as u
except ImportError:
print("❌ Error: astroquery or astropy not installed.")
print(" Please run: pip install astroquery astropy")
sys.exit(1)
async def fetch_and_store_interstellar_data():
print("🌌 Fetching Interstellar Data (Phase 3)...")
# 1. Query NASA Exoplanet Archive
# We query the Planetary Systems (PS) table
# sy_dist: System Distance [pc]
# ra, dec: Coordinates [deg]
# sy_pnum: Number of Planets
# st_spectype: Spectral Type
# st_rad: Stellar Radius [Solar Radii]
# st_mass: Stellar Mass [Solar Mass]
# st_teff: Effective Temperature [K]
# pl_name: Planet Name
# pl_orbsmax: Semi-Major Axis [AU]
# pl_orbper: Orbital Period [days]
# pl_orbeccen: Eccentricity
# pl_rade: Planet Radius [Earth Radii]
print(" Querying NASA Exoplanet Archive (this may take a while)...")
try:
# We fetch systems within 100 parsecs
table = NasaExoplanetArchive.query_criteria(
table="ps",
select="hostname, sy_dist, ra, dec, sy_pnum, st_spectype, st_rad, st_mass, st_teff, pl_name, pl_orbsmax, pl_orbper, pl_orbeccen, pl_rade, pl_eqt",
where="sy_dist < 50", # Limit to 50pc for initial Phase 3 to keep it fast and relevant
order="sy_dist"
)
print(f" ✅ Fetched {len(table)} records.")
except Exception as e:
print(f" ❌ Query failed: {e}")
return
# 2. Process Data
# We need to group planets by host star
systems = {}
print(" Processing data...")
for row in table:
hostname = str(row['hostname'])
# Helper function to safely get value from potential Quantity object
def get_val(obj):
if hasattr(obj, 'value'):
return obj.value
return obj
if hostname not in systems:
# Coordinate conversion: Spherical (RA/Dec/Dist) -> Cartesian (X/Y/Z)
dist_pc = float(get_val(row['sy_dist']))
ra_deg = float(get_val(row['ra']))
dec_deg = float(get_val(row['dec']))
# Convert to Cartesian (X, Y, Z) in Parsecs
# Z is up (towards North Celestial Pole?) - Standard Astropy conversion
c = SkyCoord(ra=ra_deg*u.deg, dec=dec_deg*u.deg, distance=dist_pc*u.pc)
x = c.cartesian.x.value
y = c.cartesian.y.value
z = c.cartesian.z.value
# Determine color based on Spectral Type (simplified)
spectype = str(row['st_spectype']) if row['st_spectype'] else 'G'
color = '#FFFFFF' # Default
if 'O' in spectype: color = '#9db4ff'
elif 'B' in spectype: color = '#aabfff'
elif 'A' in spectype: color = '#cad8ff'
elif 'F' in spectype: color = '#fbf8ff'
elif 'G' in spectype: color = '#fff4e8'
elif 'K' in spectype: color = '#ffddb4'
elif 'M' in spectype: color = '#ffbd6f'
systems[hostname] = {
"category": "interstellar",
"name": hostname,
"name_zh": hostname, # Placeholder, maybe need translation map later
"data": {
"distance_pc": dist_pc,
"ra": ra_deg,
"dec": dec_deg,
"position": {"x": x, "y": y, "z": z},
"spectral_type": spectype,
"radius_solar": float(get_val(row['st_rad'])) if get_val(row['st_rad']) is not None else 1.0,
"mass_solar": float(get_val(row['st_mass'])) if get_val(row['st_mass']) is not None else 1.0,
"temperature_k": float(get_val(row['st_teff'])) if get_val(row['st_teff']) is not None else 5700,
"planet_count": int(get_val(row['sy_pnum'])),
"color": color,
"planets": []
}
}
# Add planet info
planet = {
"name": str(row['pl_name']),
"semi_major_axis_au": float(get_val(row['pl_orbsmax'])) if get_val(row['pl_orbsmax']) is not None else 0.0,
"period_days": float(get_val(row['pl_orbper'])) if get_val(row['pl_orbper']) is not None else 0.0,
"eccentricity": float(get_val(row['pl_orbeccen'])) if get_val(row['pl_orbeccen']) is not None else 0.0,
"radius_earth": float(get_val(row['pl_rade'])) if get_val(row['pl_rade']) is not None else 1.0,
"temperature_k": float(get_val(row['pl_eqt'])) if get_val(row['pl_eqt']) is not None else None
}
systems[hostname]["data"]["planets"].append(planet)
print(f" Processed {len(systems)} unique star systems.")
# 3. Store in Database
print(" Storing in database...")
# Helper to clean NaN values for JSON compatibility
def clean_nan(obj):
if isinstance(obj, float):
return None if math.isnan(obj) else obj
elif isinstance(obj, dict):
return {k: clean_nan(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [clean_nan(v) for v in obj]
return obj
async for session in get_db():
try:
count = 0
for hostname, info in systems.items():
# Clean data
cleaned_data = clean_nan(info["data"])
# Use UPSERT
stmt = insert(StaticData).values(
category=info["category"],
name=info["name"],
name_zh=info["name_zh"],
data=cleaned_data
).on_conflict_do_update(
constraint="uq_category_name",
set_={"data": cleaned_data, "updated_at": func.now()}
)
await session.execute(stmt)
count += 1
await session.commit()
print(f" ✅ Successfully stored {count} interstellar systems.")
except Exception as e:
await session.rollback()
print(f" ❌ Database error: {e}")
finally:
break
if __name__ == "__main__":
asyncio.run(fetch_and_store_interstellar_data())

View File

@ -1,119 +0,0 @@
"""
Fix enum type and add columns
"""
import asyncio
import sys
from pathlib import Path
# Add backend to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from sqlalchemy import text
from app.database import engine
async def fix_enum_and_migrate():
"""Fix enum type and add columns"""
async with engine.begin() as conn:
# First check enum values
result = await conn.execute(text("""
SELECT enumlabel
FROM pg_enum
WHERE enumtypid = 'jobtype'::regtype
ORDER BY enumsortorder
"""))
enum_values = [row[0] for row in result.fetchall()]
print(f"Current enum values: {enum_values}")
# Add missing enum values if needed
if 'predefined' not in enum_values:
await conn.execute(text("ALTER TYPE jobtype ADD VALUE 'predefined'"))
print("✅ Added 'predefined' to enum")
if 'custom_code' not in enum_values:
await conn.execute(text("ALTER TYPE jobtype ADD VALUE 'custom_code'"))
print("✅ Added 'custom_code' to enum")
# Now add columns in separate transaction
async with engine.begin() as conn:
print("\n🔄 Adding columns to scheduled_jobs table...")
# Add job_type column
try:
await conn.execute(text("""
ALTER TABLE scheduled_jobs
ADD COLUMN job_type jobtype DEFAULT 'custom_code'::jobtype NOT NULL
"""))
print("✅ Added job_type column")
except Exception as e:
if "already exists" in str(e):
print(" job_type column already exists")
else:
raise
# Add predefined_function column
try:
await conn.execute(text("""
ALTER TABLE scheduled_jobs
ADD COLUMN predefined_function VARCHAR(100)
"""))
print("✅ Added predefined_function column")
except Exception as e:
if "already exists" in str(e):
print(" predefined_function column already exists")
else:
raise
# Add function_params column
try:
await conn.execute(text("""
ALTER TABLE scheduled_jobs
ADD COLUMN function_params JSONB DEFAULT '{}'::jsonb
"""))
print("✅ Added function_params column")
except Exception as e:
if "already exists" in str(e):
print(" function_params column already exists")
else:
raise
# Set defaults and constraints in separate transaction
async with engine.begin() as conn:
# Set default for future records
await conn.execute(text("""
ALTER TABLE scheduled_jobs
ALTER COLUMN job_type SET DEFAULT 'predefined'::jobtype
"""))
print("✅ Set default job_type to 'predefined'")
# Drop and recreate check constraint
await conn.execute(text("""
ALTER TABLE scheduled_jobs
DROP CONSTRAINT IF EXISTS chk_job_type_fields
"""))
await conn.execute(text("""
ALTER TABLE scheduled_jobs
ADD CONSTRAINT chk_job_type_fields
CHECK (
(job_type = 'predefined' AND predefined_function IS NOT NULL)
OR
(job_type = 'custom_code' AND python_code IS NOT NULL)
)
"""))
print("✅ Added check constraint")
print("\n📋 Final table structure:")
result = await conn.execute(text("""
SELECT column_name, data_type, is_nullable
FROM information_schema.columns
WHERE table_name = 'scheduled_jobs'
ORDER BY ordinal_position
"""))
rows = result.fetchall()
for row in rows:
print(f" - {row[0]}: {row[1]} (nullable: {row[2]})")
if __name__ == "__main__":
asyncio.run(fix_enum_and_migrate())

View File

@ -1,59 +0,0 @@
"""
Fix positions table CHECK constraint to include 'nasa_horizons_cron'
"""
import asyncio
import sys
from pathlib import Path
# Add backend to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from sqlalchemy import text
from app.database import engine
async def fix_constraint():
"""Fix positions table source constraint"""
async with engine.begin() as conn:
print("🔍 Checking current constraint...")
# Check current constraint definition
result = await conn.execute(text("""
SELECT pg_get_constraintdef(oid)
FROM pg_constraint
WHERE conname = 'chk_source' AND conrelid = 'positions'::regclass;
"""))
current = result.fetchone()
if current:
print(f"📋 Current constraint: {current[0]}")
else:
print("⚠️ No constraint found!")
print("\n🔧 Dropping old constraint...")
await conn.execute(text("""
ALTER TABLE positions DROP CONSTRAINT IF EXISTS chk_source;
"""))
print("✅ Old constraint dropped")
print("\n🆕 Creating new constraint with 'nasa_horizons_cron'...")
await conn.execute(text("""
ALTER TABLE positions ADD CONSTRAINT chk_source
CHECK (source IN ('nasa_horizons', 'nasa_horizons_cron', 'calculated', 'user_defined', 'imported'));
"""))
print("✅ New constraint created")
# Verify new constraint
result = await conn.execute(text("""
SELECT pg_get_constraintdef(oid)
FROM pg_constraint
WHERE conname = 'chk_source' AND conrelid = 'positions'::regclass;
"""))
new_constraint = result.fetchone()
if new_constraint:
print(f"\n✅ New constraint: {new_constraint[0]}")
print("\n🎉 Constraint update completed successfully!")
if __name__ == "__main__":
asyncio.run(fix_constraint())

View File

@ -1,58 +0,0 @@
"""
Fix missing Sun position
"""
import asyncio
import os
import sys
from datetime import datetime
# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app.database import get_db
from app.models.db import Position
async def fix_sun_position():
"""Insert missing position for Sun at 2025-12-04 00:00:00"""
async for session in get_db():
try:
target_time = datetime(2025, 12, 4, 0, 0, 0)
print(f"Fixing Sun position for {target_time}...")
# Check if it exists first (double check)
from sqlalchemy import select, func
stmt = select(func.count(Position.id)).where(
Position.body_id == '10',
Position.time == target_time
)
result = await session.execute(stmt)
count = result.scalar()
if count > 0:
print("✅ Position already exists!")
return
# Insert
new_pos = Position(
body_id='10',
time=target_time,
x=0.0,
y=0.0,
z=0.0,
vx=0.0,
vy=0.0,
vz=0.0,
source='calculated'
)
session.add(new_pos)
await session.commit()
print("✅ Successfully inserted Sun position!")
except Exception as e:
print(f"❌ Error: {e}")
await session.rollback()
finally:
break
if __name__ == "__main__":
asyncio.run(fix_sun_position())

View File

@ -1,39 +0,0 @@
import asyncio
import os
import sys
from sqlalchemy import select
from datetime import datetime
# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app.database import get_db
from app.models.db import Position
async def inspect_sun_positions():
async for session in get_db():
try:
# List all positions for Sun
stmt = select(Position.time).where(Position.body_id == '10').order_by(Position.time.desc()).limit(10)
result = await session.execute(stmt)
times = result.scalars().all()
print("Recent Sun positions:")
for t in times:
print(f" - {t} (type: {type(t)})")
# Check specifically for 2025-12-04
target = datetime(2025, 12, 4, 0, 0, 0)
stmt = select(Position).where(
Position.body_id == '10',
Position.time == target
)
result = await session.execute(stmt)
pos = result.scalar()
print(f"\nExact match for {target}: {pos}")
finally:
break
if __name__ == "__main__":
asyncio.run(inspect_sun_positions())

View File

@ -1,184 +0,0 @@
#!/usr/bin/env python3
"""
Data migration script
Migrates existing data from code/JSON files to PostgreSQL database:
1. CELESTIAL_BODIES dict celestial_bodies table
2. Frontend JSON files static_data table
Usage:
python scripts/migrate_data.py [--force | --skip-existing]
Options:
--force Overwrite existing data without prompting
--skip-existing Skip migration if data already exists
"""
import asyncio
import sys
from pathlib import Path
import json
import argparse
sys.path.insert(0, str(Path(__file__).parent.parent))
from app.database import AsyncSessionLocal
from app.models.celestial import CELESTIAL_BODIES
from app.models.db import CelestialBody, StaticData
from sqlalchemy import select
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
async def migrate_celestial_bodies(force: bool = False, skip_existing: bool = False):
"""Migrate CELESTIAL_BODIES dict to database"""
logger.info("=" * 60)
logger.info("Migrating celestial bodies...")
logger.info("=" * 60)
async with AsyncSessionLocal() as session:
# Check if data already exists
result = await session.execute(select(CelestialBody))
existing_count = len(result.scalars().all())
if existing_count > 0:
logger.warning(f"Found {existing_count} existing celestial bodies in database")
if skip_existing:
logger.info("Skipping celestial bodies migration (--skip-existing)")
return
if not force:
response = input("Do you want to overwrite? (yes/no): ")
if response.lower() not in ['yes', 'y']:
logger.info("Skipping celestial bodies migration")
return
else:
logger.info("Overwriting existing data (--force)")
# Delete existing data
from sqlalchemy import text
await session.execute(text("DELETE FROM celestial_bodies"))
logger.info(f"Deleted {existing_count} existing records")
# Insert new data
count = 0
for body_id, info in CELESTIAL_BODIES.items():
body = CelestialBody(
id=body_id,
name=info["name"],
name_zh=info.get("name_zh"),
type=info["type"],
description=info.get("description"),
extra_data={
"launch_date": info.get("launch_date"),
"status": info.get("status"),
} if "launch_date" in info or "status" in info else None
)
session.add(body)
count += 1
await session.commit()
logger.info(f"✓ Migrated {count} celestial bodies")
async def migrate_static_data(force: bool = False, skip_existing: bool = False):
"""Migrate frontend JSON files to database"""
logger.info("=" * 60)
logger.info("Migrating static data from JSON files...")
logger.info("=" * 60)
# Define JSON files to migrate
frontend_data_dir = Path(__file__).parent.parent.parent / "frontend" / "public" / "data"
json_files = {
"nearby-stars.json": "star",
"constellations.json": "constellation",
"galaxies.json": "galaxy",
}
async with AsyncSessionLocal() as session:
for filename, category in json_files.items():
file_path = frontend_data_dir / filename
if not file_path.exists():
logger.warning(f"File not found: {file_path}")
continue
# Load JSON data
with open(file_path, 'r', encoding='utf-8') as f:
data_list = json.load(f)
# Check if category data already exists
result = await session.execute(
select(StaticData).where(StaticData.category == category)
)
existing = result.scalars().all()
if existing:
logger.warning(f"Found {len(existing)} existing {category} records")
if skip_existing:
logger.info(f"Skipping {category} migration (--skip-existing)")
continue
if not force:
response = input(f"Overwrite {category} data? (yes/no): ")
if response.lower() not in ['yes', 'y']:
logger.info(f"Skipping {category} migration")
continue
else:
logger.info(f"Overwriting {category} data (--force)")
# Delete existing
for record in existing:
await session.delete(record)
# Insert new data
count = 0
for item in data_list:
static_item = StaticData(
category=category,
name=item.get("name", "Unknown"),
name_zh=item.get("name_zh"),
data=item
)
session.add(static_item)
count += 1
await session.commit()
logger.info(f"✓ Migrated {count} {category} records")
async def main():
"""Run all migrations"""
# Parse command line arguments
parser = argparse.ArgumentParser(description='Migrate data to PostgreSQL database')
group = parser.add_mutually_exclusive_group()
group.add_argument('--force', action='store_true', help='Overwrite existing data without prompting')
group.add_argument('--skip-existing', action='store_true', help='Skip migration if data already exists')
args = parser.parse_args()
logger.info("\n" + "=" * 60)
logger.info("Cosmo Data Migration")
logger.info("=" * 60 + "\n")
try:
# Migrate celestial bodies
await migrate_celestial_bodies(force=args.force, skip_existing=args.skip_existing)
# Migrate static data
await migrate_static_data(force=args.force, skip_existing=args.skip_existing)
logger.info("\n" + "=" * 60)
logger.info("✓ Migration completed successfully!")
logger.info("=" * 60)
except Exception as e:
logger.error(f"\n✗ Migration failed: {e}")
import traceback
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
asyncio.run(main())

View File

@ -1,342 +0,0 @@
#!/usr/bin/env python3
"""
迁移 static_data 中的 interstellar 数据到 star_systems celestial_bodies
包含自动中文名翻译功能
"""
import asyncio
import sys
from pathlib import Path
# 添加项目根目录到 Python 路径
sys.path.insert(0, str(Path(__file__).parent.parent))
from sqlalchemy import select, func, update
from sqlalchemy.dialects.postgresql import insert
from app.database import AsyncSessionLocal
from app.models.db.static_data import StaticData
from app.models.db.star_system import StarSystem
from app.models.db.celestial_body import CelestialBody
# 恒星名称中文翻译字典(常见恒星)
STAR_NAME_ZH = {
'Proxima Cen': '比邻星',
"Barnard's star": '巴纳德星',
'eps Eri': '天苑四',
'Lalande 21185': '莱兰21185',
'61 Cyg A': '天鹅座61 A',
'61 Cyg B': '天鹅座61 B',
'tau Cet': '天仓五',
'Kapteyn': '开普敦星',
'Lacaille 9352': '拉卡伊9352',
'Ross 128': '罗斯128',
'Wolf 359': '狼359',
'Sirius': '天狼星',
'Alpha Centauri': '南门二',
'TRAPPIST-1': 'TRAPPIST-1',
'Kepler-442': '开普勒-442',
'Kepler-452': '开普勒-452',
'Gliese 581': '格利泽581',
'Gliese 667C': '格利泽667C',
'HD 40307': 'HD 40307',
}
# 常见恒星系后缀翻译
SYSTEM_SUFFIX_ZH = {
'System': '系统',
'system': '系统',
}
def translate_star_name(english_name: str) -> str:
"""
翻译恒星名称为中文
优先使用字典否则保留英文名
"""
# 直接匹配
if english_name in STAR_NAME_ZH:
return STAR_NAME_ZH[english_name]
# 移除常见后缀尝试匹配
base_name = english_name.replace(' A', '').replace(' B', '').replace(' C', '').strip()
if base_name in STAR_NAME_ZH:
suffix = english_name.replace(base_name, '').strip()
return STAR_NAME_ZH[base_name] + suffix
# Kepler/TRAPPIST 等编号星
if english_name.startswith('Kepler-'):
return f'开普勒-{english_name.split("-")[1]}'
if english_name.startswith('TRAPPIST-'):
return f'TRAPPIST-{english_name.split("-")[1]}'
if english_name.startswith('Gliese '):
return f'格利泽{english_name.split(" ")[1]}'
if english_name.startswith('GJ '):
return f'GJ {english_name.split(" ")[1]}'
if english_name.startswith('HD '):
return f'HD {english_name.split(" ")[1]}'
if english_name.startswith('HIP '):
return f'HIP {english_name.split(" ")[1]}'
# 默认返回英文名
return english_name
def translate_system_name(english_name: str) -> str:
"""翻译恒星系名称"""
if ' System' in english_name:
star_name = english_name.replace(' System', '').strip()
star_name_zh = translate_star_name(star_name)
return f'{star_name_zh}系统'
return translate_star_name(english_name)
def translate_planet_name(english_name: str) -> str:
"""
翻译系外行星名称
格式恒星名 + 行星字母
"""
# 分离恒星名和行星字母
parts = english_name.rsplit(' ', 1)
if len(parts) == 2:
star_name, planet_letter = parts
star_name_zh = translate_star_name(star_name)
return f'{star_name_zh} {planet_letter}'
return english_name
async def deduplicate_planets(planets: list) -> list:
"""
去除重复的行星记录
保留字段最完整的记录
"""
if not planets:
return []
planet_map = {}
for planet in planets:
name = planet.get('name', '')
if not name:
continue
if name not in planet_map:
planet_map[name] = planet
else:
# 比较字段完整度
existing = planet_map[name]
existing_fields = sum(1 for v in existing.values() if v is not None and v != '')
current_fields = sum(1 for v in planet.values() if v is not None and v != '')
if current_fields > existing_fields:
planet_map[name] = planet
return list(planet_map.values())
async def migrate_star_systems():
"""迁移恒星系统数据"""
async with AsyncSessionLocal() as session:
print("=" * 60)
print("开始迁移系外恒星系数据...")
print("=" * 60)
# 读取所有 interstellar 数据
result = await session.execute(
select(StaticData)
.where(StaticData.category == 'interstellar')
.order_by(StaticData.name)
)
interstellar_data = result.scalars().all()
print(f"\n📊 共找到 {len(interstellar_data)} 个恒星系统")
migrated_systems = 0
migrated_planets = 0
skipped_systems = 0
for star_data in interstellar_data:
try:
data = star_data.data
star_name = star_data.name
# 翻译中文名
star_name_zh = translate_star_name(star_name)
system_name = f"{star_name} System"
system_name_zh = translate_system_name(system_name)
# 创建恒星系统记录
system = StarSystem(
name=system_name,
name_zh=system_name_zh,
host_star_name=star_name,
distance_pc=data.get('distance_pc'),
distance_ly=data.get('distance_ly'),
ra=data.get('ra'),
dec=data.get('dec'),
position_x=data.get('position', {}).get('x') if 'position' in data else None,
position_y=data.get('position', {}).get('y') if 'position' in data else None,
position_z=data.get('position', {}).get('z') if 'position' in data else None,
spectral_type=data.get('spectral_type'),
radius_solar=data.get('radius_solar'),
mass_solar=data.get('mass_solar'),
temperature_k=data.get('temperature_k'),
magnitude=data.get('magnitude'),
color=data.get('color', '#FFFFFF'),
planet_count=0, # 将在迁移行星后更新
description=f"距离地球 {data.get('distance_ly', 0):.2f} 光年的恒星系统。"
)
session.add(system)
await session.flush() # 获取 system.id
print(f"\n✅ 恒星系: {system_name} ({system_name_zh})")
print(f" 距离: {data.get('distance_pc', 0):.2f} pc (~{data.get('distance_ly', 0):.2f} ly)")
# 处理行星数据
planets = data.get('planets', [])
if planets:
# 去重
unique_planets = await deduplicate_planets(planets)
print(f" 行星: {len(planets)} 条记录 → {len(unique_planets)} 颗独立行星(去重 {len(planets) - len(unique_planets)} 条)")
# 迁移行星
for planet_data in unique_planets:
planet_name = planet_data.get('name', '')
if not planet_name:
continue
planet_name_zh = translate_planet_name(planet_name)
# 创建系外行星记录
planet = CelestialBody(
id=f"exo-{system.id}-{planet_name.replace(' ', '-')}", # 生成唯一ID
name=planet_name,
name_zh=planet_name_zh,
type='planet',
system_id=system.id,
description=f"{system_name_zh}的系外行星。",
extra_data={
'semi_major_axis_au': planet_data.get('semi_major_axis_au'),
'period_days': planet_data.get('period_days'),
'eccentricity': planet_data.get('eccentricity'),
'radius_earth': planet_data.get('radius_earth'),
'mass_earth': planet_data.get('mass_earth'),
'temperature_k': planet_data.get('temperature_k'),
}
)
session.add(planet)
migrated_planets += 1
print(f"{planet_name} ({planet_name_zh})")
# 更新恒星系的行星数量
system.planet_count = len(unique_planets)
migrated_systems += 1
# 每100个系统提交一次
if migrated_systems % 100 == 0:
await session.commit()
print(f"\n💾 已提交 {migrated_systems} 个恒星系统...")
except Exception as e:
print(f"\n❌ 错误:迁移 {star_name} 失败 - {str(e)[:200]}")
skipped_systems += 1
# 简单回滚,继续下一个
try:
await session.rollback()
except:
pass
continue
# 最终提交
await session.commit()
print("\n" + "=" * 60)
print("迁移完成!")
print("=" * 60)
print(f"✅ 成功迁移恒星系: {migrated_systems}")
print(f"✅ 成功迁移行星: {migrated_planets}")
print(f"⚠️ 跳过的恒星系: {skipped_systems}")
print(f"📊 平均每个恒星系: {migrated_planets / migrated_systems:.1f} 颗行星")
async def update_solar_system_count():
"""更新太阳系的天体数量"""
async with AsyncSessionLocal() as session:
result = await session.execute(
select(func.count(CelestialBody.id))
.where(CelestialBody.system_id == 1)
)
count = result.scalar()
await session.execute(
update(StarSystem)
.where(StarSystem.id == 1)
.values(planet_count=count - 1) # 减去太阳本身
)
await session.commit()
print(f"\n✅ 更新太阳系天体数量: {count} (不含太阳: {count - 1})")
async def verify_migration():
"""验证迁移结果"""
async with AsyncSessionLocal() as session:
print("\n" + "=" * 60)
print("验证迁移结果...")
print("=" * 60)
# 统计恒星系
result = await session.execute(select(func.count(StarSystem.id)))
system_count = result.scalar()
print(f"\n📊 恒星系统总数: {system_count}")
# 统计各系统的行星数量
result = await session.execute(
select(StarSystem.name, StarSystem.name_zh, StarSystem.planet_count)
.order_by(StarSystem.planet_count.desc())
.limit(10)
)
print("\n🏆 行星最多的恒星系前10:")
for name, name_zh, count in result:
print(f" {name} ({name_zh}): {count} 颗行星")
# 统计天体类型分布
result = await session.execute(
select(CelestialBody.type, CelestialBody.system_id, func.count(CelestialBody.id))
.group_by(CelestialBody.type, CelestialBody.system_id)
.order_by(CelestialBody.system_id, CelestialBody.type)
)
print("\n📈 天体类型分布:")
for type_, system_id, count in result:
system_name = "太阳系" if system_id == 1 else f"系外恒星系"
print(f" {system_name} - {type_}: {count}")
async def main():
"""主函数"""
print("\n" + "=" * 60)
print("Cosmo 系外恒星系数据迁移工具")
print("=" * 60)
try:
# 执行迁移
await migrate_star_systems()
# 更新太阳系统计
await update_solar_system_count()
# 验证结果
await verify_migration()
print("\n✅ 所有操作完成!")
except Exception as e:
print(f"\n❌ 迁移失败: {str(e)}")
import traceback
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
asyncio.run(main())

View File

@ -1,56 +0,0 @@
"""
Optimize orbit data by downsampling excessively detailed orbits
灶神星(Vesta)的轨道数据被过度采样了31,825个点降采样到合理的数量
"""
import asyncio
from sqlalchemy import text
from app.database import engine
async def optimize_vesta_orbit():
"""Downsample Vesta orbit from 31,825 points to ~1,326 points (every 24th point)"""
async with engine.begin() as conn:
# Get current Vesta orbit data
result = await conn.execute(text("""
SELECT points, num_points
FROM orbits
WHERE body_id = '2000004'
"""))
row = result.fetchone()
if not row:
print("❌ Vesta orbit not found")
return
points = row[0] # JSONB array
current_count = row[1]
print(f"当前Vesta轨道点数: {current_count}")
print(f"实际数组长度: {len(points)}")
# Downsample: take every 24th point (0.04 days * 24 ≈ 1 day per point)
downsampled = points[::24]
new_count = len(downsampled)
print(f"降采样后点数: {new_count}")
print(f"数据大小减少: {current_count - new_count}")
print(f"降采样比例: {current_count / new_count:.1f}x")
# Calculate size reduction
import json
old_size = len(json.dumps(points))
new_size = len(json.dumps(downsampled))
print(f"JSON大小: {old_size:,} -> {new_size:,} bytes ({old_size/new_size:.1f}x)")
# Update database
await conn.execute(text("""
UPDATE orbits
SET points = :points, num_points = :num_points
WHERE body_id = '2000004'
"""), {"points": json.dumps(downsampled), "num_points": new_count})
print("✅ Vesta轨道数据已优化")
if __name__ == "__main__":
asyncio.run(optimize_vesta_orbit())

View File

@ -1,41 +0,0 @@
-- Phase 5 Database Schema Changes (Updated)
-- Run this script to add tables for Celestial Events and User Follows
-- Note: Channel messages are now stored in Redis, so no table is created for them.
BEGIN;
-- 1. Celestial Events Table
CREATE TABLE IF NOT EXISTS "public"."celestial_events" (
"id" SERIAL PRIMARY KEY,
"body_id" VARCHAR(50) NOT NULL REFERENCES "public"."celestial_bodies"("id") ON DELETE CASCADE,
"title" VARCHAR(200) NOT NULL,
"event_type" VARCHAR(50) NOT NULL, -- 'approach' (close approach), 'opposition' (冲日), etc.
"event_time" TIMESTAMP NOT NULL,
"description" TEXT,
"details" JSONB, -- Store distance (nominal_dist_au), v_rel, etc.
"source" VARCHAR(50) DEFAULT 'nasa_sbdb',
"created_at" TIMESTAMP DEFAULT NOW()
);
CREATE INDEX "idx_celestial_events_body_id" ON "public"."celestial_events" ("body_id");
CREATE INDEX "idx_celestial_events_time" ON "public"."celestial_events" ("event_time");
COMMENT ON TABLE "public"."celestial_events" IS '天体动态事件表 (如飞掠、冲日)';
-- 2. User Follows Table (Relationships)
CREATE TABLE IF NOT EXISTS "public"."user_follows" (
"user_id" INTEGER NOT NULL REFERENCES "public"."users"("id") ON DELETE CASCADE,
"body_id" VARCHAR(50) NOT NULL REFERENCES "public"."celestial_bodies"("id") ON DELETE CASCADE,
"created_at" TIMESTAMP DEFAULT NOW(),
PRIMARY KEY ("user_id", "body_id")
);
CREATE INDEX "idx_user_follows_user" ON "public"."user_follows" ("user_id");
COMMENT ON TABLE "public"."user_follows" IS '用户关注天体关联表';
-- 3. Ensure 'icon' is in resources check constraint (Idempotent check)
-- Dropping and recreating constraint is the safest way to ensure 'icon' is present if it wasn't
ALTER TABLE "public"."resources" DROP CONSTRAINT IF EXISTS "chk_resource_type";
ALTER TABLE "public"."resources" ADD CONSTRAINT "chk_resource_type"
CHECK (resource_type IN ('texture', 'model', 'icon', 'thumbnail', 'data'));
COMMIT;

View File

@ -1,283 +0,0 @@
"""
Populate Primary Stars for Star Systems
Phase 4.1: Data Migration
This script creates primary star records in celestial_bodies table
for all star systems in the star_systems table.
It does NOT fetch new data from NASA - all data already exists in star_systems.
"""
import asyncio
import sys
import os
import json
from datetime import datetime
# Add backend to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sqlalchemy import text
from app.database import get_db
async def populate_primary_stars():
"""Create primary star records for all star systems"""
print("=" * 70)
print("🌟 Phase 4.1: Populate Primary Stars")
print("=" * 70)
print()
async for session in get_db():
try:
# Step 1: Check current status
print("📊 Step 1: Checking current status...")
result = await session.execute(text(
"SELECT COUNT(*) FROM star_systems"
))
total_systems = result.scalar()
print(f" Total star systems: {total_systems}")
result = await session.execute(text(
"SELECT COUNT(*) FROM celestial_bodies WHERE type = 'star'"
))
existing_stars = result.scalar()
print(f" Existing stars in celestial_bodies: {existing_stars}")
print()
# Step 2: Fetch all star systems
print("📥 Step 2: Fetching star systems data...")
result = await session.execute(text("""
SELECT
id, name, name_zh, host_star_name,
spectral_type, radius_solar, mass_solar,
temperature_k, luminosity_solar, color,
description
FROM star_systems
ORDER BY id
"""))
systems = result.fetchall()
print(f" Fetched {len(systems)} star systems")
print()
# Step 3: Create primary star records
print("✨ Step 3: Creating primary star records...")
created_count = 0
updated_count = 0
skipped_count = 0
for system in systems:
star_id = f"star-{system.id}-primary"
# Derive star name from system info
# Remove "System" suffix from name_zh if present
star_name_zh = system.name_zh
if star_name_zh:
star_name_zh = star_name_zh.replace('系统', '').replace('System', '').strip()
# Create metadata JSON
metadata = {
"star_role": "primary",
"spectral_type": system.spectral_type,
"radius_solar": system.radius_solar,
"mass_solar": system.mass_solar,
"temperature_k": system.temperature_k,
"luminosity_solar": system.luminosity_solar,
"color": system.color
}
# Description
description = f"光谱类型: {system.spectral_type or 'Unknown'}"
if system.temperature_k:
description += f", 表面温度: {int(system.temperature_k)}K"
# Convert metadata to JSON string
metadata_json = json.dumps(metadata)
# Check if star already exists
check_result = await session.execute(
text("SELECT id FROM celestial_bodies WHERE id = :star_id").bindparams(star_id=star_id)
)
existing = check_result.fetchone()
if existing:
# Update existing record
await session.execute(
text("""
UPDATE celestial_bodies
SET name = :name,
name_zh = :name_zh,
type = 'star',
description = :description,
extra_data = CAST(:extra_data AS jsonb),
updated_at = NOW()
WHERE id = :star_id
""").bindparams(
name=system.host_star_name,
name_zh=star_name_zh,
description=description,
extra_data=metadata_json,
star_id=star_id
)
)
updated_count += 1
else:
# Insert new record
await session.execute(
text("""
INSERT INTO celestial_bodies (
id, system_id, name, name_zh, type,
description, extra_data, is_active,
created_at, updated_at
) VALUES (
:star_id, :system_id, :name, :name_zh, 'star',
:description, CAST(:extra_data AS jsonb), TRUE,
NOW(), NOW()
)
""").bindparams(
star_id=star_id,
system_id=system.id,
name=system.host_star_name,
name_zh=star_name_zh,
description=description,
extra_data=metadata_json
)
)
created_count += 1
# Progress indicator
if (created_count + updated_count) % 50 == 0:
print(f" Progress: {created_count + updated_count}/{len(systems)}")
await session.commit()
print(f" ✅ Created: {created_count}")
print(f" 🔄 Updated: {updated_count}")
print(f" ⏭️ Skipped: {skipped_count}")
print()
# Step 4: Create default positions (0, 0, 0) for all primary stars
print("📍 Step 4: Creating default positions...")
# First, check which stars don't have positions
result = await session.execute(text("""
SELECT cb.id
FROM celestial_bodies cb
WHERE cb.type = 'star'
AND cb.id LIKE 'star-%-primary'
AND NOT EXISTS (
SELECT 1 FROM positions p WHERE p.body_id = cb.id
)
"""))
stars_without_positions = result.fetchall()
print(f" Stars without positions: {len(stars_without_positions)}")
position_count = 0
for star_row in stars_without_positions:
star_id = star_row.id
# Create position at (0, 0, 0) - center of the system
await session.execute(
text("""
INSERT INTO positions (
body_id, time, x, y, z,
vx, vy, vz, source, created_at
) VALUES (
:body_id, NOW(), 0, 0, 0,
0, 0, 0, 'calculated', NOW()
)
""").bindparams(body_id=star_id)
)
position_count += 1
await session.commit()
print(f" ✅ Created {position_count} position records")
print()
# Step 5: Verification
print("🔍 Step 5: Verification...")
# Count stars by system
result = await session.execute(text("""
SELECT
COUNT(DISTINCT cb.system_id) as systems_with_stars,
COUNT(*) as total_stars
FROM celestial_bodies cb
WHERE cb.type = 'star' AND cb.id LIKE 'star-%-primary'
"""))
verification = result.fetchone()
print(f" Systems with primary stars: {verification.systems_with_stars}/{total_systems}")
print(f" Total primary star records: {verification.total_stars}")
# Check for systems without stars
result = await session.execute(text("""
SELECT ss.id, ss.name
FROM star_systems ss
WHERE NOT EXISTS (
SELECT 1 FROM celestial_bodies cb
WHERE cb.system_id = ss.id AND cb.type = 'star'
)
LIMIT 5
"""))
missing_stars = result.fetchall()
if missing_stars:
print(f" ⚠️ Systems without stars: {len(missing_stars)}")
for sys in missing_stars[:5]:
print(f" - {sys.name} (ID: {sys.id})")
else:
print(f" ✅ All systems have primary stars!")
print()
# Step 6: Sample data check
print("📋 Step 6: Sample data check...")
result = await session.execute(text("""
SELECT
cb.id, cb.name, cb.name_zh, cb.extra_data,
ss.name as system_name
FROM celestial_bodies cb
JOIN star_systems ss ON cb.system_id = ss.id
WHERE cb.type = 'star' AND cb.id LIKE 'star-%-primary'
ORDER BY ss.distance_pc
LIMIT 5
"""))
samples = result.fetchall()
print(" Nearest star systems:")
for sample in samples:
print(f"{sample.name} ({sample.name_zh})")
print(f" System: {sample.system_name}")
print(f" Extra Data: {sample.extra_data}")
print()
print("=" * 70)
print("✅ Phase 4.1 Completed Successfully!")
print("=" * 70)
print()
print(f"Summary:")
print(f" • Total star systems: {total_systems}")
print(f" • Primary stars created: {created_count}")
print(f" • Primary stars updated: {updated_count}")
print(f" • Positions created: {position_count}")
print(f" • Coverage: {verification.systems_with_stars}/{total_systems} systems")
print()
except Exception as e:
await session.rollback()
print(f"\n❌ Error: {e}")
import traceback
traceback.print_exc()
raise
finally:
await session.close()
# Entry point: run the Phase 4.1 primary-star population as a standalone script.
if __name__ == "__main__":
    asyncio.run(populate_primary_stars())

View File

@ -1,27 +0,0 @@
"""
Recreate resources table with unique constraint
"""
import asyncio
from app.database import engine
from app.models.db.resource import Resource
from sqlalchemy import text
async def recreate_resources_table():
    """Drop and recreate the ``resources`` table.

    Destructive: all existing resource rows are lost, and ``DROP ... CASCADE``
    also removes dependent objects (e.g. foreign keys pointing at the table).

    Fix: the original called ``Resource.metadata.create_all``, which creates
    *every* missing table registered on the shared metadata, not just
    ``resources``.  ``Resource.__table__.create`` recreates only this table.
    """
    async with engine.begin() as conn:
        # Drop the old table (and dependents) so the new schema, including
        # the unique constraint, can be applied cleanly.
        print("🗑️ Dropping resources table...")
        await conn.execute(text("DROP TABLE IF EXISTS resources CASCADE"))
        print("✓ Table dropped")
        # Recreate only the resources table from the current model definition.
        print("📦 Creating resources table with new schema...")
        await conn.run_sync(Resource.__table__.create)
        print("✓ Table created")
    print("\n✨ Resources table recreated successfully!")
if __name__ == "__main__":
    asyncio.run(recreate_resources_table())

View File

@ -1,53 +0,0 @@
"""
Reset position data to fix units (KM -> AU)
"""
import asyncio
import os
import sys
# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app.database import get_db
from app.models.db import Position
from app.services.redis_cache import redis_cache
from sqlalchemy import text
async def reset_data():
    """Clear positions and cache to force re-fetch in AU.

    One-shot maintenance script: wipes the `positions` table, the
    `nasa_cache` table (best effort) and the matching Redis key patterns so
    the next fetch repopulates everything in AU instead of the old KM
    values.  Destructive — all cached position data is lost.
    """
    print("🧹 Clearing old data (KM) to prepare for AU...")
    # get_db() is an async generator; we use a single session and stop the
    # iteration via the `break` in the finally block below.
    async for session in get_db():
        try:
            # Clear positions table (RESTART IDENTITY also resets the PK sequence).
            print(" Truncating positions table...")
            await session.execute(text("TRUNCATE TABLE positions RESTART IDENTITY CASCADE"))
            # nasa_cache is cleared best-effort: the table may not exist in
            # every environment, so a failure here is reported but not fatal.
            print(" Truncating nasa_cache table...")
            try:
                await session.execute(text("TRUNCATE TABLE nasa_cache RESTART IDENTITY CASCADE"))
            except Exception as e:
                print(f" (Note: nasa_cache might not exist or failed: {e})")
            await session.commit()
            print("✅ Database tables cleared.")
            # Clear the Redis entries that mirror the truncated tables.
            await redis_cache.connect()
            await redis_cache.clear_pattern("positions:*")
            await redis_cache.clear_pattern("nasa:*")
            print("✅ Redis cache cleared.")
            await redis_cache.disconnect()
        except Exception as e:
            print(f"❌ Error: {e}")
            await session.rollback()
        finally:
            # Only one session is needed; stop iterating the generator.
            break
if __name__ == "__main__":
    asyncio.run(reset_data())

View File

@ -1,51 +0,0 @@
"""
Run database migration for scheduled_jobs table
"""
import asyncio
import asyncpg
from pathlib import Path
async def run_migration():
    """Apply migrations/add_predefined_jobs_support.sql via asyncpg.

    Connection parameters are read from the standard PG* environment
    variables (PGUSER, PGPASSWORD, PGDATABASE, PGHOST, PGPORT), falling
    back to the previous hard-coded development defaults so existing local
    usage is unchanged while credentials no longer have to live in source.
    """
    import os  # local import keeps the module surface unchanged

    # Read the migration file shipped alongside the scripts directory.
    migration_file = Path(__file__).parent.parent / "migrations" / "add_predefined_jobs_support.sql"
    with open(migration_file, 'r') as f:
        sql = f.read()
    # Connect to database (env overrides take precedence over dev defaults).
    conn = await asyncpg.connect(
        user=os.environ.get('PGUSER', 'postgres'),
        # NOTE(review): dev-only default; set PGPASSWORD in real environments.
        password=os.environ.get('PGPASSWORD', 'cosmo2024'),
        database=os.environ.get('PGDATABASE', 'cosmo_db'),
        host=os.environ.get('PGHOST', 'localhost'),
        port=int(os.environ.get('PGPORT', '5432'))
    )
    try:
        print("🔄 Running migration: add_predefined_jobs_support.sql")
        # The whole file is executed as a single script.
        await conn.execute(sql)
        print("✅ Migration completed successfully!")
        # Verify the changes by dumping the resulting table structure.
        result = await conn.fetch("""
            SELECT column_name, data_type, is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = 'scheduled_jobs'
            ORDER BY ordinal_position
        """)
        print("\n📋 Current scheduled_jobs table structure:")
        for row in result:
            print(f" - {row['column_name']}: {row['data_type']} (nullable: {row['is_nullable']})")
    finally:
        await conn.close()
if __name__ == "__main__":
    asyncio.run(run_migration())

View File

@ -1,84 +0,0 @@
"""
Run database migration for scheduled_jobs table
"""
import asyncio
import sys
from pathlib import Path
# Add backend to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from sqlalchemy import text
from app.database import engine
def _split_sql_statements(sql_content):
    """Split a migration script into individually executable statements.

    Rules (mirroring the original inline logic):
      * blank lines and ``--`` comment lines are dropped;
      * a ``DO $$ ... END $$;`` block is kept together as one statement,
        because the ``;`` characters inside its body are not terminators;
      * any other statement ends at a line whose stripped text ends in ``;``.
    """
    statements = []
    current_stmt = []
    in_do_block = False
    for line in sql_content.split('\n'):
        stripped = line.strip()
        # Skip comments and blank lines.
        if stripped.startswith('--') or not stripped:
            continue
        if stripped.startswith('DO $$'):
            in_do_block = True
            current_stmt.append(line)
        elif stripped == 'END $$;':
            current_stmt.append(line)
            statements.append('\n'.join(current_stmt))
            current_stmt = []
            in_do_block = False
        elif in_do_block or not stripped.endswith(';'):
            current_stmt.append(line)
        else:
            # Regular statement ending with ;
            current_stmt.append(line)
            statements.append('\n'.join(current_stmt))
            current_stmt = []
    # Bug fix: a trailing statement with no final ';' (or an unterminated
    # DO block) was previously dropped silently; keep it instead.
    if current_stmt:
        statements.append('\n'.join(current_stmt))
    return statements
async def run_migration():
    """Run migrations/add_predefined_jobs_support.sql statement by statement.

    Statements are executed one at a time so "already applied" failures on
    individual statements can be tolerated, which makes the migration
    effectively re-runnable.
    """
    # Read the migration file.
    migration_file = Path(__file__).parent.parent / "migrations" / "add_predefined_jobs_support.sql"
    with open(migration_file, 'r') as f:
        sql_content = f.read()
    statements = _split_sql_statements(sql_content)
    async with engine.begin() as conn:
        print("🔄 Running migration: add_predefined_jobs_support.sql")
        # Execute each statement separately.
        for i, stmt in enumerate(statements):
            if stmt.strip():
                try:
                    print(f" Executing statement {i+1}/{len(statements)}...")
                    await conn.execute(text(stmt))
                except Exception as e:
                    # Some statements might fail if already applied, that's okay.
                    print(f" ⚠️ Statement {i+1} warning: {e}")
        print("✅ Migration completed successfully!")
        # Verify the changes by dumping the resulting table structure.
        result = await conn.execute(text("""
            SELECT column_name, data_type, is_nullable
            FROM information_schema.columns
            WHERE table_name = 'scheduled_jobs'
            ORDER BY ordinal_position
        """))
        rows = result.fetchall()
        print("\n📋 Current scheduled_jobs table structure:")
        for row in rows:
            print(f" - {row[0]}: {row[1]} (nullable: {row[2]})")
if __name__ == "__main__":
    asyncio.run(run_migration())

View File

@ -1,49 +0,0 @@
"""
Test fetching Pluto position from NASA Horizons
"""
import asyncio
from datetime import datetime, UTC
from app.services.horizons import HorizonsService
async def test_pluto():
    """Smoke-test fetching Pluto's current heliocentric position.

    Queries NASA Horizons for body ID 999 at a single instant and prints
    the XYZ position (AU) plus the derived distance from the Sun.
    """
    import math

    print("🔍 Testing Pluto position fetch from NASA Horizons API...")
    horizons = HorizonsService()
    try:
        # Single-instant query: start == end with a daily step.
        now = datetime.now(UTC)
        positions = horizons.get_body_positions(
            body_id="999",
            start_time=now,
            end_time=now,
            step="1d"
        )
        if not positions:
            print("❌ No position data returned")
            return
        first = positions[0]
        print(f"\n✅ Successfully fetched Pluto position!")
        print(f" Time: {first.time}")
        print(f" Position (AU):")
        print(f" X: {first.x:.4f}")
        print(f" Y: {first.y:.4f}")
        print(f" Z: {first.z:.4f}")
        # Heliocentric distance is the Euclidean norm of the position vector.
        distance = math.sqrt(first.x**2 + first.y**2 + first.z**2)
        print(f" Distance from Sun: {distance:.2f} AU")
    except Exception as e:
        print(f"❌ Error fetching Pluto position: {e}")
if __name__ == "__main__":
    asyncio.run(test_pluto())

View File

@ -1,17 +0,0 @@
-- Update check constraint for static_data table to include 'interstellar'
-- Run this manually via: python backend/scripts/run_sql.py backend/scripts/update_category_constraint.sql
-- NOTE(review): DROP + ADD is not atomic here; wrap in a transaction if
-- static_data can be written concurrently while this runs.
ALTER TABLE static_data DROP CONSTRAINT IF EXISTS chk_category;
-- Recreate the allow-list of category values (presumably a superset of the
-- previous list, so existing rows stay valid — verify before running).
ALTER TABLE static_data
ADD CONSTRAINT chk_category
CHECK (category IN (
    'constellation',
    'galaxy',
    'star',
    'nebula',
    'cluster',
    'asteroid_belt',
    'kuiper_belt',
    'interstellar'  -- newly added category
));

View File

@ -1,169 +0,0 @@
"""
更新太阳系行星和矮行星的轨道参数到 extra_data 字段
将硬编码在 celestial_orbit.py 中的轨道周期和颜色迁移到数据库的 extra_data 字段
这样用户可以在后台界面直接编辑这些参数
"""
import asyncio
import sys
from pathlib import Path
# Add backend directory to path
backend_dir = Path(__file__).parent.parent
sys.path.insert(0, str(backend_dir))
from sqlalchemy import select, update
from app.database import AsyncSessionLocal
from app.models.db.celestial_body import CelestialBody
# Orbit parameters (migrated from hard-coded values in celestial_orbit.py).
# Keyed by JPL Horizons body ID.  Per entry:
#   orbit_period_days - full orbital period, in days
#   orbit_color       - hex color used when rendering the orbit line
#   name_zh           - Chinese display name (used only for log output in
#                       this script; it is not written back to the database)
ORBIT_PARAMS = {
    # Planets - full orbital periods
    "199": {
        "orbit_period_days": 88.0,
        "orbit_color": "#8C7853",
        "name_zh": "水星"
    },
    "299": {
        "orbit_period_days": 224.7,
        "orbit_color": "#FFC649",
        "name_zh": "金星"
    },
    "399": {
        "orbit_period_days": 365.25,
        "orbit_color": "#4A90E2",
        "name_zh": "地球"
    },
    "499": {
        "orbit_period_days": 687.0,
        "orbit_color": "#CD5C5C",
        "name_zh": "火星"
    },
    "599": {
        "orbit_period_days": 4333.0,
        "orbit_color": "#DAA520",
        "name_zh": "木星"
    },
    "699": {
        "orbit_period_days": 10759.0,
        "orbit_color": "#F4A460",
        "name_zh": "土星"
    },
    "799": {
        "orbit_period_days": 30687.0,
        "orbit_color": "#4FD1C5",
        "name_zh": "天王星"
    },
    "899": {
        "orbit_period_days": 60190.0,
        "orbit_color": "#4169E1",
        "name_zh": "海王星"
    },
    # Dwarf planets - full orbital periods
    "999": {
        "orbit_period_days": 90560.0,
        "orbit_color": "#8B7355",
        "name_zh": "冥王星"
    },
    "2000001": {
        "orbit_period_days": 1680.0,
        "orbit_color": "#9E9E9E",
        "name_zh": "谷神星"
    },
    "136199": {
        "orbit_period_days": 203500.0,
        "orbit_color": "#E0E0E0",
        "name_zh": "阋神星"
    },
    "136108": {
        "orbit_period_days": 104000.0,
        "orbit_color": "#D4A574",
        "name_zh": "妊神星"
    },
    "136472": {
        "orbit_period_days": 112897.0,
        "orbit_color": "#C49A6C",
        "name_zh": "鸟神星"
    },
}
async def update_orbit_parameters():
    """Write the orbit period and color from ORBIT_PARAMS into each body's
    extra_data JSON column, reporting any body missing from the database."""
    async with AsyncSessionLocal() as session:
        print("🔄 开始更新轨道参数...\n")
        updated = 0
        missing = 0
        for body_id, params in ORBIT_PARAMS.items():
            # Look up the body row by its Horizons ID.
            lookup = await session.execute(
                select(CelestialBody).where(CelestialBody.id == body_id)
            )
            body = lookup.scalar_one_or_none()
            if body is None:
                print(f"⚠️ 天体 {body_id} ({params['name_zh']}) 未找到")
                missing += 1
                continue
            # Build a fresh merged dict instead of mutating extra_data in place.
            merged = dict(body.extra_data or {})
            merged["orbit_period_days"] = params["orbit_period_days"]
            merged["orbit_color"] = params["orbit_color"]
            # Persist via an explicit UPDATE statement.
            await session.execute(
                update(CelestialBody)
                .where(CelestialBody.id == body_id)
                .values(extra_data=merged)
            )
            print(f"✅ {params['name_zh']:8s} (ID: {body_id:7s}) - "
                  f"周期: {params['orbit_period_days']:8.1f} 天 ({params['orbit_period_days']/365.25:6.2f} 年), "
                  f"颜色: {params['orbit_color']}")
            updated += 1
        await session.commit()
        print(f"\n{'='*80}")
        print(f"✅ 更新完成: {updated} 个天体")
        if missing > 0:
            print(f"⚠️ 未找到: {missing} 个天体")
        print(f"{'='*80}")
async def main():
    """Entry point: run the orbit-parameter update, then print reference
    data for Vesta (not yet present in ORBIT_PARAMS) for manual entry."""
    print("=" * 80)
    print("太阳系行星和矮行星轨道参数更新工具")
    print("=" * 80)
    print()
    await update_orbit_parameters()
    # Informational only: Vesta's parameters are printed for the operator,
    # nothing below writes to the database.
    print("\n灶神星Vesta轨道参数")
    print("=" * 80)
    print("JPL Horizons ID: 2000004")
    print("英文名: Vesta")
    print("中文名: 灶神星")
    print("类型: 矮行星 (dwarf_planet)")
    print()
    print("轨道参数:")
    print(" - 轨道周期: 1325.46 天 (约 3.63 年)")
    print(" - 建议颜色: #A8A8A8 (浅灰色)")
    print(" - 半长轴: 2.36 AU")
    print(" - 离心率: 0.089")
    print(" - 轨道倾角: 7.14°")
    print()
    print("描述: 灶神星是小行星带中第二大的小行星直径约525公里。")
    print(" 它是唯一一颗肉眼可见的小行星,也是黎明号探测器访问过的天体。")
    print("=" * 80)
if __name__ == "__main__":
    asyncio.run(main())

View File

@ -1,87 +0,0 @@
"""
Update existing job to use predefined task and add new event sync job
"""
import asyncio
import sys
from pathlib import Path
# Add backend to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from sqlalchemy import text, update
from app.database import engine
from app.models.db.scheduled_job import ScheduledJob, JobType
async def update_jobs():
    """Update existing job and add new event sync job.

    Idempotent maintenance script:
      1. converts job id=1 to the predefined sync_solar_system_positions task;
      2. inserts the (disabled) celestial-events sync job if it is missing;
      3. prints the resulting scheduled_jobs table.
    """
    async with engine.begin() as conn:
        print("🔄 Updating scheduled jobs...")
        # 1. Update existing job to use predefined task.
        result = await conn.execute(text("""
            UPDATE scheduled_jobs
            SET
                job_type = 'predefined',
                predefined_function = 'sync_solar_system_positions',
                function_params = '{"days": 7, "source": "nasa_horizons_cron"}'::jsonb,
                description = '每日同步太阳系天体位置数据(使用内置任务)'
            WHERE id = 1
            RETURNING id, name
        """))
        updated = result.fetchone()
        if updated:
            print(f"✅ Updated job ID {updated[0]}: {updated[1]} -> predefined task")
        # 2. Add new celestial events sync job (disabled).
        # Bug fix: the original used ON CONFLICT DO NOTHING, but
        # scheduled_jobs has no unique constraint on `name`, so the clause
        # never fired and every re-run inserted a duplicate row.  Guard the
        # insert with NOT EXISTS instead (same "already exists" reporting).
        result = await conn.execute(text("""
            INSERT INTO scheduled_jobs (
                name,
                job_type,
                predefined_function,
                function_params,
                cron_expression,
                description,
                is_active
            )
            SELECT
                '天体事件同步',
                'predefined',
                'sync_celestial_events',
                '{"days_ahead": 30}'::jsonb,
                '0 3 * * *',
                '每日凌晨3点同步未来30天的天体事件预留功能暂未实现',
                false
            WHERE NOT EXISTS (
                SELECT 1 FROM scheduled_jobs WHERE name = '天体事件同步'
            )
            RETURNING id, name
        """))
        new_job = result.fetchone()
        if new_job:
            print(f"✅ Added new job ID {new_job[0]}: {new_job[1]} (disabled)")
        else:
            print(" Event sync job already exists")
        # 3. Show all jobs.
        print("\n📋 Current scheduled jobs:")
        result = await conn.execute(text("""
            SELECT
                id,
                name,
                job_type,
                predefined_function,
                is_active,
                cron_expression
            FROM scheduled_jobs
            ORDER BY id
        """))
        for row in result.fetchall():
            # Row indices: 0=id, 1=name, 2=job_type, 3=predefined_function,
            # 4=is_active, 5=cron_expression.
            status = "🟢 启用" if row[4] else "🔴 禁用"
            job_type_display = "内置任务" if row[2] == 'predefined' else "自定义代码"
            print(f" {status} ID {row[0]}: {row[1]}")
            print(f" 类型: {job_type_display} | 函数: {row[3]} | CRON: {row[5]}")
if __name__ == "__main__":
    asyncio.run(update_jobs())

View File

@ -1,274 +0,0 @@
-- ============================================================
-- Production Database Upgrade Script (Complete)
-- ============================================================
-- Performs the following upgrades, strictly as specified:
-- 1. celestial_bodies - add short_name column
-- 2. menus + role_menus - full import
-- 3. celestial_events - full import (new table)
-- 4. scheduled_jobs - full import (new table)
-- 5. system_settings - full import
-- 6. user_follows - full import (new table)
-- ============================================================
-- "God mode": the replica session role makes PostgreSQL skip
-- foreign-key checks and triggers (requires superuser).
SET session_replication_role = 'replica';
BEGIN;
-- ============================================================
-- 1. Add short_name column to celestial_bodies
-- ============================================================
-- Idempotent: the column is only added when it does not exist yet.
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1 FROM information_schema.columns
        WHERE table_name = 'celestial_bodies'
        AND column_name = 'short_name'
    ) THEN
        ALTER TABLE celestial_bodies ADD COLUMN short_name VARCHAR(50);
        RAISE NOTICE 'Added short_name column';
    END IF;
END $$;
-- ============================================================
-- 2. Create and import the menus and role_menus tables
-- ============================================================
-- Clear existing data (only if the table already exists).
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'menus') THEN
        TRUNCATE TABLE menus CASCADE;
    END IF;
END $$;
-- Create the menus table (if it does not exist yet).
CREATE TABLE IF NOT EXISTS menus (
    id SERIAL PRIMARY KEY,
    parent_id INTEGER REFERENCES menus(id) ON DELETE CASCADE,
    name VARCHAR(100) NOT NULL,
    title VARCHAR(100) NOT NULL,
    icon VARCHAR(50),
    path VARCHAR(200),
    component VARCHAR(200),
    sort_order INTEGER DEFAULT 0,
    is_active BOOLEAN DEFAULT true,
    description TEXT,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW()
);
-- Insert menu rows (explicit ids so the role_menus grants below line up).
INSERT INTO menus (id, parent_id, name, title, icon, path, component, sort_order, is_active, description, created_at, updated_at) VALUES
(1, NULL, 'dashboard', '控制台', 'dashboard', '/admin/dashboard', 'admin/Dashboard', 1, true, '系统控制台', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(2, NULL, 'data_management', '数据管理', 'database', '', '', 2, true, '数据管理模块', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(6, NULL, 'platform_management', '平台管理', 'settings', '', '', 3, true, '管理用户和系统参数', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(14, NULL, 'user_profile', '个人资料', 'profile', '/user/profile', 'user/Profile', 1, true, '个人资料管理', '2025-12-18 16:26:11.778475', '2025-12-18 16:26:11.778475'),
(15, NULL, 'user_follow', '我的天体', 'star', '/user/follow', 'user/UserFollow', 2, true, '我关注的天体', '2025-12-18 16:27:48.688747', '2025-12-18 16:27:48.688747'),
(11, 2, 'star_systems', '恒星系统管理', 'StarOutlined', '/admin/star-systems', 'StarSystems', 1, true, '管理太阳系和系外恒星系统', '2025-12-06 02:35:21.137234', '2025-12-06 02:35:21.137234'),
(3, 2, 'celestial_bodies', '天体数据管理', NULL, '/admin/celestial-bodies', 'admin/CelestialBodies', 2, true, '查看和管理天体数据', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(4, 2, 'static_data', '静态数据管理', NULL, '/admin/static-data', 'admin/StaticData', 2, true, '查看和管理静态数据(星座、星系等)', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(5, 2, 'nasa_data', 'Horizon数据下载', NULL, '/admin/nasa-data', 'admin/NasaData', 3, true, '管理NASA Horizons数据下载', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(13, 2, 'celestial_events', '天体事件', 'CalendarOutlined', '/admin/celestial-events', '', 4, true, '', '2025-12-15 03:20:39.798021', '2025-12-15 03:20:39.798021'),
(7, 6, 'user_management', '用户管理', NULL, '/admin/users', 'admin/Users', 1, true, '管理系统用户账号', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(8, 6, 'platform_parameters_management', '平台参数管理', NULL, '/admin/settings', 'admin/Settings', 2, true, '管理系统通用配置参数', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(12, 6, 'scheduled_jobs', '定时任务设置', 'ClockCircleOutlined', '/admin/scheduled-jobs', 'admin/ScheduledJobs', 5, true, '管理系统定时任务及脚本', '2025-12-10 17:42:38.031518', '2025-12-10 17:42:38.031518'),
(10, 6, 'system_tasks', '系统任务监控', 'schedule', '/admin/tasks', 'admin/Tasks', 30, true, '', '2025-11-30 16:04:59.572869', '2025-11-30 16:04:59.572869');
-- Reset sequence (the explicit ids above bypass the SERIAL sequence).
SELECT setval('menus_id_seq', (SELECT MAX(id) FROM menus));
-- Create the role_menus table (if it does not exist yet).
CREATE TABLE IF NOT EXISTS role_menus (
    role_id INTEGER NOT NULL,
    menu_id INTEGER NOT NULL,
    PRIMARY KEY (role_id, menu_id)
);
-- Clear and re-insert role_menus data.
TRUNCATE TABLE role_menus;
-- Look up the admin and user role ids dynamically, then insert the grants.
DO $$
DECLARE
    admin_role_id INTEGER;
    user_role_id INTEGER;
BEGIN
    SELECT id INTO admin_role_id FROM roles WHERE name = 'admin' LIMIT 1;
    SELECT id INTO user_role_id FROM roles WHERE name = 'user' LIMIT 1;
    IF admin_role_id IS NOT NULL THEN
        INSERT INTO role_menus (role_id, menu_id) VALUES
        (admin_role_id, 1), (admin_role_id, 2), (admin_role_id, 3), (admin_role_id, 4),
        (admin_role_id, 5), (admin_role_id, 6), (admin_role_id, 7), (admin_role_id, 8),
        (admin_role_id, 10), (admin_role_id, 11), (admin_role_id, 12), (admin_role_id, 13),
        (admin_role_id, 14), (admin_role_id, 15);
    END IF;
    IF user_role_id IS NOT NULL THEN
        INSERT INTO role_menus (role_id, menu_id) VALUES
        (user_role_id, 14), (user_role_id, 15);
    END IF;
END $$;
-- ============================================================
-- 3. Create and import the celestial_events table (new table)
-- ============================================================
-- NOTE(review): this schema (body_id VARCHAR(20), no FK, extra
-- related_body_* columns) differs from the celestial_events layout
-- documented in DATABASE_SCHEMA.md (VARCHAR(50) + FK + CHECK) —
-- confirm which definition is canonical.
CREATE TABLE IF NOT EXISTS celestial_events (
    id SERIAL PRIMARY KEY,
    body_id VARCHAR(20) NOT NULL,
    body_name VARCHAR(100),
    event_type VARCHAR(50) NOT NULL,
    event_time TIMESTAMP NOT NULL,
    longitude DECIMAL(10, 6),
    latitude DECIMAL(10, 6),
    description TEXT,
    source VARCHAR(50),
    related_body_id VARCHAR(20),
    related_body_name VARCHAR(100),
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW()
);
-- Create indexes
CREATE INDEX IF NOT EXISTS idx_celestial_events_body_id ON celestial_events(body_id);
CREATE INDEX IF NOT EXISTS idx_celestial_events_event_time ON celestial_events(event_time);
CREATE INDEX IF NOT EXISTS idx_celestial_events_event_type ON celestial_events(event_type);
-- Clear data (add INSERT statements here if concrete data must be imported).
TRUNCATE TABLE celestial_events;
-- ============================================================
-- 4. Create and import the scheduled_jobs table (new table)
-- ============================================================
CREATE TABLE IF NOT EXISTS scheduled_jobs (
    id SERIAL PRIMARY KEY,
    name VARCHAR(200) NOT NULL,
    cron_expression VARCHAR(100) NOT NULL,
    python_code TEXT,
    is_active BOOLEAN DEFAULT true,
    last_run_at TIMESTAMP,
    last_run_status VARCHAR(50),
    next_run_at TIMESTAMP,
    description TEXT,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    job_type VARCHAR(50) DEFAULT 'custom',
    predefined_function VARCHAR(200),
    function_params JSONB
);
-- Clear and re-insert the two predefined jobs.
TRUNCATE TABLE scheduled_jobs;
INSERT INTO scheduled_jobs (id, name, cron_expression, python_code, is_active, last_run_at, last_run_status, next_run_at, description, created_at, updated_at, job_type, predefined_function, function_params) VALUES
(1, '每日更新天体位置数据', '0 2 * * *', NULL, false, NULL, NULL, NULL, '每天凌晨2点自动从NASA Horizons下载主要天体的位置数据', '2025-12-10 17:43:01.234567', '2025-12-10 17:43:01.234567', 'predefined', 'download_positions_task', '{"body_ids": ["10", "199", "299", "399", "301", "499", "599", "699", "799", "899"], "days_range": "3"}'),
(2, '获取主要天体的食、合、冲等事件', '0 3 1 * *', NULL, true, NULL, NULL, NULL, '每月1日凌晨3点计算未来一年的主要天文事件', '2025-12-10 17:43:01.234567', '2025-12-10 17:43:01.234567', 'predefined', 'calculate_planetary_events', '{"body_ids": ["199", "299", "499", "599", "699", "799", "899"], "days_ahead": "365", "clean_old_events": true, "threshold_degrees": "5", "calculate_close_approaches": true}');
-- Reset sequence (the explicit ids above bypass the SERIAL sequence).
SELECT setval('scheduled_jobs_id_seq', (SELECT MAX(id) FROM scheduled_jobs));
-- ============================================================
-- 5. Import the system_settings table
-- ============================================================
-- Create table (if it does not exist yet).
CREATE TABLE IF NOT EXISTS system_settings (
    key VARCHAR(100) PRIMARY KEY,
    value TEXT,
    value_type VARCHAR(20) DEFAULT 'string',
    category VARCHAR(50),
    label VARCHAR(200),
    description TEXT,
    is_public BOOLEAN DEFAULT false,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW()
);
-- Insert or update data (upsert keyed on `key`).
INSERT INTO system_settings (key, value, value_type, category, label, description, is_public, created_at, updated_at) VALUES
('view_mode', 'solar', 'string', 'ui', '默认视图模式', '系统默认的3D场景视图模式solar或galaxy', true, NOW(), NOW()),
('nasa_api_timeout', '120', 'int', 'api', 'NASA API超时时间', 'NASA Horizons API请求超时时间', false, NOW(), NOW()),
('auto_download_positions', 'False', 'bool', 'system', '自动下载位置数据', '当位置数据不存在时是否自动从NASA Horizons下载', false, NOW(), NOW())
ON CONFLICT (key) DO UPDATE SET
    value = EXCLUDED.value,
    value_type = EXCLUDED.value_type,
    category = EXCLUDED.category,
    label = EXCLUDED.label,
    description = EXCLUDED.description,
    is_public = EXCLUDED.is_public,
    updated_at = NOW();
-- ============================================================
-- 6. Create and import the user_follows table (new table)
-- ============================================================
CREATE TABLE IF NOT EXISTS user_follows (
    user_id INTEGER NOT NULL,
    body_id INTEGER NOT NULL,
    created_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (user_id, body_id)
);
-- Create indexes
CREATE INDEX IF NOT EXISTS idx_user_follows_user_id ON user_follows(user_id);
CREATE INDEX IF NOT EXISTS idx_user_follows_body_id ON user_follows(body_id);
-- Existing follow data is deliberately preserved (no TRUNCATE/import here).
-- Add INSERT statements above COMMIT if data needs to be imported.
-- ============================================================
-- Commit the transaction
-- ============================================================
COMMIT;
-- ============================================================
-- Restore normal (non-replica) mode so FK checks/triggers fire again
-- ============================================================
SET session_replication_role = 'origin';
-- ============================================================
-- Verification: report presence of each upgraded object
-- ============================================================
SELECT 'celestial_bodies.short_name' as "Item",
    CASE WHEN EXISTS(
        SELECT 1 FROM information_schema.columns
        WHERE table_name='celestial_bodies' AND column_name='short_name'
    ) THEN '✓ EXISTS' ELSE '✗ MISSING' END as "Status"
UNION ALL
SELECT 'menus table',
    CASE WHEN EXISTS(SELECT 1 FROM information_schema.tables WHERE table_name='menus')
    THEN '✓ EXISTS' ELSE '✗ MISSING' END
UNION ALL
SELECT 'celestial_events table',
    CASE WHEN EXISTS(SELECT 1 FROM information_schema.tables WHERE table_name='celestial_events')
    THEN '✓ EXISTS' ELSE '✗ MISSING' END
UNION ALL
SELECT 'scheduled_jobs table',
    CASE WHEN EXISTS(SELECT 1 FROM information_schema.tables WHERE table_name='scheduled_jobs')
    THEN '✓ EXISTS' ELSE '✗ MISSING' END
UNION ALL
SELECT 'user_follows table',
    CASE WHEN EXISTS(SELECT 1 FROM information_schema.tables WHERE table_name='user_follows')
    THEN '✓ EXISTS' ELSE '✗ MISSING' END;
-- Check record counts for the imported tables
SELECT 'menus' as "Table", COUNT(*)::text || ' records' as "Count" FROM menus
UNION ALL
SELECT 'role_menus', COUNT(*)::text || ' records' FROM role_menus
UNION ALL
SELECT 'celestial_events', COUNT(*)::text || ' records' FROM celestial_events
UNION ALL
SELECT 'scheduled_jobs', COUNT(*)::text || ' records' FROM scheduled_jobs
UNION ALL
SELECT 'system_settings', COUNT(*)::text || ' records' FROM system_settings
UNION ALL
SELECT 'user_follows', COUNT(*)::text || ' records' FROM user_follows;

View File

@ -1,249 +0,0 @@
#!/bin/bash
# ============================================================
# Production database upgrade driver
# ============================================================
# Uses session_replication_role='replica' to bypass foreign-key
# constraints during import, which greatly improves upgrade speed
# and success rate.  Requires docker access to the postgres
# container and a superuser database role.
# ============================================================
set -e
# Configuration
CONTAINER="cosmo_postgres"
DB_NAME="cosmo_db"
DB_USER="postgres"
BACKUP_FILE="backup_$(date +%Y%m%d_%H%M%S).sql"
SCRIPT_FILE="upgrade_production_final.sql"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# ANSI colors for console output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
# Small echo helpers with consistent coloring/prefixes.
print_info() { echo -e "${BLUE} ${1}${NC}"; }
print_success() { echo -e "${GREEN}${1}${NC}"; }
print_warning() { echo -e "${YELLOW}⚠️ ${1}${NC}"; }
print_error() { echo -e "${RED}${1}${NC}"; }
print_step() { echo -e "${CYAN}${1}${NC}"; }
# Verify that the postgres container is running before touching anything.
check_container() {
    print_step "检查 Docker 容器状态..."
    if ! docker ps --format '{{.Names}}' | grep -q "^${CONTAINER}$"; then
        print_error "容器 ${CONTAINER} 未运行!"
        docker ps --format "table {{.Names}}\t{{.Status}}"
        exit 1
    fi
    print_success "容器运行正常"
}
# Verify the upgrade SQL script exists next to this driver script.
check_script() {
    print_step "检查升级脚本..."
    if [ ! -f "${SCRIPT_DIR}/${SCRIPT_FILE}" ]; then
        print_error "找不到 ${SCRIPT_FILE}"
        exit 1
    fi
    print_success "脚本就绪"
}
# session_replication_role='replica' requires superuser; fail fast otherwise.
check_permissions() {
    print_step "检查数据库权限..."
    SUPERUSER=$(docker exec ${CONTAINER} psql -U ${DB_USER} -d ${DB_NAME} -t -c \
        "SELECT usesuper FROM pg_user WHERE usename = current_user;" | tr -d ' ')
    if [ "$SUPERUSER" != "t" ]; then
        print_error "用户 ${DB_USER} 不是 superuser"
        echo ""
        print_warning "session_replication_role 需要 superuser 权限"
        echo "解决方案:"
        echo " 1. 使用 superuser 账号执行升级"
        echo " 2. 或临时授予权限: ALTER USER ${DB_USER} WITH SUPERUSER;"
        exit 1
    fi
    print_success "权限检查通过 (superuser)"
}
# The SQL script has two mutually-exclusive variants of the roles import,
# depending on whether roles.display_name exists.  The operator must have
# (un)commented the matching variant by hand, so ask for confirmation.
check_display_name() {
    print_step "检查 roles 表结构..."
    HAS_DISPLAY_NAME=$(docker exec ${CONTAINER} psql -U ${DB_USER} -d ${DB_NAME} -t -c \
        "SELECT COUNT(*) FROM information_schema.columns
         WHERE table_name = 'roles' AND column_name = 'display_name';" | tr -d ' ')
    if [ "$HAS_DISPLAY_NAME" = "1" ]; then
        print_info "检测到 display_name 字段(将使用对应版本)"
        echo ""
        print_warning "请确认 upgrade_production_final.sql 中:"
        echo " - 第 20-27 行(带 display_name未注释"
        echo " - 第 29-36 行(不带 display_name已注释"
        echo ""
        read -p "是否确认脚本已正确配置? (yes/no): " confirm
        if [ "$confirm" != "yes" ]; then
            print_info "升级已取消,请检查脚本配置"
            exit 0
        fi
    else
        print_info "未检测到 display_name 字段"
        echo ""
        print_warning "请确认 upgrade_production_final.sql 中:"
        echo " - 第 20-27 行(带 display_name已注释"
        echo " - 第 29-36 行(不带 display_name未注释"
        echo ""
        read -p "是否确认脚本已正确配置? (yes/no): " confirm
        if [ "$confirm" != "yes" ]; then
            print_info "升级已取消,请检查脚本配置"
            exit 0
        fi
    fi
}
# Dump the full database to a timestamped file before changing anything.
backup_database() {
    print_step "备份数据库..."
    if docker exec ${CONTAINER} pg_dump -U ${DB_USER} -d ${DB_NAME} > "${SCRIPT_DIR}/${BACKUP_FILE}"; then
        SIZE=$(ls -lh "${SCRIPT_DIR}/${BACKUP_FILE}" | awk '{print $5}')
        print_success "备份完成: ${BACKUP_FILE} (${SIZE})"
    else
        print_error "备份失败!"
        exit 1
    fi
}
# Run the upgrade SQL against the container's database.
# Fix: psql returns exit code 0 even when individual SQL statements fail,
# so the success/failure branch below (and the rollback messaging in main)
# was unreliable.  --set ON_ERROR_STOP=on makes psql stop and exit non-zero
# on the first SQL error.  Feeding the file via stdin redirection also
# avoids the useless `cat | ...` pipeline.
execute_upgrade() {
    print_step "执行数据库升级..."
    echo "========================================================"
    if docker exec -i ${CONTAINER} psql --set ON_ERROR_STOP=on -U ${DB_USER} -d ${DB_NAME} < "${SCRIPT_DIR}/${SCRIPT_FILE}"; then
        echo "========================================================"
        print_success "升级执行完成!"
        return 0
    else
        echo "========================================================"
        print_error "升级失败!"
        return 1
    fi
}
# Print a post-upgrade summary: key schema objects plus row counts
# of the imported tables.
show_verification() {
    print_step "数据验证..."
    echo ""
    docker exec ${CONTAINER} psql -U ${DB_USER} -d ${DB_NAME} -c "
    SELECT
        'celestial_bodies.short_name' as item,
        CASE WHEN EXISTS(
            SELECT 1 FROM information_schema.columns
            WHERE table_name='celestial_bodies' AND column_name='short_name'
        ) THEN '✓ 存在' ELSE '✗ 缺失' END as status
    UNION ALL
    SELECT
        'roles',
        COUNT(*)::text || ' 条记录'
    FROM roles
    UNION ALL
    SELECT
        'menus',
        COUNT(*)::text || ' 条记录'
    FROM menus
    UNION ALL
    SELECT
        'role_menus',
        COUNT(*)::text || ' 条记录'
    FROM role_menus
    UNION ALL
    SELECT
        'scheduled_jobs',
        COUNT(*)::text || ' 条记录'
    FROM scheduled_jobs
    UNION ALL
    SELECT
        'system_settings',
        COUNT(*)::text || ' 条记录'
    FROM system_settings;
    " -t
    echo ""
}
# Print the one-liner needed to restore the pre-upgrade dump.
show_rollback_info() {
    echo ""
    print_warning "如需回滚,执行:"
    echo "cat ${SCRIPT_DIR}/${BACKUP_FILE} | docker exec -i ${CONTAINER} psql -U ${DB_USER} -d ${DB_NAME}"
    echo ""
}
# Main flow: pre-flight checks -> operator confirmation -> backup ->
# upgrade -> verification (or rollback instructions on failure).
main() {
    echo "============================================================"
    echo "  生产数据库终极升级脚本"
    echo "  使用 session_replication_role 技术"
    echo "============================================================"
    echo ""
    # Pre-flight checks (each exits on failure).
    check_container
    check_script
    check_permissions
    check_display_name
    # Operator confirmation before any destructive step.
    echo ""
    print_warning "即将执行以下操作:"
    echo " 1. 备份当前数据库"
    echo " 2. 使用 replica 模式绕过外键约束"
    echo " 3. 导入所有数据(无需关心顺序)"
    echo " 4. 恢复正常模式并验证数据完整性"
    echo ""
    echo "受影响的表:"
    echo " • celestial_bodies - 添加 short_name 字段"
    echo " • roles - 创建/更新记录"
    echo " • menus - 清空并重新导入 (14条)"
    echo " • role_menus - 清空并重新导入 (16条)"
    echo " • celestial_events - 清空"
    echo " • scheduled_jobs - 清空并重新导入 (2条)"
    echo " • system_settings - 导入/更新 (3条)"
    echo " • user_roles - 为现有用户分配角色"
    echo ""
    read -p "是否继续? (yes/no): " confirm
    if [ "$confirm" != "yes" ]; then
        print_info "升级已取消"
        exit 0
    fi
    # Execute: always back up first, then apply and verify.
    echo ""
    backup_database
    if execute_upgrade; then
        show_verification
        print_success "🎉 数据库升级成功!"
        show_rollback_info
        echo ""
        print_info "后续步骤:"
        echo " 1. 重启后端服务: docker restart cosmo-backend"
        echo " 2. 登录系统验证菜单显示"
        echo " 3. 测试用户功能"
        echo ""
        exit 0
    else
        # NOTE(review): the message claims an automatic rollback, but this
        # script does not restore the backup itself — confirm the SQL file's
        # transaction actually covers all failing statements.
        print_error "升级失败(已自动回滚)"
        show_rollback_info
        exit 1
    fi
}
# Script entry point.
main

View File

@ -1,126 +0,0 @@
-- ============================================================
-- Production Database Upgrade Script (Simplified)
-- ============================================================
-- Goal: update menu-related data only
--   1. Add celestial_bodies.short_name column
--   2. Refresh the menus table
--   3. Refresh the role_menus table
-- ============================================================
-- "God mode": bypass FK constraint checks and user triggers for this
-- session (requires superuser; restored to 'origin' at the end)
SET session_replication_role = 'replica';
BEGIN;
-- ============================================================
-- 1. Add short_name column to celestial_bodies
-- ============================================================
-- Idempotent: only adds the column when it is not already present
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM information_schema.columns
WHERE table_name = 'celestial_bodies'
AND column_name = 'short_name'
) THEN
ALTER TABLE celestial_bodies ADD COLUMN short_name VARCHAR(50);
RAISE NOTICE 'Added short_name column to celestial_bodies';
ELSE
RAISE NOTICE 'short_name column already exists';
END IF;
END $$;
-- ============================================================
-- 2. Refresh the menus table
-- ============================================================
-- Wipe existing menu rows; CASCADE also clears dependent role_menus rows
TRUNCATE TABLE menus CASCADE;
-- Insert the new menu tree with explicit ids. FK triggers are disabled in
-- replica mode, so child rows may legally appear before their parents.
INSERT INTO menus (id, parent_id, name, title, icon, path, component, sort_order, is_active, description, created_at, updated_at) VALUES
(1, NULL, 'dashboard', '控制台', 'dashboard', '/admin/dashboard', 'admin/Dashboard', 1, true, '系统控制台', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(2, NULL, 'data_management', '数据管理', 'database', '', '', 2, true, '数据管理模块', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(6, NULL, 'platform_management', '平台管理', 'settings', '', '', 3, true, '管理用户和系统参数', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(14, NULL, 'user_profile', '个人资料', 'profile', '/user/profile', 'user/Profile', 1, true, '个人资料管理', '2025-12-18 16:26:11.778475', '2025-12-18 16:26:11.778475'),
(15, NULL, 'user_follow', '我的天体', 'star', '/user/follow', 'user/UserFollow', 2, true, '我关注的天体', '2025-12-18 16:27:48.688747', '2025-12-18 16:27:48.688747'),
(11, 2, 'star_systems', '恒星系统管理', 'StarOutlined', '/admin/star-systems', 'StarSystems', 1, true, '管理太阳系和系外恒星系统', '2025-12-06 02:35:21.137234', '2025-12-06 02:35:21.137234'),
(3, 2, 'celestial_bodies', '天体数据管理', NULL, '/admin/celestial-bodies', 'admin/CelestialBodies', 2, true, '查看和管理天体数据', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(4, 2, 'static_data', '静态数据管理', NULL, '/admin/static-data', 'admin/StaticData', 2, true, '查看和管理静态数据(星座、星系等)', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(5, 2, 'nasa_data', 'Horizon数据下载', NULL, '/admin/nasa-data', 'admin/NasaData', 3, true, '管理NASA Horizons数据下载', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(13, 2, 'celestial_events', '天体事件', 'CalendarOutlined', '/admin/celestial-events', '', 4, true, '', '2025-12-15 03:20:39.798021', '2025-12-15 03:20:39.798021'),
(7, 6, 'user_management', '用户管理', NULL, '/admin/users', 'admin/Users', 1, true, '管理系统用户账号', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(8, 6, 'platform_parameters_management', '平台参数管理', NULL, '/admin/settings', 'admin/Settings', 2, true, '管理系统通用配置参数', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(12, 6, 'scheduled_jobs', '定时任务设置', 'ClockCircleOutlined', '/admin/scheduled-jobs', 'admin/ScheduledJobs', 5, true, '管理系统定时任务及脚本', '2025-12-10 17:42:38.031518', '2025-12-10 17:42:38.031518'),
(10, 6, 'system_tasks', '系统任务监控', 'schedule', '/admin/tasks', 'admin/Tasks', 30, true, '', '2025-11-30 16:04:59.572869', '2025-11-30 16:04:59.572869');
-- Keep the sequence in sync with the explicit ids inserted above
SELECT setval('menus_id_seq', (SELECT MAX(id) FROM menus));
-- ============================================================
-- 3. Refresh the role_menus table
-- ============================================================
-- Resolve the actual admin/user role ids by name instead of hard-coding
-- them (production uses ids 3 and 4, not the default 1 and 2)
DO $$
DECLARE
admin_role_id INTEGER;
user_role_id INTEGER;
BEGIN
-- Look up the admin role id
SELECT id INTO admin_role_id FROM roles WHERE name = 'admin' LIMIT 1;
-- Look up the user role id
SELECT id INTO user_role_id FROM roles WHERE name = 'user' LIMIT 1;
IF admin_role_id IS NULL OR user_role_id IS NULL THEN
RAISE EXCEPTION 'Cannot find admin or user role';
END IF;
-- Insert role_menus using the resolved role ids
-- Admin role may access every menu
INSERT INTO role_menus (role_id, menu_id) VALUES
(admin_role_id, 1), (admin_role_id, 2), (admin_role_id, 3), (admin_role_id, 4),
(admin_role_id, 5), (admin_role_id, 6), (admin_role_id, 7), (admin_role_id, 8),
(admin_role_id, 10), (admin_role_id, 11), (admin_role_id, 12), (admin_role_id, 13),
(admin_role_id, 14), (admin_role_id, 15);
-- User role may only access the user-facing menus (profile, follows)
INSERT INTO role_menus (role_id, menu_id) VALUES
(user_role_id, 14), (user_role_id, 15);
RAISE NOTICE 'Inserted role_menus for admin_role_id=% and user_role_id=%', admin_role_id, user_role_id;
END $$;
-- ============================================================
-- Commit the transaction
-- ============================================================
COMMIT;
-- ============================================================
-- Restore normal mode (re-enable FK checks and triggers)
-- ============================================================
SET session_replication_role = 'origin';
-- ============================================================
-- Verification (read-only, runs after the commit)
-- ============================================================
-- Confirm the short_name column now exists
SELECT 'celestial_bodies.short_name' as "Item",
CASE WHEN EXISTS(
SELECT 1 FROM information_schema.columns
WHERE table_name='celestial_bodies' AND column_name='short_name'
) THEN '✓ EXISTS' ELSE '✗ MISSING' END as "Status";
-- Record counts for the refreshed tables
SELECT 'menus' as "Table", COUNT(*)::text || ' records' as "Count" FROM menus
UNION ALL
SELECT 'role_menus', COUNT(*)::text || ' records' FROM role_menus;
-- Menu count per role (LEFT JOIN keeps roles with zero menus)
SELECT r.name as role, COUNT(rm.menu_id) as menu_count
FROM roles r
LEFT JOIN role_menus rm ON r.id = rm.role_id
GROUP BY r.id, r.name
ORDER BY r.name;

View File

@ -1,237 +0,0 @@
-- ============================================================
-- Production Database Upgrade Script (Final Version)
-- ============================================================
-- Uses session_replication_role to bypass FK constraint checks;
-- a standard data-migration technique that removes ordering concerns
-- and speeds up bulk imports.
-- ============================================================
--
-- Advantages:
--   1. Insert order no longer matters
--   2. Much faster import
--   3. Transaction-safe: failure rolls everything back
--
-- NOTE: requires superuser privileges
-- ============================================================
-- "God mode": disable FK checks and user triggers for this session
SET session_replication_role = 'replica';
BEGIN;
-- ============================================================
-- 0. Ensure roles exist (schema variant WITH display_name column)
-- ============================================================
-- Variant 1: use this when the roles table has a display_name column.
-- Upsert keyed on roles.name, so reruns update rather than duplicate.
INSERT INTO roles (name, display_name, description, created_at, updated_at)
VALUES
('admin', '管理员', '管理员角色,拥有所有权限', NOW(), NOW()),
('user', '普通用户', '普通用户角色,只能访问基本功能', NOW(), NOW())
ON CONFLICT (name) DO UPDATE SET
display_name = EXCLUDED.display_name,
description = EXCLUDED.description,
updated_at = NOW();
-- Variant 2: if display_name does not exist, comment out the block above
-- and use this one instead
-- INSERT INTO roles (name, description, created_at, updated_at)
-- VALUES
-- ('admin', '管理员角色,拥有所有权限', NOW(), NOW()),
-- ('user', '普通用户角色,只能访问基本功能', NOW(), NOW())
-- ON CONFLICT (name) DO UPDATE SET
-- description = EXCLUDED.description,
-- updated_at = NOW();
-- Reset the roles sequence; COALESCE guards the (theoretical) empty-table
-- case, though at least two rows exist after the upsert above
SELECT setval('roles_id_seq', (SELECT COALESCE(MAX(id), 0) FROM roles));
-- ============================================================
-- 1. Add short_name column to celestial_bodies
-- ============================================================
-- Idempotent: only adds the column when it is not already present
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM information_schema.columns
WHERE table_name = 'celestial_bodies'
AND column_name = 'short_name'
) THEN
ALTER TABLE celestial_bodies ADD COLUMN short_name VARCHAR(50);
RAISE NOTICE 'Added short_name column to celestial_bodies';
ELSE
RAISE NOTICE 'short_name column already exists';
END IF;
END $$;
-- ============================================================
-- 2. Import menus and role_menus
-- ============================================================
-- Wipe existing menu rows; CASCADE also clears dependent role_menus rows
-- (TRUNCATE is unaffected by replica mode)
TRUNCATE TABLE menus CASCADE;
-- Insert the new menu tree with explicit ids. FK triggers are disabled in
-- replica mode, so child rows may legally appear before their parents.
INSERT INTO menus (id, parent_id, name, title, icon, path, component, sort_order, is_active, description, created_at, updated_at) VALUES
(1, NULL, 'dashboard', '控制台', 'dashboard', '/admin/dashboard', 'admin/Dashboard', 1, true, '系统控制台', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(2, NULL, 'data_management', '数据管理', 'database', '', '', 2, true, '数据管理模块', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(6, NULL, 'platform_management', '平台管理', 'settings', '', '', 3, true, '管理用户和系统参数', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(14, NULL, 'user_profile', '个人资料', 'profile', '/user/profile', 'user/Profile', 1, true, '个人资料管理', '2025-12-18 16:26:11.778475', '2025-12-18 16:26:11.778475'),
(15, NULL, 'user_follow', '我的天体', 'star', '/user/follow', 'user/UserFollow', 2, true, '我关注的天体', '2025-12-18 16:27:48.688747', '2025-12-18 16:27:48.688747'),
(11, 2, 'star_systems', '恒星系统管理', 'StarOutlined', '/admin/star-systems', 'StarSystems', 1, true, '管理太阳系和系外恒星系统', '2025-12-06 02:35:21.137234', '2025-12-06 02:35:21.137234'),
(3, 2, 'celestial_bodies', '天体数据管理', NULL, '/admin/celestial-bodies', 'admin/CelestialBodies', 2, true, '查看和管理天体数据', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(4, 2, 'static_data', '静态数据管理', NULL, '/admin/static-data', 'admin/StaticData', 2, true, '查看和管理静态数据(星座、星系等)', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(5, 2, 'nasa_data', 'Horizon数据下载', NULL, '/admin/nasa-data', 'admin/NasaData', 3, true, '管理NASA Horizons数据下载', '2025-11-28 18:07:11.767382', '2025-11-28 18:07:11.767382'),
(13, 2, 'celestial_events', '天体事件', 'CalendarOutlined', '/admin/celestial-events', '', 4, true, '', '2025-12-15 03:20:39.798021', '2025-12-15 03:20:39.798021'),
(7, 6, 'user_management', '用户管理', NULL, '/admin/users', 'admin/Users', 1, true, '管理系统用户账号', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(8, 6, 'platform_parameters_management', '平台参数管理', NULL, '/admin/settings', 'admin/Settings', 2, true, '管理系统通用配置参数', '2025-11-29 19:03:08.776597', '2025-11-29 19:03:08.776597'),
(12, 6, 'scheduled_jobs', '定时任务设置', 'ClockCircleOutlined', '/admin/scheduled-jobs', 'admin/ScheduledJobs', 5, true, '管理系统定时任务及脚本', '2025-12-10 17:42:38.031518', '2025-12-10 17:42:38.031518'),
(10, 6, 'system_tasks', '系统任务监控', 'schedule', '/admin/tasks', 'admin/Tasks', 30, true, '', '2025-11-30 16:04:59.572869', '2025-11-30 16:04:59.572869');
-- Keep the sequence in sync with the explicit ids inserted above
SELECT setval('menus_id_seq', (SELECT MAX(id) FROM menus));
-- Re-create role -> menu grants.
-- BUG FIX: the original hard-coded role_id 1 (admin) and 2 (user). In
-- production the roles carry different ids (documented elsewhere in this
-- repo as 3 and 4), and because replica mode disables FK checks the bad
-- rows were inserted silently, leaving orphaned role_menus entries that
-- the post-upgrade validation then flags. Resolve the ids by role name
-- instead, failing fast when a role is missing.
DO $$
DECLARE
    admin_role_id INTEGER;
    user_role_id  INTEGER;
BEGIN
    SELECT id INTO admin_role_id FROM roles WHERE name = 'admin' LIMIT 1;
    SELECT id INTO user_role_id  FROM roles WHERE name = 'user'  LIMIT 1;
    IF admin_role_id IS NULL OR user_role_id IS NULL THEN
        RAISE EXCEPTION 'Cannot find admin or user role';
    END IF;
    -- Admin role has access to all menus
    INSERT INTO role_menus (role_id, menu_id) VALUES
        (admin_role_id, 1), (admin_role_id, 2), (admin_role_id, 3), (admin_role_id, 4),
        (admin_role_id, 5), (admin_role_id, 6), (admin_role_id, 7), (admin_role_id, 8),
        (admin_role_id, 10), (admin_role_id, 11), (admin_role_id, 12), (admin_role_id, 13),
        (admin_role_id, 14), (admin_role_id, 15);
    -- User role has access to user menus only
    INSERT INTO role_menus (role_id, menu_id) VALUES
        (user_role_id, 14), (user_role_id, 15);
    RAISE NOTICE 'Inserted role_menus for admin_role_id=% and user_role_id=%', admin_role_id, user_role_id;
END $$;
-- ============================================================
-- 3. Import celestial_events
-- ============================================================
-- Only clear stale events here; fresh ones are produced by the scheduled
-- job imported below
TRUNCATE TABLE celestial_events;
-- ============================================================
-- 4. Import scheduled_jobs
-- ============================================================
TRUNCATE TABLE scheduled_jobs CASCADE;
INSERT INTO scheduled_jobs (id, name, cron_expression, python_code, is_active, last_run_at, last_run_status, next_run_at, description, created_at, updated_at, job_type, predefined_function, function_params) VALUES
(1, '每日更新天体位置数据', '0 2 * * *', NULL, false, NULL, NULL, NULL, '每天凌晨2点自动从NASA Horizons下载主要天体的位置数据', '2025-12-10 17:43:01.234567', '2025-12-10 17:43:01.234567', 'predefined', 'download_positions_task', '{"body_ids": ["10", "199", "299", "399", "301", "499", "599", "699", "799", "899"], "days_range": "3"}'),
(2, '获取主要天体的食、合、冲等事件', '0 3 1 * *', NULL, true, NULL, NULL, NULL, '每月1日凌晨3点计算未来一年的主要天文事件', '2025-12-10 17:43:01.234567', '2025-12-10 17:43:01.234567', 'predefined', 'calculate_planetary_events', '{"body_ids": ["199", "299", "499", "599", "699", "799", "899"], "days_ahead": "365", "clean_old_events": true, "threshold_degrees": "5", "calculate_close_approaches": true}');
-- Keep the sequence in sync with the explicit ids inserted above
SELECT setval('scheduled_jobs_id_seq', (SELECT MAX(id) FROM scheduled_jobs));
-- ============================================================
-- 5. Import system_settings
-- ============================================================
-- Upsert keyed on system_settings.key, so existing settings are
-- refreshed in place rather than duplicated
INSERT INTO system_settings (key, value, value_type, category, label, description, is_public, created_at, updated_at) VALUES
('view_mode', 'solar', 'string', 'ui', '默认视图模式', '系统默认的3D场景视图模式solar或galaxy', true, NOW(), NOW()),
('nasa_api_timeout', '120', 'int', 'api', 'NASA API超时时间', 'NASA Horizons API请求超时时间', false, NOW(), NOW()),
('auto_download_positions', 'False', 'bool', 'system', '自动下载位置数据', '当位置数据不存在时是否自动从NASA Horizons下载', false, NOW(), NOW())
ON CONFLICT (key) DO UPDATE SET
value = EXCLUDED.value,
value_type = EXCLUDED.value_type,
category = EXCLUDED.category,
label = EXCLUDED.label,
description = EXCLUDED.description,
is_public = EXCLUDED.is_public,
updated_at = NOW();
-- ============================================================
-- 6. Ensure existing users have roles assigned
-- ============================================================
-- Every user without an entry in user_roles gets the default 'user' role.
-- BUG FIX: the original hard-coded user_role_id := 2. In production the
-- 'user' role may carry a different id (documented elsewhere in this repo
-- as 4), and with FK checks disabled in replica mode the bogus user_roles
-- rows were inserted silently. Resolve the id by role name instead and
-- fail fast when the role is missing.
DO $$
DECLARE
    user_record RECORD;
    user_role_id INTEGER;
BEGIN
    SELECT id INTO user_role_id FROM roles WHERE name = 'user' LIMIT 1;
    IF user_role_id IS NULL THEN
        RAISE EXCEPTION 'Cannot find user role';
    END IF;
    FOR user_record IN SELECT id FROM users LOOP
        IF NOT EXISTS (
            SELECT 1 FROM user_roles WHERE user_id = user_record.id
        ) THEN
            INSERT INTO user_roles (user_id, role_id)
            VALUES (user_record.id, user_role_id);
            RAISE NOTICE 'Assigned user role to user %', user_record.id;
        END IF;
    END LOOP;
END $$;
-- ============================================================
-- Commit the transaction
-- ============================================================
COMMIT;
-- ============================================================
-- Restore normal mode (critical step! re-enables FK checks and triggers)
-- ============================================================
SET session_replication_role = 'origin';
-- ============================================================
-- Data-consistency validation (runs after constraints are restored)
-- ============================================================
-- Replica mode skipped FK enforcement during the import, so scan for
-- orphaned references now and surface them as warnings
DO $$
DECLARE
invalid_count INTEGER;
BEGIN
-- role_menus rows pointing at a role that does not exist
SELECT COUNT(*) INTO invalid_count
FROM role_menus rm
WHERE NOT EXISTS (SELECT 1 FROM roles r WHERE r.id = rm.role_id);
IF invalid_count > 0 THEN
RAISE WARNING 'Found % invalid role_id references in role_menus', invalid_count;
END IF;
-- role_menus rows pointing at a menu that does not exist
SELECT COUNT(*) INTO invalid_count
FROM role_menus rm
WHERE NOT EXISTS (SELECT 1 FROM menus m WHERE m.id = rm.menu_id);
IF invalid_count > 0 THEN
RAISE WARNING 'Found % invalid menu_id references in role_menus', invalid_count;
END IF;
-- menus rows pointing at a parent menu that does not exist
SELECT COUNT(*) INTO invalid_count
FROM menus m1
WHERE m1.parent_id IS NOT NULL
AND NOT EXISTS (SELECT 1 FROM menus m2 WHERE m2.id = m1.parent_id);
IF invalid_count > 0 THEN
RAISE WARNING 'Found % invalid parent_id references in menus', invalid_count;
END IF;
RAISE NOTICE 'Data integrity validation completed';
END $$;
-- ============================================================
-- Verification queries (read-only, informational)
-- ============================================================
-- Show all roles with their ids
SELECT id, name, description FROM roles ORDER BY id;
-- Confirm the short_name column now exists
SELECT 'celestial_bodies.short_name' as "Item",
CASE WHEN EXISTS(
SELECT 1 FROM information_schema.columns
WHERE table_name='celestial_bodies' AND column_name='short_name'
) THEN '✓ EXISTS' ELSE '✗ MISSING' END as "Status";
-- Record counts for the refreshed tables
SELECT 'roles' as "Table", COUNT(*)::text || ' records' as "Count" FROM roles
UNION ALL
SELECT 'menus', COUNT(*)::text || ' records' FROM menus
UNION ALL
SELECT 'role_menus', COUNT(*)::text || ' records' FROM role_menus
UNION ALL
SELECT 'scheduled_jobs', COUNT(*)::text || ' records' FROM scheduled_jobs
UNION ALL
SELECT 'system_settings', COUNT(*)::text || ' records' FROM system_settings;
-- Per-user role assignments (LEFT JOINs keep users that have no role;
-- they show an empty array)
SELECT u.id, u.username, COALESCE(array_agg(r.name), ARRAY[]::varchar[]) as roles
FROM users u
LEFT JOIN user_roles ur ON u.id = ur.user_id
LEFT JOIN roles r ON ur.role_id = r.id
GROUP BY u.id, u.username
ORDER BY u.id;