Phase 2
parent de5447c5e5 · commit c10efe0588
@ -1,5 +1,5 @@
|
|||
# Application Settings
|
||||
APP_NAME=Cosmo - Deep Space Explorer
|
||||
APP_NAME=COSMO - Deep Space Explorer
|
||||
API_PREFIX=/api
|
||||
|
||||
# CORS Settings (comma-separated list)
|
||||
|
|
|
|||
|
|
@ -51,7 +51,7 @@ REDIS_MAX_CONNECTIONS=50 # 最大连接数
|
|||
### 3. 应用配置
|
||||
|
||||
```bash
|
||||
APP_NAME=Cosmo - Deep Space Explorer
|
||||
APP_NAME=COSMO - Deep Space Explorer
|
||||
API_PREFIX=/api
|
||||
CORS_ORIGINS=["*"] # 开发环境允许所有来源
|
||||
CACHE_TTL_DAYS=3 # NASA API 缓存天数
|
||||
|
|
|
|||
|
|
@ -1,15 +1,65 @@
|
|||
# Cosmo 数据库表结构设计
|
||||
# Cosmo 数据库表结构设计文档
|
||||
|
||||
## 数据库信息
|
||||
- **数据库类型**: PostgreSQL 15+
|
||||
- **数据库名称**: cosmo_db
|
||||
- **字符集**: UTF8
|
||||
## 📋 文档目录
|
||||
|
||||
- [1. 数据库信息](#1-数据库信息)
|
||||
- [2. 数据表索引](#2-数据表索引)
|
||||
- [3. 核心业务表](#3-核心业务表)
|
||||
- [3.1 celestial_bodies - 天体基本信息表](#31-celestial_bodies---天体基本信息表)
|
||||
- [3.2 positions - 位置历史表](#32-positions---位置历史表)
|
||||
- [3.3 orbits - 轨道路径表](#33-orbits---轨道路径表)
|
||||
- [3.4 resources - 资源文件管理表](#34-resources---资源文件管理表)
|
||||
- [3.5 static_data - 静态天文数据表](#35-static_data---静态天文数据表)
|
||||
- [4. 系统管理表](#4-系统管理表)
|
||||
- [4.1 users - 用户表](#41-users---用户表)
|
||||
- [4.2 roles - 角色表](#42-roles---角色表)
|
||||
- [4.3 user_roles - 用户角色关联表](#43-user_roles---用户角色关联表)
|
||||
- [4.4 menus - 菜单表](#44-menus---菜单表)
|
||||
- [4.5 role_menus - 角色菜单关联表](#45-role_menus---角色菜单关联表)
|
||||
- [4.6 system_settings - 系统配置表](#46-system_settings---系统配置表)
|
||||
- [4.7 tasks - 后台任务表](#47-tasks---后台任务表)
|
||||
- [5. 缓存表](#5-缓存表)
|
||||
- [5.1 nasa_cache - NASA API缓存表](#51-nasa_cache---nasa-api缓存表)
|
||||
- [6. 数据关系图](#6-数据关系图)
|
||||
- [7. 初始化脚本](#7-初始化脚本)
|
||||
- [8. 查询示例](#8-查询示例)
|
||||
- [9. 维护建议](#9-维护建议)
|
||||
|
||||
---
|
||||
|
||||
## 表结构
|
||||
## 1. 数据库信息
|
||||
|
||||
### 1. celestial_bodies - 天体基本信息表
|
||||
- **数据库类型**: PostgreSQL 15+
|
||||
- **数据库名称**: cosmo_db
|
||||
- **字符集**: UTF8
|
||||
- **时区**: UTC
|
||||
- **连接池**: 20 (可配置)
|
||||
|
||||
---
|
||||
|
||||
## 2. 数据表索引
|
||||
|
||||
| 序号 | 表名 | 说明 | 记录数量级 |
|
||||
|------|------|------|-----------|
|
||||
| 1 | celestial_bodies | 天体基本信息 | 数百 |
|
||||
| 2 | positions | 天体位置历史(时间序列) | 百万级 |
|
||||
| 3 | orbits | 轨道路径数据 | 数百 |
|
||||
| 4 | resources | 资源文件管理 | 数千 |
|
||||
| 5 | static_data | 静态天文数据 | 数千 |
|
||||
| 6 | users | 用户账号 | 数千 |
|
||||
| 7 | roles | 角色定义 | 数十 |
|
||||
| 8 | user_roles | 用户角色关联 | 数千 |
|
||||
| 9 | menus | 菜单配置 | 数十 |
|
||||
| 10 | role_menus | 角色菜单权限 | 数百 |
|
||||
| 11 | system_settings | 系统配置参数 | 数十 |
|
||||
| 12 | tasks | 后台任务 | 数万 |
|
||||
| 13 | nasa_cache | NASA API缓存 | 数万 |
|
||||
|
||||
---
|
||||
|
||||
## 3. 核心业务表
|
||||
|
||||
### 3.1 celestial_bodies - 天体基本信息表
|
||||
|
||||
存储所有天体的基本信息和元数据。
|
||||
|
||||
|
|
@ -18,25 +68,31 @@ CREATE TABLE celestial_bodies (
|
|||
id VARCHAR(50) PRIMARY KEY, -- JPL Horizons ID 或自定义ID
|
||||
name VARCHAR(200) NOT NULL, -- 英文名称
|
||||
name_zh VARCHAR(200), -- 中文名称
|
||||
type VARCHAR(50) NOT NULL, -- 天体类型: star, planet, moon, probe, comet, asteroid, etc.
|
||||
type VARCHAR(50) NOT NULL, -- 天体类型
|
||||
description TEXT, -- 描述
|
||||
metadata JSONB, -- 扩展元数据(launch_date, status, mass, radius等)
|
||||
is_active bool, -- 天体有效状态
|
||||
details TEXT, -- 详细信息(Markdown格式)
|
||||
metadata JSONB, -- 扩展元数据
|
||||
is_active BOOLEAN DEFAULT TRUE, -- 天体有效状态
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW()
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
|
||||
CONSTRAINT chk_type CHECK (type IN ('star', 'planet', 'moon', 'probe', 'comet', 'asteroid', 'dwarf_planet', 'satellite'))
|
||||
CONSTRAINT chk_type CHECK (type IN (
|
||||
'star', 'planet', 'dwarf_planet', 'satellite',
|
||||
'probe', 'comet', 'asteroid'
|
||||
))
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_celestial_bodies_type ON celestial_bodies(type);
|
||||
CREATE INDEX idx_celestial_bodies_name ON celestial_bodies(name);
|
||||
CREATE INDEX idx_celestial_bodies_active ON celestial_bodies(is_active);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE celestial_bodies IS '天体基本信息表';
|
||||
COMMENT ON COLUMN celestial_bodies.id IS 'JPL Horizons ID(如-31代表Voyager 1)或自定义ID';
|
||||
COMMENT ON COLUMN celestial_bodies.type IS '天体类型:star(恒星), planet(行星), moon(卫星), probe(探测器), comet(彗星), asteroid(小行星)';
|
||||
COMMENT ON COLUMN celestial_bodies.metadata IS 'JSON格式的扩展元数据,例如:{"launch_date": "1977-09-05", "status": "active", "mass": 722, "radius": 2575}';
|
||||
COMMENT ON COLUMN celestial_bodies.type IS '天体类型:star(恒星), planet(行星), dwarf_planet(矮行星), satellite(卫星), probe(探测器), comet(彗星), asteroid(小行星)';
|
||||
COMMENT ON COLUMN celestial_bodies.details IS '详细信息,支持Markdown格式,在详情面板中展示';
|
||||
COMMENT ON COLUMN celestial_bodies.metadata IS 'JSON格式的扩展元数据';
|
||||
```
|
||||
|
||||
**metadata JSONB字段示例**:
|
||||
|
|
@ -44,39 +100,44 @@ COMMENT ON COLUMN celestial_bodies.metadata IS 'JSON格式的扩展元数据,
|
|||
{
|
||||
"launch_date": "1977-09-05",
|
||||
"status": "active",
|
||||
"mass": 722, // kg
|
||||
"radius": 2575, // km
|
||||
"orbit_period": 365.25, // days
|
||||
"rotation_period": 24, // hours
|
||||
"mass_kg": 722,
|
||||
"radius_km": 2575,
|
||||
"orbit_period_days": 365.25,
|
||||
"rotation_period_hours": 24,
|
||||
"discovery_date": "1930-02-18",
|
||||
"discoverer": "Clyde Tombaugh"
|
||||
"discoverer": "Clyde Tombaugh",
|
||||
"surface_temp_k": 288,
|
||||
"atmosphere": ["N2", "O2"],
|
||||
"moons": 1
|
||||
}
|
||||
```
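
基于上面的 metadata 结构,可以直接用 JSONB 操作符做筛选。下面是一个示意查询(假设探测器的 metadata 中包含 status、launch_date 键,仅作参考):

```sql
-- 查询所有仍在运行的探测器及其发射日期(示例)
SELECT id, name, name_zh,
       metadata->>'launch_date' AS launch_date
FROM celestial_bodies
WHERE type = 'probe'
  AND metadata->>'status' = 'active';
```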
|
||||
|
||||
---
|
||||
|
||||
### 2. positions - 位置历史表(时间序列)
|
||||
### 3.2 positions - 位置历史表
|
||||
|
||||
存储天体的位置历史数据,支持历史查询和轨迹回放。
|
||||
存储天体的位置历史数据,支持历史查询和轨迹回放。这是一个时间序列表,数据量可达百万级。
|
||||
|
||||
```sql
|
||||
CREATE TABLE positions (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
body_id VARCHAR(50) NOT NULL REFERENCES celestial_bodies(id) ON DELETE CASCADE,
|
||||
time TIMESTAMP NOT NULL, -- 位置时间点
|
||||
time TIMESTAMP NOT NULL, -- 位置时间点(UTC)
|
||||
x DOUBLE PRECISION NOT NULL, -- X坐标(AU,日心坐标系)
|
||||
y DOUBLE PRECISION NOT NULL, -- Y坐标(AU)
|
||||
z DOUBLE PRECISION NOT NULL, -- Z坐标(AU)
|
||||
vx DOUBLE PRECISION, -- X方向速度(可选)
|
||||
vx DOUBLE PRECISION, -- X方向速度(AU/day,可选)
|
||||
vy DOUBLE PRECISION, -- Y方向速度(可选)
|
||||
vz DOUBLE PRECISION, -- Z方向速度(可选)
|
||||
source VARCHAR(50) DEFAULT 'nasa_horizons', -- 数据来源
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
|
||||
CONSTRAINT chk_source CHECK (source IN ('nasa_horizons', 'calculated', 'user_defined', 'imported'))
|
||||
CONSTRAINT chk_source CHECK (source IN (
|
||||
'nasa_horizons', 'calculated', 'user_defined', 'imported'
|
||||
))
|
||||
);
|
||||
|
||||
-- 索引(非常重要,用于高效查询)
|
||||
-- 索引(性能关键!)
|
||||
CREATE INDEX idx_positions_body_time ON positions(body_id, time DESC);
|
||||
CREATE INDEX idx_positions_time ON positions(time);
|
||||
CREATE INDEX idx_positions_body_id ON positions(body_id);
|
||||
|
|
@ -93,10 +154,51 @@ COMMENT ON COLUMN positions.source IS '数据来源:nasa_horizons(NASA API), c
|
|||
- 查询某天体在某时间点的位置
|
||||
- 查询某天体在时间范围内的轨迹
|
||||
- 支持时间旅行功能(回放历史位置)
|
||||
- 轨迹可视化
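
例如,"查询某天体在时间范围内的轨迹"这一场景对应的示意 SQL 如下(body_id 以 Voyager 1 的 -31 为例,仅作参考):

```sql
-- 查询 Voyager 1 在 2025 年内的轨迹点,按时间升序返回
SELECT time, x, y, z
FROM positions
WHERE body_id = '-31'
  AND time BETWEEN '2025-01-01' AND '2025-12-31'
ORDER BY time;
```

该查询可以命中 (body_id, time) 复合索引 idx_positions_body_time。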
|
||||
|
||||
---
|
||||
|
||||
### 3. resources - 资源文件管理表
|
||||
### 3.3 orbits - 轨道路径表
|
||||
|
||||
存储预计算的轨道路径数据,用于3D可视化渲染。
|
||||
|
||||
```sql
|
||||
CREATE TABLE orbits (
|
||||
id SERIAL PRIMARY KEY,
|
||||
body_id VARCHAR(50) NOT NULL REFERENCES celestial_bodies(id) ON DELETE CASCADE,
|
||||
points JSONB NOT NULL, -- 轨道点数组 [{x, y, z}, ...]
|
||||
num_points INTEGER NOT NULL, -- 轨道点数量
|
||||
period_days DOUBLE PRECISION, -- 轨道周期(天)
|
||||
color VARCHAR(20), -- 轨道线颜色(HEX)
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
|
||||
CONSTRAINT uq_orbits_body_id UNIQUE (body_id)
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_orbits_body_id ON orbits(body_id);
|
||||
CREATE INDEX idx_orbits_updated_at ON orbits(updated_at);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE orbits IS '轨道路径数据表';
|
||||
COMMENT ON COLUMN orbits.points IS 'JSON数组格式的轨道点:[{"x": 1.0, "y": 0.0, "z": 0.0}, ...]';
|
||||
COMMENT ON COLUMN orbits.num_points IS '轨道点数量,用于性能优化';
|
||||
COMMENT ON COLUMN orbits.color IS '轨道线显示颜色,HEX格式,如#FF5733';
|
||||
```
|
||||
|
||||
**points JSONB字段示例**:
|
||||
```json
|
||||
[
|
||||
{"x": 1.0, "y": 0.0, "z": 0.0},
|
||||
{"x": 0.99, "y": 0.05, "z": 0.01},
|
||||
{"x": 0.97, "y": 0.10, "z": 0.02}
|
||||
]
|
||||
```
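
orbits 对 body_id 有唯一约束,因此刷新预计算轨道时可以用 UPSERT 覆盖旧数据。下面是一个示意写法(body_id 取 399 即地球,points 为占位数据,仅作参考):

```sql
-- 轨道数据写入/更新示例(ON CONFLICT 依赖 uq_orbits_body_id 唯一约束)
INSERT INTO orbits (body_id, points, num_points, period_days, color)
VALUES (
    '399',
    '[{"x": 1.0, "y": 0.0, "z": 0.0}, {"x": 0.99, "y": 0.05, "z": 0.01}]'::jsonb,
    2,
    365.25,
    '#3b82f6'
)
ON CONFLICT (body_id) DO UPDATE SET
    points      = EXCLUDED.points,
    num_points  = EXCLUDED.num_points,
    period_days = EXCLUDED.period_days,
    color       = EXCLUDED.color,
    updated_at  = NOW();
```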
|
||||
|
||||
---
|
||||
|
||||
### 3.4 resources - 资源文件管理表
|
||||
|
||||
统一管理纹理、3D模型、图标等静态资源。
|
||||
|
||||
|
|
@ -106,13 +208,15 @@ CREATE TABLE resources (
|
|||
body_id VARCHAR(50) REFERENCES celestial_bodies(id) ON DELETE CASCADE,
|
||||
resource_type VARCHAR(50) NOT NULL, -- 资源类型
|
||||
file_path VARCHAR(500) NOT NULL, -- 相对于upload目录的路径
|
||||
file_size INTEGER, -- 文件大小(bytes)
|
||||
file_size INTEGER, -- 文件大小(bytes)
|
||||
mime_type VARCHAR(100), -- MIME类型
|
||||
metadata JSONB, -- 扩展信息(分辨率、格式等)
|
||||
metadata JSONB, -- 扩展信息
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
|
||||
CONSTRAINT chk_resource_type CHECK (resource_type IN ('texture', 'model', 'icon', 'thumbnail', 'data'))
|
||||
CONSTRAINT chk_resource_type CHECK (resource_type IN (
|
||||
'texture', 'model', 'icon', 'thumbnail', 'data'
|
||||
))
|
||||
);
|
||||
|
||||
-- 索引
|
||||
|
|
@ -123,7 +227,7 @@ CREATE INDEX idx_resources_type ON resources(resource_type);
|
|||
COMMENT ON TABLE resources IS '资源文件管理表(纹理、模型、图标等)';
|
||||
COMMENT ON COLUMN resources.resource_type IS '资源类型:texture(纹理), model(3D模型), icon(图标), thumbnail(缩略图), data(数据文件)';
|
||||
COMMENT ON COLUMN resources.file_path IS '相对路径,例如:textures/planets/earth_2k.jpg';
|
||||
COMMENT ON COLUMN resources.metadata IS 'JSON格式元数据,例如:{"width": 2048, "height": 1024, "format": "jpg"}';
|
||||
COMMENT ON COLUMN resources.metadata IS 'JSON格式元数据';
|
||||
```
|
||||
|
||||
**metadata JSONB字段示例**:
|
||||
|
|
@ -134,15 +238,16 @@ COMMENT ON COLUMN resources.metadata IS 'JSON格式元数据,例如:{"width"
|
|||
"format": "jpg",
|
||||
"color_space": "sRGB",
|
||||
"model_format": "glb",
|
||||
"polygon_count": 15000
|
||||
"polygon_count": 15000,
|
||||
"compression": "gzip"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 4. static_data - 静态数据表
|
||||
### 3.5 static_data - 静态天文数据表
|
||||
|
||||
存储星座、星系、恒星等不需要动态计算的静态天文数据。
|
||||
存储星座、星系、恒星等不需要动态计算的静态天文数据。
|
||||
|
||||
```sql
|
||||
CREATE TABLE static_data (
|
||||
|
|
@ -154,7 +259,9 @@ CREATE TABLE static_data (
|
|||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
|
||||
CONSTRAINT chk_category CHECK (category IN ('constellation', 'galaxy', 'star', 'nebula', 'cluster')),
|
||||
CONSTRAINT chk_category CHECK (category IN (
|
||||
'constellation', 'galaxy', 'star', 'nebula', 'cluster'
|
||||
)),
|
||||
CONSTRAINT uq_category_name UNIQUE (category, name)
|
||||
);
|
||||
|
||||
|
|
@ -169,9 +276,7 @@ COMMENT ON COLUMN static_data.category IS '数据分类:constellation(星座),
|
|||
COMMENT ON COLUMN static_data.data IS 'JSON格式的完整数据,结构根据category不同而不同';
|
||||
```
|
||||
|
||||
**data JSONB字段示例**:
|
||||
|
||||
**星座数据**:
|
||||
**data JSONB字段示例 - 星座**:
|
||||
```json
|
||||
{
|
||||
"stars": [
|
||||
|
|
@ -183,28 +288,259 @@ COMMENT ON COLUMN static_data.data IS 'JSON格式的完整数据,结构根据c
|
|||
}
|
||||
```
|
||||
|
||||
**星系数据**:
|
||||
**data JSONB字段示例 - 恒星**:
|
||||
```json
|
||||
{
|
||||
"type": "spiral",
|
||||
"distance_mly": 2.537,
|
||||
"ra": 10.68,
|
||||
"dec": 41.27,
|
||||
"magnitude": 3.44,
|
||||
"diameter_kly": 220,
|
||||
"color": "#88aaff"
|
||||
"distance_ly": 4.37,
|
||||
"ra": 219.90,
|
||||
"dec": -60.83,
|
||||
"magnitude": -0.27,
|
||||
"color": "#FFF8E7",
|
||||
"spectral_type": "G2V",
|
||||
"mass_solar": 1.0,
|
||||
"radius_solar": 1.0
|
||||
}
|
||||
```
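
static_data.data 上建有 GIN 索引(见第 7 节建表脚本),@> 包含查询可以利用该索引。示意如下(键名基于上面的示例数据,仅作参考):

```sql
-- 按光谱型筛选恒星(JSONB 包含查询,可走 data 字段的 GIN 索引)
SELECT name, data->>'magnitude' AS magnitude
FROM static_data
WHERE category = 'star'
  AND data @> '{"spectral_type": "G2V"}';
```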
|
||||
|
||||
---
|
||||
|
||||
### 5. nasa_cache - NASA API缓存表
|
||||
## 4. 系统管理表
|
||||
|
||||
### 4.1 users - 用户表
|
||||
|
||||
存储用户账号信息。
|
||||
|
||||
```sql
|
||||
CREATE TABLE users (
|
||||
id SERIAL PRIMARY KEY,
|
||||
username VARCHAR(50) UNIQUE NOT NULL, -- 用户名(唯一)
|
||||
password_hash VARCHAR(255) NOT NULL, -- 密码哈希(bcrypt)
|
||||
email VARCHAR(255) UNIQUE, -- 邮箱地址
|
||||
full_name VARCHAR(100), -- 全名
|
||||
is_active BOOLEAN DEFAULT TRUE NOT NULL, -- 账号状态
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
last_login_at TIMESTAMP, -- 最后登录时间
|
||||
|
||||
CONSTRAINT chk_username_length CHECK (LENGTH(username) >= 3)
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_users_username ON users(username);
|
||||
CREATE INDEX idx_users_email ON users(email);
|
||||
CREATE INDEX idx_users_active ON users(is_active);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE users IS '用户账号表';
|
||||
COMMENT ON COLUMN users.password_hash IS '使用bcrypt加密的密码哈希';
|
||||
COMMENT ON COLUMN users.is_active IS '账号激活状态,false表示禁用';
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 4.2 roles - 角色表
|
||||
|
||||
定义系统角色(如admin、user等)。
|
||||
|
||||
```sql
|
||||
CREATE TABLE roles (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(50) UNIQUE NOT NULL, -- 角色名称(如'admin')
|
||||
display_name VARCHAR(100) NOT NULL, -- 显示名称
|
||||
description TEXT, -- 角色描述
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_roles_name ON roles(name);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE roles IS '角色定义表';
|
||||
COMMENT ON COLUMN roles.name IS '角色标识符,如admin、user、guest';
|
||||
COMMENT ON COLUMN roles.display_name IS '显示名称,如管理员、普通用户';
|
||||
```
|
||||
|
||||
**预置角色**:
|
||||
- `admin`: 系统管理员(全部权限)
|
||||
- `user`: 普通用户(基础权限)
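
这两个预置角色可以在初始化时直接插入,示意如下(display_name 文案仅作参考):

```sql
-- 初始化预置角色(name 唯一,重复执行时跳过已存在角色)
INSERT INTO roles (name, display_name, description) VALUES
    ('admin', '管理员', '系统管理员,拥有全部权限'),
    ('user',  '普通用户', '普通用户,拥有基础权限')
ON CONFLICT (name) DO NOTHING;
```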
|
||||
|
||||
---
|
||||
|
||||
### 4.3 user_roles - 用户角色关联表
|
||||
|
||||
多对多关系:一个用户可以有多个角色,一个角色可以分配给多个用户。
|
||||
|
||||
```sql
|
||||
CREATE TABLE user_roles (
|
||||
user_id INTEGER REFERENCES users(id) ON DELETE CASCADE,
|
||||
role_id INTEGER REFERENCES roles(id) ON DELETE CASCADE,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
|
||||
PRIMARY KEY (user_id, role_id)
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_user_roles_user_id ON user_roles(user_id);
|
||||
CREATE INDEX idx_user_roles_role_id ON user_roles(role_id);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE user_roles IS '用户角色关联表(多对多)';
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 4.4 menus - 菜单表
|
||||
|
||||
后台管理菜单配置。
|
||||
|
||||
```sql
|
||||
CREATE TABLE menus (
|
||||
id SERIAL PRIMARY KEY,
|
||||
parent_id INTEGER REFERENCES menus(id) ON DELETE CASCADE, -- 父菜单ID
|
||||
name VARCHAR(100) NOT NULL, -- 菜单名称
|
||||
title VARCHAR(100) NOT NULL, -- 显示标题
|
||||
icon VARCHAR(100), -- 图标名称
|
||||
path VARCHAR(255), -- 路由路径
|
||||
component VARCHAR(255), -- 组件路径
|
||||
sort_order INTEGER DEFAULT 0 NOT NULL, -- 显示顺序
|
||||
is_active BOOLEAN DEFAULT TRUE NOT NULL, -- 菜单状态
|
||||
description TEXT, -- 菜单描述
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_menus_parent_id ON menus(parent_id);
|
||||
CREATE INDEX idx_menus_sort_order ON menus(sort_order);
|
||||
CREATE INDEX idx_menus_active ON menus(is_active);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE menus IS '后台管理菜单配置表';
|
||||
COMMENT ON COLUMN menus.parent_id IS '父菜单ID,NULL表示根菜单';
|
||||
COMMENT ON COLUMN menus.path IS '前端路由路径,如/admin/celestial-bodies';
|
||||
COMMENT ON COLUMN menus.component IS 'Vue/React组件路径';
|
||||
```
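
parent_id 自引用使菜单构成一棵树,前端渲染时通常需要按层级递归展开。下面是一个递归查询的示意(选取字段仅作参考):

```sql
-- 递归展开菜单树,depth 表示层级深度
WITH RECURSIVE menu_tree AS (
    SELECT id, parent_id, title, path, sort_order, 1 AS depth
    FROM menus
    WHERE parent_id IS NULL AND is_active = TRUE
    UNION ALL
    SELECT m.id, m.parent_id, m.title, m.path, m.sort_order, t.depth + 1
    FROM menus m
    JOIN menu_tree t ON m.parent_id = t.id
    WHERE m.is_active = TRUE
)
SELECT * FROM menu_tree
ORDER BY depth, sort_order;
```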
|
||||
|
||||
---
|
||||
|
||||
### 4.5 role_menus - 角色菜单关联表
|
||||
|
||||
定义角色可访问的菜单(权限控制)。
|
||||
|
||||
```sql
|
||||
CREATE TABLE role_menus (
|
||||
id SERIAL PRIMARY KEY,
|
||||
role_id INTEGER REFERENCES roles(id) ON DELETE CASCADE,
|
||||
menu_id INTEGER REFERENCES menus(id) ON DELETE CASCADE,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
|
||||
CONSTRAINT uq_role_menu UNIQUE (role_id, menu_id)
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_role_menus_role_id ON role_menus(role_id);
|
||||
CREATE INDEX idx_role_menus_menu_id ON role_menus(menu_id);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE role_menus IS '角色菜单权限关联表';
|
||||
```
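
给角色批量授权菜单时,可以用 INSERT ... SELECT 并依赖 uq_role_menu 去重。示意如下(菜单 ID 仅为占位值):

```sql
-- 将 id 为 1、2、3 的菜单授权给 admin 角色(已授权的跳过)
INSERT INTO role_menus (role_id, menu_id)
SELECT r.id, m.id
FROM roles r
JOIN menus m ON m.id IN (1, 2, 3)
WHERE r.name = 'admin'
ON CONFLICT (role_id, menu_id) DO NOTHING;
```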
|
||||
|
||||
---
|
||||
|
||||
### 4.6 system_settings - 系统配置表
|
||||
|
||||
存储平台配置参数,支持动态配置。
|
||||
|
||||
```sql
|
||||
CREATE TABLE system_settings (
|
||||
id SERIAL PRIMARY KEY,
|
||||
key VARCHAR(100) UNIQUE NOT NULL, -- 配置键
|
||||
value TEXT NOT NULL, -- 配置值
|
||||
value_type VARCHAR(20) NOT NULL DEFAULT 'string', -- 值类型
|
||||
category VARCHAR(50) NOT NULL DEFAULT 'general', -- 分类
|
||||
label VARCHAR(200) NOT NULL, -- 显示标签
|
||||
description TEXT, -- 描述
|
||||
is_public BOOLEAN DEFAULT FALSE, -- 是否前端可访问
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
|
||||
CONSTRAINT chk_value_type CHECK (value_type IN (
|
||||
'string', 'int', 'float', 'bool', 'json'
|
||||
))
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_system_settings_key ON system_settings(key);
|
||||
CREATE INDEX idx_system_settings_category ON system_settings(category);
|
||||
CREATE INDEX idx_system_settings_public ON system_settings(is_public);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE system_settings IS '系统配置参数表';
|
||||
COMMENT ON COLUMN system_settings.key IS '配置键,如timeline_interval_days';
|
||||
COMMENT ON COLUMN system_settings.value_type IS '值类型:string, int, float, bool, json';
|
||||
COMMENT ON COLUMN system_settings.is_public IS '是否允许前端访问该配置';
|
||||
```
|
||||
|
||||
**配置示例**:
|
||||
```sql
|
||||
INSERT INTO system_settings (key, value, value_type, category, label, description, is_public) VALUES
|
||||
('timeline_interval_days', '7', 'int', 'visualization', '时间轴播放间隔(天)', '时间轴播放模式下的时间间隔', true),
|
||||
('max_orbit_points', '500', 'int', 'visualization', '最大轨道点数', '轨道可视化的最大点数', true),
|
||||
('cache_ttl_hours', '24', 'int', 'cache', '缓存过期时间(小时)', 'Redis缓存的默认过期时间', false);
|
||||
```
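
前端只应读取 is_public = TRUE 的配置,value_type 由应用层用于做类型转换。读取示意如下:

```sql
-- 读取前端可见的配置项(类型转换由应用层按 value_type 处理)
SELECT key, value, value_type, category, label
FROM system_settings
WHERE is_public = TRUE
ORDER BY category, key;
```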
|
||||
|
||||
---
|
||||
|
||||
### 4.7 tasks - 后台任务表
|
||||
|
||||
记录后台异步任务的执行状态。
|
||||
|
||||
```sql
|
||||
CREATE TABLE tasks (
|
||||
id SERIAL PRIMARY KEY,
|
||||
task_type VARCHAR(50) NOT NULL, -- 任务类型
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'pending', -- 任务状态
|
||||
description VARCHAR(255), -- 任务描述
|
||||
params JSON, -- 输入参数
|
||||
result JSON, -- 输出结果
|
||||
progress INTEGER DEFAULT 0, -- 进度(0-100)
|
||||
error_message TEXT, -- 错误信息
|
||||
created_by INTEGER, -- 创建用户ID
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
started_at TIMESTAMP, -- 开始时间
|
||||
completed_at TIMESTAMP, -- 完成时间
|
||||
|
||||
CONSTRAINT chk_status CHECK (status IN (
|
||||
'pending', 'running', 'completed', 'failed', 'cancelled'
|
||||
)),
|
||||
CONSTRAINT chk_progress CHECK (progress >= 0 AND progress <= 100)
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX idx_tasks_status ON tasks(status);
|
||||
CREATE INDEX idx_tasks_type ON tasks(task_type);
|
||||
CREATE INDEX idx_tasks_created_at ON tasks(created_at DESC);
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE tasks IS '后台任务表';
|
||||
COMMENT ON COLUMN tasks.task_type IS '任务类型,如nasa_download、orbit_calculate';
|
||||
COMMENT ON COLUMN tasks.status IS '任务状态:pending(待执行), running(执行中), completed(已完成), failed(失败), cancelled(已取消)';
|
||||
COMMENT ON COLUMN tasks.progress IS '任务进度百分比(0-100)';
|
||||
```
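
任务状态按 pending → running → completed / failed 流转,通常由后台 worker 更新。下面是一个状态流转的示意(任务 id 为占位值):

```sql
-- 任务开始执行
UPDATE tasks SET status = 'running', started_at = NOW(), updated_at = NOW()
WHERE id = 42 AND status = 'pending';

-- 执行过程中更新进度
UPDATE tasks SET progress = 60, updated_at = NOW()
WHERE id = 42 AND status = 'running';

-- 任务完成
UPDATE tasks SET status = 'completed', progress = 100, completed_at = NOW(), updated_at = NOW()
WHERE id = 42;
```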
|
||||
|
||||
---
|
||||
|
||||
## 5. 缓存表
|
||||
|
||||
### 5.1 nasa_cache - NASA API缓存表
|
||||
|
||||
持久化NASA Horizons API的响应结果,减少API调用。
|
||||
|
||||
```sql
|
||||
CREATE TABLE nasa_cache (
|
||||
cache_key VARCHAR(500) PRIMARY KEY, -- 缓存键(body_id:start:end:step)
|
||||
cache_key VARCHAR(500) PRIMARY KEY, -- 缓存键
|
||||
body_id VARCHAR(50),
|
||||
start_time TIMESTAMP, -- 查询起始时间
|
||||
end_time TIMESTAMP, -- 查询结束时间
|
||||
|
|
@ -221,42 +557,46 @@ CREATE INDEX idx_nasa_cache_body_id ON nasa_cache(body_id);
|
|||
CREATE INDEX idx_nasa_cache_expires ON nasa_cache(expires_at);
|
||||
CREATE INDEX idx_nasa_cache_time_range ON nasa_cache(body_id, start_time, end_time);
|
||||
|
||||
-- 自动清理过期缓存(可选,需要pg_cron扩展)
|
||||
-- SELECT cron.schedule('clean_expired_cache', '0 0 * * *', 'DELETE FROM nasa_cache WHERE expires_at < NOW()');
|
||||
|
||||
-- 注释
|
||||
COMMENT ON TABLE nasa_cache IS 'NASA Horizons API响应缓存表';
|
||||
COMMENT ON COLUMN nasa_cache.cache_key IS '缓存键格式:{body_id}:{start}:{end}:{step},例如:-31:2025-11-27:2025-11-28:1d';
|
||||
COMMENT ON COLUMN nasa_cache.cache_key IS '缓存键格式:{body_id}:{start}:{end}:{step}';
|
||||
COMMENT ON COLUMN nasa_cache.data IS 'NASA API的完整JSON响应';
|
||||
COMMENT ON COLUMN nasa_cache.expires_at IS '缓存过期时间,过期后自动失效';
|
||||
```
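
缓存的典型用法是先查未过期记录,未命中时再写入并覆盖同 key 的旧数据。示意如下(cache_key 按上述格式拼接,数据与过期时间为占位值):

```sql
-- 读取未过期缓存
SELECT data
FROM nasa_cache
WHERE cache_key = '-31:2025-11-27:2025-11-28:1d'
  AND expires_at > NOW();

-- 未命中时写入(同 key 覆盖旧数据)
INSERT INTO nasa_cache (cache_key, body_id, start_time, end_time, data, expires_at)
VALUES ('-31:2025-11-27:2025-11-28:1d', '-31',
        '2025-11-27', '2025-11-28',
        '{"result": "..."}'::jsonb,
        NOW() + INTERVAL '3 days')
ON CONFLICT (cache_key) DO UPDATE
SET data = EXCLUDED.data, expires_at = EXCLUDED.expires_at;
```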
|
||||
|
||||
---
|
||||
|
||||
## 初始化脚本
|
||||
## 6. 数据关系图
|
||||
|
||||
```
|
||||
celestial_bodies (天体)
|
||||
├── positions (1:N) - 天体位置历史
|
||||
├── orbits (1:1) - 轨道路径
|
||||
└── resources (1:N) - 资源文件
|
||||
|
||||
users (用户)
|
||||
└── user_roles (N:M) ←→ roles (角色)
|
||||
└── role_menus (N:M) ←→ menus (菜单)
|
||||
|
||||
tasks (任务) - 独立表
|
||||
system_settings (配置) - 独立表
|
||||
static_data (静态数据) - 独立表
|
||||
nasa_cache (缓存) - 独立表
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. 初始化脚本
|
||||
|
||||
### 创建数据库
|
||||
```sql
|
||||
-- 连接到PostgreSQL
|
||||
psql -U postgres
|
||||
|
||||
-- 创建数据库
|
||||
CREATE DATABASE cosmo_db
|
||||
WITH
|
||||
ENCODING = 'UTF8'
|
||||
LC_COLLATE = 'en_US.UTF-8'
|
||||
LC_CTYPE = 'en_US.UTF-8'
|
||||
TEMPLATE = template0;
|
||||
|
||||
-- 连接到新数据库
|
||||
\c cosmo_db
|
||||
|
||||
-- 创建必要的扩展(可选)
|
||||
CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; -- UUID生成
|
||||
CREATE EXTENSION IF NOT EXISTS "pg_trgm"; -- 模糊搜索
|
||||
```bash
|
||||
# 使用Docker容器
|
||||
docker exec -it cosmo_postgres psql -U postgres -c "CREATE DATABASE cosmo_db WITH ENCODING='UTF8';"
|
||||
```
|
||||
|
||||
### 完整建表脚本
|
||||
|
||||
```sql
|
||||
-- 按依赖顺序创建表
|
||||
|
||||
|
|
@ -267,10 +607,12 @@ CREATE TABLE celestial_bodies (
|
|||
name_zh VARCHAR(200),
|
||||
type VARCHAR(50) NOT NULL,
|
||||
description TEXT,
|
||||
details TEXT,
|
||||
metadata JSONB,
|
||||
is_active BOOLEAN DEFAULT TRUE,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
CONSTRAINT chk_type CHECK (type IN ('star', 'planet', 'moon', 'probe', 'comet', 'asteroid', 'dwarf_planet', 'satellite'))
|
||||
CONSTRAINT chk_type CHECK (type IN ('star', 'planet', 'dwarf_planet', 'satellite', 'probe', 'comet', 'asteroid'))
|
||||
);
|
||||
CREATE INDEX idx_celestial_bodies_type ON celestial_bodies(type);
|
||||
CREATE INDEX idx_celestial_bodies_name ON celestial_bodies(name);
|
||||
|
|
@ -291,10 +633,22 @@ CREATE TABLE positions (
|
|||
CONSTRAINT chk_source CHECK (source IN ('nasa_horizons', 'calculated', 'user_defined', 'imported'))
|
||||
);
|
||||
CREATE INDEX idx_positions_body_time ON positions(body_id, time DESC);
|
||||
CREATE INDEX idx_positions_time ON positions(time);
|
||||
CREATE INDEX idx_positions_body_id ON positions(body_id);
|
||||
|
||||
-- 3. 资源管理表
|
||||
-- 3. 轨道路径表
|
||||
CREATE TABLE orbits (
|
||||
id SERIAL PRIMARY KEY,
|
||||
body_id VARCHAR(50) NOT NULL REFERENCES celestial_bodies(id) ON DELETE CASCADE,
|
||||
points JSONB NOT NULL,
|
||||
num_points INTEGER NOT NULL,
|
||||
period_days DOUBLE PRECISION,
|
||||
color VARCHAR(20),
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
CONSTRAINT uq_orbits_body_id UNIQUE (body_id)
|
||||
);
|
||||
CREATE INDEX idx_orbits_body_id ON orbits(body_id);
|
||||
|
||||
-- 4. 资源管理表
|
||||
CREATE TABLE resources (
|
||||
id SERIAL PRIMARY KEY,
|
||||
body_id VARCHAR(50) REFERENCES celestial_bodies(id) ON DELETE CASCADE,
|
||||
|
|
@ -307,10 +661,8 @@ CREATE TABLE resources (
|
|||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
CONSTRAINT chk_resource_type CHECK (resource_type IN ('texture', 'model', 'icon', 'thumbnail', 'data'))
|
||||
);
|
||||
CREATE INDEX idx_resources_body_id ON resources(body_id);
|
||||
CREATE INDEX idx_resources_type ON resources(resource_type);
|
||||
|
||||
-- 4. 静态数据表
|
||||
-- 5. 静态数据表
|
||||
CREATE TABLE static_data (
|
||||
id SERIAL PRIMARY KEY,
|
||||
category VARCHAR(50) NOT NULL,
|
||||
|
|
@ -322,11 +674,95 @@ CREATE TABLE static_data (
|
|||
CONSTRAINT chk_category CHECK (category IN ('constellation', 'galaxy', 'star', 'nebula', 'cluster')),
|
||||
CONSTRAINT uq_category_name UNIQUE (category, name)
|
||||
);
|
||||
CREATE INDEX idx_static_data_category ON static_data(category);
|
||||
CREATE INDEX idx_static_data_name ON static_data(name);
|
||||
CREATE INDEX idx_static_data_data ON static_data USING GIN(data);
|
||||
|
||||
-- 5. NASA缓存表
|
||||
-- 6. 用户表
|
||||
CREATE TABLE users (
|
||||
id SERIAL PRIMARY KEY,
|
||||
username VARCHAR(50) UNIQUE NOT NULL,
|
||||
password_hash VARCHAR(255) NOT NULL,
|
||||
email VARCHAR(255) UNIQUE,
|
||||
full_name VARCHAR(100),
|
||||
is_active BOOLEAN DEFAULT TRUE,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
last_login_at TIMESTAMP
|
||||
);
|
||||
|
||||
-- 7. 角色表
|
||||
CREATE TABLE roles (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(50) UNIQUE NOT NULL,
|
||||
display_name VARCHAR(100) NOT NULL,
|
||||
description TEXT,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- 8. 用户角色关联表
|
||||
CREATE TABLE user_roles (
|
||||
user_id INTEGER REFERENCES users(id) ON DELETE CASCADE,
|
||||
role_id INTEGER REFERENCES roles(id) ON DELETE CASCADE,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
PRIMARY KEY (user_id, role_id)
|
||||
);
|
||||
|
||||
-- 9. 菜单表
|
||||
CREATE TABLE menus (
|
||||
id SERIAL PRIMARY KEY,
|
||||
parent_id INTEGER REFERENCES menus(id) ON DELETE CASCADE,
|
||||
name VARCHAR(100) NOT NULL,
|
||||
title VARCHAR(100) NOT NULL,
|
||||
icon VARCHAR(100),
|
||||
path VARCHAR(255),
|
||||
component VARCHAR(255),
|
||||
sort_order INTEGER DEFAULT 0,
|
||||
is_active BOOLEAN DEFAULT TRUE,
|
||||
description TEXT,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- 10. 角色菜单关联表
|
||||
CREATE TABLE role_menus (
|
||||
id SERIAL PRIMARY KEY,
|
||||
role_id INTEGER REFERENCES roles(id) ON DELETE CASCADE,
|
||||
menu_id INTEGER REFERENCES menus(id) ON DELETE CASCADE,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
CONSTRAINT uq_role_menu UNIQUE (role_id, menu_id)
|
||||
);
|
||||
|
||||
-- 11. 系统配置表
|
||||
CREATE TABLE system_settings (
|
||||
id SERIAL PRIMARY KEY,
|
||||
key VARCHAR(100) UNIQUE NOT NULL,
|
||||
value TEXT NOT NULL,
|
||||
value_type VARCHAR(20) NOT NULL DEFAULT 'string',
|
||||
category VARCHAR(50) NOT NULL DEFAULT 'general',
|
||||
label VARCHAR(200) NOT NULL,
|
||||
description TEXT,
|
||||
is_public BOOLEAN DEFAULT FALSE,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- 12. 后台任务表
|
||||
CREATE TABLE tasks (
|
||||
id SERIAL PRIMARY KEY,
|
||||
task_type VARCHAR(50) NOT NULL,
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'pending',
|
||||
description VARCHAR(255),
|
||||
params JSON,
|
||||
result JSON,
|
||||
progress INTEGER DEFAULT 0,
|
||||
error_message TEXT,
|
||||
created_by INTEGER,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
updated_at TIMESTAMP DEFAULT NOW(),
|
||||
started_at TIMESTAMP,
|
||||
completed_at TIMESTAMP
|
||||
);
|
||||
|
||||
-- 13. NASA缓存表
|
||||
CREATE TABLE nasa_cache (
|
||||
cache_key VARCHAR(500) PRIMARY KEY,
|
||||
body_id VARCHAR(50),
|
||||
|
|
@ -335,30 +771,13 @@ CREATE TABLE nasa_cache (
|
|||
step VARCHAR(10),
|
||||
data JSONB NOT NULL,
|
||||
expires_at TIMESTAMP NOT NULL,
|
||||
created_at TIMESTAMP DEFAULT NOW(),
|
||||
CONSTRAINT chk_time_range CHECK (end_time >= start_time)
|
||||
created_at TIMESTAMP DEFAULT NOW()
|
||||
);
|
||||
CREATE INDEX idx_nasa_cache_body_id ON nasa_cache(body_id);
|
||||
CREATE INDEX idx_nasa_cache_expires ON nasa_cache(expires_at);
|
||||
CREATE INDEX idx_nasa_cache_time_range ON nasa_cache(body_id, start_time, end_time);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 数据关系图
|
||||
|
||||
```
|
||||
celestial_bodies (天体)
|
||||
├── positions (1:N) - 天体位置历史
|
||||
├── resources (1:N) - 天体资源文件
|
||||
└── nasa_cache (1:N) - NASA API缓存
|
||||
|
||||
static_data (静态数据) - 独立表,不关联celestial_bodies
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 查询示例
|
||||
## 8. 查询示例
|
||||
|
||||
### 查询某天体的最新位置
|
||||
```sql
|
||||
|
|
@ -373,78 +792,64 @@ LEFT JOIN LATERAL (
|
|||
WHERE b.id = '-31';
|
||||
```
|
||||
|
||||
### 查询某天体在时间范围内的轨迹
|
||||
### 查询用户的所有菜单权限
|
||||
```sql
|
||||
SELECT time, x, y, z
|
||||
FROM positions
|
||||
WHERE body_id = '-31'
|
||||
AND time BETWEEN '2025-01-01' AND '2025-12-31'
|
||||
ORDER BY time;
|
||||
SELECT DISTINCT m.id, m.name, m.title, m.path, m.icon
|
||||
FROM users u
|
||||
JOIN user_roles ur ON u.id = ur.user_id
|
||||
JOIN role_menus rm ON ur.role_id = rm.role_id
|
||||
JOIN menus m ON rm.menu_id = m.id
|
||||
WHERE u.id = 1 AND m.is_active = true
|
||||
ORDER BY m.sort_order;
|
||||
```
|
||||
|
||||
### 查询所有带纹理的行星
|
||||
### 查询所有运行中的任务
|
||||
```sql
|
||||
SELECT b.name, r.file_path
|
||||
FROM celestial_bodies b
|
||||
INNER JOIN resources r ON b.id = r.body_id
|
||||
WHERE b.type = 'planet' AND r.resource_type = 'texture';
|
||||
```
|
||||
|
||||
### 查询所有活跃的探测器
|
||||
```sql
|
||||
SELECT id, name, name_zh, metadata->>'status' as status
|
||||
FROM celestial_bodies
|
||||
WHERE type = 'probe'
|
||||
AND metadata->>'status' = 'active';
|
||||
SELECT id, task_type, description, progress, started_at
|
||||
FROM tasks
|
||||
WHERE status = 'running'
|
||||
ORDER BY started_at DESC;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 维护建议
|
||||
## 9. 维护建议
|
||||
|
||||
1. **定期清理过期缓存**:
|
||||
### 定期清理
|
||||
```sql
|
||||
-- 清理过期缓存
|
||||
DELETE FROM nasa_cache WHERE expires_at < NOW();
|
||||
|
||||
-- 清理旧任务记录(保留90天)
|
||||
DELETE FROM tasks WHERE created_at < NOW() - INTERVAL '90 days' AND status IN ('completed', 'failed');
|
||||
```
|
||||
|
||||
2. **分析表性能**:
|
||||
### 性能优化
|
||||
```sql
|
||||
-- 分析表
|
||||
ANALYZE celestial_bodies;
|
||||
ANALYZE positions;
|
||||
ANALYZE nasa_cache;
|
||||
```
|
||||
|
||||
3. **重建索引(如果性能下降)**:
|
||||
```sql
|
||||
-- 重建索引
|
||||
REINDEX TABLE positions;
|
||||
|
||||
-- 清理死元组
|
||||
VACUUM FULL positions;
|
||||
```
|
||||
|
||||
4. **备份数据库**:
|
||||
### 备份策略
|
||||
```bash
|
||||
# 每日备份
|
||||
pg_dump -U postgres cosmo_db > backup_$(date +%Y%m%d).sql
|
||||
|
||||
# 增量备份(推荐使用WAL归档)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 扩展建议
|
||||
## 文档版本
|
||||
|
||||
### 未来可能需要的表
|
||||
|
||||
1. **users** - 用户表(如果需要用户系统)
|
||||
2. **user_favorites** - 用户收藏(收藏的天体)
|
||||
3. **observation_logs** - 观测日志(用户记录)
|
||||
4. **simulation_configs** - 模拟配置(用户自定义场景)
|
||||
|
||||
### 性能优化扩展
|
||||
|
||||
1. **TimescaleDB** - 时间序列优化
|
||||
```sql
|
||||
CREATE EXTENSION IF NOT EXISTS timescaledb;
|
||||
SELECT create_hypertable('positions', 'time');
|
||||
```
|
||||
|
||||
2. **PostGIS** - 空间数据扩展
|
||||
```sql
|
||||
CREATE EXTENSION IF NOT EXISTS postgis;
|
||||
ALTER TABLE positions ADD COLUMN geom geometry(POINTZ, 4326);
|
||||
```
|
||||
- **版本**: 2.0
|
||||
- **更新日期**: 2025-12-05
|
||||
- **对应阶段**: Phase 2 完成
|
||||
- **下一步**: Phase 3 - 恒星际扩展
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ class CelestialBodyCreate(BaseModel):
|
|||
name_zh: Optional[str] = None
|
||||
type: str
|
||||
description: Optional[str] = None
|
||||
details: Optional[str] = None
|
||||
is_active: bool = True
|
||||
extra_data: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
|
@ -34,6 +35,7 @@ class CelestialBodyUpdate(BaseModel):
|
|||
name_zh: Optional[str] = None
|
||||
type: Optional[str] = None
|
||||
description: Optional[str] = None
|
||||
details: Optional[str] = None
|
||||
is_active: Optional[bool] = None
|
||||
extra_data: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
|
@ -58,7 +60,8 @@ async def create_celestial_body(
|
|||
|
||||
@router.get("/search")
|
||||
async def search_celestial_body(
|
||||
name: str = Query(..., description="Body name or ID to search in NASA Horizons")
|
||||
name: str = Query(..., description="Body name or ID to search in NASA Horizons"),
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Search for a celestial body in NASA Horizons database by name or ID
|
||||
|
|
@ -68,7 +71,7 @@ async def search_celestial_body(
|
|||
logger.info(f"Searching for celestial body: {name}")
|
||||
|
||||
try:
|
||||
result = horizons_service.search_body_by_name(name)
|
||||
result = await horizons_service.search_body_by_name(name, db)
|
||||
|
||||
if result["success"]:
|
||||
logger.info(f"Found body: {result['full_name']}")
|
||||
|
|
@ -172,6 +175,7 @@ async def get_body_info(body_id: str, db: AsyncSession = Depends(get_db)):
|
|||
name=body.name,
|
||||
type=body.type,
|
||||
description=body.description,
|
||||
details=body.details,
|
||||
launch_date=extra_data.get("launch_date"),
|
||||
status=extra_data.get("status"),
|
||||
)
|
||||
|
|
@ -211,6 +215,7 @@ async def list_bodies(
|
|||
"name_zh": body.name_zh,
|
||||
"type": body.type,
|
||||
"description": body.description,
|
||||
"details": body.details,
|
||||
"is_active": body.is_active,
|
||||
"resources": resources_by_type,
|
||||
"has_resources": len(resources) > 0,
|
||||
|
|
|
|||
|
|
@ -76,7 +76,8 @@ async def get_celestial_positions(
|
|||
# Check Redis cache first (persistent across restarts)
|
||||
start_str = "now"
|
||||
end_str = "now"
|
||||
redis_key = make_cache_key("positions", start_str, end_str, step)
|
||||
body_ids_str = body_ids if body_ids else "all"
|
||||
redis_key = make_cache_key("positions", start_str, end_str, step, body_ids_str)
|
||||
redis_cached = await redis_cache.get(redis_key)
|
||||
if redis_cached is not None:
|
||||
logger.info("Cache hit (Redis) for recent positions")
|
||||
|
|
@ -194,7 +195,8 @@ async def get_celestial_positions(
|
|||
# Cache in Redis for persistence across restarts
|
||||
start_str = start_dt.isoformat() if start_dt else "now"
|
||||
end_str = end_dt.isoformat() if end_dt else "now"
|
||||
redis_key = make_cache_key("positions", start_str, end_str, step)
|
||||
body_ids_str = body_ids if body_ids else "all"
|
||||
redis_key = make_cache_key("positions", start_str, end_str, step, body_ids_str)
|
||||
await redis_cache.set(redis_key, bodies_data, get_ttl_seconds("current_positions"))
|
||||
return CelestialDataResponse(bodies=bodies_data)
|
||||
else:
|
||||
|
|
@ -204,7 +206,8 @@ async def get_celestial_positions(
|
|||
# Check Redis cache first (persistent across restarts)
|
||||
start_str = start_dt.isoformat() if start_dt else "now"
|
||||
end_str = end_dt.isoformat() if end_dt else "now"
|
||||
redis_key = make_cache_key("positions", start_str, end_str, step)
|
||||
body_ids_str = body_ids if body_ids else "all" # Include body_ids in cache key
|
||||
redis_key = make_cache_key("positions", start_str, end_str, step, body_ids_str)
|
||||
redis_cached = await redis_cache.get(redis_key)
|
||||
if redis_cached is not None:
|
||||
logger.info("Cache hit (Redis) for positions")
|
||||
|
|
@ -222,7 +225,9 @@ async def get_celestial_positions(
|
|||
|
||||
# Filter bodies if body_ids specified
|
||||
if body_id_list:
|
||||
logger.info(f"Filtering bodies from {len(all_bodies)} total. Requested IDs: {body_id_list}")
|
||||
all_bodies = [b for b in all_bodies if b.id in body_id_list]
|
||||
logger.info(f"After filtering: {len(all_bodies)} bodies. IDs: {[b.id for b in all_bodies]}")
|
||||
|
||||
use_db_cache = True
|
||||
db_cached_bodies = []
|
||||
|
|
@ -334,15 +339,15 @@ async def get_celestial_positions(
|
|||
# Special handling for Cassini (mission ended 2017-09-15)
|
||||
elif body.id == "-82":
|
||||
cassini_date = datetime(2017, 9, 15, 11, 58, 0)
|
||||
pos_data = horizons_service.get_body_positions(body.id, cassini_date, cassini_date, step)
|
||||
pos_data = await horizons_service.get_body_positions(body.id, cassini_date, cassini_date, step)
|
||||
positions_list = [
|
||||
{"time": p.time.isoformat(), "x": p.x, "y": p.y, "z": p.z}
|
||||
for p in pos_data
|
||||
]
|
||||
|
||||
else:
|
||||
# Query NASA Horizons for other bodies
|
||||
pos_data = horizons_service.get_body_positions(body.id, start_dt, end_dt, step)
|
||||
# Download from NASA Horizons
|
||||
pos_data = await horizons_service.get_body_positions(body.id, start_dt, end_dt, step)
|
||||
positions_list = [
|
||||
{"time": p.time.isoformat(), "x": p.x, "y": p.y, "z": p.z}
|
||||
for p in pos_data
|
||||
|
|
|
|||
|
|
@ -217,7 +217,8 @@ async def download_positions(
|
|||
continue
|
||||
|
||||
# Download from NASA Horizons
|
||||
positions = horizons_service.get_body_positions(
|
||||
logger.info(f"Downloading position for body {body_id} on {date_str}")
|
||||
positions = await horizons_service.get_body_positions(
|
||||
body_id=body_id,
|
||||
start_time=target_date,
|
||||
end_time=target_date,
|
||||
|
|
@ -225,6 +226,7 @@ async def download_positions(
|
|||
)
|
||||
|
||||
if positions and len(positions) > 0:
|
||||
logger.info(f"Received position data for body {body_id}: x={positions[0].x}, y={positions[0].y}, z={positions[0].z}")
|
||||
# Save to database
|
||||
position_data = [{
|
||||
"time": target_date,
|
||||
|
|
@ -242,6 +244,17 @@ async def download_positions(
|
|||
source="nasa_horizons",
|
||||
session=db
|
||||
)
|
||||
logger.info(f"Saved position for body {body_id} on {date_str}")
|
||||
|
||||
# Invalidate caches for this date to ensure fresh data is served
|
||||
from app.services.redis_cache import redis_cache, make_cache_key
|
||||
start_str = target_date.isoformat()
|
||||
end_str = target_date.isoformat()
|
||||
# Clear both "all bodies" cache and specific body cache
|
||||
for body_ids_str in ["all", body_id]:
|
||||
redis_key = make_cache_key("positions", start_str, end_str, "1d", body_ids_str)
|
||||
await redis_cache.delete(redis_key)
|
||||
logger.debug(f"Invalidated cache: {redis_key}")
|
||||
|
||||
body_results["dates"].append({
|
||||
"date": date_str,
|
||||
|
|
@ -282,3 +295,89 @@ async def download_positions(
|
|||
except Exception as e:
|
||||
logger.error(f"Download failed: {e}")
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.post("/delete")
|
||||
async def delete_positions(
|
||||
request: DownloadPositionRequest,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Delete position data for specified bodies on specified dates
|
||||
|
||||
Args:
|
||||
- body_ids: List of celestial body IDs
|
||||
- dates: List of dates (YYYY-MM-DD format)
|
||||
|
||||
Returns:
|
||||
- Summary of deleted data
|
||||
"""
|
||||
logger.info(f"Deleting positions for {len(request.body_ids)} bodies on {len(request.dates)} dates")
|
||||
|
||||
try:
|
||||
total_deleted = 0
|
||||
from sqlalchemy import text
|
||||
|
||||
for body_id in request.body_ids:
|
||||
# Invalidate caches for this body
|
||||
from app.services.redis_cache import redis_cache, make_cache_key
|
||||
|
||||
# We need to loop dates to delete specific records
|
||||
for date_str in request.dates:
|
||||
try:
|
||||
# Parse date
|
||||
target_date = datetime.strptime(date_str, "%Y-%m-%d")
|
||||
# End of day
|
||||
end_of_day = target_date.replace(hour=23, minute=59, second=59, microsecond=999999)
|
||||
|
||||
# Execute deletion
|
||||
# Use raw SQL via text() for the range delete here;
# position_service may not provide a bulk delete helper, so it is done inline.
|
||||
|
||||
stmt = text("""
|
||||
DELETE FROM positions
|
||||
WHERE body_id = :body_id
|
||||
AND time >= :start_time
|
||||
AND time <= :end_time
|
||||
""")
|
||||
|
||||
result = await db.execute(stmt, {
|
||||
"body_id": body_id,
|
||||
"start_time": target_date,
|
||||
"end_time": end_of_day
|
||||
})
|
||||
|
||||
deleted_count = result.rowcount
|
||||
total_deleted += deleted_count
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.info(f"Deleted {deleted_count} records for {body_id} on {date_str}")
|
||||
|
||||
# Invalidate cache for this specific date/body combo
|
||||
# Note: This is approximate as cache keys might cover ranges
|
||||
start_str = target_date.isoformat()
|
||||
end_str = target_date.isoformat()
|
||||
# Clear both "all bodies" cache and specific body cache
|
||||
for body_ids_str in ["all", body_id]:
|
||||
# We try to clear '1d' step cache
|
||||
redis_key = make_cache_key("positions", start_str, end_str, "1d", body_ids_str)
|
||||
await redis_cache.delete(redis_key)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to delete data for {body_id} on {date_str}: {e}")
|
||||
|
||||
await db.commit()
|
||||
|
||||
# Clear general patterns to be safe if ranges were cached
|
||||
await redis_cache.clear_pattern("positions:*")
|
||||
|
||||
return {
|
||||
"message": f"Successfully deleted {total_deleted} position records",
|
||||
"total_deleted": total_deleted
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
await db.rollback()
|
||||
logger.error(f"Delete failed: {e}")
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
|
|
|||
|
|
@ -3,7 +3,9 @@ System Settings API Routes
|
|||
"""
|
||||
from fastapi import APIRouter, HTTPException, Query, Depends, status
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy import select, func
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import datetime
|
||||
import logging
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
|
@ -11,6 +13,7 @@ from app.services.system_settings_service import system_settings_service
|
|||
from app.services.redis_cache import redis_cache
|
||||
from app.services.cache import cache_service
|
||||
from app.database import get_db
|
||||
from app.models.db import Position
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -251,3 +254,51 @@ async def initialize_default_settings(
|
|||
await db.commit()
|
||||
|
||||
return {"message": "Default settings initialized successfully"}
|
||||
|
||||
|
||||
@router.get("/data-cutoff-date")
|
||||
async def get_data_cutoff_date(
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Get the data cutoff date based on the Sun's (ID=10) last available data
|
||||
|
||||
This endpoint returns the latest date for which we have position data
|
||||
in the database. It's used by the frontend to determine:
|
||||
- The current date to display on the homepage
|
||||
- The maximum date for timeline playback
|
||||
|
||||
Returns:
|
||||
- cutoff_date: ISO format date string (YYYY-MM-DD)
|
||||
- timestamp: Unix timestamp
|
||||
- datetime: Full ISO datetime string
|
||||
"""
|
||||
try:
|
||||
# Query the latest position data for the Sun (body_id = 10)
|
||||
stmt = select(func.max(Position.time)).where(
|
||||
Position.body_id == '10'
|
||||
)
|
||||
result = await db.execute(stmt)
|
||||
latest_time = result.scalar_one_or_none()
|
||||
|
||||
if latest_time is None:
|
||||
# No data available, return current date as fallback
|
||||
logger.warning("No position data found for Sun (ID=10), using current date as fallback")
|
||||
latest_time = datetime.utcnow()
|
||||
|
||||
# Format the response
|
||||
cutoff_date = latest_time.strftime("%Y-%m-%d")
|
||||
|
||||
return {
|
||||
"cutoff_date": cutoff_date,
|
||||
"timestamp": int(latest_time.timestamp()),
|
||||
"datetime": latest_time.isoformat(),
|
||||
"message": "Data cutoff date retrieved successfully"
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error retrieving data cutoff date: {str(e)}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=f"Failed to retrieve data cutoff date: {str(e)}"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ class Settings(BaseSettings):
|
|||
)
|
||||
|
||||
# Application
|
||||
app_name: str = "Cosmo - Deep Space Explorer"
|
||||
app_name: str = "COSMO - Deep Space Explorer"
|
||||
api_prefix: str = "/api"
|
||||
|
||||
# CORS settings - stored as string in env, converted to list
|
||||
|
|
@ -67,6 +67,7 @@ class Settings(BaseSettings):
|
|||
# Proxy settings (for accessing NASA JPL Horizons API in China)
|
||||
http_proxy: str = ""
|
||||
https_proxy: str = ""
|
||||
nasa_api_timeout: int = 30
|
||||
|
||||
@property
|
||||
def proxy_dict(self) -> dict[str, str] | None:
|
||||
|
|
|
|||
|
|
@ -45,6 +45,7 @@ class BodyInfo(BaseModel):
|
|||
name: str
|
||||
type: Literal["planet", "probe", "star", "dwarf_planet", "satellite", "comet"]
|
||||
description: str
|
||||
details: str | None = None
|
||||
launch_date: str | None = None
|
||||
status: str | None = None
|
||||
|
||||
|
|
@ -200,4 +201,26 @@ CELESTIAL_BODIES = {
|
|||
"type": "dwarf_planet",
|
||||
"description": "鸟神星,柯伊伯带中第二亮的天体",
|
||||
},
|
||||
# Comets / Interstellar Objects
|
||||
"1I": {
|
||||
"name": "1I/'Oumuamua",
|
||||
"name_zh": "奥陌陌",
|
||||
"type": "comet",
|
||||
"description": "原定名 1I/2017 U1,是已知第一颗经过太阳系的星际天体。它于2017年10月18日(UT)在距离地球约0.2 AU(30,000,000 km;19,000,000 mi)处被泛星1号望远镜发现,并在极端双曲线的轨道上运行。",
|
||||
"status": "active",
|
||||
},
|
||||
"3I": {
|
||||
"name": "3I/ATLAS",
|
||||
"name_zh": "3I/ATLAS",
|
||||
"type": "comet",
|
||||
"description": "又称C/2025 N1 (ATLAS),是一颗星际彗星,由位于智利里奥乌尔塔多的小行星陆地撞击持续报警系统于2025年7月1日发现",
|
||||
"status": "active",
|
||||
},
|
||||
"90000030": {
|
||||
"name": "1P/Halley",
|
||||
"name_zh": "哈雷彗星",
|
||||
"type": "comet",
|
||||
"description": "哈雷彗星(正式名称为1P/Halley)是著名的短周期彗星,每隔75-76年就能从地球上被观测到[5],亦是唯一能用肉眼直接从地球看到的短周期彗星,人的一生中可能经历两次其来访。",
|
||||
"status": "active",
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ class CelestialBody(Base):
|
|||
name_zh = Column(String(200), nullable=True, comment="Chinese name")
|
||||
type = Column(String(50), nullable=False, comment="Body type")
|
||||
description = Column(Text, nullable=True, comment="Description")
|
||||
details = Column(Text, nullable=True, comment="Detailed description (Markdown)")
|
||||
is_active = Column(Boolean, nullable=True, comment="Active status for probes (True=active, False=inactive)")
|
||||
extra_data = Column(JSONB, nullable=True, comment="Extended metadata (JSON)")
|
||||
created_at = Column(TIMESTAMP, server_default=func.now())
|
||||
|
|
|
|||
|
|
@ -2,12 +2,12 @@
|
|||
NASA JPL Horizons data query service
|
||||
"""
|
||||
from datetime import datetime, timedelta
|
||||
from astroquery.jplhorizons import Horizons
|
||||
from astropy.time import Time
|
||||
import logging
|
||||
import re
|
||||
import httpx
|
||||
import os
|
||||
from sqlalchemy.ext.asyncio import AsyncSession # Added this import
|
||||
|
||||
from app.models.celestial import Position, CelestialBody
|
||||
from app.config import settings
|
||||
|
|
@ -21,15 +21,7 @@ class HorizonsService:
|
|||
def __init__(self):
|
||||
"""Initialize the service"""
|
||||
self.location = "@sun" # Heliocentric coordinates
|
||||
|
||||
# Set proxy for astroquery if configured
|
||||
# astroquery uses standard HTTP_PROXY and HTTPS_PROXY environment variables
|
||||
if settings.http_proxy:
|
||||
os.environ['HTTP_PROXY'] = settings.http_proxy
|
||||
logger.info(f"Set HTTP_PROXY for astroquery: {settings.http_proxy}")
|
||||
if settings.https_proxy:
|
||||
os.environ['HTTPS_PROXY'] = settings.https_proxy
|
||||
logger.info(f"Set HTTPS_PROXY for astroquery: {settings.https_proxy}")
|
||||
# Proxy is handled via settings.proxy_dict in each request
|
||||
|
||||
async def get_object_data_raw(self, body_id: str) -> str:
|
||||
"""
|
||||
|
|
@ -56,13 +48,13 @@ class HorizonsService:
|
|||
|
||||
try:
|
||||
# Configure proxy if available
|
||||
client_kwargs = {"timeout": 5.0}
|
||||
client_kwargs = {"timeout": settings.nasa_api_timeout}
|
||||
if settings.proxy_dict:
|
||||
client_kwargs["proxies"] = settings.proxy_dict
|
||||
logger.info(f"Using proxy for NASA API: {settings.proxy_dict}")
|
||||
|
||||
async with httpx.AsyncClient(**client_kwargs) as client:
|
||||
logger.info(f"Fetching raw data for body {body_id}")
|
||||
logger.info(f"Fetching raw data for body {body_id} with timeout {settings.nasa_api_timeout}s")
|
||||
response = await client.get(url, params=params)
|
||||
|
||||
if response.status_code != 200:
|
||||
|
|
@ -73,7 +65,7 @@ class HorizonsService:
|
|||
logger.error(f"Error fetching raw data for {body_id}: {str(e)}")
|
||||
raise
|
||||
|
||||
def get_body_positions(
|
||||
async def get_body_positions(
|
||||
self,
|
||||
body_id: str,
|
||||
start_time: datetime | None = None,
|
||||
|
|
@ -99,157 +91,254 @@ class HorizonsService:
|
|||
if end_time is None:
|
||||
end_time = start_time
|
||||
|
||||
# Convert to astropy Time objects for single point queries
|
||||
# For ranges, use ISO format strings which Horizons prefers
|
||||
# Format time for Horizons
|
||||
# NASA Horizons accepts: 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS'
|
||||
# When querying a single point (same start/end date), we need STOP > START
|
||||
# So STOP is pushed forward (by one day, with step='1d') to still return a single point
|
||||
|
||||
# Create time range
|
||||
if start_time == end_time:
|
||||
# Single time point - use JD format
|
||||
epochs = Time(start_time).jd
|
||||
if start_time.date() == end_time.date():
|
||||
# Single day query - use the date at 00:00 and next second
|
||||
start_str = start_time.strftime('%Y-%m-%d')
|
||||
# For STOP, add 1 day to satisfy STOP > START requirement
|
||||
# But use step='1d' so we only get one data point
|
||||
end_time_adjusted = start_time + timedelta(days=1)
|
||||
end_str = end_time_adjusted.strftime('%Y-%m-%d')
|
||||
else:
|
||||
# Time range - use ISO format (YYYY-MM-DD HH:MM)
|
||||
# Horizons expects this format for ranges
|
||||
start_str = start_time.strftime('%Y-%m-%d %H:%M')
|
||||
end_str = end_time.strftime('%Y-%m-%d %H:%M')
|
||||
epochs = {"start": start_str, "stop": end_str, "step": step}
|
||||
# Multi-day range query
|
||||
start_str = start_time.strftime('%Y-%m-%d')
|
||||
end_str = end_time.strftime('%Y-%m-%d')
|
||||
|
||||
logger.info(f"Querying Horizons for body {body_id} from {start_time} to {end_time}")
|
||||
logger.info(f"Querying Horizons (httpx) for body {body_id} from {start_str} to {end_str}")
|
||||
|
||||
# Query JPL Horizons
|
||||
obj = Horizons(id=body_id, location=self.location, epochs=epochs)
|
||||
vectors = obj.vectors()
|
||||
url = "https://ssd.jpl.nasa.gov/api/horizons.api"
|
||||
cmd_val = f"'{body_id}'" if not body_id.startswith("'") else body_id
|
||||
|
||||
# Extract positions
|
||||
positions = []
|
||||
if isinstance(epochs, dict):
|
||||
# Multiple time points
|
||||
for i in range(len(vectors)):
|
||||
pos = Position(
|
||||
time=Time(vectors["datetime_jd"][i], format="jd").datetime,
|
||||
x=float(vectors["x"][i]),
|
||||
y=float(vectors["y"][i]),
|
||||
z=float(vectors["z"][i]),
|
||||
)
|
||||
positions.append(pos)
|
||||
else:
|
||||
# Single time point
|
||||
pos = Position(
|
||||
time=start_time,
|
||||
x=float(vectors["x"][0]),
|
||||
y=float(vectors["y"][0]),
|
||||
z=float(vectors["z"][0]),
|
||||
)
|
||||
positions.append(pos)
|
||||
params = {
|
||||
"format": "text",
|
||||
"COMMAND": cmd_val,
|
||||
"OBJ_DATA": "NO",
|
||||
"MAKE_EPHEM": "YES",
|
||||
"EPHEM_TYPE": "VECTORS",
|
||||
"CENTER": self.location,
|
||||
"START_TIME": start_str,
|
||||
"STOP_TIME": end_str,
|
||||
"STEP_SIZE": step,
|
||||
"CSV_FORMAT": "YES",
|
||||
"OUT_UNITS": "AU-D"
|
||||
}
|
||||
|
||||
logger.info(f"Successfully retrieved {len(positions)} positions for body {body_id}")
|
||||
return positions
|
||||
# Configure proxy if available
|
||||
client_kwargs = {"timeout": settings.nasa_api_timeout}
|
||||
if settings.proxy_dict:
|
||||
client_kwargs["proxies"] = settings.proxy_dict
|
||||
logger.info(f"Using proxy for NASA API: {settings.proxy_dict}")
|
||||
|
||||
async with httpx.AsyncClient(**client_kwargs) as client:
|
||||
response = await client.get(url, params=params)
|
||||
|
||||
if response.status_code != 200:
|
||||
raise Exception(f"NASA API returned status {response.status_code}")
|
||||
|
||||
return self._parse_vectors(response.text)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error querying Horizons for body {body_id}: {str(e)}")
|
||||
raise
|
||||
|
||||
def search_body_by_name(self, name: str) -> dict:
|
||||
def _parse_vectors(self, text: str) -> list[Position]:
|
||||
"""
|
||||
Search for a celestial body by name in NASA Horizons database
|
||||
Parse Horizons CSV output for vector data
|
||||
|
||||
Args:
|
||||
name: Body name or ID to search for
|
||||
Format looks like:
|
||||
$$SOE
|
||||
2460676.500000000, A.D. 2025-Jan-01 00:00:00.0000, 9.776737278236609E-01, -1.726677228793678E-01, -1.636678733289160E-05, ...
|
||||
$$EOE
|
||||
"""
|
||||
positions = []
|
||||
|
||||
Returns:
|
||||
Dictionary with search results:
|
||||
{
|
||||
"success": bool,
|
||||
"id": str (extracted or input),
|
||||
"name": str (short name),
|
||||
"full_name": str (complete name from NASA),
|
||||
"error": str (if failed)
|
||||
}
|
||||
# Extract data block between $$SOE and $$EOE
|
||||
match = re.search(r'\$\$SOE(.*?)\$\$EOE', text, re.DOTALL)
|
||||
if not match:
|
||||
logger.warning("No data block ($$SOE...$$EOE) found in Horizons response")
|
||||
# Log full response for debugging
|
||||
logger.info(f"Full response for debugging:\n{text}")
|
||||
return []
|
||||
|
||||
data_block = match.group(1).strip()
|
||||
lines = data_block.split('\n')
|
||||
|
||||
for line in lines:
|
||||
parts = [p.strip() for p in line.split(',')]
|
||||
if len(parts) < 5:
|
||||
continue
|
||||
|
||||
try:
|
||||
# Index 0: JD, 1: Date, 2: X, 3: Y, 4: Z, 5: VX, 6: VY, 7: VZ
|
||||
# Time parsing: 2460676.500000000 is JD.
|
||||
# A.D. 2025-Jan-01 00:00:00.0000 is Calendar.
|
||||
# We can use JD or parse the string. Using JD via astropy is accurate.
|
||||
|
||||
jd_str = parts[0]
|
||||
time_obj = Time(float(jd_str), format="jd").datetime
|
||||
|
||||
x = float(parts[2])
|
||||
y = float(parts[3])
|
||||
z = float(parts[4])
|
||||
|
||||
# Velocity if available (indices 5, 6, 7)
|
||||
vx = float(parts[5]) if len(parts) > 5 else None
|
||||
vy = float(parts[6]) if len(parts) > 6 else None
|
||||
vz = float(parts[7]) if len(parts) > 7 else None
|
||||
|
||||
pos = Position(
|
||||
time=time_obj,
|
||||
x=x,
|
||||
y=y,
|
||||
z=z,
|
||||
vx=vx,
|
||||
vy=vy,
|
||||
vz=vz
|
||||
)
|
||||
positions.append(pos)
|
||||
except ValueError as e:
|
||||
logger.warning(f"Failed to parse line: {line}. Error: {e}")
|
||||
continue
|
||||
|
||||
return positions
|
||||
|
||||
    async def search_body_by_name(self, name: str, db: AsyncSession) -> dict:
        """
        Search for a celestial body by name in NASA Horizons database using httpx.
        This method replaces the astroquery-based search to unify proxy and timeout control.
        """
        try:
-           logger.info(f"Searching Horizons for: {name}")
+           logger.info(f"Searching Horizons (httpx) for: {name}")

-           # Try to query with the name
-           obj = Horizons(id=name, location=self.location)
-           vec = obj.vectors()
+           url = "https://ssd.jpl.nasa.gov/api/horizons.api"
+           cmd_val = f"'{name}'"  # Name can be ID or actual name

-           # Get the full target name from response
-           targetname = vec['targetname'][0]
-           logger.info(f"Found target: {targetname}")
-
-           # Extract ID and name from targetname
-           # Possible formats:
-           # 1. "136472 Makemake (2005 FY9)" - ID at start
-           # 2. "Voyager 1 (spacecraft) (-31)" - ID in parentheses
-           # 3. "Mars (499)" - ID in parentheses
-           # 4. "Parker Solar Probe (spacecraft)" - no ID
-           # 5. "Hubble Space Telescope (spacecra" - truncated
-
-           numeric_id = None
-           short_name = None
-
-           # Check if input is already a numeric ID
-           input_is_numeric = re.match(r'^-?\d+$', name.strip())
-           if input_is_numeric:
-               numeric_id = name.strip()
-               # Extract name from targetname
-               # Remove leading ID if present
-               name_part = re.sub(r'^\d+\s+', '', targetname)
-               short_name = name_part.split('(')[0].strip()
-           else:
-               # Try to extract ID from start of targetname (format: "136472 Makemake")
-               start_match = re.match(r'^(\d+)\s+(.+)', targetname)
-               if start_match:
-                   numeric_id = start_match.group(1)
-                   short_name = start_match.group(2).split('(')[0].strip()
-               else:
-                   # Try to extract ID from parentheses (format: "Name (-31)" or "Name (499)")
-                   id_match = re.search(r'\((-?\d+)\)', targetname)
-                   if id_match:
-                       numeric_id = id_match.group(1)
-                       short_name = targetname.split('(')[0].strip()
-                   else:
-                       # No numeric ID found, use input name as ID
-                       numeric_id = name
-                       short_name = targetname.split('(')[0].strip()
-
-           return {
-               "success": True,
-               "id": numeric_id,
-               "name": short_name,
-               "full_name": targetname,
-               "error": None
-           }
+           params = {
+               "format": "text",
+               "COMMAND": cmd_val,
+               "OBJ_DATA": "YES",        # Request object data to get canonical name/ID
+               "MAKE_EPHEM": "NO",       # Don't need ephemeris
+               "EPHEM_TYPE": "OBSERVER", # Arbitrary, won't be used since MAKE_EPHEM=NO
+               "CENTER": "@ssb"          # Search from Solar System Barycenter for consistent object IDs
+           }
+
+           timeout = settings.nasa_api_timeout
+           client_kwargs = {"timeout": timeout}
+           if settings.proxy_dict:
+               client_kwargs["proxies"] = settings.proxy_dict
+               logger.info(f"Using proxy for NASA API: {settings.proxy_dict}")
+
+           async with httpx.AsyncClient(**client_kwargs) as client:
+               response = await client.get(url, params=params)
+
+           if response.status_code != 200:
+               raise Exception(f"NASA API returned status {response.status_code}")
+
+           response_text = response.text
+
+           # Log full response for debugging (temporarily)
+           logger.info(f"Full NASA API response for '{name}':\n{response_text}")
+
+           # Check for "Ambiguous target name"
+           if "Ambiguous target name" in response_text:
+               logger.warning(f"Ambiguous target name for: {name}")
+               return {
+                   "success": False,
+                   "id": None,
+                   "name": None,
+                   "full_name": None,
+                   "error": "名称不唯一,请提供更具体的名称或 JPL Horizons ID"
+               }
+
+           # Check for "No matches found" or "Unknown target"
+           if "No matches found" in response_text or "Unknown target" in response_text:
+               logger.warning(f"No matches found for: {name}")
+               return {
+                   "success": False,
+                   "id": None,
+                   "name": None,
+                   "full_name": None,
+                   "error": "未找到匹配的天体,请检查名称或 ID"
+               }
+
+           # Try multiple parsing patterns for different response formats
+           # Pattern 1: "Target body name: Jupiter Barycenter (599)"
+           target_name_match = re.search(r"Target body name:\s*(.+?)\s+\((\-?\d+)\)", response_text)
+
+           if not target_name_match:
+               # Pattern 2: " Revised: Mar 12, 2021   Ganymede / (Jupiter)   503"
+               # This pattern appears in the header section of many bodies
+               revised_match = re.search(r"Revised:.*?\s{2,}(.+?)\s{2,}(\-?\d+)\s*$", response_text, re.MULTILINE)
+               if revised_match:
+                   full_name = revised_match.group(1).strip()
+                   numeric_id = revised_match.group(2).strip()
+                   short_name = full_name.split('/')[0].strip()  # Remove parent body info like "/ (Jupiter)"
+
+                   logger.info(f"Found target (pattern 2): {full_name} with ID: {numeric_id}")
+                   return {
+                       "success": True,
+                       "id": numeric_id,
+                       "name": short_name,
+                       "full_name": full_name,
+                       "error": None
+                   }
+
+           if not target_name_match:
+               # Pattern 3: Look for body name in title section (works for comets and other objects)
+               # Example: "JPL/HORIZONS   ATLAS (C/2025 N1)   2025-Dec-"
+               title_match = re.search(r"JPL/HORIZONS\s+(.+?)\s{2,}", response_text)
+               if title_match:
+                   full_name = title_match.group(1).strip()
+                   # For this pattern, the ID was in the original COMMAND, use it
+                   numeric_id = name.strip("'\"")
+                   short_name = full_name.split('(')[0].strip()
+
+                   logger.info(f"Found target (pattern 3): {full_name} with ID: {numeric_id}")
+                   return {
+                       "success": True,
+                       "id": numeric_id,
+                       "name": short_name,
+                       "full_name": full_name,
+                       "error": None
+                   }
+
+           if target_name_match:
+               full_name = target_name_match.group(1).strip()
+               numeric_id = target_name_match.group(2).strip()
+               short_name = full_name.split('(')[0].strip()  # Remove any part after '('
+
+               logger.info(f"Found target (pattern 1): {full_name} with ID: {numeric_id}")
+               return {
+                   "success": True,
+                   "id": numeric_id,
+                   "name": short_name,
+                   "full_name": full_name,
+                   "error": None
+               }
+           else:
+               # Fallback if specific pattern not found, might be a valid but weird response
+               logger.warning(f"Could not parse target name/ID from response for: {name}. Response snippet: {response_text[:500]}")
+               return {
+                   "success": False,
+                   "id": None,
+                   "name": None,
+                   "full_name": None,
+                   "error": f"未能解析 JPL Horizons 响应,请尝试精确 ID: {name}"
+               }

        except Exception as e:
            error_msg = str(e)
            logger.error(f"Error searching for {name}: {error_msg}")

-           # Check for specific error types
-           if 'Ambiguous target name' in error_msg:
-               return {
-                   "success": False,
-                   "id": None,
-                   "name": None,
-                   "full_name": None,
-                   "error": "名称不唯一,请提供更具体的名称或 JPL Horizons ID"
-               }
-           elif 'No matches found' in error_msg or 'Unknown target' in error_msg:
-               return {
-                   "success": False,
-                   "id": None,
-                   "name": None,
-                   "full_name": None,
-                   "error": "未找到匹配的天体,请检查名称或 ID"
-               }
-           else:
-               return {
-                   "success": False,
-                   "id": None,
-                   "name": None,
-                   "full_name": None,
-                   "error": f"查询失败: {error_msg}"
-               }
+           return {
+               "success": False,
+               "id": None,
+               "name": None,
+               "full_name": None,
+               "error": f"查询失败: {error_msg}"
+           }


# Singleton instance
horizons_service = HorizonsService()
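As a quick sanity check of the three fallback patterns above, here is a small standalone sketch. The sample header strings are made up for illustration and only approximate real Horizons output:

```python
import re

samples = [
    "Target body name: Jupiter Barycenter (599)     {source: ...}",    # pattern 1
    " Revised: Mar 12, 2021        Ganymede / (Jupiter)          503",  # pattern 2
    "JPL/HORIZONS        ATLAS (C/2025 N1)         2025-Dec-04",        # pattern 3
]

for text in samples:
    m1 = re.search(r"Target body name:\s*(.+?)\s+\((\-?\d+)\)", text)
    m2 = re.search(r"Revised:.*?\s{2,}(.+?)\s{2,}(\-?\d+)\s*$", text, re.MULTILINE)
    m3 = re.search(r"JPL/HORIZONS\s+(.+?)\s{2,}", text)
    if m1:
        print("pattern 1:", m1.group(1), m1.group(2))        # Jupiter Barycenter 599
    elif m2:
        print("pattern 2:", m2.group(1).split('/')[0].strip(), m2.group(2))  # Ganymede 503
    elif m3:
        print("pattern 3:", m3.group(1))                      # ATLAS (C/2025 N1)
```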
@@ -60,7 +60,7 @@ async def download_positions_task(task_id: int, body_ids: List[str], dates: List
            success_count += 1
        else:
            # Download
-           positions = horizons_service.get_body_positions(
+           positions = await horizons_service.get_body_positions(
                body_id=body_id,
                start_time=target_date,
                end_time=target_date,
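Both call sites change only because `get_body_positions` became a coroutine in the httpx rewrite. A tiny sketch of the failure mode the `await` avoids, with a stub class standing in for the real service (the stub and its return value are invented for illustration):

```python
import asyncio

class StubHorizonsService:
    async def get_body_positions(self, body_id: str):
        return [f"position for {body_id}"]

async def main():
    svc = StubHorizonsService()
    not_awaited = svc.get_body_positions("399")  # calling without await yields a coroutine object, not data
    print(type(not_awaited))                     # <class 'coroutine'>
    not_awaited.close()                          # close it to avoid a "never awaited" warning
    positions = await svc.get_body_positions("399")
    print(positions)                             # ['position for 399']

asyncio.run(main())
```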
@@ -150,7 +150,7 @@ class OrbitService:

        try:
            # Get positions from Horizons (synchronous call)
-           positions = horizons_service.get_body_positions(
+           positions = await horizons_service.get_body_positions(
                body_id=body_id,
                start_time=start_time,
                end_time=end_time,
@@ -0,0 +1,68 @@
"""
Check database status: bodies, positions, resources
"""
import asyncio
import os
import sys
from datetime import datetime

# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from app.database import get_db
from app.models.db.celestial_body import CelestialBody
from app.models.db.position import Position
from app.models.db.resource import Resource
from sqlalchemy import select, func

async def check_status():
    """Check database status"""
    print("🔍 Checking database status...")

    async for session in get_db():
        try:
            # 1. Check Celestial Bodies
            stmt = select(func.count(CelestialBody.id))
            result = await session.execute(stmt)
            body_count = result.scalar()
            print(f"✅ Celestial Bodies: {body_count}")

            # 2. Check Positions
            stmt = select(func.count(Position.id))
            result = await session.execute(stmt)
            position_count = result.scalar()
            print(f"✅ Total Positions: {position_count}")

            # Check positions for Sun (10) and Earth (399)
            for body_id in ['10', '399']:
                stmt = select(func.count(Position.id)).where(Position.body_id == body_id)
                result = await session.execute(stmt)
                count = result.scalar()
                print(f"  - Positions for {body_id}: {count}")

                if count > 0:
                    # Get latest position date
                    stmt = select(func.max(Position.time)).where(Position.body_id == body_id)
                    result = await session.execute(stmt)
                    latest_date = result.scalar()
                    print(f"    Latest date: {latest_date}")

            # 3. Check Resources
            stmt = select(func.count(Resource.id))
            result = await session.execute(stmt)
            resource_count = result.scalar()
            print(f"✅ Total Resources: {resource_count}")

            # Check resources for Sun (10)
            stmt = select(Resource).where(Resource.body_id == '10')
            result = await session.execute(stmt)
            resources = result.scalars().all()
            print(f"  - Resources for Sun (10): {len(resources)}")
            for r in resources:
                print(f"    - {r.resource_type}: {r.file_path}")

        finally:
            break

if __name__ == "__main__":
    asyncio.run(check_status())
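This script and the ones below all reuse the same idiom: drive the `get_db()` async generator manually and break out after the first session, instead of going through FastAPI's dependency injection. A minimal sketch of that idiom with a stand-in generator (the real `get_db` lives in `app.database` and yields an `AsyncSession`):

```python
import asyncio

async def get_db():
    """Stand-in for app.database.get_db: an async generator yielding one session."""
    session = object()  # placeholder for an AsyncSession
    try:
        yield session
    finally:
        # Cleanup runs when the generator is finalized (e.g. at event-loop shutdown).
        print("session cleanup")

async def run_once():
    # Iterate the dependency generator directly; break after the first (only) session.
    async for session in get_db():
        try:
            print("using session:", session)
        finally:
            break

asyncio.run(run_once())
```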
@@ -0,0 +1,50 @@
import asyncio
import os
import sys
from datetime import datetime

# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from app.database import get_db
from app.models.db import Position
from sqlalchemy import select, func

async def check_sun_data():
    """Check data for 2025-12-04 00:00:00"""
    async for session in get_db():
        try:
            target_time = datetime(2025, 12, 4, 0, 0, 0)
            print(f"Checking data for all bodies at {target_time}...")

            # Get all bodies
            from app.models.db.celestial_body import CelestialBody
            stmt = select(CelestialBody.id, CelestialBody.name, CelestialBody.type).where(CelestialBody.is_active != False)
            result = await session.execute(stmt)
            all_bodies = result.all()
            print(f"Total active bodies: {len(all_bodies)}")

            # Check positions for each
            missing_bodies = []
            for body_id, body_name, body_type in all_bodies:
                stmt = select(func.count(Position.id)).where(
                    Position.body_id == body_id,
                    Position.time == target_time
                )
                result = await session.execute(stmt)
                count = result.scalar()
                if count == 0:
                    missing_bodies.append(f"{body_name} ({body_id}) [{body_type}]")

            if missing_bodies:
                print(f"❌ Missing data for {len(missing_bodies)} bodies:")
                for b in missing_bodies:
                    print(f"  - {b}")
            else:
                print("✅ All active bodies have data for this time!")

        finally:
            break

if __name__ == "__main__":
    asyncio.run(check_sun_data())
@@ -0,0 +1,58 @@
"""
Fix missing Sun position
"""
import asyncio
import os
import sys
from datetime import datetime

# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from app.database import get_db
from app.models.db import Position

async def fix_sun_position():
    """Insert missing position for Sun at 2025-12-04 00:00:00"""
    async for session in get_db():
        try:
            target_time = datetime(2025, 12, 4, 0, 0, 0)
            print(f"Fixing Sun position for {target_time}...")

            # Check if it exists first (double check)
            from sqlalchemy import select, func
            stmt = select(func.count(Position.id)).where(
                Position.body_id == '10',
                Position.time == target_time
            )
            result = await session.execute(stmt)
            count = result.scalar()

            if count > 0:
                print("✅ Position already exists!")
                return

            # Insert
            new_pos = Position(
                body_id='10',
                time=target_time,
                x=0.0,
                y=0.0,
                z=0.0,
                vx=0.0,
                vy=0.0,
                vz=0.0,
                source='calculated'
            )
            session.add(new_pos)
            await session.commit()
            print("✅ Successfully inserted Sun position!")

        except Exception as e:
            print(f"❌ Error: {e}")
            await session.rollback()
        finally:
            break

if __name__ == "__main__":
    asyncio.run(fix_sun_position())
@@ -0,0 +1,39 @@
import asyncio
import os
import sys
from sqlalchemy import select
from datetime import datetime

# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from app.database import get_db
from app.models.db import Position

async def inspect_sun_positions():
    async for session in get_db():
        try:
            # List all positions for Sun
            stmt = select(Position.time).where(Position.body_id == '10').order_by(Position.time.desc()).limit(10)
            result = await session.execute(stmt)
            times = result.scalars().all()

            print("Recent Sun positions:")
            for t in times:
                print(f"  - {t} (type: {type(t)})")

            # Check specifically for 2025-12-04
            target = datetime(2025, 12, 4, 0, 0, 0)
            stmt = select(Position).where(
                Position.body_id == '10',
                Position.time == target
            )
            result = await session.execute(stmt)
            pos = result.scalar()
            print(f"\nExact match for {target}: {pos}")

        finally:
            break

if __name__ == "__main__":
    asyncio.run(inspect_sun_positions())
@@ -0,0 +1,53 @@
"""
Reset position data to fix units (KM -> AU)
"""
import asyncio
import os
import sys

# Add backend directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from app.database import get_db
from app.models.db import Position
from app.services.redis_cache import redis_cache
from sqlalchemy import text

async def reset_data():
    """Clear positions and cache to force re-fetch in AU"""
    print("🧹 Clearing old data (KM) to prepare for AU...")

    async for session in get_db():
        try:
            # Clear positions table
            print("  Truncating positions table...")
            await session.execute(text("TRUNCATE TABLE positions RESTART IDENTITY CASCADE"))

            # Clear nasa_cache table; `nasa_cache_service` suggests it is a database table,
            # so tolerate failure if it turns out not to exist
            print("  Truncating nasa_cache table...")
            try:
                await session.execute(text("TRUNCATE TABLE nasa_cache RESTART IDENTITY CASCADE"))
            except Exception as e:
                print(f"  (Note: nasa_cache might not exist or failed: {e})")

            await session.commit()
            print("✅ Database tables cleared.")

            # Clear Redis
            await redis_cache.connect()
            await redis_cache.clear_pattern("positions:*")
            await redis_cache.clear_pattern("nasa:*")
            print("✅ Redis cache cleared.")
            await redis_cache.disconnect()

        except Exception as e:
            print(f"❌ Error: {e}")
            await session.rollback()
        finally:
            break

if __name__ == "__main__":
    asyncio.run(reset_data())
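For context on the unit fix this script prepares for: Horizons vector output can be requested in kilometers or astronomical units, and 1 AU = 149,597,870.7 km (the IAU definition). A one-line conversion sketch; the sample value is illustrative:

```python
KM_PER_AU = 149_597_870.7  # IAU 2012 definition of the astronomical unit, in km

def km_to_au(x_km: float) -> float:
    """Convert a position component from kilometers to astronomical units."""
    return x_km / KM_PER_AU

# e.g. a body roughly one Earth-Sun distance away
print(km_to_au(149_597_870.7))  # 1.0
```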
Binary file not shown. (new image added; size: 69 KiB)