cosmo/backend/app/api/routes.py

"""
API routes for celestial data
"""
from datetime import datetime, timedelta
from fastapi import APIRouter, HTTPException, Query, Depends, UploadFile, File, status
from sqlalchemy.ext.asyncio import AsyncSession
from typing import Optional, Dict, Any
import logging
from pydantic import BaseModel
from app.models.celestial import (
    CelestialDataResponse,
    BodyInfo,
)
from app.models.db import Resource
from app.services.horizons import horizons_service
from app.services.cache import cache_service
from app.services.redis_cache import redis_cache, make_cache_key, get_ttl_seconds
from app.services.cache_preheat import preheat_all_caches, preheat_current_positions, preheat_historical_positions
from app.services.db_service import (
    celestial_body_service,
    position_service,
    nasa_cache_service,
    static_data_service,
    resource_service,
)
from app.services.orbit_service import orbit_service
from app.database import get_db

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/celestial", tags=["celestial"])
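
# Cache layering used by the routes below (as reflected in the imports above):
# Redis (persistent across restarts) -> in-process memory cache -> database
# caches (stored NASA responses and prefetched positions) -> NASA Horizons as
# the final fallback.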


# Pydantic models for CRUD
class CelestialBodyCreate(BaseModel):
    id: str
    name: str
    name_zh: Optional[str] = None
    type: str
    description: Optional[str] = None
    is_active: bool = True
    extra_data: Optional[Dict[str, Any]] = None


class CelestialBodyUpdate(BaseModel):
    name: Optional[str] = None
    name_zh: Optional[str] = None
    type: Optional[str] = None
    description: Optional[str] = None
    is_active: Optional[bool] = None
    extra_data: Optional[Dict[str, Any]] = None
@router.post("/", status_code=status.HTTP_201_CREATED)
async def create_celestial_body(
body_data: CelestialBodyCreate,
db: AsyncSession = Depends(get_db)
):
"""Create a new celestial body"""
    # Check if exists
    existing = await celestial_body_service.get_body_by_id(body_data.id, db)
    if existing:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Body with ID {body_data.id} already exists"
        )
    new_body = await celestial_body_service.create_body(body_data.dict(), db)
    return new_body
@router.put("/{body_id}")
async def update_celestial_body(
body_id: str,
body_data: CelestialBodyUpdate,
db: AsyncSession = Depends(get_db)
):
"""Update a celestial body"""
# Filter out None values
update_data = {k: v for k, v in body_data.dict().items() if v is not None}
updated = await celestial_body_service.update_body(body_id, update_data, db)
if not updated:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Body {body_id} not found"
)
return updated
@router.delete("/{body_id}")
async def delete_celestial_body(
body_id: str,
db: AsyncSession = Depends(get_db)
):
"""Delete a celestial body"""
deleted = await celestial_body_service.delete_body(body_id, db)
if not deleted:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Body {body_id} not found"
)
return {"message": "Body deleted successfully"}
@router.get("/positions", response_model=CelestialDataResponse)
async def get_celestial_positions(
start_time: Optional[str] = Query(
None,
description="Start time in ISO 8601 format (e.g., 2025-01-01T00:00:00Z)",
),
end_time: Optional[str] = Query(
None,
description="End time in ISO 8601 format",
),
step: str = Query(
"1d",
description="Time step (e.g., '1d' for 1 day, '12h' for 12 hours)",
),
body_ids: Optional[str] = Query(
None,
description="Comma-separated list of body IDs to fetch (e.g., '999,2000001')",
),
db: AsyncSession = Depends(get_db),
):
"""
Get positions of all celestial bodies for a time range
If only start_time is provided, returns a single snapshot.
If both start_time and end_time are provided, returns positions at intervals defined by step.
Use body_ids to filter specific bodies (e.g., body_ids=999,2000001 for Pluto and Ceres).
"""
    try:
        # Parse time strings
        start_dt = None if start_time is None else datetime.fromisoformat(start_time.replace("Z", "+00:00"))
        end_dt = None if end_time is None else datetime.fromisoformat(end_time.replace("Z", "+00:00"))
        # Parse body_ids filter
        body_id_list = None
        if body_ids:
            body_id_list = [bid.strip() for bid in body_ids.split(',')]
            logger.info(f"Filtering for bodies: {body_id_list}")
        # OPTIMIZATION: if no time is specified, return the most recent positions from the database
        if start_dt is None and end_dt is None:
            logger.info("No time specified - fetching most recent positions from database")
            # Check Redis cache first (persistent across restarts)
            start_str = "now"
            end_str = "now"
            redis_key = make_cache_key("positions", start_str, end_str, step)
            redis_cached = await redis_cache.get(redis_key)
            if redis_cached is not None:
                logger.info("Cache hit (Redis) for recent positions")
                return CelestialDataResponse(bodies=redis_cached)
            # Check memory cache (faster but not persistent)
            cached_data = cache_service.get(start_dt, end_dt, step)
            if cached_data is not None:
                logger.info("Cache hit (Memory) for recent positions")
                return CelestialDataResponse(bodies=cached_data)
            # Get all bodies from the database
            all_bodies = await celestial_body_service.get_all_bodies(db)
            # Filter bodies if body_ids specified
            if body_id_list:
                all_bodies = [b for b in all_bodies if b.id in body_id_list]
            # For each body, get the most recent position
            bodies_data = []
            now = datetime.utcnow()
            recent_window = now - timedelta(hours=24)  # Look for positions in the last 24 hours
            for body in all_bodies:
                try:
                    # Get the most recent position for this body
                    recent_positions = await position_service.get_positions(
                        body_id=body.id,
                        start_time=recent_window,
                        end_time=now,
                        session=db
                    )
                    if recent_positions and len(recent_positions) > 0:
                        # Use the most recent position
                        latest_pos = recent_positions[-1]
                        body_dict = {
                            "id": body.id,
                            "name": body.name,
                            "name_zh": body.name_zh,
                            "type": body.type,
                            "description": body.description,
                            "is_active": body.is_active,  # Include probe active status
                            "positions": [{
                                "time": latest_pos.time.isoformat(),
                                "x": latest_pos.x,
                                "y": latest_pos.y,
                                "z": latest_pos.z,
                            }]
                        }
                        bodies_data.append(body_dict)
                    elif body.type == 'probe' and body.is_active is False:
                        # For inactive probes without recent positions, fall back
                        # to the most recent position ever recorded
                        all_positions = await position_service.get_positions(
                            body_id=body.id,
                            start_time=None,
                            end_time=None,
                            session=db
                        )
                        if all_positions and len(all_positions) > 0:
                            # Use the last known position
                            last_pos = all_positions[-1]
                            body_dict = {
                                "id": body.id,
                                "name": body.name,
                                "name_zh": body.name_zh,
                                "type": body.type,
                                "description": body.description,
                                "is_active": False,
                                "positions": [{
                                    "time": last_pos.time.isoformat(),
                                    "x": last_pos.x,
                                    "y": last_pos.y,
                                    "z": last_pos.z,
                                }]
                            }
                            bodies_data.append(body_dict)
                        else:
                            # No position data at all; still include the probe with empty positions
                            body_dict = {
                                "id": body.id,
                                "name": body.name,
                                "name_zh": body.name_zh,
                                "type": body.type,
                                "description": body.description,
                                "is_active": False,
                                "positions": []
                            }
                            bodies_data.append(body_dict)
                            logger.info(f"Including inactive probe {body.name} with no position data")
                except Exception as e:
                    logger.warning(f"Error processing {body.name}: {e}")
                    # For inactive probes, still include them with empty positions
                    if body.type == 'probe' and body.is_active is False:
                        body_dict = {
                            "id": body.id,
                            "name": body.name,
                            "name_zh": body.name_zh,
                            "type": body.type,
                            "description": body.description,
                            "is_active": False,
                            "positions": []
                        }
                        bodies_data.append(body_dict)
                    continue
            # If we have recent data for all bodies, return it
            if len(bodies_data) == len(all_bodies):
                logger.info(f"✅ Returning recent positions from database ({len(bodies_data)} bodies) - FAST!")
                # Cache in memory
                cache_service.set(bodies_data, start_dt, end_dt, step)
                # Cache in Redis for persistence across restarts
                start_str = start_dt.isoformat() if start_dt else "now"
                end_str = end_dt.isoformat() if end_dt else "now"
                redis_key = make_cache_key("positions", start_str, end_str, step)
                await redis_cache.set(redis_key, bodies_data, get_ttl_seconds("current_positions"))
                return CelestialDataResponse(bodies=bodies_data)
            else:
                logger.info(f"Incomplete recent data ({len(bodies_data)}/{len(all_bodies)} bodies), falling back to Horizons")
                # Fall through to query Horizons below
        # Check Redis cache first (persistent across restarts)
        start_str = start_dt.isoformat() if start_dt else "now"
        end_str = end_dt.isoformat() if end_dt else "now"
        redis_key = make_cache_key("positions", start_str, end_str, step)
        redis_cached = await redis_cache.get(redis_key)
        if redis_cached is not None:
            logger.info("Cache hit (Redis) for positions")
            return CelestialDataResponse(bodies=redis_cached)
        # Check memory cache (faster but not persistent)
        cached_data = cache_service.get(start_dt, end_dt, step)
        if cached_data is not None:
            logger.info("Cache hit (Memory) for positions")
            return CelestialDataResponse(bodies=cached_data)
        # Check database cache (NASA API responses): for each body, see if we
        # have a cached NASA response for this exact query
        all_bodies = await celestial_body_service.get_all_bodies(db)
        # Filter bodies if body_ids specified
        if body_id_list:
            all_bodies = [b for b in all_bodies if b.id in body_id_list]
        use_db_cache = True
        db_cached_bodies = []
        for body in all_bodies:
            cached_response = await nasa_cache_service.get_cached_response(
                body.id, start_dt, end_dt, step, db
            )
            if cached_response:
                db_cached_bodies.append({
                    "id": body.id,
                    "name": body.name,
                    "type": body.type,
                    "positions": cached_response.get("positions", [])
                })
            else:
                use_db_cache = False
                break
        if use_db_cache and db_cached_bodies:
            logger.info("Cache hit (Database) for positions")
            # Cache in memory
            cache_service.set(db_cached_bodies, start_dt, end_dt, step)
            # Cache in Redis for faster access next time
            await redis_cache.set(redis_key, db_cached_bodies, get_ttl_seconds("historical_positions"))
            return CelestialDataResponse(bodies=db_cached_bodies)
        # Check the positions table for prefetched historical data; this is
        # faster than querying NASA Horizons for historical queries
        if start_dt and end_dt:
            logger.info(f"Checking positions table for historical data: {start_dt} to {end_dt}")
            all_bodies_positions = []
            has_complete_data = True
            # Remove timezone info for the database query (TIMESTAMP WITHOUT TIME ZONE)
            start_dt_naive = start_dt.replace(tzinfo=None)
            end_dt_naive = end_dt.replace(tzinfo=None)
            for body in all_bodies:
                # Query the positions table for this body in the time range
                positions = await position_service.get_positions(
                    body_id=body.id,
                    start_time=start_dt_naive,
                    end_time=end_dt_naive,
                    session=db
                )
                if positions and len(positions) > 0:
                    # Convert database positions to API format
                    all_bodies_positions.append({
                        "id": body.id,
                        "name": body.name,
                        "name_zh": body.name_zh,
                        "type": body.type,
                        "description": body.description,
                        "is_active": body.is_active,
                        "positions": [
                            {
                                "time": pos.time.isoformat(),
                                "x": pos.x,
                                "y": pos.y,
                                "z": pos.z,
                            }
                            for pos in positions
                        ]
                    })
                else:
                    # For inactive probes, missing data is expected and acceptable
                    if body.type == 'probe' and body.is_active is False:
                        logger.debug(f"Skipping inactive probe {body.name} with no data for {start_dt_naive}")
                        continue
                    # Missing data for an active body - need to query Horizons
                    has_complete_data = False
                    break
            if has_complete_data and all_bodies_positions:
                logger.info(f"Using prefetched historical data from positions table ({len(all_bodies_positions)} bodies)")
                # Cache in memory
                cache_service.set(all_bodies_positions, start_dt, end_dt, step)
                # Cache in Redis for faster access next time
                await redis_cache.set(redis_key, all_bodies_positions, get_ttl_seconds("historical_positions"))
                return CelestialDataResponse(bodies=all_bodies_positions)
            else:
                logger.info("Incomplete historical data in positions table, falling back to Horizons")
        # Query Horizons (no cache available) - fetch from database + Horizons API
        logger.info(f"Fetching celestial data from Horizons: start={start_dt}, end={end_dt}, step={step}")
        # Get all bodies from the database
        all_bodies = await celestial_body_service.get_all_bodies(db)
        # Filter bodies if body_ids specified
        if body_id_list:
            all_bodies = [b for b in all_bodies if b.id in body_id_list]
        bodies_data = []
        for body in all_bodies:
            try:
                # Special handling for the Sun (always at the origin)
                if body.id == "10":
                    sun_start = start_dt if start_dt else datetime.utcnow()
                    sun_end = end_dt if end_dt else sun_start
                    positions_list = [{"time": sun_start.isoformat(), "x": 0.0, "y": 0.0, "z": 0.0}]
                    if sun_start != sun_end:
                        positions_list.append({"time": sun_end.isoformat(), "x": 0.0, "y": 0.0, "z": 0.0})
                # Special handling for Cassini (mission ended 2017-09-15)
                elif body.id == "-82":
                    cassini_date = datetime(2017, 9, 15, 11, 58, 0)
                    pos_data = horizons_service.get_body_positions(body.id, cassini_date, cassini_date, step)
                    positions_list = [
                        {"time": p.time.isoformat(), "x": p.x, "y": p.y, "z": p.z}
                        for p in pos_data
                    ]
                else:
                    # Query NASA Horizons for other bodies
                    pos_data = horizons_service.get_body_positions(body.id, start_dt, end_dt, step)
                    positions_list = [
                        {"time": p.time.isoformat(), "x": p.x, "y": p.y, "z": p.z}
                        for p in pos_data
                    ]
                body_dict = {
                    "id": body.id,
                    "name": body.name,
                    "name_zh": body.name_zh,
                    "type": body.type,
                    "description": body.description,
                    "positions": positions_list
                }
                bodies_data.append(body_dict)
            except Exception as e:
                logger.error(f"Failed to get data for {body.name}: {str(e)}")
                # Continue with other bodies even if one fails
                continue
        # Save to database cache and position records
        for body_dict in bodies_data:
            body_id = body_dict["id"]
            positions = body_dict.get("positions", [])
            if positions:
                # Save NASA API response to cache
                await nasa_cache_service.save_response(
                    body_id=body_id,
                    start_time=start_dt,
                    end_time=end_dt,
                    step=step,
                    response_data={"positions": positions},
                    ttl_days=7,
                    session=db
                )
                # Save position data to the positions table
                position_records = []
                for pos in positions:
                    # Parse time and remove timezone for database storage
                    pos_time = pos["time"]
                    if isinstance(pos_time, str):
                        pos_time = datetime.fromisoformat(pos_time.replace("Z", "+00:00"))
                    # Remove timezone info for TIMESTAMP WITHOUT TIME ZONE
                    pos_time_naive = pos_time.replace(tzinfo=None)
                    position_records.append({
                        "time": pos_time_naive,
                        "x": pos["x"],
                        "y": pos["y"],
                        "z": pos["z"],
                        "vx": pos.get("vx"),
                        "vy": pos.get("vy"),
                        "vz": pos.get("vz"),
                    })
                if position_records:
                    await position_service.save_positions(
                        body_id=body_id,
                        positions=position_records,
                        source="nasa_horizons",
                        session=db
                    )
                    logger.info(f"Saved {len(position_records)} positions for {body_id}")
        # Cache in memory
        cache_service.set(bodies_data, start_dt, end_dt, step)
        # Cache in Redis for persistence across restarts
        start_str = start_dt.isoformat() if start_dt else "now"
        end_str = end_dt.isoformat() if end_dt else "now"
        redis_key = make_cache_key("positions", start_str, end_str, step)
        # Use a longer TTL for historical data fetched from Horizons
        ttl = get_ttl_seconds("historical_positions") if start_dt and end_dt else get_ttl_seconds("current_positions")
        await redis_cache.set(redis_key, bodies_data, ttl)
        logger.info(f"Cached data in Redis with key: {redis_key} (TTL: {ttl}s)")
        return CelestialDataResponse(bodies=bodies_data)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid time format: {str(e)}")
    except Exception as e:
        logger.error(f"Error fetching celestial positions: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to fetch data: {str(e)}")
@router.get("/info/{body_id}", response_model=BodyInfo)
async def get_body_info(body_id: str, db: AsyncSession = Depends(get_db)):
"""
Get detailed information about a specific celestial body
Args:
body_id: JPL Horizons ID (e.g., '-31' for Voyager 1, '399' for Earth)
"""
    body = await celestial_body_service.get_body_by_id(body_id, db)
    if not body:
        raise HTTPException(status_code=404, detail=f"Body {body_id} not found")
    # Extract extra_data fields
    extra_data = body.extra_data or {}
    return BodyInfo(
        id=body.id,
        name=body.name,
        type=body.type,
        description=body.description,
        launch_date=extra_data.get("launch_date"),
        status=extra_data.get("status"),
    )
@router.get("/list")
async def list_bodies(
body_type: Optional[str] = Query(None, description="Filter by body type"),
db: AsyncSession = Depends(get_db)
):
"""
Get a list of all available celestial bodies
"""
    bodies = await celestial_body_service.get_all_bodies(db, body_type)
    bodies_list = []
    for body in bodies:
        bodies_list.append(
            {
                "id": body.id,
                "name": body.name,
                "name_zh": body.name_zh,
                "type": body.type,
                "description": body.description,
                "is_active": body.is_active,
            }
        )
    return {"bodies": bodies_list}
@router.post("/cache/clear")
async def clear_cache():
"""
Clear the data cache (admin endpoint)
"""
cache_service.clear()
return {"message": "Cache cleared successfully"}
@router.post("/cache/preheat")
async def preheat_cache(
mode: str = Query("all", description="Preheat mode: 'all', 'current', 'historical'"),
days: int = Query(3, description="Number of days for historical preheat", ge=1, le=30)
):
"""
Manually trigger cache preheat (admin endpoint)
Args:
mode: 'all' (both current and historical), 'current' (current positions only), 'historical' (historical only)
days: Number of days to preheat for historical mode (default: 3, max: 30)
"""
    try:
        if mode == "all":
            await preheat_all_caches()
            return {"message": f"Successfully preheated all caches (current + {days} days historical)"}
        elif mode == "current":
            await preheat_current_positions()
            return {"message": "Successfully preheated current positions"}
        elif mode == "historical":
            await preheat_historical_positions(days=days)
            return {"message": f"Successfully preheated {days} days of historical positions"}
        else:
            raise HTTPException(status_code=400, detail=f"Invalid mode: {mode}. Use 'all', 'current', or 'historical'")
    except HTTPException:
        # Re-raise the 400 above instead of masking it as a 500
        raise
    except Exception as e:
        logger.error(f"Cache preheat failed: {e}")
        raise HTTPException(status_code=500, detail=f"Preheat failed: {str(e)}")


# Static Data CRUD Models
class StaticDataCreate(BaseModel):
    category: str
    name: str
    name_zh: Optional[str] = None
    data: Dict[str, Any]


class StaticDataUpdate(BaseModel):
    category: Optional[str] = None
    name: Optional[str] = None
    name_zh: Optional[str] = None
    data: Optional[Dict[str, Any]] = None


# === Static Data Endpoints ===
@router.get("/static/list")
async def list_static_data(db: AsyncSession = Depends(get_db)):
    """Get all static data items"""
    items = await static_data_service.get_all_items(db)
    result = []
    for item in items:
        result.append({
            "id": item.id,
            "category": item.category,
            "name": item.name,
            "name_zh": item.name_zh,
            "data": item.data
        })
    return {"items": result}
@router.post("/static", status_code=status.HTTP_201_CREATED)
async def create_static_data(
item_data: StaticDataCreate,
db: AsyncSession = Depends(get_db)
):
"""Create new static data"""
new_item = await static_data_service.create_static(item_data.dict(), db)
return new_item
@router.put("/static/{item_id}")
async def update_static_data(
item_id: int,
item_data: StaticDataUpdate,
db: AsyncSession = Depends(get_db)
):
"""Update static data"""
update_data = {k: v for k, v in item_data.dict().items() if v is not None}
updated = await static_data_service.update_static(item_id, update_data, db)
if not updated:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Static data {item_id} not found"
)
return updated
@router.delete("/static/{item_id}")
async def delete_static_data(
item_id: int,
db: AsyncSession = Depends(get_db)
):
"""Delete static data"""
deleted = await static_data_service.delete_static(item_id, db)
if not deleted:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Static data {item_id} not found"
)
return {"message": "Deleted successfully"}
@router.get("/static/categories")
async def get_static_categories(db: AsyncSession = Depends(get_db)):
"""
Get all available static data categories
"""
categories = await static_data_service.get_all_categories(db)
return {"categories": categories}
@router.get("/static/{category}")
async def get_static_data(
category: str,
db: AsyncSession = Depends(get_db)
):
"""
Get all static data items for a specific category
(e.g., 'star', 'constellation', 'galaxy')
"""
    items = await static_data_service.get_by_category(category, db)
    if not items:
        raise HTTPException(
            status_code=404,
            detail=f"No data found for category '{category}'"
        )
    result = []
    for item in items:
        result.append({
            "id": item.id,
            "name": item.name,
            "name_zh": item.name_zh,
            "data": item.data
        })
    return {"category": category, "items": result}


# === Resource Management Endpoints ===
@router.post("/resources/upload")
async def upload_resource(
    body_id: Optional[str] = None,
    resource_type: str = Query(..., description="Type: texture, model, icon, thumbnail, data"),
    file: UploadFile = File(...),
    db: AsyncSession = Depends(get_db)
):
    """
    Upload a resource file (texture, model, icon, etc.)
    """
    import os
    import uuid
    from pathlib import Path

    import aiofiles

    # Validate resource type
    valid_types = ["texture", "model", "icon", "thumbnail", "data"]
    if resource_type not in valid_types:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid resource_type. Must be one of: {valid_types}"
        )
    # Create the upload directory structure
    upload_dir = Path("upload") / resource_type
    upload_dir.mkdir(parents=True, exist_ok=True)
    # Generate a unique filename
    file_ext = os.path.splitext(file.filename)[1]
    unique_filename = f"{uuid.uuid4()}{file_ext}"
    file_path = upload_dir / unique_filename
    # Save file
    try:
        async with aiofiles.open(file_path, 'wb') as f:
            content = await file.read()
            await f.write(content)
        # Get file size
        file_size = os.path.getsize(file_path)
        # Create resource record
        resource = await resource_service.create_resource(
            {
                "body_id": body_id,
                "resource_type": resource_type,
                "file_path": str(file_path),
                "file_size": file_size,
                "mime_type": file.content_type,
            },
            db
        )
        logger.info(f"Uploaded resource: {file_path} ({file_size} bytes)")
        return {
            "id": resource.id,
            "resource_type": resource.resource_type,
            "file_path": resource.file_path,
            "file_size": resource.file_size,
            "message": "File uploaded successfully"
        }
    except Exception as e:
        # Clean up the file if the database operation fails
        if file_path.exists():
            os.remove(file_path)
        logger.error(f"Error uploading file: {e}")
        raise HTTPException(status_code=500, detail=f"Upload failed: {str(e)}")
@router.get("/resources/{body_id}")
async def get_body_resources(
body_id: str,
resource_type: Optional[str] = Query(None, description="Filter by resource type"),
db: AsyncSession = Depends(get_db)
):
"""
Get all resources associated with a celestial body
"""
    resources = await resource_service.get_resources_by_body(body_id, resource_type, db)
    result = []
    for resource in resources:
        result.append({
            "id": resource.id,
            "resource_type": resource.resource_type,
            "file_path": resource.file_path,
            "file_size": resource.file_size,
            "mime_type": resource.mime_type,
            "created_at": resource.created_at.isoformat(),
        })
    return {"body_id": body_id, "resources": result}
@router.delete("/resources/{resource_id}")
async def delete_resource(
resource_id: int,
db: AsyncSession = Depends(get_db)
):
"""
Delete a resource file and its database record
"""
    import os
    from sqlalchemy import select

    # Get the resource record
    result = await db.execute(
        select(Resource).where(Resource.id == resource_id)
    )
    resource = result.scalar_one_or_none()
    if not resource:
        raise HTTPException(status_code=404, detail="Resource not found")
    # Delete the file if it exists
    file_path = resource.file_path
    if os.path.exists(file_path):
        try:
            os.remove(file_path)
            logger.info(f"Deleted file: {file_path}")
        except Exception as e:
            logger.warning(f"Failed to delete file {file_path}: {e}")
    # Delete the database record
    deleted = await resource_service.delete_resource(resource_id, db)
    if deleted:
        return {"message": "Resource deleted successfully"}
    else:
        raise HTTPException(status_code=500, detail="Failed to delete resource")


# ============================================================
# Orbit Management APIs
# ============================================================
@router.get("/orbits")
async def get_orbits(
    body_type: Optional[str] = Query(None, description="Filter by body type (planet, dwarf_planet)"),
    db: AsyncSession = Depends(get_db)
):
    """
    Get all precomputed orbital data.

    Query parameters:
    - body_type: Optional filter by celestial body type (planet, dwarf_planet)

    Returns:
    - List of orbits with points, colors, and metadata
    """
logger.info(f"Fetching orbits (type filter: {body_type})")
try:
orbits = await orbit_service.get_all_orbits(db, body_type=body_type)
result = []
for orbit in orbits:
# Get body info
body = await celestial_body_service.get_body_by_id(orbit.body_id, db)
result.append({
"body_id": orbit.body_id,
"body_name": body.name if body else "Unknown",
"body_name_zh": body.name_zh if body else None,
"points": orbit.points,
"num_points": orbit.num_points,
"period_days": orbit.period_days,
"color": orbit.color,
"updated_at": orbit.updated_at.isoformat() if orbit.updated_at else None
})
logger.info(f"✅ Returning {len(result)} orbits")
return {"orbits": result}
except Exception as e:
logger.error(f"Failed to fetch orbits: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/admin/orbits/generate")
async def generate_orbits(
body_ids: Optional[str] = Query(None, description="Comma-separated body IDs to generate. If empty, generates for all planets and dwarf planets"),
db: AsyncSession = Depends(get_db)
):
"""
Generate orbital data for celestial bodies
This endpoint queries NASA Horizons API to get complete orbital paths
and stores them in the orbits table for fast frontend rendering.
Query parameters:
- body_ids: Optional comma-separated list of body IDs (e.g., "399,999")
If not provided, generates orbits for all planets and dwarf planets
Returns:
- List of generated orbits with success/failure status
"""
logger.info("🌌 Starting orbit generation...")
# Orbital periods in days (from astronomical data)
# Note: NASA Horizons data is limited to ~2199 for most bodies
# We use single complete orbits that fit within this range
ORBITAL_PERIODS = {
# Planets - single complete orbit
"199": 88.0, # Mercury
"299": 224.7, # Venus
"399": 365.25, # Earth
"499": 687.0, # Mars
"599": 4333.0, # Jupiter (11.86 years)
"699": 10759.0, # Saturn (29.46 years)
"799": 30687.0, # Uranus (84.01 years)
"899": 60190.0, # Neptune (164.79 years)
# Dwarf Planets - single complete orbit
"999": 90560.0, # Pluto (247.94 years - full orbit)
"2000001": 1680.0, # Ceres (4.6 years)
"136199": 203500.0, # Eris (557 years - full orbit)
"136108": 104000.0, # Haumea (285 years - full orbit)
"136472": 112897.0, # Makemake (309 years - full orbit)
}
# Default colors for orbits
DEFAULT_COLORS = {
"199": "#8C7853", # Mercury - brownish
"299": "#FFC649", # Venus - yellowish
"399": "#4A90E2", # Earth - blue
"499": "#CD5C5C", # Mars - red
"599": "#DAA520", # Jupiter - golden
"699": "#F4A460", # Saturn - sandy brown
"799": "#4FD1C5", # Uranus - cyan
"899": "#4169E1", # Neptune - royal blue
"999": "#8B7355", # Pluto - brown
"2000001": "#9E9E9E", # Ceres - gray
"136199": "#E0E0E0", # Eris - light gray
"136108": "#D4A574", # Haumea - tan
"136472": "#C49A6C", # Makemake - beige
}
    try:
        # Determine which bodies to generate orbits for
        if body_ids:
            # Parse the comma-separated list
            target_body_ids = [bid.strip() for bid in body_ids.split(",")]
            bodies_to_process = []
            for bid in target_body_ids:
                body = await celestial_body_service.get_body_by_id(bid, db)
                if body:
                    bodies_to_process.append(body)
                else:
                    logger.warning(f"Body {bid} not found in database")
        else:
            # Get all planets and dwarf planets
            all_bodies = await celestial_body_service.get_all_bodies(db)
            bodies_to_process = [
                b for b in all_bodies
                if b.type in ["planet", "dwarf_planet"] and b.id in ORBITAL_PERIODS
            ]
        if not bodies_to_process:
            raise HTTPException(status_code=400, detail="No valid bodies to process")
        logger.info(f"📋 Generating orbits for {len(bodies_to_process)} bodies")
        results = []
        success_count = 0
        failure_count = 0
        for body in bodies_to_process:
            try:
                period = ORBITAL_PERIODS.get(body.id)
                if not period:
                    logger.warning(f"No orbital period defined for {body.name}, skipping")
                    continue
                color = DEFAULT_COLORS.get(body.id, "#CCCCCC")
                # Generate orbit
                orbit = await orbit_service.generate_orbit(
                    body_id=body.id,
                    body_name=body.name_zh or body.name,
                    period_days=period,
                    color=color,
                    session=db,
                    horizons_service=horizons_service
                )
                results.append({
                    "body_id": body.id,
                    "body_name": body.name_zh or body.name,
                    "status": "success",
                    "num_points": orbit.num_points,
                    "period_days": orbit.period_days
                })
                success_count += 1
            except Exception as e:
                logger.error(f"Failed to generate orbit for {body.name}: {e}")
                results.append({
                    "body_id": body.id,
                    "body_name": body.name_zh or body.name,
                    "status": "failed",
                    "error": str(e)
                })
                failure_count += 1
        logger.info(f"🎉 Orbit generation complete: {success_count} succeeded, {failure_count} failed")
        return {
            "message": f"Generated {success_count} orbits ({failure_count} failed)",
            "results": results
        }
    except HTTPException:
        # Re-raise the 400 above instead of masking it as a 500
        raise
    except Exception as e:
        logger.error(f"Orbit generation failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.delete("/admin/orbits/{body_id}")
async def delete_orbit(
body_id: str,
db: AsyncSession = Depends(get_db)
):
"""Delete orbit data for a specific body"""
logger.info(f"Deleting orbit for body {body_id}")
deleted = await orbit_service.delete_orbit(body_id, db)
if deleted:
return {"message": f"Orbit for {body_id} deleted successfully"}
else:
raise HTTPException(status_code=404, detail="Orbit not found")