"""
API routes for celestial data
"""
from datetime import datetime, timedelta
from fastapi import APIRouter, HTTPException, Query, Depends, UploadFile, File, status, BackgroundTasks
from sqlalchemy.ext.asyncio import AsyncSession
from typing import Optional, Dict, Any
import logging
from pydantic import BaseModel
from app.models.celestial import (
    CelestialDataResponse,
    BodyInfo,
)
from app.models.db import Resource, Task
from app.services.horizons import horizons_service
from app.services.cache import cache_service
from app.services.redis_cache import redis_cache, make_cache_key, get_ttl_seconds
from app.services.cache_preheat import preheat_all_caches, preheat_current_positions, preheat_historical_positions
from app.services.db_service import (
    celestial_body_service,
    position_service,
    nasa_cache_service,
    static_data_service,
    resource_service,
)
from app.services.orbit_service import orbit_service
from app.services.system_settings_service import system_settings_service
from app.services.task_service import task_service
from app.services.nasa_worker import download_positions_task
from app.database import get_db
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/celestial", tags=["celestial"])
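
# All routes below are mounted under the router prefix, so e.g. the create
# endpoint is reachable at POST /celestial/ (illustrative; the final URL also
# depends on how the application mounts this router).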


# Pydantic models for CRUD
class CelestialBodyCreate(BaseModel):
    id: str
    name: str
    name_zh: Optional[str] = None
    type: str
    description: Optional[str] = None
    is_active: bool = True
    extra_data: Optional[Dict[str, Any]] = None


class CelestialBodyUpdate(BaseModel):
    name: Optional[str] = None
    name_zh: Optional[str] = None
    type: Optional[str] = None
    description: Optional[str] = None
    is_active: Optional[bool] = None
    extra_data: Optional[Dict[str, Any]] = None


class ResourceUpdate(BaseModel):
    extra_data: Optional[Dict[str, Any]] = None
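
# A hedged, illustrative request body for CelestialBodyCreate (field names come
# from the model above; the values are made up for demonstration):
#
#   {
#     "id": "-31",
#     "name": "Voyager 1",
#     "name_zh": "旅行者1号",
#     "type": "probe",
#     "description": "Interstellar probe launched in 1977",
#     "is_active": true,
#     "extra_data": {"launch_date": "1977-09-05"}
#   }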
@router.post("/", status_code=status.HTTP_201_CREATED)
async def create_celestial_body(
body_data: CelestialBodyCreate,
db: AsyncSession = Depends(get_db)
):
"""Create a new celestial body"""
# Check if exists
existing = await celestial_body_service.get_body_by_id(body_data.id, db)
if existing:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Body with ID {body_data.id} already exists"
)
new_body = await celestial_body_service.create_body(body_data.dict(), db)
return new_body
@router.get("/search")
async def search_celestial_body(
name: str = Query(..., description="Body name or ID to search in NASA Horizons")
):
"""
Search for a celestial body in NASA Horizons database by name or ID
Returns body information if found, including suggested ID and full name
"""
logger.info(f"Searching for celestial body: {name}")
try:
result = horizons_service.search_body_by_name(name)
if result["success"]:
logger.info(f"Found body: {result['full_name']}")
return {
"success": True,
"data": {
"id": result["id"],
"name": result["name"],
"full_name": result["full_name"],
}
}
else:
logger.warning(f"Search failed: {result['error']}")
return {
"success": False,
"error": result["error"]
}
except Exception as e:
logger.error(f"Search error: {e}")
raise HTTPException(
status_code=500,
detail=f"Search failed: {str(e)}"
)
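
# Illustrative usage (host and port are assumptions):
#   GET http://localhost:8000/celestial/search?name=Voyager%201
# On a hit the response carries {"success": true, "data": {...}}; on a miss the
# endpoint still returns 200 with {"success": false, "error": ...}.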
@router.get("/{body_id}/nasa-data")
async def get_celestial_nasa_data(
body_id: str,
db: AsyncSession = Depends(get_db)
):
"""
Get raw text data from NASA Horizons for a celestial body
(Hacker terminal style output)
"""
# Check if body exists
body = await celestial_body_service.get_body_by_id(body_id, db)
if not body:
raise HTTPException(status_code=404, detail="Celestial body not found")
try:
# Fetch raw text from Horizons using the body_id
# Note: body.id corresponds to JPL Horizons ID
raw_text = await horizons_service.get_object_data_raw(body.id)
return {"id": body.id, "name": body.name, "raw_data": raw_text}
except Exception as e:
logger.error(f"Failed to fetch raw data for {body_id}: {e}")
raise HTTPException(status_code=500, detail=f"Failed to fetch NASA data: {str(e)}")
@router.put("/{body_id}")
async def update_celestial_body(
body_id: str,
body_data: CelestialBodyUpdate,
db: AsyncSession = Depends(get_db)
):
"""Update a celestial body"""
# Filter out None values
update_data = {k: v for k, v in body_data.dict().items() if v is not None}
updated = await celestial_body_service.update_body(body_id, update_data, db)
if not updated:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Body {body_id} not found"
)
return updated
@router.delete("/{body_id}")
async def delete_celestial_body(
body_id: str,
db: AsyncSession = Depends(get_db)
):
"""Delete a celestial body"""
deleted = await celestial_body_service.delete_body(body_id, db)
if not deleted:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Body {body_id} not found"
)
return {"message": "Body deleted successfully"}
@router.get("/positions", response_model=CelestialDataResponse)
async def get_celestial_positions(
start_time: Optional[str] = Query(
None,
description="Start time in ISO 8601 format (e.g., 2025-01-01T00:00:00Z)",
),
end_time: Optional[str] = Query(
None,
description="End time in ISO 8601 format",
),
step: str = Query(
"1d",
description="Time step (e.g., '1d' for 1 day, '12h' for 12 hours)",
),
body_ids: Optional[str] = Query(
None,
description="Comma-separated list of body IDs to fetch (e.g., '999,2000001')",
),
db: AsyncSession = Depends(get_db),
):
"""
Get positions of all celestial bodies for a time range
If only start_time is provided, returns a single snapshot.
If both start_time and end_time are provided, returns positions at intervals defined by step.
Use body_ids to filter specific bodies (e.g., body_ids=999,2000001 for Pluto and Ceres).
"""
    try:
        # Parse the time strings
        start_dt = None if start_time is None else datetime.fromisoformat(start_time.replace("Z", "+00:00"))
        end_dt = None if end_time is None else datetime.fromisoformat(end_time.replace("Z", "+00:00"))
        # Parse the body_ids filter
        body_id_list = None
        if body_ids:
            body_id_list = [bid.strip() for bid in body_ids.split(',')]
            logger.info(f"Filtering for bodies: {body_id_list}")
        # OPTIMIZATION: if no time is specified, return the most recent positions from the database
        if start_dt is None and end_dt is None:
            logger.info("No time specified - fetching most recent positions from database")
            # Check the Redis cache first (persistent across restarts)
            start_str = "now"
            end_str = "now"
            redis_key = make_cache_key("positions", start_str, end_str, step)
            redis_cached = await redis_cache.get(redis_key)
            if redis_cached is not None:
                logger.info("Cache hit (Redis) for recent positions")
                return CelestialDataResponse(bodies=redis_cached)
            # Check the memory cache (faster but not persistent)
            cached_data = cache_service.get(start_dt, end_dt, step)
            if cached_data is not None:
                logger.info("Cache hit (Memory) for recent positions")
                return CelestialDataResponse(bodies=cached_data)
            # Get all bodies from the database
            all_bodies = await celestial_body_service.get_all_bodies(db)
            # Filter bodies if body_ids was specified
            if body_id_list:
                all_bodies = [b for b in all_bodies if b.id in body_id_list]
            # For each body, get the most recent position
            bodies_data = []
            now = datetime.utcnow()
            recent_window = now - timedelta(hours=24)  # Look for positions in the last 24 hours
            for body in all_bodies:
                try:
                    # Get the most recent position for this body
                    recent_positions = await position_service.get_positions(
                        body_id=body.id,
                        start_time=recent_window,
                        end_time=now,
                        session=db
                    )
                    if recent_positions:
                        # Use the most recent position
                        latest_pos = recent_positions[-1]
                        body_dict = {
                            "id": body.id,
                            "name": body.name,
                            "name_zh": body.name_zh,
                            "type": body.type,
                            "description": body.description,
                            "is_active": body.is_active,  # Include probe active status
                            "positions": [{
                                "time": latest_pos.time.isoformat(),
                                "x": latest_pos.x,
                                "y": latest_pos.y,
                                "z": latest_pos.z,
                            }]
                        }
                        bodies_data.append(body_dict)
                    elif body.type == 'probe' and body.is_active is False:
                        # For inactive probes without recent positions, fall back
                        # to the most recent position ever recorded
                        all_positions = await position_service.get_positions(
                            body_id=body.id,
                            start_time=None,
                            end_time=None,
                            session=db
                        )
                        if all_positions:
                            # Use the last known position
                            last_pos = all_positions[-1]
                            body_dict = {
                                "id": body.id,
                                "name": body.name,
                                "name_zh": body.name_zh,
                                "type": body.type,
                                "description": body.description,
                                "is_active": False,
                                "positions": [{
                                    "time": last_pos.time.isoformat(),
                                    "x": last_pos.x,
                                    "y": last_pos.y,
                                    "z": last_pos.z,
                                }]
                            }
                            bodies_data.append(body_dict)
                        else:
                            # No position data at all; still include the probe with empty positions
                            body_dict = {
                                "id": body.id,
                                "name": body.name,
                                "name_zh": body.name_zh,
                                "type": body.type,
                                "description": body.description,
                                "is_active": False,
                                "positions": []
                            }
                            bodies_data.append(body_dict)
                            logger.info(f"Including inactive probe {body.name} with no position data")
                except Exception as e:
                    logger.warning(f"Error processing {body.name}: {e}")
                    # Still include inactive probes even when the lookup fails
                    if body.type == 'probe' and body.is_active is False:
                        body_dict = {
                            "id": body.id,
                            "name": body.name,
                            "name_zh": body.name_zh,
                            "type": body.type,
                            "description": body.description,
                            "is_active": False,
                            "positions": []
                        }
                        bodies_data.append(body_dict)
                    continue
            # If we have recent data for all bodies, return it
            if len(bodies_data) == len(all_bodies):
                logger.info(f"✅ Returning recent positions from database ({len(bodies_data)} bodies) - FAST!")
                # Cache in memory
                cache_service.set(bodies_data, start_dt, end_dt, step)
                # Cache in Redis for persistence across restarts
                start_str = start_dt.isoformat() if start_dt else "now"
                end_str = end_dt.isoformat() if end_dt else "now"
                redis_key = make_cache_key("positions", start_str, end_str, step)
                await redis_cache.set(redis_key, bodies_data, get_ttl_seconds("current_positions"))
                return CelestialDataResponse(bodies=bodies_data)
            else:
                logger.info(f"Incomplete recent data ({len(bodies_data)}/{len(all_bodies)} bodies), falling back to Horizons")
                # Fall through to query Horizons below
        # Check the Redis cache first (persistent across restarts)
        start_str = start_dt.isoformat() if start_dt else "now"
        end_str = end_dt.isoformat() if end_dt else "now"
        redis_key = make_cache_key("positions", start_str, end_str, step)
        redis_cached = await redis_cache.get(redis_key)
        if redis_cached is not None:
            logger.info("Cache hit (Redis) for positions")
            return CelestialDataResponse(bodies=redis_cached)
        # Check the memory cache (faster but not persistent)
        cached_data = cache_service.get(start_dt, end_dt, step)
        if cached_data is not None:
            logger.info("Cache hit (Memory) for positions")
            return CelestialDataResponse(bodies=cached_data)
        # Check the database cache (stored NASA API responses):
        # for each body, see whether we have a cached NASA response
        all_bodies = await celestial_body_service.get_all_bodies(db)
        # Filter bodies if body_ids was specified
        if body_id_list:
            all_bodies = [b for b in all_bodies if b.id in body_id_list]
        use_db_cache = True
        db_cached_bodies = []
        for body in all_bodies:
            cached_response = await nasa_cache_service.get_cached_response(
                body.id, start_dt, end_dt, step, db
            )
            if cached_response:
                db_cached_bodies.append({
                    "id": body.id,
                    "name": body.name,
                    "type": body.type,
                    "positions": cached_response.get("positions", [])
                })
            else:
                use_db_cache = False
                break
        if use_db_cache and db_cached_bodies:
            logger.info("Cache hit (Database) for positions")
            # Cache in memory
            cache_service.set(db_cached_bodies, start_dt, end_dt, step)
            # Cache in Redis for faster access next time
            await redis_cache.set(redis_key, db_cached_bodies, get_ttl_seconds("historical_positions"))
            return CelestialDataResponse(bodies=db_cached_bodies)
        # Check the positions table for historical (prefetched) data;
        # this is faster than querying NASA Horizons for historical queries
        if start_dt and end_dt:
            logger.info(f"Checking positions table for historical data: {start_dt} to {end_dt}")
            all_bodies_positions = []
            has_complete_data = True
            # Remove timezone info for the database query (TIMESTAMP WITHOUT TIME ZONE)
            start_dt_naive = start_dt.replace(tzinfo=None)
            end_dt_naive = end_dt.replace(tzinfo=None)
            for body in all_bodies:
                # Query the positions table for this body in the time range
                positions = await position_service.get_positions(
                    body_id=body.id,
                    start_time=start_dt_naive,
                    end_time=end_dt_naive,
                    session=db
                )
                if positions:
                    # Convert database positions to the API format
                    all_bodies_positions.append({
                        "id": body.id,
                        "name": body.name,
                        "name_zh": body.name_zh,
                        "type": body.type,
                        "description": body.description,
                        "is_active": body.is_active,
                        "positions": [
                            {
                                "time": pos.time.isoformat(),
                                "x": pos.x,
                                "y": pos.y,
                                "z": pos.z,
                            }
                            for pos in positions
                        ]
                    })
                else:
                    # For inactive probes, missing data is expected and acceptable
                    if body.type == 'probe' and body.is_active is False:
                        logger.debug(f"Skipping inactive probe {body.name} with no data for {start_dt_naive}")
                        continue
                    # Missing data for an active body - we need to query Horizons
                    has_complete_data = False
                    break
            if has_complete_data and all_bodies_positions:
                logger.info(f"Using prefetched historical data from positions table ({len(all_bodies_positions)} bodies)")
                # Cache in memory
                cache_service.set(all_bodies_positions, start_dt, end_dt, step)
                # Cache in Redis for faster access next time
                await redis_cache.set(redis_key, all_bodies_positions, get_ttl_seconds("historical_positions"))
                return CelestialDataResponse(bodies=all_bodies_positions)
            else:
                logger.info("Incomplete historical data in positions table, falling back to Horizons")
        # No cache available - fetch from the database + Horizons API
        logger.info(f"Fetching celestial data from Horizons: start={start_dt}, end={end_dt}, step={step}")
        # Get all bodies from the database
        all_bodies = await celestial_body_service.get_all_bodies(db)
        # Filter bodies if body_ids was specified
        if body_id_list:
            all_bodies = [b for b in all_bodies if b.id in body_id_list]
        bodies_data = []
        for body in all_bodies:
            try:
                # Special handling for the Sun (always at the origin)
                if body.id == "10":
                    sun_start = start_dt if start_dt else datetime.utcnow()
                    sun_end = end_dt if end_dt else sun_start
                    positions_list = [{"time": sun_start.isoformat(), "x": 0.0, "y": 0.0, "z": 0.0}]
                    if sun_start != sun_end:
                        positions_list.append({"time": sun_end.isoformat(), "x": 0.0, "y": 0.0, "z": 0.0})
                # Special handling for Cassini (mission ended 2017-09-15)
                elif body.id == "-82":
                    cassini_date = datetime(2017, 9, 15, 11, 58, 0)
                    pos_data = horizons_service.get_body_positions(body.id, cassini_date, cassini_date, step)
                    positions_list = [
                        {"time": p.time.isoformat(), "x": p.x, "y": p.y, "z": p.z}
                        for p in pos_data
                    ]
                else:
                    # Query NASA Horizons for all other bodies
                    pos_data = horizons_service.get_body_positions(body.id, start_dt, end_dt, step)
                    positions_list = [
                        {"time": p.time.isoformat(), "x": p.x, "y": p.y, "z": p.z}
                        for p in pos_data
                    ]
                body_dict = {
                    "id": body.id,
                    "name": body.name,
                    "name_zh": body.name_zh,
                    "type": body.type,
                    "description": body.description,
                    "positions": positions_list
                }
                bodies_data.append(body_dict)
            except Exception as e:
                logger.error(f"Failed to get data for {body.name}: {str(e)}")
                # Continue with the other bodies even if one fails
                continue
        # Save to the database cache and the position records
        for body_dict in bodies_data:
            body_id = body_dict["id"]
            positions = body_dict.get("positions", [])
            if positions:
                # Save the NASA API response to the cache
                await nasa_cache_service.save_response(
                    body_id=body_id,
                    start_time=start_dt,
                    end_time=end_dt,
                    step=step,
                    response_data={"positions": positions},
                    ttl_days=7,
                    session=db
                )
                # Save position data to the positions table
                position_records = []
                for pos in positions:
                    # Parse the time and strip the timezone for database storage
                    pos_time = pos["time"]
                    if isinstance(pos_time, str):
                        pos_time = datetime.fromisoformat(pos["time"].replace("Z", "+00:00"))
                    # Remove timezone info for TIMESTAMP WITHOUT TIME ZONE
                    pos_time_naive = pos_time.replace(tzinfo=None) if hasattr(pos_time, 'replace') else pos_time
                    position_records.append({
                        "time": pos_time_naive,
                        "x": pos["x"],
                        "y": pos["y"],
                        "z": pos["z"],
                        "vx": pos.get("vx"),
                        "vy": pos.get("vy"),
                        "vz": pos.get("vz"),
                    })
                if position_records:
                    await position_service.save_positions(
                        body_id=body_id,
                        positions=position_records,
                        source="nasa_horizons",
                        session=db
                    )
                    logger.info(f"Saved {len(position_records)} positions for {body_id}")
        # Cache in memory
        cache_service.set(bodies_data, start_dt, end_dt, step)
        # Cache in Redis for persistence across restarts
        start_str = start_dt.isoformat() if start_dt else "now"
        end_str = end_dt.isoformat() if end_dt else "now"
        redis_key = make_cache_key("positions", start_str, end_str, step)
        # Use a longer TTL for historical data fetched from Horizons
        ttl = get_ttl_seconds("historical_positions") if start_dt and end_dt else get_ttl_seconds("current_positions")
        await redis_cache.set(redis_key, bodies_data, ttl)
        logger.info(f"Cached data in Redis with key: {redis_key} (TTL: {ttl}s)")
        return CelestialDataResponse(bodies=bodies_data)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid time format: {str(e)}")
    except Exception as e:
        logger.error(f"Error fetching celestial positions: {str(e)}")
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Failed to fetch data: {str(e)}")
@router.get("/info/{body_id}", response_model=BodyInfo)
async def get_body_info(body_id: str, db: AsyncSession = Depends(get_db)):
"""
Get detailed information about a specific celestial body
Args:
body_id: JPL Horizons ID (e.g., '-31' for Voyager 1, '399' for Earth)
"""
body = await celestial_body_service.get_body_by_id(body_id, db)
if not body:
raise HTTPException(status_code=404, detail=f"Body {body_id} not found")
# Extract extra_data fields
extra_data = body.extra_data or {}
return BodyInfo(
id=body.id,
name=body.name,
type=body.type,
description=body.description,
launch_date=extra_data.get("launch_date"),
status=extra_data.get("status"),
)
@router.get("/list")
async def list_bodies(
body_type: Optional[str] = Query(None, description="Filter by body type"),
db: AsyncSession = Depends(get_db)
):
"""
Get a list of all available celestial bodies
"""
bodies = await celestial_body_service.get_all_bodies(db, body_type)
bodies_list = []
for body in bodies:
# Get resources for this body
resources = await resource_service.get_resources_by_body(body.id, None, db)
# Group resources by type
resources_by_type = {}
for resource in resources:
if resource.resource_type not in resources_by_type:
resources_by_type[resource.resource_type] = []
resources_by_type[resource.resource_type].append({
"id": resource.id,
"file_path": resource.file_path,
"file_size": resource.file_size,
"mime_type": resource.mime_type,
})
bodies_list.append(
{
"id": body.id,
"name": body.name,
"name_zh": body.name_zh,
"type": body.type,
"description": body.description,
"is_active": body.is_active,
"resources": resources_by_type,
"has_resources": len(resources) > 0,
}
)
return {"bodies": bodies_list}
@router.post("/cache/clear")
async def clear_cache():
"""
Clear the data cache (admin endpoint)
Clears both memory cache and Redis cache
"""
# Clear memory cache
cache_service.clear()
# Clear Redis cache
positions_cleared = await redis_cache.clear_pattern("positions:*")
nasa_cleared = await redis_cache.clear_pattern("nasa:*")
total_cleared = positions_cleared + nasa_cleared
return {
"message": f"Cache cleared successfully ({total_cleared} Redis keys deleted)",
"memory_cache": "cleared",
"redis_cache": {
"positions_keys": positions_cleared,
"nasa_keys": nasa_cleared,
"total": total_cleared
}
}
@router.post("/cache/preheat")
async def preheat_cache(
mode: str = Query("all", description="Preheat mode: 'all', 'current', 'historical'"),
days: int = Query(3, description="Number of days for historical preheat", ge=1, le=30)
):
"""
Manually trigger cache preheat (admin endpoint)
Args:
mode: 'all' (both current and historical), 'current' (current positions only), 'historical' (historical only)
days: Number of days to preheat for historical mode (default: 3, max: 30)
"""
try:
if mode == "all":
await preheat_all_caches()
return {"message": f"Successfully preheated all caches (current + {days} days historical)"}
elif mode == "current":
await preheat_current_positions()
return {"message": "Successfully preheated current positions"}
elif mode == "historical":
await preheat_historical_positions(days=days)
return {"message": f"Successfully preheated {days} days of historical positions"}
else:
raise HTTPException(status_code=400, detail=f"Invalid mode: {mode}. Use 'all', 'current', or 'historical'")
except Exception as e:
logger.error(f"Cache preheat failed: {e}")
raise HTTPException(status_code=500, detail=f"Preheat failed: {str(e)}")


# Static Data CRUD models
class StaticDataCreate(BaseModel):
    category: str
    name: str
    name_zh: Optional[str] = None
    data: Dict[str, Any]


class StaticDataUpdate(BaseModel):
    category: Optional[str] = None
    name: Optional[str] = None
    name_zh: Optional[str] = None
    data: Optional[Dict[str, Any]] = None


# === Static Data Endpoints ===
@router.get("/static/list")
async def list_static_data(db: AsyncSession = Depends(get_db)):
    """Get all static data items"""
    items = await static_data_service.get_all_items(db)
    result = []
    for item in items:
        result.append({
            "id": item.id,
            "category": item.category,
            "name": item.name,
            "name_zh": item.name_zh,
            "data": item.data
        })
    return {"items": result}
@router.post("/static", status_code=status.HTTP_201_CREATED)
async def create_static_data(
item_data: StaticDataCreate,
db: AsyncSession = Depends(get_db)
):
"""Create new static data"""
new_item = await static_data_service.create_static(item_data.dict(), db)
return new_item
@router.put("/static/{item_id}")
async def update_static_data(
item_id: int,
item_data: StaticDataUpdate,
db: AsyncSession = Depends(get_db)
):
"""Update static data"""
update_data = {k: v for k, v in item_data.dict().items() if v is not None}
updated = await static_data_service.update_static(item_id, update_data, db)
if not updated:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Static data {item_id} not found"
)
return updated
@router.delete("/static/{item_id}")
async def delete_static_data(
item_id: int,
db: AsyncSession = Depends(get_db)
):
"""Delete static data"""
deleted = await static_data_service.delete_static(item_id, db)
if not deleted:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Static data {item_id} not found"
)
return {"message": "Deleted successfully"}
@router.get("/static/categories")
async def get_static_categories(db: AsyncSession = Depends(get_db)):
"""
Get all available static data categories
"""
categories = await static_data_service.get_all_categories(db)
return {"categories": categories}
@router.get("/static/{category}")
async def get_static_data(
category: str,
db: AsyncSession = Depends(get_db)
):
"""
Get all static data items for a specific category
(e.g., 'star', 'constellation', 'galaxy')
"""
items = await static_data_service.get_by_category(category, db)
if not items:
raise HTTPException(
status_code=404,
detail=f"No data found for category '{category}'"
)
result = []
for item in items:
result.append({
"id": item.id,
"name": item.name,
"name_zh": item.name_zh,
"data": item.data
})
return {"category": category, "items": result}


# === Resource Management Endpoints ===
@router.post("/resources/upload")
async def upload_resource(
    body_id: str = Query(..., description="Celestial body ID"),
    resource_type: str = Query(..., description="Type: texture, model, icon, thumbnail, data"),
    file: UploadFile = File(...),
    db: AsyncSession = Depends(get_db)
):
    """
    Upload a resource file (texture, model, icon, etc.).

    Upload directory logic:
    - Probes (type='probe'): uploaded to the 'model' directory
    - Others (planet, satellite, etc.): uploaded to the 'texture' directory
    """
    import os
    import aiofiles
    from pathlib import Path
    # Validate the resource type
    valid_types = ["texture", "model", "icon", "thumbnail", "data"]
    if resource_type not in valid_types:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid resource_type. Must be one of: {valid_types}"
        )
    # Get the celestial body to determine the upload directory
    body = await celestial_body_service.get_body_by_id(body_id, db)
    if not body:
        raise HTTPException(status_code=404, detail=f"Celestial body {body_id} not found")
    # Determine the upload directory based on body type:
    # probes -> model directory, others -> texture directory
    if body.type == 'probe' and resource_type in ['model', 'texture']:
        upload_subdir = 'model'
    elif resource_type in ['model', 'texture']:
        upload_subdir = 'texture'
    else:
        # For icon, thumbnail, and data, use resource_type as the directory
        upload_subdir = resource_type
    # Create the upload directory structure
    upload_dir = Path("upload") / upload_subdir
    upload_dir.mkdir(parents=True, exist_ok=True)
    # Use the original filename
    original_filename = file.filename
    file_path = upload_dir / original_filename
    # If the file already exists, append a timestamp to make the name unique
    if file_path.exists():
        name_without_ext = os.path.splitext(original_filename)[0]
        file_ext = os.path.splitext(original_filename)[1]
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        original_filename = f"{name_without_ext}_{timestamp}{file_ext}"
        file_path = upload_dir / original_filename
    # Save the file
    try:
        async with aiofiles.open(file_path, 'wb') as f:
            content = await file.read()
            await f.write(content)
        # Get the file size
        file_size = os.path.getsize(file_path)
        # Store the path relative to the upload root
        relative_path = f"{upload_subdir}/{original_filename}"
        # Determine the MIME type
        mime_type = file.content_type
        # Create the resource record
        resource = await resource_service.create_resource(
            {
                "body_id": body_id,
                "resource_type": resource_type,
                "file_path": relative_path,
                "file_size": file_size,
                "mime_type": mime_type,
            },
            db
        )
        # Commit the transaction
        await db.commit()
        await db.refresh(resource)
        logger.info(f"Uploaded resource for {body.name} ({body.type}): {relative_path} ({file_size} bytes)")
        return {
            "id": resource.id,
            "resource_type": resource.resource_type,
            "file_path": resource.file_path,
            "file_size": resource.file_size,
            "upload_directory": upload_subdir,
            "message": f"File uploaded successfully to {upload_subdir} directory"
        }
    except Exception as e:
        # Roll back the transaction
        await db.rollback()
        # Clean up the file if the database operation failed
        if file_path.exists():
            os.remove(file_path)
        logger.error(f"Error uploading file: {e}")
        raise HTTPException(status_code=500, detail=f"Upload failed: {str(e)}")
@router.get("/resources/{body_id}")
async def get_body_resources(
body_id: str,
resource_type: Optional[str] = Query(None, description="Filter by resource type"),
db: AsyncSession = Depends(get_db)
):
"""
Get all resources associated with a celestial body
"""
resources = await resource_service.get_resources_by_body(body_id, resource_type, db)
result = []
for resource in resources:
result.append({
"id": resource.id,
"resource_type": resource.resource_type,
"file_path": resource.file_path,
"file_size": resource.file_size,
"mime_type": resource.mime_type,
"created_at": resource.created_at.isoformat(),
"extra_data": resource.extra_data,
})
return {"body_id": body_id, "resources": result}
@router.delete("/resources/{resource_id}")
async def delete_resource(
resource_id: int,
db: AsyncSession = Depends(get_db)
):
"""
Delete a resource file and its database record
"""
import os
from sqlalchemy import select
# Get resource record
result = await db.execute(
select(Resource).where(Resource.id == resource_id)
)
resource = result.scalar_one_or_none()
if not resource:
raise HTTPException(status_code=404, detail="Resource not found")
# Delete file if it exists
file_path = resource.file_path
if os.path.exists(file_path):
try:
os.remove(file_path)
logger.info(f"Deleted file: {file_path}")
except Exception as e:
logger.warning(f"Failed to delete file {file_path}: {e}")
# Delete database record
deleted = await resource_service.delete_resource(resource_id, db)
if deleted:
return {"message": "Resource deleted successfully"}
else:
raise HTTPException(status_code=500, detail="Failed to delete resource")
@router.put("/resources/{resource_id}")
async def update_resource(
resource_id: int,
update_data: ResourceUpdate,
db: AsyncSession = Depends(get_db)
):
"""
Update resource metadata (e.g., scale parameter for models)
"""
from sqlalchemy import select, update
# Get resource record
result = await db.execute(
select(Resource).where(Resource.id == resource_id)
)
resource = result.scalar_one_or_none()
if not resource:
raise HTTPException(status_code=404, detail="Resource not found")
# Update extra_data
await db.execute(
update(Resource)
.where(Resource.id == resource_id)
.values(extra_data=update_data.extra_data)
)
await db.commit()
# Get updated resource
result = await db.execute(
select(Resource).where(Resource.id == resource_id)
)
updated_resource = result.scalar_one_or_none()
return {
"id": updated_resource.id,
"extra_data": updated_resource.extra_data,
"message": "Resource updated successfully"
}


# ============================================================
# Orbit Management APIs
# ============================================================
@router.get("/orbits")
async def get_orbits(
    body_type: Optional[str] = Query(None, description="Filter by body type (planet, dwarf_planet)"),
    db: AsyncSession = Depends(get_db)
):
    """
    Get all precomputed orbital data.

    Query parameters:
    - body_type: Optional filter by celestial body type (planet, dwarf_planet)

    Returns:
    - List of orbits with points, colors, and metadata
    """
    logger.info(f"Fetching orbits (type filter: {body_type})")
    try:
        orbits = await orbit_service.get_all_orbits(db, body_type=body_type)
        result = []
        for orbit in orbits:
            # Get the body info
            body = await celestial_body_service.get_body_by_id(orbit.body_id, db)
            result.append({
                "body_id": orbit.body_id,
                "body_name": body.name if body else "Unknown",
                "body_name_zh": body.name_zh if body else None,
                "points": orbit.points,
                "num_points": orbit.num_points,
                "period_days": orbit.period_days,
                "color": orbit.color,
                "updated_at": orbit.updated_at.isoformat() if orbit.updated_at else None
            })
        logger.info(f"✅ Returning {len(result)} orbits")
        return {"orbits": result}
    except Exception as e:
        logger.error(f"Failed to fetch orbits: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/admin/orbits/generate")
async def generate_orbits(
body_ids: Optional[str] = Query(None, description="Comma-separated body IDs to generate. If empty, generates for all planets and dwarf planets"),
db: AsyncSession = Depends(get_db)
):
"""
Generate orbital data for celestial bodies
This endpoint queries NASA Horizons API to get complete orbital paths
and stores them in the orbits table for fast frontend rendering.
Query parameters:
- body_ids: Optional comma-separated list of body IDs (e.g., "399,999")
If not provided, generates orbits for all planets and dwarf planets
Returns:
- List of generated orbits with success/failure status
"""
logger.info("🌌 Starting orbit generation...")
# Orbital periods in days (from astronomical data)
# Note: NASA Horizons data is limited to ~2199 for most bodies
# We use single complete orbits that fit within this range
ORBITAL_PERIODS = {
# Planets - single complete orbit
"199": 88.0, # Mercury
"299": 224.7, # Venus
"399": 365.25, # Earth
"499": 687.0, # Mars
"599": 4333.0, # Jupiter (11.86 years)
"699": 10759.0, # Saturn (29.46 years)
"799": 30687.0, # Uranus (84.01 years)
"899": 60190.0, # Neptune (164.79 years)
# Dwarf Planets - single complete orbit
"999": 90560.0, # Pluto (247.94 years - full orbit)
"2000001": 1680.0, # Ceres (4.6 years)
"136199": 203500.0, # Eris (557 years - full orbit)
"136108": 104000.0, # Haumea (285 years - full orbit)
"136472": 112897.0, # Makemake (309 years - full orbit)
}
# Default colors for orbits
DEFAULT_COLORS = {
"199": "#8C7853", # Mercury - brownish
"299": "#FFC649", # Venus - yellowish
"399": "#4A90E2", # Earth - blue
"499": "#CD5C5C", # Mars - red
"599": "#DAA520", # Jupiter - golden
"699": "#F4A460", # Saturn - sandy brown
"799": "#4FD1C5", # Uranus - cyan
"899": "#4169E1", # Neptune - royal blue
"999": "#8B7355", # Pluto - brown
"2000001": "#9E9E9E", # Ceres - gray
"136199": "#E0E0E0", # Eris - light gray
"136108": "#D4A574", # Haumea - tan
"136472": "#C49A6C", # Makemake - beige
}
try:
# Determine which bodies to generate orbits for
if body_ids:
# Parse comma-separated list
target_body_ids = [bid.strip() for bid in body_ids.split(",")]
bodies_to_process = []
for bid in target_body_ids:
body = await celestial_body_service.get_body_by_id(bid, db)
if body:
bodies_to_process.append(body)
else:
logger.warning(f"Body {bid} not found in database")
else:
# Get all planets and dwarf planets
all_bodies = await celestial_body_service.get_all_bodies(db)
bodies_to_process = [
b for b in all_bodies
if b.type in ["planet", "dwarf_planet"] and b.id in ORBITAL_PERIODS
]
if not bodies_to_process:
raise HTTPException(status_code=400, detail="No valid bodies to process")
logger.info(f"📋 Generating orbits for {len(bodies_to_process)} bodies")
results = []
success_count = 0
failure_count = 0
for body in bodies_to_process:
try:
period = ORBITAL_PERIODS.get(body.id)
if not period:
logger.warning(f"No orbital period defined for {body.name}, skipping")
continue
color = DEFAULT_COLORS.get(body.id, "#CCCCCC")
# Generate orbit
orbit = await orbit_service.generate_orbit(
body_id=body.id,
body_name=body.name_zh or body.name,
period_days=period,
color=color,
session=db,
horizons_service=horizons_service
)
results.append({
"body_id": body.id,
"body_name": body.name_zh or body.name,
"status": "success",
"num_points": orbit.num_points,
"period_days": orbit.period_days
})
success_count += 1
except Exception as e:
logger.error(f"Failed to generate orbit for {body.name}: {e}")
results.append({
"body_id": body.id,
"body_name": body.name_zh or body.name,
"status": "failed",
"error": str(e)
})
failure_count += 1
logger.info(f"🎉 Orbit generation complete: {success_count} succeeded, {failure_count} failed")
return {
"message": f"Generated {success_count} orbits ({failure_count} failed)",
"results": results
}
except Exception as e:
logger.error(f"Orbit generation failed: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/admin/orbits/{body_id}")
async def delete_orbit(
body_id: str,
db: AsyncSession = Depends(get_db)
):
"""Delete orbit data for a specific body"""
logger.info(f"Deleting orbit for body {body_id}")
deleted = await orbit_service.delete_orbit(body_id, db)
if deleted:
return {"message": f"Orbit for {body_id} deleted successfully"}
else:
raise HTTPException(status_code=404, detail="Orbit not found")


# ============================================================
# NASA Data Download APIs
# ============================================================
@router.get("/positions/download/bodies")
async def get_downloadable_bodies(
    db: AsyncSession = Depends(get_db)
):
    """
    Get the list of celestial bodies available for NASA data download, grouped by type.

    Returns:
    - Dictionary with body types as keys and lists of bodies as values
    """
    logger.info("Fetching downloadable bodies for NASA data download")
    try:
        # Get all celestial bodies
        all_bodies = await celestial_body_service.get_all_bodies(db)
        # Group bodies by type
        grouped_bodies = {}
        for body in all_bodies:
            if body.type not in grouped_bodies:
                grouped_bodies[body.type] = []
            grouped_bodies[body.type].append({
                "id": body.id,
                "name": body.name,
                "name_zh": body.name_zh,
                "type": body.type,
                "is_active": body.is_active,
                "description": body.description
            })
        # Sort each group by name
        for body_type in grouped_bodies:
            grouped_bodies[body_type].sort(key=lambda x: x["name"])
        logger.info(f"✅ Returning {len(all_bodies)} bodies in {len(grouped_bodies)} groups")
        return {"bodies": grouped_bodies}
    except Exception as e:
        logger.error(f"Failed to fetch downloadable bodies: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/positions/download/status")
async def get_download_status(
body_id: str = Query(..., description="Celestial body ID"),
start_date: str = Query(..., description="Start date (YYYY-MM-DD)"),
end_date: str = Query(..., description="End date (YYYY-MM-DD)"),
db: AsyncSession = Depends(get_db)
):
"""
Get data availability status for a specific body within a date range
Returns:
- List of dates that have position data
"""
logger.info(f"Checking download status for {body_id} from {start_date} to {end_date}")
try:
# Parse dates
start_dt = datetime.strptime(start_date, "%Y-%m-%d")
end_dt = datetime.strptime(end_date, "%Y-%m-%d").replace(hour=23, minute=59, second=59)
# Get available dates
available_dates = await position_service.get_available_dates(
body_id=body_id,
start_time=start_dt,
end_time=end_dt,
session=db
)
# Convert dates to ISO format strings
available_date_strings = [
date.isoformat() if hasattr(date, 'isoformat') else str(date)
for date in available_dates
]
logger.info(f"✅ Found {len(available_date_strings)} dates with data")
return {
"body_id": body_id,
"start_date": start_date,
"end_date": end_date,
"available_dates": available_date_strings
}
except ValueError as e:
raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
except Exception as e:
logger.error(f"Failed to check download status: {e}")
raise HTTPException(status_code=500, detail=str(e))


class DownloadPositionRequest(BaseModel):
    body_ids: list[str]
    dates: list[str]  # List of dates in YYYY-MM-DD format
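
# A hedged example payload for the download endpoints below (the IDs shown are
# the Horizons IDs this module already uses for Earth and Pluto; dates are made up):
#
#   {"body_ids": ["399", "999"], "dates": ["2025-01-01", "2025-01-02"]}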
@router.post("/positions/download-async")
async def download_positions_async(
request: DownloadPositionRequest,
background_tasks: BackgroundTasks,
db: AsyncSession = Depends(get_db)
):
"""
Start asynchronous background task to download position data
"""
# Create task record
task = await task_service.create_task(
db,
task_type="nasa_download",
description=f"Download positions for {len(request.body_ids)} bodies on {len(request.dates)} dates",
params=request.dict(),
created_by=None
)
# Add to background tasks
background_tasks.add_task(
download_positions_task,
task.id,
request.body_ids,
request.dates
)
return {
"message": "Download task started",
"task_id": task.id
}
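
# Typical flow (sketch): POST /celestial/positions/download-async returns a
# task_id; the client then polls GET /celestial/tasks/{task_id}, which merges
# the database record with any fresher progress stored in Redis.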
@router.post("/positions/download")
async def download_positions(
request: DownloadPositionRequest,
db: AsyncSession = Depends(get_db)
):
"""
Download position data for specified bodies on specified dates (Synchronous)
This endpoint will:
1. Query NASA Horizons API for the position at 00:00:00 UTC on each date
2. Save the data to the positions table
3. Return the downloaded data
Args:
- body_ids: List of celestial body IDs
- dates: List of dates (YYYY-MM-DD format)
Returns:
- Summary of downloaded data with success/failure status
"""
logger.info(f"Downloading positions (sync) for {len(request.body_ids)} bodies on {len(request.dates)} dates")
try:
results = []
total_success = 0
total_failed = 0
for body_id in request.body_ids:
# Check if body exists
body = await celestial_body_service.get_body_by_id(body_id, db)
if not body:
results.append({
"body_id": body_id,
"status": "failed",
"error": "Body not found"
})
total_failed += 1
continue
body_results = {
"body_id": body_id,
"body_name": body.name_zh or body.name,
"dates": []
}
for date_str in request.dates:
try:
# Parse date and set to midnight UTC
target_date = datetime.strptime(date_str, "%Y-%m-%d")
# Check if data already exists for this date
existing = await position_service.get_positions(
body_id=body_id,
start_time=target_date,
end_time=target_date.replace(hour=23, minute=59, second=59),
session=db
)
if existing and len(existing) > 0:
body_results["dates"].append({
"date": date_str,
"status": "exists",
"message": "Data already exists"
})
total_success += 1
continue
# Download from NASA Horizons
positions = horizons_service.get_body_positions(
body_id=body_id,
start_time=target_date,
end_time=target_date,
step="1d"
)
if positions and len(positions) > 0:
# Save to database
position_data = [{
"time": target_date,
"x": positions[0].x,
"y": positions[0].y,
"z": positions[0].z,
"vx": getattr(positions[0], 'vx', None),
"vy": getattr(positions[0], 'vy', None),
"vz": getattr(positions[0], 'vz', None),
}]
await position_service.save_positions(
body_id=body_id,
positions=position_data,
source="nasa_horizons",
session=db
)
body_results["dates"].append({
"date": date_str,
"status": "success",
"position": {
"x": positions[0].x,
"y": positions[0].y,
"z": positions[0].z
}
})
total_success += 1
else:
body_results["dates"].append({
"date": date_str,
"status": "failed",
"error": "No data returned from NASA"
})
total_failed += 1
except Exception as e:
logger.error(f"Failed to download {body_id} on {date_str}: {e}")
body_results["dates"].append({
"date": date_str,
"status": "failed",
"error": str(e)
})
total_failed += 1
results.append(body_results)
return {
"message": f"Downloaded {total_success} positions ({total_failed} failed)",
"total_success": total_success,
"total_failed": total_failed,
"results": results
}
except Exception as e:
logger.error(f"Download failed: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/tasks")
async def list_tasks(
limit: int = 20,
offset: int = 0,
db: AsyncSession = Depends(get_db)
):
"""List background tasks"""
from sqlalchemy import select, desc
result = await db.execute(
select(Task).order_by(desc(Task.created_at)).limit(limit).offset(offset)
)
tasks = result.scalars().all()
return tasks
@router.get("/tasks/{task_id}")
async def get_task_status(
task_id: int,
db: AsyncSession = Depends(get_db)
):
"""Get task status"""
# Check Redis first for real-time progress
redis_data = await task_service.get_task_progress_from_redis(task_id)
# Get DB record
task = await task_service.get_task(db, task_id)
if not task:
raise HTTPException(status_code=404, detail="Task not found")
# Merge Redis data if available (Redis has fresher progress)
response = {
"id": task.id,
"task_type": task.task_type,
"status": task.status,
"progress": task.progress,
"description": task.description,
"created_at": task.created_at,
"started_at": task.started_at,
"completed_at": task.completed_at,
"error_message": task.error_message,
"result": task.result
}
if redis_data:
response["status"] = redis_data.get("status", task.status)
response["progress"] = redis_data.get("progress", task.progress)
if "error" in redis_data:
response["error_message"] = redis_data["error"]
return response