"""
|
|
Celestial Position Query API routes
|
|
Handles the core position data query with multi-layer caching strategy
|
|
"""
|
|
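# Example request (illustrative; host and port are deployment-specific):
#   GET /celestial/positions?start_time=2025-01-01T00:00:00Z&end_time=2025-01-07T00:00:00Z&step=1d&body_ids=999,2000001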
import logging
from datetime import datetime, timedelta
from fastapi import APIRouter, HTTPException, Depends, Query
from sqlalchemy.ext.asyncio import AsyncSession
from typing import Optional

from app.database import get_db
from app.models.celestial import CelestialDataResponse
from app.services.horizons import horizons_service
from app.services.cache import cache_service
from app.services.redis_cache import redis_cache, make_cache_key, get_ttl_seconds
from app.services.db_service import (
    celestial_body_service,
    position_service,
    nasa_cache_service,
)

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/celestial", tags=["celestial-position"])

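# Cache layers consulted by GET /celestial/positions, in order (see the route
# docstring below): Redis -> in-process memory -> stored NASA responses ->
# prefetched positions table -> live NASA Horizons query. A Horizons fetch
# writes its results back to the other layers on the way out.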
@router.get("/positions", response_model=CelestialDataResponse)
async def get_celestial_positions(
    start_time: Optional[str] = Query(
        None,
        description="Start time in ISO 8601 format (e.g., 2025-01-01T00:00:00Z)",
    ),
    end_time: Optional[str] = Query(
        None,
        description="End time in ISO 8601 format",
    ),
    step: str = Query(
        "1d",
        description="Time step (e.g., '1d' for 1 day, '12h' for 12 hours)",
    ),
    body_ids: Optional[str] = Query(
        None,
        description="Comma-separated list of body IDs to fetch (e.g., '999,2000001')",
    ),
    db: AsyncSession = Depends(get_db),
):
    """
    Get positions of all celestial bodies for a time range.

    Multi-layer caching strategy:
    1. Redis cache (persistent across restarts)
    2. Memory cache (fastest)
    3. Database cache (NASA API responses)
    4. Positions table (prefetched historical data)
    5. NASA Horizons API (fallback)

    If only start_time is provided, returns a single snapshot.
    If both start_time and end_time are provided, returns positions at intervals defined by step.
    Use body_ids to filter specific bodies (e.g., body_ids=999,2000001 for Pluto and Ceres).
    """
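    # Query modes (illustrative):
    #   GET /celestial/positions                                 -> latest stored snapshot
    #   GET /celestial/positions?start_time=2025-06-01T00:00:00Z -> single snapshot
    #   GET /celestial/positions?start_time=2025-06-01T00:00:00Z&end_time=2025-06-08T00:00:00Z&step=12h
    #                                                            -> series at 12-hour steps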
    try:
        # Parse time strings
        start_dt = None if start_time is None else datetime.fromisoformat(start_time.replace("Z", "+00:00"))
        end_dt = None if end_time is None else datetime.fromisoformat(end_time.replace("Z", "+00:00"))

        # Parse body_ids filter
        body_id_list = None
        if body_ids:
            body_id_list = [bid.strip() for bid in body_ids.split(',')]
            logger.info(f"Filtering for bodies: {body_id_list}")

        # OPTIMIZATION: If no time specified, return most recent positions from database
        if start_dt is None and end_dt is None:
            logger.info("No time specified - fetching most recent positions from database")

            # Check Redis cache first (persistent across restarts)
            start_str = "now"
            end_str = "now"
            body_ids_str = body_ids if body_ids else "all"
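            # The key layout is delegated to make_cache_key; for this branch it
            # bundles ("positions", "now", "now", step, body_ids_str), yielding
            # something like "positions:now:now:1d:all" (illustrative, not a
            # guaranteed format).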
            redis_key = make_cache_key("positions", start_str, end_str, step, body_ids_str)
            redis_cached = await redis_cache.get(redis_key)
            if redis_cached is not None:
                logger.info("Cache hit (Redis) for recent positions")
                return CelestialDataResponse(bodies=redis_cached)

            # Check memory cache (faster but not persistent)
            cached_data = cache_service.get(start_dt, end_dt, step)
            if cached_data is not None:
                logger.info("Cache hit (Memory) for recent positions")
                return CelestialDataResponse(bodies=cached_data)

            # Get all bodies from database
            all_bodies = await celestial_body_service.get_all_bodies(db)

            # Filter to only Solar System bodies (system_id = 1, or NULL for legacy data)
            # Exclude stars and exoplanets from other star systems
            all_bodies = [b for b in all_bodies if b.system_id is None or b.system_id == 1]

            # Filter bodies if body_ids specified
            if body_id_list:
                all_bodies = [b for b in all_bodies if b.id in body_id_list]

            # For each body, get the most recent position
            bodies_data = []
            now = datetime.utcnow()
            recent_window = now - timedelta(hours=24)  # Look for positions in last 24 hours

            for body in all_bodies:
                try:
                    # Get most recent position for this body
                    recent_positions = await position_service.get_positions(
                        body_id=body.id,
                        start_time=recent_window,
                        end_time=now,
                        session=db
                    )

                    if recent_positions and len(recent_positions) > 0:
                        # Use the most recent position
                        latest_pos = recent_positions[-1]
                        body_dict = {
                            "id": body.id,
                            "name": body.name,
                            "name_zh": body.name_zh,
                            "type": body.type,
                            "description": body.description,
                            "is_active": body.is_active,  # Include probe active status
                            "positions": [{
                                "time": latest_pos.time.isoformat(),
                                "x": latest_pos.x,
                                "y": latest_pos.y,
                                "z": latest_pos.z,
                            }]
                        }
                        bodies_data.append(body_dict)
                    else:
                        # For inactive probes without recent positions, try to get the last known position
                        if body.type == 'probe' and body.is_active is False:
                            # Get the most recent position ever recorded
                            all_positions = await position_service.get_positions(
                                body_id=body.id,
                                start_time=None,
                                end_time=None,
                                session=db
                            )

                            if all_positions and len(all_positions) > 0:
                                # Use the last known position
                                last_pos = all_positions[-1]
                                body_dict = {
                                    "id": body.id,
                                    "name": body.name,
                                    "name_zh": body.name_zh,
                                    "type": body.type,
                                    "description": body.description,
                                    "is_active": False,
                                    "positions": [{
                                        "time": last_pos.time.isoformat(),
                                        "x": last_pos.x,
                                        "y": last_pos.y,
                                        "z": last_pos.z,
                                    }]
                                }
                                bodies_data.append(body_dict)
                            else:
                                # No position data at all; still include with empty positions
                                body_dict = {
                                    "id": body.id,
                                    "name": body.name,
                                    "name_zh": body.name_zh,
                                    "type": body.type,
                                    "description": body.description,
                                    "is_active": False,
                                    "positions": []
                                }
                                bodies_data.append(body_dict)
                                logger.info(f"Including inactive probe {body.name} with no position data")
                except Exception as e:
                    logger.warning(f"Error processing {body.name}: {e}")
                    # For inactive probes, still try to include them
                    if body.type == 'probe' and body.is_active is False:
                        body_dict = {
                            "id": body.id,
                            "name": body.name,
                            "name_zh": body.name_zh,
                            "type": body.type,
                            "description": body.description,
                            "is_active": False,
                            "positions": []
                        }
                        bodies_data.append(body_dict)
                    continue

            # If we have recent data for all bodies, return it
            if len(bodies_data) == len(all_bodies):
                logger.info(f"Returning recent positions from database ({len(bodies_data)} bodies)")
                # Cache in memory
                cache_service.set(bodies_data, start_dt, end_dt, step)
                # Cache in Redis for persistence across restarts
                start_str = start_dt.isoformat() if start_dt else "now"
                end_str = end_dt.isoformat() if end_dt else "now"
                body_ids_str = body_ids if body_ids else "all"
                redis_key = make_cache_key("positions", start_str, end_str, step, body_ids_str)
                await redis_cache.set(redis_key, bodies_data, get_ttl_seconds("current_positions"))
                return CelestialDataResponse(bodies=bodies_data)
            else:
                logger.info(f"Incomplete recent data ({len(bodies_data)}/{len(all_bodies)} bodies), falling back to Horizons")
                # Fall through to query Horizons below

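        # General path: a time range was given, or the recent-data shortcut was
        # incomplete. Walk the remaining cache layers before hitting Horizons.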
        # Check Redis cache first (persistent across restarts)
        start_str = start_dt.isoformat() if start_dt else "now"
        end_str = end_dt.isoformat() if end_dt else "now"
        body_ids_str = body_ids if body_ids else "all"  # Include body_ids in cache key
        redis_key = make_cache_key("positions", start_str, end_str, step, body_ids_str)
        redis_cached = await redis_cache.get(redis_key)
        if redis_cached is not None:
            logger.info("Cache hit (Redis) for positions")
            return CelestialDataResponse(bodies=redis_cached)

        # Check memory cache (faster but not persistent)
        cached_data = cache_service.get(start_dt, end_dt, step)
        if cached_data is not None:
            logger.info("Cache hit (Memory) for positions")
            return CelestialDataResponse(bodies=cached_data)

        # Check database cache (NASA API responses)
        # For each body, check if we have a cached NASA response
        all_bodies = await celestial_body_service.get_all_bodies(db)

        # Filter to only Solar System bodies (system_id = 1, or NULL for legacy data)
        # Exclude stars and exoplanets from other star systems
        all_bodies = [b for b in all_bodies if b.system_id is None or b.system_id == 1]

        # Filter bodies if body_ids specified
        if body_id_list:
            logger.info(f"Filtering bodies from {len(all_bodies)} total. Requested IDs: {body_id_list}")
            all_bodies = [b for b in all_bodies if b.id in body_id_list]
            logger.info(f"After filtering: {len(all_bodies)} bodies. IDs: {[b.id for b in all_bodies]}")

        use_db_cache = True
        db_cached_bodies = []

        for body in all_bodies:
            cached_response = await nasa_cache_service.get_cached_response(
                body.id, start_dt, end_dt, step, db
            )
            if cached_response:
                db_cached_bodies.append({
                    "id": body.id,
                    "name": body.name,
                    "type": body.type,
                    "positions": cached_response.get("positions", [])
                })
            else:
                use_db_cache = False
                break

        if use_db_cache and db_cached_bodies:
            logger.info("Cache hit (Database) for positions")
            # Cache in memory
            cache_service.set(db_cached_bodies, start_dt, end_dt, step)
            # Cache in Redis for faster access next time
            await redis_cache.set(redis_key, db_cached_bodies, get_ttl_seconds("historical_positions"))
            return CelestialDataResponse(bodies=db_cached_bodies)

        # Check positions table for historical data (prefetched data)
        # This is faster than querying NASA Horizons for historical queries
        if start_dt and end_dt:
            logger.info(f"Checking positions table for historical data: {start_dt} to {end_dt}")
            all_bodies_positions = []
            has_complete_data = True

            # Remove timezone info for database query (TIMESTAMP WITHOUT TIME ZONE)
            start_dt_naive = start_dt.replace(tzinfo=None)
            end_dt_naive = end_dt.replace(tzinfo=None)

            for body in all_bodies:
                # Query positions table for this body in the time range
                positions = await position_service.get_positions(
                    body_id=body.id,
                    start_time=start_dt_naive,
                    end_time=end_dt_naive,
                    session=db
                )

                if positions and len(positions) > 0:
                    # Convert database positions to API format
                    all_bodies_positions.append({
                        "id": body.id,
                        "name": body.name,
                        "name_zh": body.name_zh,
                        "type": body.type,
                        "description": body.description,
                        "is_active": body.is_active,
                        "positions": [
                            {
                                "time": pos.time.isoformat(),
                                "x": pos.x,
                                "y": pos.y,
                                "z": pos.z,
                            }
                            for pos in positions
                        ]
                    })
                else:
                    # For inactive probes, missing data is expected and acceptable
                    if body.type == 'probe' and body.is_active is False:
                        logger.debug(f"Skipping inactive probe {body.name} with no data for {start_dt_naive}")
                        continue

                    # Missing data for an active body - need to query Horizons
                    has_complete_data = False
                    break

            if has_complete_data and all_bodies_positions:
                logger.info(f"Using prefetched historical data from positions table ({len(all_bodies_positions)} bodies)")
                # Cache in memory
                cache_service.set(all_bodies_positions, start_dt, end_dt, step)
                # Cache in Redis for faster access next time
                await redis_cache.set(redis_key, all_bodies_positions, get_ttl_seconds("historical_positions"))
                return CelestialDataResponse(bodies=all_bodies_positions)
            else:
                logger.info("Incomplete historical data in positions table, falling back to Horizons")

        # Query Horizons (no cache available) - fetch from database + Horizons API
        logger.info(f"Fetching celestial data from Horizons: start={start_dt}, end={end_dt}, step={step}")

        # Get all bodies from database
        all_bodies = await celestial_body_service.get_all_bodies(db)

        # Filter to only Solar System bodies (system_id = 1, or NULL for legacy data)
        # Exclude stars and exoplanets from other star systems
        all_bodies = [b for b in all_bodies if b.system_id is None or b.system_id == 1]

        # Filter bodies if body_ids specified
        if body_id_list:
            all_bodies = [b for b in all_bodies if b.id in body_id_list]

        bodies_data = []
        for body in all_bodies:
            try:
                # Special handling for the Sun (always at the origin)
                if body.id == "10":
                    sun_start = start_dt if start_dt else datetime.utcnow()
                    sun_end = end_dt if end_dt else sun_start

                    positions_list = [{"time": sun_start.isoformat(), "x": 0.0, "y": 0.0, "z": 0.0}]
                    if sun_start != sun_end:
                        positions_list.append({"time": sun_end.isoformat(), "x": 0.0, "y": 0.0, "z": 0.0})

                # Special handling for Cassini (mission ended 2017-09-15)
                elif body.id == "-82":
                    cassini_date = datetime(2017, 9, 15, 11, 58, 0)
                    pos_data = await horizons_service.get_body_positions(body.id, cassini_date, cassini_date, step)
                    positions_list = [
                        {"time": p.time.isoformat(), "x": p.x, "y": p.y, "z": p.z}
                        for p in pos_data
                    ]

                else:
                    # Download from NASA Horizons
                    pos_data = await horizons_service.get_body_positions(body.id, start_dt, end_dt, step)
                    positions_list = [
                        {"time": p.time.isoformat(), "x": p.x, "y": p.y, "z": p.z}
                        for p in pos_data
                    ]

                body_dict = {
                    "id": body.id,
                    "name": body.name,
                    "name_zh": body.name_zh,
                    "type": body.type,
                    "description": body.description,
                    "positions": positions_list
                }
                bodies_data.append(body_dict)

            except Exception as e:
                logger.error(f"Failed to get data for {body.name}: {str(e)}")
                # Continue with other bodies even if one fails
                continue

        # Save to database cache and position records
        for body_dict in bodies_data:
            body_id = body_dict["id"]
            positions = body_dict.get("positions", [])

            if positions:
                # Save NASA API response to cache
                await nasa_cache_service.save_response(
                    body_id=body_id,
                    start_time=start_dt,
                    end_time=end_dt,
                    step=step,
                    response_data={"positions": positions},
                    ttl_days=7,
                    session=db
                )

                # Save position data to positions table
                position_records = []
                for pos in positions:
                    # Parse time string and remove timezone for database storage
                    pos_time = pos["time"]
                    if isinstance(pos_time, str):
                        pos_time = datetime.fromisoformat(pos_time.replace("Z", "+00:00"))
                    # Remove timezone info for TIMESTAMP WITHOUT TIME ZONE
                    pos_time_naive = pos_time.replace(tzinfo=None)

                    position_records.append({
                        "time": pos_time_naive,
                        "x": pos["x"],
                        "y": pos["y"],
                        "z": pos["z"],
                        "vx": pos.get("vx"),
                        "vy": pos.get("vy"),
                        "vz": pos.get("vz"),
                    })

                if position_records:
                    await position_service.save_positions(
                        body_id=body_id,
                        positions=position_records,
                        source="nasa_horizons",
                        session=db
                    )
                    logger.info(f"Saved {len(position_records)} positions for {body_id}")

        # Cache in memory
        cache_service.set(bodies_data, start_dt, end_dt, step)
        # Cache in Redis for persistence across restarts
        start_str = start_dt.isoformat() if start_dt else "now"
        end_str = end_dt.isoformat() if end_dt else "now"
        body_ids_str = body_ids if body_ids else "all"  # Keep the key consistent with the lookups above
        redis_key = make_cache_key("positions", start_str, end_str, step, body_ids_str)
        # Use a longer TTL for historical data that was fetched from Horizons
        ttl = get_ttl_seconds("historical_positions") if start_dt and end_dt else get_ttl_seconds("current_positions")
        await redis_cache.set(redis_key, bodies_data, ttl)
        logger.info(f"Cached data in Redis with key: {redis_key} (TTL: {ttl}s)")

        return CelestialDataResponse(bodies=bodies_data)

    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid time format: {str(e)}")
    except Exception as e:
        logger.exception(f"Error fetching celestial positions: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to fetch data: {str(e)}")

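# Minimal client sketch (illustrative; httpx is not a dependency of this module,
# and the base URL is an assumption):
#
#   import asyncio
#   import httpx
#
#   async def fetch_positions():
#       async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
#           resp = await client.get("/celestial/positions", params={"step": "1d"})
#           resp.raise_for_status()
#           return resp.json()["bodies"]
#
#   asyncio.run(fetch_positions())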