"""
|
|
Cache preheating service
|
|
Loads data from database to Redis on startup
|
|
"""
|
|
import logging
|
|
from datetime import datetime, timedelta
|
|
from typing import List, Dict, Any
|
|
|
|
from app.database import get_db
|
|
from app.services.redis_cache import redis_cache, make_cache_key, get_ttl_seconds
|
|
from app.services.db_service import celestial_body_service, position_service
|
|
|
|
logger = logging.getLogger(__name__)
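
# Shape of the payload each preheat function writes to Redis (derived from the
# body_dict construction below): a list of bodies, each carrying a single-point
# "positions" array, e.g.
#   [{"id": 1, "name": "Earth", "name_zh": "...", "type": "planet",
#     "description": "...",
#     "positions": [{"time": "2024-01-01T00:00:00", "x": 0.98, "y": 0.12, "z": 0.0}]}]
# The example values are illustrative, not real data.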


async def preheat_current_positions():
    """
    Preheat current positions from database to Redis.

    Loads the most recent single-point position for every Solar System body.
    Strategy: take the position closest to the current hour, which should be
    the current hour itself or the most recent sample available.
    """
    logger.info("=" * 60)
    logger.info("Starting cache preheat: Current positions")
    logger.info("=" * 60)

    try:
        async for db in get_db():
            # Get all celestial bodies
            all_bodies = await celestial_body_service.get_all_bodies(db)

            # Filter to Solar System bodies only (system_id = 1), excluding
            # stars and exoplanets that belong to other star systems
            all_bodies = [b for b in all_bodies if b.system_id == 1]

            logger.info(f"Found {len(all_bodies)} Solar System celestial bodies")

            # Current time rounded down to the hour
            now = datetime.utcnow()
            current_hour = now.replace(minute=0, second=0, microsecond=0)

            # Time window: current hour ± 1 hour
            start_window = current_hour - timedelta(hours=1)
            end_window = current_hour + timedelta(hours=1)
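
            # Assumption: positions are sampled at (at most) hourly resolution,
            # so a ±1 h window around the rounded hour should always contain at
            # least one sample; the closest one is selected below.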

            # Collect positions for all bodies
            bodies_data: List[Dict[str, Any]] = []
            successful_bodies = 0

            for body in all_bodies:
                try:
                    # Get positions near the current hour
                    recent_positions = await position_service.get_positions(
                        body_id=body.id,
                        start_time=start_window,
                        end_time=end_window,
                        session=db
                    )

                    if recent_positions:
                        # Use the sample whose timestamp is closest to current_hour
                        closest_pos = min(
                            recent_positions,
                            key=lambda p: abs((p.time - current_hour).total_seconds())
                        )
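
                        # E.g. with samples at 11:00, 12:00 and 13:00 and
                        # current_hour == 12:00, the 12:00 sample wins (|Δt| = 0);
                        # on a tie, min() keeps the first sample encountered.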

                        body_dict = {
                            "id": body.id,
                            "name": body.name,
                            "name_zh": body.name_zh,
                            "type": body.type,
                            "description": body.description,
                            "positions": [{
                                "time": closest_pos.time.isoformat(),
                                "x": closest_pos.x,
                                "y": closest_pos.y,
                                "z": closest_pos.z,
                            }]
                        }
                        bodies_data.append(body_dict)
                        successful_bodies += 1
                        logger.debug(f" ✓ Loaded position for {body.name} at {closest_pos.time}")
                    else:
                        logger.warning(f" ⚠ No position found for {body.name} near {current_hour}")

                except Exception as e:
                    logger.warning(f" ✗ Failed to load position for {body.name}: {e}")
                    continue

            # Write to Redis if we have data
            if bodies_data:
                # Cache key for the current hour (start == end: a single instant)
                time_str = current_hour.isoformat()
                redis_key = make_cache_key("positions", time_str, time_str, "1h")
                ttl = get_ttl_seconds("current_positions")
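
                # Note: the exact key layout is defined by make_cache_key (not
                # shown here); passing the same timestamp for start and end
                # addresses a single point in time at "1h" resolution.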

                success = await redis_cache.set(redis_key, bodies_data, ttl)

                if success:
                    logger.info(f"✅ Preheated current positions: {successful_bodies}/{len(all_bodies)} bodies")
                    logger.info(f" Time: {current_hour}")
                    logger.info(f" Redis key: {redis_key}")
                    logger.info(f" TTL: {ttl}s ({ttl // 3600}h)")
                else:
                    logger.error("❌ Failed to write to Redis")
            else:
                logger.warning("⚠ No position data available to preheat")

            break  # Only use the first database session

    except Exception as e:
        logger.error(f"❌ Cache preheat failed: {e}", exc_info=True)

    logger.info("=" * 60)


async def preheat_historical_positions(days: int = 3):
    """
    Preheat historical positions for timeline mode.

    Strategy: for each day, cache the position at 00:00:00 UTC (a single
    point per day).

    Args:
        days: Number of days to preheat (default: 3)
    """
    logger.info("=" * 60)
    logger.info(f"Starting cache preheat: Historical positions ({days} days)")
    logger.info("=" * 60)

    try:
        async for db in get_db():
            # Get all celestial bodies
            all_bodies = await celestial_body_service.get_all_bodies(db)

            # Filter to Solar System bodies only (system_id = 1), excluding
            # stars and exoplanets that belong to other star systems
            all_bodies = [b for b in all_bodies if b.system_id == 1]

            logger.info(f"Found {len(all_bodies)} Solar System celestial bodies")

            # Time window: the `days` days leading up to now
            end_date = datetime.utcnow()
            start_date = end_date - timedelta(days=days)

            logger.info(f"Time range: {start_date.date()} to {end_date.date()}")

            # Preheat each day separately (single point at 00:00:00 per day)
            cached_days = 0
            for day_offset in range(days):
                # Target time: midnight (00:00:00 UTC) of this day
                target_day = start_date + timedelta(days=day_offset)
                target_midnight = target_day.replace(hour=0, minute=0, second=0, microsecond=0)

                # Search window: ±30 minutes around midnight
                search_start = target_midnight - timedelta(minutes=30)
                search_end = target_midnight + timedelta(minutes=30)
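
                # Assumption: a sample exists within 30 minutes of each midnight;
                # bodies without one are simply not appended below, and the day
                # is then reported as incomplete.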

                # Collect positions for all bodies at this specific time
                bodies_data: List[Dict[str, Any]] = []
                successful_bodies = 0

                for body in all_bodies:
                    try:
                        # Query positions near midnight of this day
                        positions = await position_service.get_positions(
                            body_id=body.id,
                            start_time=search_start,
                            end_time=search_end,
                            session=db
                        )

                        if positions:
                            # Find the position closest to midnight
                            closest_pos = min(
                                positions,
                                key=lambda p: abs((p.time - target_midnight).total_seconds())
                            )

                            body_dict = {
                                "id": body.id,
                                "name": body.name,
                                "name_zh": body.name_zh,
                                "type": body.type,
                                "description": body.description,
                                "positions": [
                                    {
                                        "time": closest_pos.time.isoformat(),
                                        "x": closest_pos.x,
                                        "y": closest_pos.y,
                                        "z": closest_pos.z,
                                    }
                                ]
                            }
                            bodies_data.append(body_dict)
                            successful_bodies += 1

                    except Exception as e:
                        logger.warning(f" ✗ Failed to load {body.name} for {target_midnight.date()}: {e}")
                        continue

                # Write to Redis only if we have complete data
                if bodies_data and successful_bodies == len(all_bodies):
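                    # Design choice: all-or-nothing per day, so timeline readers
                    # never get a partial snapshot from the cache.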
                    # Cache key for this specific midnight timestamp
                    time_str = target_midnight.isoformat()
                    redis_key = make_cache_key("positions", time_str, time_str, "1d")
                    ttl = get_ttl_seconds("historical_positions")

                    success = await redis_cache.set(redis_key, bodies_data, ttl)

                    if success:
                        cached_days += 1
                        logger.info(f" ✓ Cached {target_midnight.date()} 00:00 UTC: {successful_bodies} bodies")
                    else:
                        logger.warning(f" ✗ Failed to cache {target_midnight.date()}")
                else:
                    logger.warning(f" ⚠ Incomplete data for {target_midnight.date()}: {successful_bodies}/{len(all_bodies)} bodies")

            logger.info(f"✅ Preheated {cached_days}/{days} days of historical data")

            break  # Only use the first database session

    except Exception as e:
        logger.error(f"❌ Historical cache preheat failed: {e}", exc_info=True)

    logger.info("=" * 60)


async def preheat_all_caches():
    """
    Preheat all caches on startup.

    Priority:
    1. Current positions (most important)
    2. Historical positions for timeline mode (3 days)
    """
    logger.info("")
    logger.info("🔥 Starting full cache preheat...")
    logger.info("")

    # 1. Current positions
    await preheat_current_positions()

    # 2. Historical positions (3 days)
    await preheat_historical_positions(days=3)

    logger.info("")
    logger.info("🔥 Cache preheat completed!")
    logger.info("")
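

# Usage sketch (illustrative, not part of this module): how an ASGI app might
# trigger the preheat on startup, or how to run it by hand. The module path
# and the FastAPI wiring are assumptions inferred from the sibling imports;
# adapt them to the project's actual layout.
#
#     from app.services.cache_preheat import preheat_all_caches
#
#     @app.on_event("startup")
#     async def warm_caches():
#         await preheat_all_caches()
#
# One-off manual run:
#
#     import asyncio
#     asyncio.run(preheat_all_caches())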