first commit
This commit is contained in:
273
main_layered.py
Normal file
273
main_layered.py
Normal file
@@ -0,0 +1,273 @@
"""
Main application entry point with layered architecture.

This is the new structured version of the FastAPI application.
"""
|
||||
import asyncio
|
||||
import time
|
||||
from contextlib import asynccontextmanager
|
||||
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
import logging
|
||||
|
||||
# Import layered components
|
||||
from layers.infrastructure.database_connection import database_connection
|
||||
from layers.infrastructure.redis_connection import redis_connection
|
||||
from layers.business.sensor_service import SensorService
|
||||
from layers.business.cleanup_service import cleanup_service
|
||||
from layers.presentation.websocket_handler import websocket_manager
|
||||
from layers.presentation.redis_subscriber import redis_subscriber
|
||||
from layers.presentation.api_routes import router as api_router
|
||||
from models import HealthCheck
|
||||
|
||||
# Configure logging for the whole process; module-level logger per stdlib convention
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Application startup time for uptime calculation (wall-clock seconds since import)
app_start_time = time.time()
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager with proper layer initialization.

    Startup order: infrastructure (DB, Redis) -> business -> presentation
    (Redis subscriber, scheduled cleanup). Shutdown is performed in a
    ``finally`` block so background tasks are stopped and connections are
    closed even when the application body raises.

    Raises:
        Exception: re-raises any startup failure after logging it, so the
            server refuses to start with a half-initialized stack.
    """
    # --- Startup ---
    logger.info("Application starting up...")
    try:
        # Initialize infrastructure layer
        await database_connection.connect()
        await redis_connection.connect()
        logger.info("Infrastructure layer initialized")

        # Business-layer services are instantiated on-demand; nothing to
        # construct eagerly here.
        logger.info("Business layer initialized")

        # Initialize presentation layer (background consumers)
        await redis_subscriber.start_subscription("energy_data")
        await cleanup_service.start_scheduled_cleanup(24)  # Daily cleanup
        logger.info("Presentation layer initialized")

        logger.info("Application startup complete")
    except Exception as e:
        logger.error(f"Error during application lifecycle: {e}")
        raise

    try:
        yield
    finally:
        # --- Shutdown: guaranteed to run even if the app body raised ---
        logger.info("Application shutting down...")

        # Stop background tasks first, before closing the connections they use
        await redis_subscriber.stop_subscription()
        await cleanup_service.stop_scheduled_cleanup()

        # Close connections
        await database_connection.disconnect()
        await redis_connection.disconnect()

        logger.info("Application shutdown complete")
|
||||
|
||||
# ASGI application object; lifespan handles layer init/teardown.
app = FastAPI(
    title="Energy Monitoring Dashboard API",
    description="Real-time energy monitoring and IoT sensor data management system (Layered Architecture)",
    version="2.0.0",
    lifespan=lifespan
)

# Add CORS middleware
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec (browsers reject wildcard-with-credentials) —
# confirm and pin explicit origins for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure appropriately for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include API router with version prefix
app.include_router(api_router, prefix="/api/v1")
|
||||
|
||||
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """
    Real-time data streaming endpoint (presentation layer).

    Registers the socket with the shared connection manager, then idles on
    incoming frames; received text is discarded — the read only keeps the
    connection alive and lets a disconnect surface as an exception.
    """
    await websocket_manager.connect(websocket)
    try:
        while True:
            _ = await websocket.receive_text()
    except WebSocketDisconnect:
        # Client went away: deregister so broadcasts skip this socket.
        websocket_manager.disconnect(websocket)
|
||||
|
||||
@app.get("/")
async def read_root():
    """Root endpoint: static service metadata plus current uptime."""
    uptime = time.time() - app_start_time
    return {
        "message": "Energy Monitoring Dashboard Backend (Layered Architecture)",
        "version": "2.0.0",
        "status": "running",
        "uptime_seconds": uptime,
        "architecture": "3-layer (Presentation, Business, Infrastructure)",
    }
|
||||
|
||||
@app.get("/health", response_model=HealthCheck)
async def health_check():
    """
    Comprehensive health check endpoint.

    Pings each layer's dependencies and aggregates the result into a
    :class:`HealthCheck`: "healthy" when both stores respond, "degraded"
    when one is down, "unhealthy" when both are down.

    Raises:
        HTTPException: 503 if the health check itself fails unexpectedly.
    """
    try:
        # Check infrastructure layer
        mongodb_connected = True
        redis_connected = True

        try:
            db = await database_connection.get_database()
            await db.command("ping")
        except Exception:  # any ping failure means "down"; never bare except
            mongodb_connected = False

        try:
            redis_client = await redis_connection.get_client()
            await redis_client.ping()
        except Exception:
            redis_connected = False

        # Check business layer through the repository (local import avoids a
        # module-load cycle with the layers package)
        from layers.infrastructure.repositories import SensorReadingRepository
        stats_repo = SensorReadingRepository()

        # Get basic statistics
        try:
            # Simple count queries to test business layer
            total_readings = await stats_repo.count_by_query({})
            active_sensors_data = await redis_connection.get_keys_by_pattern("sensor:latest:*")
            total_sensors = len(active_sensors_data)
        except Exception as e:
            logger.error(f"Error getting stats for health check: {e}")
            total_readings = 0
            total_sensors = 0

        # Check presentation layer — results are not reported, but a failure
        # here propagates to the outer handler and surfaces as a 503.
        websocket_manager.get_connection_count()
        redis_subscriber.is_subscriber_running()

        # Determine overall status
        status = "healthy"
        if not mongodb_connected or not redis_connected:
            status = "degraded"
        if not mongodb_connected and not redis_connected:
            status = "unhealthy"

        return HealthCheck(
            status=status,
            mongodb_connected=mongodb_connected,
            redis_connected=redis_connected,
            total_sensors=total_sensors,
            active_sensors=total_sensors,  # Approximation
            total_readings=total_readings,
            uptime_seconds=time.time() - app_start_time
        )

    except Exception as e:
        logger.error(f"Health check failed: {e}")
        raise HTTPException(status_code=503, detail="Service Unavailable")
|
||||
|
||||
@app.get("/status")
async def system_status():
    """
    Detailed system status endpoint with layer-specific information.

    Returns a nested dict with per-layer status plus the Redis subscriber's
    own status report.

    Raises:
        HTTPException: 500 if assembling the status fails unexpectedly.
    """
    try:
        # Infrastructure layer status (assume up, then verify by pinging)
        infrastructure_status = {
            "database_connected": True,
            "redis_connected": True
        }

        try:
            db = await database_connection.get_database()
            await db.command("ping")
        except Exception:  # any ping failure means "down"; never bare except
            infrastructure_status["database_connected"] = False

        try:
            redis_client = await redis_connection.get_client()
            await redis_client.ping()
        except Exception:
            infrastructure_status["redis_connected"] = False

        # Business layer status
        business_status = {
            "cleanup_service_running": cleanup_service.is_cleanup_running()
        }

        # Presentation layer status
        presentation_status = {
            "active_websocket_connections": websocket_manager.get_connection_count(),
            "redis_subscriber_running": redis_subscriber.is_subscriber_running()
        }

        # Get subscriber status details
        subscriber_status = await redis_subscriber.get_subscriber_status()

        return {
            "timestamp": time.time(),
            "uptime_seconds": time.time() - app_start_time,
            "architecture": "layered",
            "layers": {
                "infrastructure": infrastructure_status,
                "business": business_status,
                "presentation": presentation_status
            },
            "redis_subscriber": subscriber_status
        }

    except Exception as e:
        logger.error(f"Status check failed: {e}")
        raise HTTPException(status_code=500, detail="Internal Server Error")
|
||||
|
||||
@app.get("/system/cleanup", summary="Get cleanup service status")
async def get_cleanup_status():
    """Report cleanup-service state, storage statistics, and retention policies."""
    try:
        running = cleanup_service.is_cleanup_running()
        storage = await cleanup_service.get_storage_statistics()
        retention = await cleanup_service.get_data_retention_info()

        return {
            "cleanup_service_running": running,
            "storage_statistics": storage,
            "retention_policies": retention,
        }
    except Exception as e:
        logger.error(f"Error getting cleanup status: {e}")
        raise HTTPException(status_code=500, detail="Internal Server Error")
|
||||
|
||||
@app.post("/system/cleanup", summary="Run manual cleanup")
async def run_manual_cleanup():
    """Trigger the data-cleanup process immediately and report its results."""
    try:
        results = await cleanup_service.cleanup_old_data()
    except Exception as e:
        logger.error(f"Error running manual cleanup: {e}")
        raise HTTPException(status_code=500, detail="Internal Server Error")

    # Dict construction cannot raise, so it sits outside the try block.
    return {
        "message": "Manual cleanup completed",
        "results": results,
    }
|
||||
|
||||
if __name__ == "__main__":
    # Dev entry point: serve the ASGI app directly with uvicorn.
    import uvicorn
    # NOTE(review): binds to all interfaces (0.0.0.0) — confirm this is
    # intended outside containerized/dev environments.
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
||||
Reference in New Issue
Block a user