Compare commits

..

16 Commits

Author SHA1 Message Date
rafaeldpsilva
4bedcecf5d feat: Implement HTTP Poller for IoT device data ingestion
- Added iots-right.json configuration file to define IoT devices and their sensors.
- Developed HttpPoller class to handle polling of IoT devices via HTTP.
- Created IoT configuration loader to validate and load device configurations from JSON.
- Introduced models for device status, polling metrics, and data sources.
- Implemented API routes for health checks, device status retrieval, and configuration management.
- Enhanced error handling and logging throughout the data ingestion process.
2025-12-22 16:35:22 +00:00
rafaeldpsilva
ccf5f5a5c3 modular monolithic 2025-12-20 00:57:59 +00:00
rafaeldpsilva
4779eb9ded modular monolithic 2025-12-20 00:51:04 +00:00
rafaeldpsilva
6ed61b06e8 update 2025-12-20 00:36:48 +00:00
rafaeldpsilva
ed0b4c750c update 2025-12-20 00:33:03 +00:00
rafaeldpsilva
a84d3632da update 2025-12-20 00:31:34 +00:00
rafaeldpsilva
7547e6b229 demand response 2025-12-10 15:26:34 +00:00
rafaeldpsilva
b54999f7a7 Add rb option to deploy script and update sensors endpoint path 2025-10-08 14:05:16 +01:00
rafaeldpsilva
da1fb2a058 Add Token model and update endpoints to use it
Refactor token validation and saving endpoints to accept a Token model
instead of a raw string. Set default values for token generation request
fields. Update TokenService cleanup to use datetime.now().
2025-10-08 14:05:11 +01:00
rafaeldpsilva
38fb3e6e96 Add dynamic energy data collections per building
Energy data is now stored in separate collections for each
SLGs/Community/Building directory. Collections are created and indexed
dynamically. Metadata and API queries have been updated to support this
structure.
2025-10-08 14:05:00 +01:00
rafaeldpsilva
8ff20935fc Refactor auth middleware and update service config usage
- Remove redundant comments and docstrings in auth_middleware.py - Use
TOKEN_SERVICE_URL env variable for AuthMiddleware - Fix sensor service
proxy path in main.py - Use datetime.now() for request_stats start_time
2025-10-08 14:04:50 +01:00
rafaeldpsilva
863e0161b0 Implement dynamic energy data collections per building
- Store energy data in separate MongoDB collections for each
SLGs/Community/Building directory - Update FTP monitor and database
manager to track directory paths and select appropriate collections -
Add collection stats to database statistics API - Update sensor and
token services for improved API consistency - Add 'rb' (rebuild and
restart) option to deploy.sh script
2025-10-08 14:03:57 +01:00
rafaeldpsilva
ba99b09e08 Remove comments and verbose logging from services 2025-09-23 15:05:59 +01:00
rafaeldpsilva
78151feb86 Documentation files 2025-09-22 15:25:52 +01:00
rafaeldpsilva
02a0d54e14 Add room and analytics services with CRUD API endpoints
- Implement RoomService for room management and metrics - Add
AnalyticsService for sensor data analytics and trends - Extend models
with Room, RoomCreate, RoomUpdate, RoomInfo - Add room CRUD endpoints to
FastAPI app - Add database connection logic for MongoDB and Redis -
Refactor sensor service logic into SensorService class
2025-09-22 15:13:14 +01:00
rafaeldpsilva
2008ea0e70 Refactor service registry and load balancer integration
- Pass service registry to load balancer for dependency injection -
Remove dynamic imports of service registry in load balancer - Update
service registration and health check logic - Enable token-service in
docker-compose and service config - Add room names and rooms proxy
endpoints - Improve logging for proxy requests and health checks -
Update deploy script project name to sa4cps - Add test script for
coroutine fix - Minor code cleanup and formatting
2025-09-22 15:13:06 +01:00
67 changed files with 15474 additions and 697 deletions

2
.gitignore vendored

@@ -173,4 +173,6 @@ poetry.toml
 # LSP config files
 pyrightconfig.json
+CLAUDE.md
 # End of https://www.toptal.com/developers/gitignore/api/python

559
bootstrap_sensors.py Normal file

@@ -0,0 +1,559 @@
#!/usr/bin/env python3
"""
Bootstrap script to populate the database with sensors and room configurations.
This script creates a realistic smart building sensor setup for testing and development.
"""
import asyncio
import json
import sys
import logging
from datetime import datetime
from typing import List, Dict, Any
import aiohttp
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Base URLs
API_BASE_URL = "http://localhost:8000"
SENSOR_SERVICE_URL = "http://localhost:8007"
# Example sensor definitions (reference data, normalized to valid Python dicts)
s = [
    {
        'id': 'sensor_1',
        'name': 'Energy Monitor 1',
        'type': 'energy',
        'room': 'Conference Room A',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['energy'],
            'actions': [],
        },
        'metadata': {
            'location': 'Wall mounted',
            'model': 'EM-100',
            'firmware': '2.1.0',
        },
    },
    {
        'id': 'sensor_2',
        'name': 'HVAC Controller 1',
        'type': 'hvac',
        'room': 'Conference Room A',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['temperature', 'co2'],
            'actions': [
                {
                    'id': 'temp_adjust',
                    'name': 'Adjust Temperature',
                    'type': 'adjust',
                    'icon': '🌡️',
                    'parameters': {'min': 18, 'max': 28, 'step': 0.5},
                },
                {
                    'id': 'fan_speed',
                    'name': 'Fan Speed',
                    'type': 'adjust',
                    'icon': '💨',
                    'parameters': {'min': 0, 'max': 5, 'step': 1},
                },
                {'id': 'power_toggle', 'name': 'Power', 'type': 'toggle', 'icon': ''},
            ],
        },
        'metadata': {
            'location': 'Ceiling mounted',
            'model': 'HVAC-200',
            'firmware': '3.2.1',
        },
    },
    {
        'id': 'sensor_3',
        'name': 'Smart Light Controller',
        'type': 'lighting',
        'room': 'Office Floor 1',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['energy'],
            'actions': [
                {
                    'id': 'brightness',
                    'name': 'Brightness',
                    'type': 'adjust',
                    'icon': '💡',
                    'parameters': {'min': 0, 'max': 100, 'step': 5},
                },
                {'id': 'power_toggle', 'name': 'Power', 'type': 'toggle', 'icon': ''},
                {
                    'id': 'scene',
                    'name': 'Scene',
                    'type': 'adjust',
                    'icon': '🎨',
                    'parameters': {'options': ['Work', 'Meeting', 'Presentation', 'Relax']},
                },
            ],
        },
        'metadata': {
            'location': 'Ceiling grid',
            'model': 'SL-300',
            'firmware': '1.5.2',
        },
    },
    {
        'id': 'sensor_4',
        'name': 'CO2 Sensor',
        'type': 'co2',
        'room': 'Meeting Room 1',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['co2', 'temperature', 'humidity'],
            'actions': [{'id': 'calibrate', 'name': 'Calibrate', 'type': 'trigger', 'icon': '⚙️'}],
        },
        'metadata': {
            'location': 'Wall mounted',
            'model': 'CO2-150',
            'firmware': '2.0.3',
            'battery': 85,
        },
    },
    {
        'id': 'sensor_5',
        'name': 'Security Camera',
        'type': 'security',
        'room': 'Lobby',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['motion'],
            'actions': [
                {'id': 'record_toggle', 'name': 'Recording', 'type': 'toggle', 'icon': '📹'},
                {'id': 'ptz_control', 'name': 'Pan/Tilt/Zoom', 'type': 'trigger', 'icon': '🎥'},
                {'id': 'night_mode', 'name': 'Night Mode', 'type': 'toggle', 'icon': '🌙'},
            ],
        },
        'metadata': {
            'location': 'Corner ceiling',
            'model': 'SEC-400',
            'firmware': '4.1.0',
        },
    },
]
# Bootstrap data configuration
BOOTSTRAP_SENSORS = [
# Living Room Sensors
{
"sensor_id": "lr_energy_001",
"name": "Living Room Main Energy Monitor",
"sensor_type": "energy",
"room": "living_room",
"location": "Main electrical panel - Living Room circuit",
"floor": "1",
"manufacturer": "SmartMeter Co",
"model": "SM-E300",
"status": "online"
},
{
"sensor_id": "lr_co2_001",
"name": "Living Room Air Quality Monitor",
"sensor_type": "co2",
"room": "living_room",
"location": "Wall mounted near seating area",
"floor": "1",
"manufacturer": "AirSense",
"model": "AS-CO2-Pro",
"status": "online"
},
{
"sensor_id": "lr_temp_001",
"name": "Living Room Temperature Sensor",
"sensor_type": "temperature",
"room": "living_room",
"location": "Central wall position",
"floor": "1",
"manufacturer": "TempTech",
"model": "TT-T200",
"status": "online"
},
# Kitchen Sensors
{
"sensor_id": "kt_energy_001",
"name": "Kitchen Appliances Energy Monitor",
"sensor_type": "energy",
"room": "kitchen",
"location": "Kitchen appliance circuit",
"floor": "1",
"manufacturer": "SmartMeter Co",
"model": "SM-E300",
"status": "online"
},
{
"sensor_id": "kt_humidity_001",
"name": "Kitchen Humidity Sensor",
"sensor_type": "humidity",
"room": "kitchen",
"location": "Above sink area",
"floor": "1",
"manufacturer": "HumidSense",
"model": "HS-H150",
"status": "online"
},
{
"sensor_id": "kt_temp_001",
"name": "Kitchen Temperature Monitor",
"sensor_type": "temperature",
"room": "kitchen",
"location": "Central kitchen position",
"floor": "1",
"manufacturer": "TempTech",
"model": "TT-T200",
"status": "online"
},
# Bedroom Sensors
{
"sensor_id": "br_energy_001",
"name": "Bedroom Energy Monitor",
"sensor_type": "energy",
"room": "bedroom",
"location": "Bedroom electrical circuit",
"floor": "1",
"manufacturer": "SmartMeter Co",
"model": "SM-E200",
"status": "online"
},
{
"sensor_id": "br_co2_001",
"name": "Bedroom Air Quality Monitor",
"sensor_type": "co2",
"room": "bedroom",
"location": "Bedside wall mount",
"floor": "1",
"manufacturer": "AirSense",
"model": "AS-CO2-Basic",
"status": "online"
},
{
"sensor_id": "br_temp_001",
"name": "Bedroom Temperature Sensor",
"sensor_type": "temperature",
"room": "bedroom",
"location": "Opposite wall from bed",
"floor": "1",
"manufacturer": "TempTech",
"model": "TT-T100",
"status": "online"
},
# Office Sensors
{
"sensor_id": "of_energy_001",
"name": "Office Equipment Energy Monitor",
"sensor_type": "energy",
"room": "office",
"location": "Office equipment circuit",
"floor": "1",
"manufacturer": "SmartMeter Co",
"model": "SM-E300",
"status": "online"
},
{
"sensor_id": "of_co2_001",
"name": "Office Air Quality Monitor",
"sensor_type": "co2",
"room": "office",
"location": "Desk area wall mount",
"floor": "1",
"manufacturer": "AirSense",
"model": "AS-CO2-Pro",
"status": "online"
},
{
"sensor_id": "of_motion_001",
"name": "Office Motion Detector",
"sensor_type": "motion",
"room": "office",
"location": "Ceiling mounted - center",
"floor": "1",
"manufacturer": "MotionTech",
"model": "MT-M100",
"status": "online"
},
# Bathroom Sensors
{
"sensor_id": "bt_humidity_001",
"name": "Bathroom Humidity Monitor",
"sensor_type": "humidity",
"room": "bathroom",
"location": "Ceiling mounted",
"floor": "1",
"manufacturer": "HumidSense",
"model": "HS-H200",
"status": "online"
},
{
"sensor_id": "bt_temp_001",
"name": "Bathroom Temperature Sensor",
"sensor_type": "temperature",
"room": "bathroom",
"location": "Wall mounted near mirror",
"floor": "1",
"manufacturer": "TempTech",
"model": "TT-T150",
"status": "online"
},
# Garage Sensors
{
"sensor_id": "gr_energy_001",
"name": "Garage Energy Monitor",
"sensor_type": "energy",
"room": "garage",
"location": "Garage main circuit",
"floor": "0",
"manufacturer": "SmartMeter Co",
"model": "SM-E100",
"status": "online"
},
{
"sensor_id": "gr_motion_001",
"name": "Garage Motion Detector",
"sensor_type": "motion",
"room": "garage",
"location": "Ceiling mounted - entrance",
"floor": "0",
"manufacturer": "MotionTech",
"model": "MT-M200",
"status": "online"
}
]
BOOTSTRAP_ROOMS = [
{
"name": "living_room",
"display_name": "Living Room",
"description": "Main living area with entertainment center",
"floor": "1",
"area_sqm": 35.5,
"room_type": "living"
},
{
"name": "kitchen",
"display_name": "Kitchen",
"description": "Main kitchen with appliances",
"floor": "1",
"area_sqm": 15.2,
"room_type": "kitchen"
},
{
"name": "bedroom",
"display_name": "Master Bedroom",
"description": "Primary bedroom",
"floor": "1",
"area_sqm": 20.1,
"room_type": "bedroom"
},
{
"name": "office",
"display_name": "Home Office",
"description": "Work from home office space",
"floor": "1",
"area_sqm": 12.8,
"room_type": "office"
},
{
"name": "bathroom",
"display_name": "Main Bathroom",
"description": "Primary bathroom",
"floor": "1",
"area_sqm": 8.5,
"room_type": "bathroom"
},
{
"name": "garage",
"display_name": "Garage",
"description": "Two-car garage with workshop area",
"floor": "0",
"area_sqm": 42.0,
"room_type": "garage"
}
]
async def generate_auth_token() -> str:
"""Generate authentication token for API calls"""
token_payload = {
"name": "bootstrap_user",
"list_of_resources": ["sensors", "rooms", "analytics", "health", "data", "export", "events"],
"data_aggregation": True,
"time_aggregation": True,
"embargo": 0,
"exp_hours": 24
}
async with aiohttp.ClientSession() as session:
async with session.post(
f"{API_BASE_URL}/api/v1/tokens/generate",
json=token_payload,
headers={"Content-Type": "application/json"}
) as response:
if response.status == 200:
data = await response.json()
logger.info("Successfully generated authentication token")
return data["token"]
else:
error_text = await response.text()
raise Exception(f"Failed to generate token: {response.status} - {error_text}")
async def create_rooms(auth_token: str) -> bool:
"""Create rooms in the database"""
logger.info("Creating bootstrap rooms...")
headers = {
"Authorization": f"Bearer {auth_token}",
"Content-Type": "application/json"
}
success_count = 0
async with aiohttp.ClientSession() as session:
for room in BOOTSTRAP_ROOMS:
try:
async with session.post(
f"{SENSOR_SERVICE_URL}/rooms",
json=room,
headers=headers
) as response:
if response.status in [200, 201]:
success_count += 1
logger.info(f"✓ Created room: {room['display_name']}")
elif response.status == 400:
# Room might already exist
error_data = await response.json()
if "already exists" in error_data.get("detail", ""):
logger.info(f"○ Room already exists: {room['display_name']}")
success_count += 1
else:
logger.error(f"✗ Failed to create room {room['name']}: {error_data}")
else:
error_text = await response.text()
logger.error(f"✗ Failed to create room {room['name']}: {response.status} - {error_text}")
except Exception as e:
logger.error(f"✗ Exception creating room {room['name']}: {e}")
logger.info(f"Rooms created: {success_count}/{len(BOOTSTRAP_ROOMS)}")
return success_count > 0
async def create_sensors(auth_token: str) -> bool:
"""Create sensors in the database"""
logger.info("Creating bootstrap sensors...")
headers = {
"Authorization": f"Bearer {auth_token}",
"Content-Type": "application/json"
}
success_count = 0
async with aiohttp.ClientSession() as session:
for sensor in BOOTSTRAP_SENSORS:
try:
async with session.post(
f"{SENSOR_SERVICE_URL}/sensors",
json=sensor,
headers=headers
) as response:
if response.status in [200, 201]:
success_count += 1
logger.info(f"✓ Created sensor: {sensor['name']} ({sensor['sensor_id']})")
elif response.status == 400:
# Sensor might already exist
error_data = await response.json()
if "already exists" in error_data.get("detail", ""):
logger.info(f"○ Sensor already exists: {sensor['sensor_id']}")
success_count += 1
else:
logger.error(f"✗ Failed to create sensor {sensor['sensor_id']}: {error_data}")
else:
error_text = await response.text()
logger.error(f"✗ Failed to create sensor {sensor['sensor_id']}: {response.status} - {error_text}")
except Exception as e:
logger.error(f"✗ Exception creating sensor {sensor['sensor_id']}: {e}")
logger.info(f"Sensors created: {success_count}/{len(BOOTSTRAP_SENSORS)}")
return success_count > 0
async def verify_bootstrap() -> None:
"""Verify that sensors were created successfully"""
logger.info("Verifying bootstrap results...")
try:
# Check sensors directly on sensor service (no auth required for health checks)
async with aiohttp.ClientSession() as session:
async with session.get(f"{SENSOR_SERVICE_URL}/sensors/get") as response:
if response.status == 200:
data = await response.json()
logger.info(f"✓ Total sensors in database: {data['count']}")
# Group by room
rooms = {}
for sensor in data.get('sensors', []):
room = sensor.get('room', 'unknown')
if room not in rooms:
rooms[room] = []
rooms[room].append(sensor['sensor_id'])
for room, sensors in rooms.items():
logger.info(f" - {room}: {len(sensors)} sensors")
else:
logger.error(f"Failed to verify sensors: {response.status}")
async with session.get(f"{SENSOR_SERVICE_URL}/rooms") as response:
if response.status == 200:
data = await response.json()
logger.info(f"✓ Total rooms in database: {data.get('count', 0)}")
else:
logger.error(f"Failed to verify rooms: {response.status}")
except Exception as e:
logger.error(f"✗ Exception during verification: {e}")
async def main():
"""Main bootstrap function"""
logger.info("=== Starting Sensor Bootstrap Process ===")
try:
# Step 1: Generate authentication token
logger.info("Step 1: Generating authentication token...")
auth_token = await generate_auth_token()
# Step 2: Create rooms
logger.info("Step 2: Creating rooms...")
rooms_success = await create_rooms(auth_token)
# Step 3: Create sensors
logger.info("Step 3: Creating sensors...")
sensors_success = await create_sensors(auth_token)
# Step 4: Verify results
logger.info("Step 4: Verifying bootstrap...")
await verify_bootstrap()
if rooms_success and sensors_success:
logger.info("=== Bootstrap Complete! ===")
logger.info("You can now run the data simulator to generate real-time sensor data.")
logger.info("Run: python data_simulator_enhanced.py")
return True
else:
logger.error("=== Bootstrap Failed ===")
return False
except Exception as e:
logger.error(f"Bootstrap failed with error: {e}")
return False
if __name__ == "__main__":
# Run the bootstrap
success = asyncio.run(main())
sys.exit(0 if success else 1)

354
data_simulator_enhanced.py Normal file

@@ -0,0 +1,354 @@
#!/usr/bin/env python3
"""
Enhanced Data Simulator for Bootstrap Sensors
Generates realistic real-time sensor data for the bootstrap sensors created by bootstrap_sensors.py
"""
import redis
import time
import random
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any
import math
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Redis configuration
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_CHANNEL = "energy_data"
# Bootstrap sensor IDs (must match bootstrap_sensors.py)
BOOTSTRAP_SENSORS = {
# Living Room Sensors
"lr_energy_001": {"type": "energy", "room": "living_room", "base_value": 2.5, "variance": 1.2},
"lr_co2_001": {"type": "co2", "room": "living_room", "base_value": 420, "variance": 80},
"lr_temp_001": {"type": "temperature", "room": "living_room", "base_value": 22.0, "variance": 2.0},
# Kitchen Sensors
"kt_energy_001": {"type": "energy", "room": "kitchen", "base_value": 3.8, "variance": 2.1},
"kt_humidity_001": {"type": "humidity", "room": "kitchen", "base_value": 45.0, "variance": 15.0},
"kt_temp_001": {"type": "temperature", "room": "kitchen", "base_value": 24.0, "variance": 3.0},
# Bedroom Sensors
"br_energy_001": {"type": "energy", "room": "bedroom", "base_value": 1.2, "variance": 0.8},
"br_co2_001": {"type": "co2", "room": "bedroom", "base_value": 480, "variance": 120},
"br_temp_001": {"type": "temperature", "room": "bedroom", "base_value": 20.5, "variance": 1.5},
# Office Sensors
"of_energy_001": {"type": "energy", "room": "office", "base_value": 2.1, "variance": 1.5},
"of_co2_001": {"type": "co2", "room": "office", "base_value": 450, "variance": 100},
"of_motion_001": {"type": "motion", "room": "office", "base_value": 0, "variance": 1},
# Bathroom Sensors
"bt_humidity_001": {"type": "humidity", "room": "bathroom", "base_value": 65.0, "variance": 20.0},
"bt_temp_001": {"type": "temperature", "room": "bathroom", "base_value": 23.0, "variance": 2.5},
# Garage Sensors
"gr_energy_001": {"type": "energy", "room": "garage", "base_value": 0.8, "variance": 0.5},
"gr_motion_001": {"type": "motion", "room": "garage", "base_value": 0, "variance": 1}
}
class SensorDataGenerator:
"""Generates realistic sensor data with time-based patterns"""
def __init__(self):
self.start_time = time.time()
self.motion_states = {} # Track motion sensor states
# Initialize motion states
for sensor_id, config in BOOTSTRAP_SENSORS.items():
if config["type"] == "motion":
self.motion_states[sensor_id] = {"active": False, "last_change": time.time()}
def get_time_factor(self) -> float:
"""Get time-based multiplier for realistic daily patterns"""
current_hour = datetime.now().hour
# Energy usage patterns (higher during day, lower at night)
if 6 <= current_hour <= 22: # Daytime
return 1.0 + 0.3 * math.sin((current_hour - 6) * math.pi / 16)
else: # Nighttime
return 0.3 + 0.2 * random.random()
def get_occupancy_factor(self, room: str) -> float:
"""Get occupancy-based multiplier for different rooms"""
current_hour = datetime.now().hour
occupancy_patterns = {
"living_room": 1.2 if 18 <= current_hour <= 23 else 0.8,
"kitchen": 1.5 if 7 <= current_hour <= 9 or 17 <= current_hour <= 20 else 0.6,
"bedroom": 1.3 if 22 <= current_hour or current_hour <= 7 else 0.4,
"office": 1.4 if 9 <= current_hour <= 17 else 0.3,
"bathroom": 1.0, # Consistent usage
"garage": 0.8 if 7 <= current_hour <= 9 or 17 <= current_hour <= 19 else 0.2
}
return occupancy_patterns.get(room, 1.0)
def generate_energy_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic energy consumption reading"""
base_value = config["base_value"]
variance = config["variance"]
room = config["room"]
# Apply time and occupancy factors
time_factor = self.get_time_factor()
occupancy_factor = self.get_occupancy_factor(room)
# Add some randomness
random_factor = 1.0 + (random.random() - 0.5) * 0.4
# Calculate final value
value = base_value * time_factor * occupancy_factor * random_factor
value = max(0.1, value) # Ensure minimum consumption
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "energy",
"timestamp": int(time.time()),
"energy": {
"value": round(value, 3),
"unit": "kWh"
},
"metadata": {
"time_factor": round(time_factor, 2),
"occupancy_factor": round(occupancy_factor, 2)
}
}
def generate_co2_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic CO2 level reading"""
base_value = config["base_value"]
variance = config["variance"]
room = config["room"]
# CO2 increases with occupancy
occupancy_factor = self.get_occupancy_factor(room)
co2_increase = (occupancy_factor - 0.5) * 150
# Add random fluctuation
random_variation = (random.random() - 0.5) * variance
value = base_value + co2_increase + random_variation
value = max(350, min(2000, value)) # Realistic CO2 range
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "co2",
"timestamp": int(time.time()),
"co2": {
"value": round(value, 1),
"unit": "ppm"
},
"metadata": {
"quality_level": "good" if value < 600 else "moderate" if value < 1000 else "poor"
}
}
def generate_temperature_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic temperature reading"""
base_value = config["base_value"]
variance = config["variance"]
room = config["room"]
# Temperature varies with time of day and occupancy
current_hour = datetime.now().hour
daily_variation = 2 * math.sin((current_hour - 6) * math.pi / 12)
occupancy_factor = self.get_occupancy_factor(room)
occupancy_heat = (occupancy_factor - 0.5) * 1.5
random_variation = (random.random() - 0.5) * variance
value = base_value + daily_variation + occupancy_heat + random_variation
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "temperature",
"timestamp": int(time.time()),
"temperature": {
"value": round(value, 1),
"unit": "°C"
}
}
def generate_humidity_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic humidity reading"""
base_value = config["base_value"]
variance = config["variance"]
room = config["room"]
# Humidity patterns based on room usage
if room == "bathroom":
# Higher spikes during usage times
current_hour = datetime.now().hour
if 7 <= current_hour <= 9 or 19 <= current_hour <= 22:
usage_spike = random.uniform(10, 25)
else:
usage_spike = 0
elif room == "kitchen":
# Cooking increases humidity
current_hour = datetime.now().hour
if 17 <= current_hour <= 20:
usage_spike = random.uniform(5, 15)
else:
usage_spike = 0
else:
usage_spike = 0
random_variation = (random.random() - 0.5) * variance
value = base_value + usage_spike + random_variation
value = max(20, min(95, value)) # Realistic humidity range
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "humidity",
"timestamp": int(time.time()),
"humidity": {
"value": round(value, 1),
"unit": "%"
}
}
def generate_motion_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic motion detection reading"""
room = config["room"]
current_time = time.time()
# Get current state
if sensor_id not in self.motion_states:
self.motion_states[sensor_id] = {"active": False, "last_change": current_time}
state = self.motion_states[sensor_id]
# Determine if motion should be detected based on occupancy patterns
occupancy_factor = self.get_occupancy_factor(room)
motion_probability = occupancy_factor * 0.3 # 30% chance when occupied
# Change state based on probability and time since last change
time_since_change = current_time - state["last_change"]
if state["active"]:
# If motion is active, chance to stop after some time
if time_since_change > 30: # At least 30 seconds of motion
if random.random() < 0.4: # 40% chance to stop
state["active"] = False
state["last_change"] = current_time
else:
# If no motion, chance to start based on occupancy
if time_since_change > 10: # At least 10 seconds of no motion
if random.random() < motion_probability:
state["active"] = True
state["last_change"] = current_time
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "motion",
"timestamp": int(time.time()),
"motion": {
"value": 1 if state["active"] else 0,
"unit": "detected"
},
"metadata": {
"duration_seconds": int(time_since_change) if state["active"] else 0
}
}
def generate_sensor_reading(self, sensor_id: str) -> Dict[str, Any]:
"""Generate appropriate reading based on sensor type"""
if sensor_id not in BOOTSTRAP_SENSORS:
logger.warning(f"Unknown sensor ID: {sensor_id}")
return None
config = BOOTSTRAP_SENSORS[sensor_id]
sensor_type = config["type"]
if sensor_type == "energy":
return self.generate_energy_reading(sensor_id, config)
elif sensor_type == "co2":
return self.generate_co2_reading(sensor_id, config)
elif sensor_type == "temperature":
return self.generate_temperature_reading(sensor_id, config)
elif sensor_type == "humidity":
return self.generate_humidity_reading(sensor_id, config)
elif sensor_type == "motion":
return self.generate_motion_reading(sensor_id, config)
else:
logger.warning(f"Unknown sensor type: {sensor_type}")
return None
def main():
"""Main simulation loop"""
logger.info("=== Starting Enhanced Data Simulator ===")
# Connect to Redis
try:
redis_client = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=0, decode_responses=True)
redis_client.ping()
logger.info(f"Successfully connected to Redis at {REDIS_HOST}:{REDIS_PORT}")
except redis.exceptions.ConnectionError as e:
logger.error(f"Could not connect to Redis: {e}")
return
# Initialize data generator
generator = SensorDataGenerator()
logger.info(f"Loaded {len(BOOTSTRAP_SENSORS)} bootstrap sensors")
logger.info(f"Publishing to Redis channel: '{REDIS_CHANNEL}'")
logger.info("Press Ctrl+C to stop simulation")
sensor_ids = list(BOOTSTRAP_SENSORS.keys())
try:
while True:
sensors_produced = []
for a in range(5):
# Generate data for a random sensor
sensor_id = random.choice(sensor_ids)
sensors_produced.append(sensor_id)
reading = generator.generate_sensor_reading(sensor_id)
if reading:
# Publish to Redis
payload = json.dumps(reading)
redis_client.publish(REDIS_CHANNEL, payload)
# Log the reading
sensor_type = reading["sensor_type"]
room = reading["room"]
value_info = ""
if "energy" in reading:
value_info = f"{reading['energy']['value']} {reading['energy']['unit']}"
elif "co2" in reading:
value_info = f"{reading['co2']['value']} {reading['co2']['unit']}"
elif "temperature" in reading:
value_info = f"{reading['temperature']['value']} {reading['temperature']['unit']}"
elif "humidity" in reading:
value_info = f"{reading['humidity']['value']} {reading['humidity']['unit']}"
elif "motion" in reading:
value_info = f"{'DETECTED' if reading['motion']['value'] else 'CLEAR'}"
logger.info(f"📊 {sensor_id} ({room}/{sensor_type}): {value_info}")
# Random interval between readings (1-5 seconds)
time.sleep(random.uniform(1, 5))
except KeyboardInterrupt:
logger.info("Stopping data simulation...")
except Exception as e:
logger.error(f"Simulation error: {e}")
if __name__ == "__main__":
main()


@@ -0,0 +1,976 @@
# Demand Response System - Architecture & Logic Documentation
## Table of Contents
1. [System Overview](#system-overview)
2. [Component Locations](#component-locations)
3. [Architecture & Data Flow](#architecture--data-flow)
4. [Key Components](#key-components)
5. [Invitation Lifecycle](#invitation-lifecycle)
6. [Integration Points](#integration-points)
7. [API Reference](#api-reference)
8. [Complete Event Flow Example](#complete-event-flow-example)
---
## System Overview
The IoT Building Monitoring system includes a comprehensive Demand Response (DR) management system that enables buildings to participate in grid flexibility programs by reducing power consumption during peak demand periods.
**Key Capabilities:**
- Create and manage DR invitations with target load reductions
- Auto-accept or manual approval of DR events
- Track power reduction in real-time during events
- Calculate financial benefits from DR participation
- Forecast available flexibility by device and time
- Configure device-specific DR participation instructions
---
## Component Locations
### Core Components
| Component | Path | Purpose |
|-----------|------|---------|
| **Service Layer** | `services/DemandResponseService.py` | Business logic for DR operations |
| **Database Layer** | `database/DemandResponseRepository.py` | MongoDB data access for DR |
| **Execution Engine** | `core/DemandResponseAtuator.py` | Runs DR events, tracks power reduction |
| **Main Orchestrator** | `core/Core.py` | Coordinates DR events and accumulates reduction |
| **IoT Model** | `model/IoT.py` | Device configuration with DR capabilities |
| **API Endpoints** | `api/main.py:230-329` | REST endpoints for DR operations |
| **Configuration** | `config/f.json` | System and device configuration |
---
## Architecture & Data Flow
### High-Level Architecture
```
┌─────────────────────────────────────┐
│ REST API Endpoints │
│ (Flask: api/main.py) │
│ - /invitation/* │
│ - /event/check │
│ - /dr/benefit │
└──────────────┬──────────────────────┘
┌─────────────────────────────────────┐
│ DemandResponseService │
│ (Business Logic Layer) │
│ - Invitation management │
│ - Auto-answer configuration │
│ - Response tracking │
└──────────────┬──────────────────────┘
┌─────────────────────────────────────┐
│ DemandResponseRepository │
│ (Data Access Layer) │
│ - MongoDB operations │
│ - Query optimization │
└──────────────┬──────────────────────┘
┌─────────────────────────────────────┐
│ MongoDB Collections │
│ - demand_response_invitations │
│ - config (auto_answer) │
│ - benefit (financial tracking) │
│ - instructions (hourly rules) │
└─────────────────────────────────────┘
```
### Execution Architecture
```
┌──────────────────────────────────────┐
│ Core.py (Main Thread) │
│ - Manages IoT device fleet │
│ - Tracks dr_reduced_power │
│ - Calculates total flexibility │
└──────────────┬───────────────────────┘
│ schedule_event(time, iot)
┌──────────────────────────────────────┐
│ DemandResponseAtuator (New Thread) │
│ - Spawned per device per event │
│ - Runs for 1 hour (59 minutes) │
│ - Updates core.dr_reduced_power │
│ - Auto-terminates at event end │
└──────────────────────────────────────┘
```
### Data Models
#### MongoDB Collection: `demand_response_invitations`
```json
{
"_id": "ObjectId",
"datetime": "2025-12-10 13:45:32", // Invitation creation time
"event_time": "2025-12-10 14:00:00", // When DR event occurs
"load_kwh": 5.2, // Target reduction in kWh
"load_percentage": 15.0, // Reduction as % of total load
"iots": ["AC1", "AC2", "Lighting"], // Participating devices
"response": "WAITING|YES|NO" // Participant decision
}
```
#### MongoDB Collection: `config`
```json
{
"config": "config",
"auto_answer": true // Auto-accept DR invitations
}
```
#### MongoDB Collection: `benefit`
```json
{
"source": "dr", // "dr" or "p2p"
"product": "AC1", // Device name
"value": 5.50, // Financial benefit (€)
"datetime": "2025-12-10 14:00:00"
}
```
#### MongoDB Collection: `instructions`
```json
{
"AC1": {
"0": "participation", // Hour 0: full DR participation
"1": "shifting", // Hour 1: 0-20% participation
"2": "off", // Hour 2: no DR participation
"3": "participation",
// ... hours 4-23
},
"AC2": { /* ... */ }
}
```
---
## Key Components
### 1. DemandResponseService
**Location:** `services/DemandResponseService.py`
**Responsibilities:**
- Manages DR invitation lifecycle
- Handles participant responses
- Configures auto-accept behavior
- Queries invitation status
**Key Methods:**
```python
def invitation(event_time, load_kwh, load_percentage, iots):
"""Create new DR invitation"""
# Checks auto_answer config
# Sets response to YES if auto-enabled, else WAITING
# Stores in MongoDB via repository
def answer_invitation(event_time, iot, response):
"""Record YES/NO response for specific device"""
# Updates invitation response field
# Used for manual acceptance workflow
def get_unanswered_invitations():
"""Get all pending invitations awaiting response"""
# Returns invitations with response="WAITING"
def get_answered_invitations():
"""Get last 5 completed invitations"""
# Returns historical invitations (YES/NO)
def get_auto_answer_config():
"""Check if auto-accept is enabled"""
# Returns boolean from config collection
def set_auto_answer_config(auto_answer):
"""Enable/disable auto-accept"""
# Updates MongoDB config collection
```
**Auto-Accept Logic:**
```python
# Line 35-38 in DemandResponseService.py
if self.get_auto_answer_config():
response = "YES" # Auto-accept enabled
else:
response = "WAITING" # Require manual approval
```
---
### 2. DemandResponseRepository
**Location:** `database/DemandResponseRepository.py`
**Responsibilities:**
- Direct MongoDB operations
- Query optimization and filtering
- Data persistence
**Key Methods:**
```python
def insert_invitation(datetime, event_time, load_kwh, load_percentage, iots, response):
"""Store new DR invitation in MongoDB"""
def answer_invitation(event_time, iot, response):
"""Update invitation response status"""
# Updates document where event_time matches and iot in iots array
def get_unanswered_invitations():
"""Query: {response: "WAITING"}"""
def get_answered_invitations():
"""Query: {response: {$ne: "WAITING"}}, limit 5, sort by datetime desc"""
def get_accepted_upcoming_invitations():
"""Query: {response: "YES", event_time: {$gte: now}}"""
def get_invitation(event_time):
"""Find specific invitation by event time"""
```
---
### 3. DemandResponseAtuator
**Location:** `core/DemandResponseAtuator.py`
**Responsibilities:**
- Executes DR event for a single device
- Runs as separate thread during event
- Accumulates power reduction in real-time
- Auto-terminates after 1 hour
**Architecture:**
```python
class DemandResponseAtuator(Thread):
def __init__(self, core, iot):
self.core = core # Reference to Core instance
self.iot = iot # IoT device participating in DR
self.event_on = True # Event active flag
def run(self):
# Schedule event end at 59 minutes from now
end_time = (datetime.now() + timedelta(minutes=59))
end_time_formatted = end_time.strftime('%H:%M:%S')
schedule.every().day.at(end_time_formatted).do(self.end_event)
# Main loop: accumulate power reduction every second
while self.event_on:
# Add device's current power to reduction accumulator
self.core.dr_reduced_power += self.iot.get_power()
schedule.run_pending()
time.sleep(1)
def end_event(self):
"""Called automatically at event end"""
self.event_on = False
return schedule.CancelJob
```
**Key Characteristics:**
- **Threading Model:** One thread per device per event
- **Update Frequency:** Every 1 second
- **Duration:** Exactly 59 minutes (scheduled termination)
- **Power Tracking:** Cumulative reduction added to `core.dr_reduced_power`
---
### 4. Core (Main Orchestrator)
**Location:** `core/Core.py`
**DR-Related Attributes:**
```python
class Core(Thread):
def __init__(self):
self.dr_reduced_power = 0.0 # Accumulator for power reduction
self.iots_consumption = [] # List of controllable devices
self.iots = [] # All IoT devices
```
**Key DR Methods:**
```python
def schedule_event(self, event_time, iot_name):
"""Initiate DR event for specified device"""
# Find device by name
iot = [i for i in self.iots if i.name == iot_name][0]
# Create and start DemandResponseAtuator thread
dr = DemandResponseAtuator(self, iot)
dr.start()
def get_total_consumption(self):
"""Returns consumption MINUS DR reductions"""
# Sum all device power
totalPower = sum(iot.get_power() for iot in self.iots_consumption)
# Subtract DR reduction
reduce = self.dr_reduced_power
self.dr_reduced_power = 0 # Reset accumulator
return totalPower - reduce
def get_total_flexibility(self):
"""Calculate available flexibility for DR"""
# Sum power of devices with demandresponse=true
return sum(iot.get_power() for iot in self.iots_consumption
if iot.demandresponse)
```
**How Power Reduction Works:**
1. During DR event, `DemandResponseAtuator` continuously adds to `dr_reduced_power`
2. When `get_total_consumption()` is called, reduction is subtracted from total
3. `dr_reduced_power` is reset to 0 after each reading
4. This creates an effective "virtual" power reduction in the reported consumption
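To make the accumulate-and-reset cycle concrete, here is a minimal sketch (class name and power values are illustrative; in the real system the accumulator is written by `DemandResponseAtuator` threads and device power comes from `iot.get_power()`):
```python
# Minimal sketch of the accumulate-and-reset pattern described above (names are illustrative).
class CoreSketch:
    def __init__(self):
        self.dr_reduced_power = 0.0               # accumulator written by DR event threads
        self.device_powers = [20.0, 18.0, 12.0]   # assumed instantaneous device loads (kW)

    def accumulate_dr_reduction(self, reduced_kw: float) -> None:
        """Stands in for a DR actuator thread adding a device's power every second."""
        self.dr_reduced_power += reduced_kw

    def get_total_consumption(self) -> float:
        """Report total load minus whatever reduction accumulated since the last reading."""
        total = sum(self.device_powers)
        reduction = self.dr_reduced_power
        self.dr_reduced_power = 0.0               # reset after each reading
        return total - reduction

core = CoreSketch()
core.accumulate_dr_reduction(8.0)                 # e.g. AC1 + AC2 reduction during an event
print(core.get_total_consumption())               # 50.0 - 8.0 = 42.0
print(core.get_total_consumption())               # 50.0 - no new reduction since the reset
```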
---
### 5. IoT Model
**Location:** `model/IoT.py`
**DR-Related Attributes:**
```python
class IoT:
def __init__(self, config):
self.name = config['name']
self.demandresponse = config['control'].get('demandresponse', False)
self.instructions = {} # Hourly DR instructions
```
**Configuration Example (config/f.json):**
```json
{
"resources": {
"iots": [
{
"name": "AC1",
"type": "hvac",
"uri": "http://192.168.2.91:30000/api/realtime/H_01/AC1",
"control": {
"demandresponse": true // Device can participate in DR
}
}
]
}
}
```
**DR-Capable Devices:**
- AC1, AC2, AC3, AC4 (HVAC systems)
- Water Heater
- Lighting
- Refrigerator
**Instruction Types:**
- `"participation"` - Full DR participation (100% reduction if needed)
- `"shifting"` - Partial participation (0-20% reduction)
- `"off"` - No DR participation for that hour
---
## Invitation Lifecycle
### 1. Create Invitation
**Endpoint:** `POST /invitation/send`
**Request:**
```json
{
"event_time": "2025-12-10 14:00:00",
"kwh": 5.2,
"percentage": 15,
"iots": ["AC1", "AC2", "Lighting"]
}
```
**Response:**
```json
{
"event_time": "2025-12-10 14:00:00"
}
```
**Logic Flow:**
1. Validates event_time format
2. Checks auto_answer configuration
3. Sets response = "YES" if auto-enabled, else "WAITING"
4. Stores invitation in MongoDB
5. Returns event_time as confirmation
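For illustration, the same request could be issued from Python roughly as follows (a sketch using `requests`; the base URL is an assumption and authentication is omitted):
```python
import requests

BASE_URL = "http://localhost:8000"  # assumed API host

payload = {
    "event_time": "2025-12-10 14:00:00",
    "kwh": 5.2,
    "percentage": 15,
    "iots": ["AC1", "AC2", "Lighting"],
}
resp = requests.post(f"{BASE_URL}/invitation/send", json=payload, timeout=5)
resp.raise_for_status()
print(resp.json())  # expected: {"event_time": "2025-12-10 14:00:00"}
```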
---
### 2. Check Invitation Status
**Endpoint:** `POST /invitation/get`
**Request:**
```json
{
"event_time": "2025-12-10 14:00:00"
}
```
**Response:**
```json
{
"datetime": "2025-12-10 13:45:32",
"event_time": "2025-12-10 14:00:00",
"load_kwh": 5.2,
"load_percentage": 15,
"iots": ["AC1", "AC2", "Lighting"],
"response": "WAITING"
}
```
---
### 3. Get Pending Invitations
**Endpoint:** `GET /invitation/unanswered`
**Response:**
```json
[
{
"datetime": "2025-12-10 13:45:32",
"event_time": "2025-12-10 14:00:00",
"load_kwh": 5.2,
"load_percentage": 15,
"iots": ["AC1", "AC2"],
"response": "WAITING"
},
{
"datetime": "2025-12-10 14:20:15",
"event_time": "2025-12-10 16:00:00",
"load_kwh": 3.8,
"load_percentage": 10,
"iots": ["Water Heater"],
"response": "WAITING"
}
]
```
**Use Case:** Display pending DR invitations requiring participant decision
---
### 4. Answer Invitation
**Endpoint:** `POST /invitation/answer`
**Request:**
```json
{
"event_time": "2025-12-10 14:00:00",
"iot": "AC1",
"response": "YES"
}
```
**Response:**
```json
{
"message": "answered"
}
```
**Logic:**
- Updates invitation document in MongoDB
- Sets response field to "YES" or "NO"
- Filters by event_time and iot in iots array
- Enables manual approval workflow
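The underlying MongoDB update could look roughly like the sketch below (pymongo; the database and collection names follow the configuration reference later in this document, everything else is illustrative):
```python
from pymongo import MongoClient

# Sketch of the repository update: match the invitation by event_time and by
# membership of the device in the iots array, then set its response.
collection = MongoClient()["H01"]["demand_response_invitations"]
collection.update_one(
    {"event_time": "2025-12-10 14:00:00", "iots": "AC1"},  # "iots": "AC1" matches the array element
    {"$set": {"response": "YES"}},
)
```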
---
### 5. Execute DR Event
**Endpoint:** `POST /event/check`
**Request:**
```json
{
"event_time": "2025-12-10 14:00:00",
"iot": "AC1"
}
```
**Logic Flow:**
```python
1. Receives event_time and iot name
2. Calls core.schedule_event(event_time, iot)
3. Core finds IoT device by name
4. Creates new DemandResponseAtuator(core, iot)
5. Starts the thread, which begins power reduction tracking
6. Thread runs for 59 minutes, accumulating reduction every second
7. Auto-terminates at scheduled end time
```
---
### 6. Configure Auto-Accept
**Get Config:** `GET /invitation/auto`
**Response:**
```json
{
"auto_answer": true
}
```
**Set Config:** `POST /invitation/auto`
**Request:**
```json
{
"auto_answer": true
}
```
**Response:**
```json
{
"auto_answer": true
}
```
**Effect:**
- When enabled: new invitations are automatically set to response="YES"
- When disabled: new invitations are set to response="WAITING" and require manual approval
---
## Integration Points
### 1. Energy Management
**ForecastService** (`services/ForecastService.py`)
- Calculates `forecast_flexibility()` based on historical data
- Predicts available DR capacity for future periods
- Uses flexibility data stored with hourly consumption/generation
**Core.get_total_flexibility()**
- Returns sum of power from DR-capable devices
- Indicates current available flexibility
- Accessible via `GET /energy/flexibility`
```python
def get_total_flexibility(self):
return sum(iot.get_power() for iot in self.iots_consumption
if iot.demandresponse)
```
---
### 2. Building Management
**StoringManager** (`model/StoringManager.py`)
- Stores hourly aggregates including flexibility
- MongoDB collection: `TOTALPOWERHOUR`
- Fields: `{datetime, consumption, generation, flexibility}`
**BuildingRepository** (`database/BuildingRepository.py`)
- `insert_hour()` stores flexibility alongside consumption/generation
- Flexibility calculated as: `power * random(0-20%)`
- Provides historical baseline for forecasting
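A minimal sketch of that estimate, assuming the `power * random(0-20%)` formula quoted above (function and field names are illustrative):
```python
import random

def estimate_flexibility(avg_power_kw: float) -> float:
    """Assumed flexibility estimate: a random 0-20% share of the hour's average power."""
    return avg_power_kw * random.uniform(0.0, 0.2)

hour_doc = {
    "datetime": "2025-12-10 14:00:00",
    "consumption": 42.0,
    "generation": 15.0,
    "flexibility": round(estimate_flexibility(42.0), 1),
}
```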
---
### 3. Financial Tracking
**EnergyService** (`services/EnergyService.py`)
```python
def add_benefit(source, product, value):
"""Record financial benefit from DR or P2P"""
# source: "dr" or "p2p"
# product: device name
# value: financial reward amount
```
**Record DR Benefit:** `POST /dr/benefit`
**Request:**
```json
{
"iot": "AC1",
"value": 5.50
}
```
**Storage:**
```json
{
"source": "dr",
"product": "AC1",
"value": 5.50,
"datetime": "2025-12-10 14:00:00"
}
```
**Monthly Benefits:** `GET /benefits/monthly`
**Response:**
```json
{
"dr": 150.00,
"p2p": 50.00
}
```
---
### 4. IoT Device Control
**IotService** (`services/IotService.py`)
```python
def change_dr_enable(iot, enable):
"""Enable or disable DR capability for device"""
iot.demandresponse = enable
def update_instructions(instructions):
"""Set hourly DR participation instructions"""
# Format: {iot_name: {hour: "participation|shifting|off"}}
def get_instructions():
"""Retrieve current DR instructions"""
return {iot.name: iot.instructions for iot in iots}
```
**Update Instructions:** `POST /iot/instructions`
**Request:**
```json
{
"AC1": {
"0": "participation",
"1": "shifting",
"2": "off",
"3": "participation"
// ... hours 4-23
}
}
```
**Forecasted Flexibility by Hour:** `POST /iots/forecast/flexibility`
**Request:**
```json
{
"hour": 14
}
```
**Response:**
```json
{
"shifting": [["AC1", 50], ["AC2", 75]], // 0-20% participation
"reducing": [["Water Heater", 100]] // Full participation
}
```
---
## API Reference
### Demand Response Endpoints
| Method | Endpoint | Description | Request Body | Response |
|--------|----------|-------------|--------------|----------|
| POST | `/invitation/send` | Create DR invitation | `{event_time, kwh, percentage, iots}` | `{event_time}` |
| POST | `/invitation/get` | Get specific invitation | `{event_time}` | Invitation object |
| GET | `/invitation/unanswered` | Get pending invitations | None | Array of invitations |
| GET | `/invitation/answered` | Get last 5 completed | None | Array of invitations |
| POST | `/invitation/answer` | Submit response | `{event_time, iot, response}` | `{message: "answered"}` |
| GET | `/invitation/auto` | Get auto-accept config | None | `{auto_answer: boolean}` |
| POST | `/invitation/auto` | Set auto-accept config | `{auto_answer: boolean}` | `{auto_answer: boolean}` |
| POST | `/event/check` | Execute DR event | `{event_time, iot}` | Success status |
| POST | `/dr/benefit` | Record DR benefit | `{iot, value}` | `{message: "ok"}` |
### Related Flexibility/Energy Endpoints
| Method | Endpoint | Description |
|--------|----------|-------------|
| GET | `/energy/now` | Current consumption, generation, flexibility |
| GET | `/energy/flexibility` | Available flexibility for DR |
| GET | `/forecast/flexibility` | Forecasted flexibility |
| POST | `/iots/forecast/flexibility` | Flexibility by hour and device |
| POST | `/iot/demandresponse/enable` | Enable/disable device DR |
| POST | `/iot/instructions` | Update DR instructions |
| GET | `/iot/instructions` | Get current instructions |
| GET | `/benefits/monthly` | Monthly DR benefits |
---
## Complete Event Flow Example
### Scenario: 10 kWh Reduction Event at 2:00 PM
```
┌─────────────────────────────────────────────────────────────┐
│ STEP 1: Create Invitation (1:45 PM) │
└─────────────────────────────────────────────────────────────┘
POST /invitation/send
{
"event_time": "2025-12-10 14:00:00",
"kwh": 10,
"percentage": 20,
"iots": ["AC1", "AC2", "Water Heater"]
}
Flow:
├─ DemandResponseService.invitation()
├─ Checks auto_answer config → disabled (false)
├─ Sets response = "WAITING"
├─ DemandResponseRepository.insert_invitation()
└─ MongoDB: Creates invitation document
Result: Invitation stored, awaiting participant approval
┌─────────────────────────────────────────────────────────────┐
│ STEP 2: Check Pending Invitations (1:50 PM) │
└─────────────────────────────────────────────────────────────┘
GET /invitation/unanswered
Response:
[
{
"datetime": "2025-12-10 13:45:32",
"event_time": "2025-12-10 14:00:00",
"load_kwh": 10,
"load_percentage": 20,
"iots": ["AC1", "AC2", "Water Heater"],
"response": "WAITING"
}
]
┌─────────────────────────────────────────────────────────────┐
│ STEP 3: Answer Invitation for Each Device (1:55 PM) │
└─────────────────────────────────────────────────────────────┘
POST /invitation/answer
{"event_time": "2025-12-10 14:00:00", "iot": "AC1", "response": "YES"}
POST /invitation/answer
{"event_time": "2025-12-10 14:00:00", "iot": "AC2", "response": "YES"}
POST /invitation/answer
{"event_time": "2025-12-10 14:00:00", "iot": "Water Heater", "response": "NO"}
Flow per request:
├─ DemandResponseService.answer_invitation()
├─ DemandResponseRepository.answer_invitation()
└─ MongoDB: Updates invitation.response for specified iot
Result: AC1 and AC2 accepted, Water Heater declined
┌─────────────────────────────────────────────────────────────┐
│ STEP 4: Execute DR Event (2:00 PM - Event Start) │
└─────────────────────────────────────────────────────────────┘
POST /event/check
{"event_time": "2025-12-10 14:00:00", "iot": "AC1"}
POST /event/check
{"event_time": "2025-12-10 14:00:00", "iot": "AC2"}
Flow per request:
├─ Core.schedule_event("2025-12-10 14:00:00", "AC1")
├─ Finds IoT device: iot = [i for i in core.iots if i.name == "AC1"][0]
├─ Creates DemandResponseAtuator(core, iot)
└─ Starts thread
DemandResponseAtuator.run():
├─ Schedules end_event() at 14:59:00
└─ While loop (every 1 second for 59 minutes):
└─ core.dr_reduced_power += iot.get_power()
Result: Two threads running, accumulating power reduction
┌─────────────────────────────────────────────────────────────┐
│ STEP 5: Monitor Energy (2:30 PM - During Event) │
└─────────────────────────────────────────────────────────────┘
GET /energy/now
Flow:
├─ Core.get_total_consumption()
├─ totalPower = sum(iot.get_power() for iot in iots_consumption)
├─ totalPower = 50 kW (all devices)
├─ reduce = core.dr_reduced_power = 8 kW (accumulated from AC1+AC2)
├─ core.dr_reduced_power = 0 # Reset
└─ return 50 - 8 = 42 kW
Response:
{
"consumption": 42.0, // Reduced by DR
"generation": 15.0,
"flexibility": 18.0
}
Result: Consumption appears 8 kW lower due to DR reduction
┌─────────────────────────────────────────────────────────────┐
│ STEP 6: Automatic Event End (2:59 PM) │
└─────────────────────────────────────────────────────────────┘
Scheduled Task Triggered:
├─ DemandResponseAtuator.end_event() called
├─ self.event_on = False
├─ Thread exits while loop
└─ Thread terminates
Result: Both AC1 and AC2 threads stopped, DR event complete
┌─────────────────────────────────────────────────────────────┐
│ STEP 7: Record Financial Benefit (3:00 PM) │
└─────────────────────────────────────────────────────────────┘
POST /dr/benefit
{"iot": "AC1", "value": 5.50}
POST /dr/benefit
{"iot": "AC2", "value": 4.75}
Flow per request:
├─ EnergyService.add_benefit("dr", iot, value)
├─ FinancialRepository.insert_benefit()
└─ MongoDB.benefit: {source: "dr", product: iot, value: value, datetime: now}
Result: Total DR benefit = €10.25
┌─────────────────────────────────────────────────────────────┐
│ STEP 8: Hourly Storage (3:00 PM - End of Hour) │
└─────────────────────────────────────────────────────────────┘
StoringManager.save_hour() (automatic):
├─ BuildingService.save_last_hour()
├─ Calculates flexibility = power * random(0-20%)
├─ BuildingRepository.insert_hour()
└─ MongoDB.TOTALPOWERHOUR: {
datetime: "2025-12-10 14:00:00",
consumption: 42.0, // Average during hour (with DR reduction)
generation: 15.0,
flexibility: 7.8
}
Result: Hour data stored with DR-reduced consumption
┌─────────────────────────────────────────────────────────────┐
│ STEP 9: View Monthly Benefits (End of Month) │
└─────────────────────────────────────────────────────────────┘
GET /benefits/monthly
Response:
{
"dr": 185.50, // Total DR benefits for month
"p2p": 62.30 // Total P2P benefits for month
}
Result: Financial tracking shows €185.50 earned from DR participation
```
---
## Key Metrics & Statistics
| Metric | Value | Source |
|--------|-------|--------|
| **Update Frequency** | 1 second | DemandResponseAtuator.run() |
| **Event Duration** | 59 minutes | Scheduled termination |
| **Storage Frequency** | Every hour | StoringManager |
| **DR-Capable Devices** | 8 devices | config/f.json |
| **Threading Model** | 1 thread per device per event | Core.schedule_event() |
| **Database** | MongoDB (H01, BuildingRightSide) | Multiple collections |
| **API Framework** | Flask with CORS | api/main.py |
| **Flexibility Calculation** | 0-20% of device power | Based on instructions |
---
## Configuration Reference
### Device Configuration (config/f.json)
```json
{
"app": {
"dr_events_auto_accept": 1, // 1=enabled, 0=disabled
"monitoring": 0 // Debug logging
},
"storage": {
"local": {
"demand_response": ["H01", "demand_response_invitations"],
"config": ["H01", "config"],
"benefit": ["BuildingRightSide", "benefit"],
"instructions": ["H01", "instructions"]
}
},
"resources": {
"iots": [
{
"name": "AC1",
"type": "hvac",
"uri": "http://192.168.2.91:30000/api/realtime/H_01/AC1",
"control": {
"demandresponse": true
}
}
// ... more devices
]
}
}
```
### MongoDB Database Structure
```
Database: H01
├─ demand_response_invitations (DR events)
├─ config (auto_answer setting)
├─ instructions (hourly participation rules)
└─ TOTALPOWERHOUR (hourly aggregates)
Database: BuildingRightSide
└─ benefit (financial tracking)
```
---
## Summary
The Demand Response system is a comprehensive, multi-threaded solution that enables building participation in grid flexibility programs. It features:
- **Automatic or Manual Approval:** Configurable auto-accept or manual review workflow
- **Real-Time Power Tracking:** Per-device threads accumulate power reduction every second
- **Financial Benefit Tracking:** Source-based tracking (DR vs P2P) with monthly aggregation
- **Flexibility Forecasting:** Historical data and hourly instructions for predictive planning
- **Device-Level Control:** Per-device, per-hour participation configuration
- **MongoDB Persistence:** Scalable data storage with optimized queries
- **REST API:** Complete API for external integration and control
- **Thread Safety:** Separate threads per device prevent interference
**Critical Files:**
- **services/DemandResponseService.py:35-38** - Auto-accept logic
- **core/DemandResponseAtuator.py:run()** - Power reduction accumulation
- **core/Core.py:get_total_consumption()** - DR-reduced consumption calculation
- **api/main.py:230-329** - All DR endpoints
This architecture enables scalable, reliable demand response management with precise power tracking and financial incentive tracking.


@@ -44,12 +44,6 @@ The system consists of 6 independent microservices coordinated by an API Gateway
 ## 🚀 Quick Start
-### Prerequisites
-- Docker 20.0+
-- Docker Compose 2.0+
-- 8GB RAM minimum
-- 10GB free disk space
 ### 1. Deploy the Complete System
 ```bash
 cd microservices/


@@ -10,47 +10,42 @@ import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class AuthMiddleware: class AuthMiddleware:
"""Authentication middleware for validating tokens"""
def __init__(self, token_service_url: str = "http://localhost:8001"): def __init__(self, token_service_url: str = "http://localhost:8001"):
self.token_service_url = token_service_url self.token_service_url = token_service_url
logger.info(f"Initialized AuthMiddleware with token service URL: {self.token_service_url}")
async def verify_token(self, request: Request) -> Optional[Dict[str, Any]]: async def verify_token(self, request: Request) -> Optional[Dict[str, Any]]:
""" """
Verify authentication token from request headers Verify authentication token from request headers
Returns token payload if valid, raises HTTPException if invalid Returns token payload if valid, raises HTTPException if invalid
""" """
# Extract token from Authorization header
auth_header = request.headers.get("Authorization") auth_header = request.headers.get("Authorization")
if not auth_header: if not auth_header:
raise HTTPException(status_code=401, detail="Authorization header required") raise HTTPException(status_code=401, detail="Authorization header required")
if not auth_header.startswith("Bearer "): if not auth_header.startswith("Bearer "):
raise HTTPException(status_code=401, detail="Bearer token required") raise HTTPException(status_code=401, detail="Bearer token required")
token = auth_header[7:] # Remove "Bearer " prefix token = auth_header.split(" ")[1].strip() # Remove "Bearer " prefix
try: try:
# Validate token with token service
async with aiohttp.ClientSession() as session: async with aiohttp.ClientSession() as session:
async with session.post( async with session.post(
f"{self.token_service_url}/tokens/validate", f"{self.token_service_url}/tokens/validate",
json={"token": token}, json={'token': token},
timeout=aiohttp.ClientTimeout(total=5) timeout=aiohttp.ClientTimeout(total=5)
) as response: ) as response:
if response.status != 200: if response.status != 200:
raise HTTPException(status_code=401, detail="Token validation failed") raise HTTPException(status_code=401, detail="Token validation failed")
token_data = await response.json() token_data = await response.json()
if not token_data.get("valid"): if not token_data.get("valid"):
error_msg = token_data.get("error", "Invalid token") error_msg = token_data.get("error", "Invalid token")
raise HTTPException(status_code=401, detail=error_msg) raise HTTPException(status_code=401, detail=error_msg)
# Token is valid, return decoded payload
return token_data.get("decoded") return token_data.get("decoded")
except aiohttp.ClientError as e: except aiohttp.ClientError as e:
logger.error(f"Token service connection error: {e}") logger.error(f"Token service connection error: {e}")
raise HTTPException(status_code=503, detail="Authentication service unavailable") raise HTTPException(status_code=503, detail="Authentication service unavailable")
@@ -59,24 +54,22 @@ class AuthMiddleware:
except Exception as e:
logger.error(f"Token verification error: {e}")
raise HTTPException(status_code=500, detail="Authentication error")
async def check_permissions(self, token_payload: Dict[str, Any], required_resources: list) -> bool:
"""
Check if token has required permissions for specific resources
"""
if not token_payload:
return False
# Get list of resources the token has access to
token_resources = token_payload.get("list_of_resources", [])
# Check if token has access to all required resources
for resource in required_resources:
if resource not in token_resources:
return False
return True
def extract_user_info(self, token_payload: Dict[str, Any]) -> Dict[str, Any]:
"""Extract user information from token payload"""
return {
@@ -86,4 +79,4 @@ class AuthMiddleware:
"time_aggregation": token_payload.get("time_aggregation", False),
"embargo": token_payload.get("embargo", 0),
"expires_at": token_payload.get("exp")
}
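For reference, a minimal sketch of how a gateway route could wire this middleware into a FastAPI dependency; the route path, resource name, and token service URL below are illustrative and not part of the change set.

from fastapi import Depends, FastAPI, HTTPException, Request

app = FastAPI()
auth = AuthMiddleware("http://token-service:8001")  # assumes the AuthMiddleware class shown above

async def require_sensor_access(request: Request) -> dict:
    # verify_token raises 401/503 itself on failure
    payload = await auth.verify_token(request)
    if not await auth.check_permissions(payload, ["sensors"]):  # hypothetical resource name
        raise HTTPException(status_code=403, detail="Missing resource permission")
    return auth.extract_user_info(payload)

@app.get("/protected/sensors")  # hypothetical route
async def protected_sensors(user: dict = Depends(require_sensor_access)):
    return {"user": user}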

View File

@@ -10,11 +10,12 @@ logger = logging.getLogger(__name__)
class LoadBalancer:
"""Simple load balancer for microservice requests"""
def __init__(self, service_registry=None):
# In a real implementation, this would track multiple instances per service
self.service_instances: Dict[str, List[str]] = {}
self.current_index: Dict[str, int] = {}
self.service_registry = service_registry
def register_service_instance(self, service_name: str, instance_url: str):
"""Register a new service instance"""
@@ -54,9 +55,11 @@ class LoadBalancer:
if strategy == "single":
# Default behavior - get the service URL from service registry
if self.service_registry:
return await self.service_registry.get_service_url(service_name)
else:
logger.error("No service registry available")
return None
elif strategy == "round_robin":
return await self._round_robin_select(service_name)
@@ -73,9 +76,11 @@ class LoadBalancer:
instances = self.service_instances.get(service_name, [])
if not instances:
# Fall back to service registry
if self.service_registry:
return await self.service_registry.get_service_url(service_name)
else:
logger.error("No service registry available for fallback")
return None
# Round-robin selection
current_idx = self.current_index[service_name]
@@ -92,9 +97,11 @@ class LoadBalancer:
instances = self.service_instances.get(service_name, [])
if not instances:
# Fall back to service registry
if self.service_registry:
return await self.service_registry.get_service_url(service_name)
else:
logger.error("No service registry available for fallback")
return None
selected_instance = random.choice(instances)
logger.debug(f"Random selected {selected_instance} for {service_name}")

View File

@@ -27,16 +27,12 @@ logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(app: FastAPI):
"""Application lifespan manager"""
logger.info("API Gateway starting up...")
# Initialize service registry
await service_registry.initialize()
# Register all services
await service_registry.register_services(SERVICES)
# Start health check task
asyncio.create_task(health_check_task())
logger.info("API Gateway startup complete")
@@ -62,49 +58,14 @@ app.add_middleware(
allow_headers=["*"],
)
# Service registry and load balancer
service_registry = ServiceRegistry()
load_balancer = LoadBalancer()
auth_middleware = AuthMiddleware()
# Service configuration
SERVICES = {
"token-service": ServiceConfig(
name="token-service",
base_url=os.getenv("TOKEN_SERVICE_URL", "http://token-service:8001"),
health_endpoint="/health",
auth_required=False
),
# "battery-service": ServiceConfig(
# name="battery-service",
# base_url=os.getenv("BATTERY_SERVICE_URL", "http://battery-service:8002"),
# health_endpoint="/health",
# auth_required=True
# ),
# "demand-response-service": ServiceConfig(
# name="demand-response-service",
# base_url=os.getenv("DEMAND_RESPONSE_SERVICE_URL", "http://demand-response-service:8003"),
# health_endpoint="/health",
# auth_required=True
# ),
# "p2p-trading-service": ServiceConfig(
# name="p2p-trading-service",
# base_url=os.getenv("P2P_TRADING_SERVICE_URL", "http://p2p-trading-service:8004"),
# health_endpoint="/health",
# auth_required=True
# ),
# "forecasting-service": ServiceConfig(
# name="forecasting-service",
# base_url=os.getenv("FORECASTING_SERVICE_URL", "http://forecasting-service:8005"),
# health_endpoint="/health",
# auth_required=True
# ),
# "iot-control-service": ServiceConfig(
# name="iot-control-service",
# base_url=os.getenv("IOT_CONTROL_SERVICE_URL", "http://iot-control-service:8006"),
# health_endpoint="/health",
# auth_required=True
# ),
"sensor-service": ServiceConfig( "sensor-service": ServiceConfig(
name="sensor-service", name="sensor-service",
base_url=os.getenv("SENSOR_SERVICE_URL", "http://sensor-service:8007"), base_url=os.getenv("SENSOR_SERVICE_URL", "http://sensor-service:8007"),
@@ -116,23 +77,32 @@ SERVICES = {
base_url=os.getenv("DATA_INGESTION_SERVICE_URL", "http://data-ingestion-service:8008"), base_url=os.getenv("DATA_INGESTION_SERVICE_URL", "http://data-ingestion-service:8008"),
health_endpoint="/health", health_endpoint="/health",
auth_required=False auth_required=False
),
"demand-response-service": ServiceConfig(
name="demand-response-service",
base_url=os.getenv("DEMAND_RESPONSE_SERVICE_URL", "http://demand-response-service:8003"),
health_endpoint="/health",
auth_required=True
) )
} }
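The entries above imply a ServiceConfig model roughly like the sketch below; the base class and defaults are assumptions, not copied from models.py.

from pydantic import BaseModel

class ServiceConfig(BaseModel):  # assumed shape, see models.py for the actual definition
    name: str
    base_url: str
    health_endpoint: str = "/health"
    auth_required: bool = True

cfg = ServiceConfig(name="sensor-service", base_url="http://sensor-service:8007", auth_required=False)
print(cfg)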
# Service registry and load balancer
service_registry = ServiceRegistry()
load_balancer = LoadBalancer(service_registry)
auth_middleware = AuthMiddleware(os.getenv("TOKEN_SERVICE_URL", "http://token-service:8001"))
request_stats = {
"total_requests": 0,
"successful_requests": 0,
"failed_requests": 0,
"service_requests": {service: 0 for service in SERVICES.keys()},
"start_time": datetime.now()
}
@app.get("/health", response_model=HealthResponse) @app.get("/health", response_model=HealthResponse)
async def gateway_health_check(): async def gateway_health_check():
"""Gateway health check endpoint""" """Gateway health check endpoint"""
try: try:
# Check all services
service_health = await service_registry.get_all_service_health() service_health = await service_registry.get_all_service_health()
healthy_services = sum(1 for status in service_health.values() if status.get("status") == "healthy") healthy_services = sum(1 for status in service_health.values() if status.get("status") == "healthy")
@@ -187,7 +157,7 @@ async def get_gateway_stats():
@app.api_route("/api/v1/tokens/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) @app.api_route("/api/v1/tokens/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def token_service_proxy(request: Request, path: str): async def token_service_proxy(request: Request, path: str):
"""Proxy requests to token service""" """Proxy requests to token service"""
return await proxy_request(request, "token-service", f"/{path}") return await proxy_request(request, "token-service", f"/tokens/{path}")
# Battery Service Routes # Battery Service Routes
@app.api_route("/api/v1/batteries/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) @app.api_route("/api/v1/batteries/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
@@ -223,7 +193,7 @@ async def iot_control_service_proxy(request: Request, path: str):
@app.api_route("/api/v1/sensors/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) @app.api_route("/api/v1/sensors/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def sensor_service_proxy(request: Request, path: str): async def sensor_service_proxy(request: Request, path: str):
"""Proxy requests to sensor service""" """Proxy requests to sensor service"""
return await proxy_request(request, "sensor-service", f"/{path}") return await proxy_request(request, "sensor-service", f"/sensors/{path}")
# Data Ingestion Service Routes (SA4CPS FTP Monitoring)
@app.api_route("/api/v1/ingestion/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
@@ -241,6 +211,16 @@ async def data_sources_list_proxy(request: Request):
"""Proxy requests to data ingestion service for sources list"""
return await proxy_request(request, "data-ingestion-service", "/sources")
@app.get("/api/v1/rooms/names")
async def room_names_proxy(request: Request):
"""Proxy requests to sensor service for room names list"""
return await proxy_request(request, "sensor-service", "/rooms/names")
@app.get("/api/v1/rooms")
async def rooms_list_proxy(request: Request):
"""Proxy requests to sensor service for rooms list"""
return await proxy_request(request, "sensor-service", "/rooms")
@app.api_route("/api/v1/rooms/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) @app.api_route("/api/v1/rooms/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def room_service_proxy(request: Request, path: str): async def room_service_proxy(request: Request, path: str):
"""Proxy requests to sensor service for room management""" """Proxy requests to sensor service for room management"""
@@ -302,6 +282,7 @@ async def websocket_proxy(websocket: WebSocket):
async def proxy_request(request: Request, service_name: str, path: str): async def proxy_request(request: Request, service_name: str, path: str):
"""Generic request proxy function""" """Generic request proxy function"""
try: try:
logger.info(f"Proxying request to {service_name} at {path}")
# Update request statistics # Update request statistics
request_stats["total_requests"] += 1 request_stats["total_requests"] += 1
request_stats["service_requests"][service_name] += 1 request_stats["service_requests"][service_name] += 1
@@ -313,7 +294,7 @@ async def proxy_request(request: Request, service_name: str, path: str):
# Check authentication if required # Check authentication if required
if service_config.auth_required: if service_config.auth_required:
await auth_middleware.verify_token(request) decoded = await auth_middleware.verify_token(request)
# Get healthy service instance # Get healthy service instance
service_url = await load_balancer.get_service_url(service_name) service_url = await load_balancer.get_service_url(service_name)

View File

@@ -1,7 +1,3 @@
"""
Service registry for managing microservice discovery and health monitoring
"""
import aiohttp
import asyncio
from datetime import datetime
@@ -13,30 +9,26 @@ from models import ServiceConfig, ServiceHealth
logger = logging.getLogger(__name__)
class ServiceRegistry:
"""Service registry for microservice management"""
def __init__(self):
self.services: Dict[str, ServiceConfig] = {}
self.service_health: Dict[str, ServiceHealth] = {}
self.session: Optional[aiohttp.ClientSession] = None
async def initialize(self):
"""Initialize the service registry"""
self.session = aiohttp.ClientSession(
timeout=aiohttp.ClientTimeout(total=10)
)
logger.info("Service registry initialized")
async def close(self):
"""Close the service registry"""
if self.session:
await self.session.close()
logger.info("Service registry closed")
async def register_services(self, services: Dict[str, ServiceConfig]):
"""Register multiple services"""
self.services.update(services)
# Initialize health status for all services
for service_name, config in services.items():
self.service_health[service_name] = ServiceHealth(
@@ -44,34 +36,30 @@ class ServiceRegistry:
status="unknown",
last_check=datetime.utcnow()
)
logger.info(f"Registered {len(services)} services")
# Perform initial health check
await self.update_all_service_health()
async def register_service(self, service_config: ServiceConfig):
"""Register a single service"""
self.services[service_config.name] = service_config
self.service_health[service_config.name] = ServiceHealth(
service=service_config.name,
status="unknown",
last_check=datetime.utcnow()
)
logger.info(f"Registered service: {service_config.name}")
# Check health of the newly registered service
await self.check_service_health(service_config.name)
async def unregister_service(self, service_name: str):
"""Unregister a service"""
self.services.pop(service_name, None)
self.service_health.pop(service_name, None)
logger.info(f"Unregistered service: {service_name}")
async def check_service_health(self, service_name: str) -> ServiceHealth:
"""Check health of a specific service"""
service_config = self.services.get(service_name)
if not service_config:
logger.error(f"Service {service_name} not found in registry")
@@ -81,20 +69,19 @@ class ServiceRegistry:
last_check=datetime.utcnow(),
error_message="Service not registered"
)
start_time = datetime.utcnow()
try:
health_url = f"{service_config.base_url}{service_config.health_endpoint}"
async with self.session.get(health_url) as response:
end_time = datetime.utcnow()
response_time = (end_time - start_time).total_seconds() * 1000
if response.status == 200:
health_data = await response.json()
status = "healthy" if health_data.get("status") in ["healthy", "ok"] else "unhealthy"
health = ServiceHealth(
service=service_name,
status=status,
@@ -109,7 +96,7 @@ class ServiceRegistry:
last_check=end_time,
error_message=f"HTTP {response.status}"
)
except aiohttp.ClientError as e:
health = ServiceHealth(
service=service_name,
@@ -124,37 +111,30 @@ class ServiceRegistry:
last_check=datetime.utcnow(),
error_message=f"Health check failed: {str(e)}"
)
# Update health status
self.service_health[service_name] = health
# Log health status changes
if health.status != "healthy":
logger.warning(f"Service {service_name} health check failed: {health.error_message}")
return health
async def update_all_service_health(self):
"""Update health status for all registered services"""
health_checks = [
self.check_service_health(service_name)
for service_name in self.services.keys()
]
if health_checks:
await asyncio.gather(*health_checks, return_exceptions=True)
# Log summary
healthy_count = sum(1 for h in self.service_health.values() if h.status == "healthy")
total_count = len(self.services)
logger.info(f"Health check complete: {healthy_count}/{total_count} services healthy")
async def get_service_health(self, service_name: str) -> Optional[ServiceHealth]:
"""Get health status of a specific service"""
return self.service_health.get(service_name)
async def get_all_service_health(self) -> Dict[str, Dict]:
"""Get health status of all services"""
health_dict = {}
for service_name, health in self.service_health.items():
health_dict[service_name] = {
@@ -164,31 +144,26 @@ class ServiceRegistry:
"error_message": health.error_message
}
return health_dict
async def is_service_healthy(self, service_name: str) -> bool:
"""Check if a service is healthy"""
health = self.service_health.get(service_name)
return health is not None and health.status == "healthy"
async def get_healthy_services(self) -> List[str]:
"""Get list of healthy service names"""
return [
service_name
for service_name, health in self.service_health.items()
if health.status == "healthy"
]
def get_service_config(self, service_name: str) -> Optional[ServiceConfig]:
"""Get configuration for a specific service"""
return self.services.get(service_name)
def get_all_services(self) -> Dict[str, ServiceConfig]:
"""Get all registered services"""
return self.services.copy()
async def get_service_url(self, service_name: str) -> Optional[str]:
"""Get base URL for a healthy service"""
if await self.is_service_healthy(service_name):
service_config = self.services.get(service_name)
return service_config.base_url if service_config else None
return None
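A minimal usage sketch for the registry, assuming the ServiceConfig fields shown in the gateway configuration; the service details are illustrative, and the health check will simply report the service as unreachable if nothing is listening.

import asyncio

async def demo():
    registry = ServiceRegistry()
    await registry.initialize()
    await registry.register_service(ServiceConfig(
        name="sensor-service",
        base_url="http://sensor-service:8007",
        health_endpoint="/health",
        auth_required=False,
    ))
    print(await registry.get_all_service_health())
    print(await registry.get_service_url("sensor-service"))  # None unless the health check passed
    await registry.close()

if __name__ == "__main__":
    asyncio.run(demo())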

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env python3
"""
Test script to validate that the coroutine fix works
"""
import asyncio
import sys
from unittest.mock import MagicMock, AsyncMock
# Mock the dependencies
sys.modules['aiohttp'] = MagicMock()
sys.modules['models'] = MagicMock()
sys.modules['service_registry'] = MagicMock()
sys.modules['load_balancer'] = MagicMock()
sys.modules['auth_middleware'] = MagicMock()
# Import the main module after mocking
import main
async def test_lifespan():
"""Test that the lifespan function works correctly"""
# Mock the service registry
main.service_registry.initialize = AsyncMock()
main.service_registry.register_services = AsyncMock()
main.service_registry.close = AsyncMock()
# Test the lifespan context manager
async with main.lifespan(None):
print("✅ Lifespan startup completed successfully")
# Verify that the methods were called
main.service_registry.initialize.assert_called_once()
main.service_registry.register_services.assert_called_once_with(main.SERVICES)
# Verify shutdown was called
main.service_registry.close.assert_called_once()
print("✅ Lifespan shutdown completed successfully")
print("✅ All coroutines are properly awaited - RuntimeWarning should be resolved")
if __name__ == "__main__":
asyncio.run(test_lifespan())

View File

@@ -15,6 +15,7 @@ class DatabaseManager:
self.client: Optional[MongoClient] = None
self.db = None
self.collections = {}
self.energy_collections_cache = {} # Cache for dynamically created energy data collections
self.connection_string = MONGO_CONFIG["connection_string"]
self.database_name = MONGO_CONFIG["database_name"]
@@ -31,12 +32,11 @@ class DatabaseManager:
self.db = self.client[self.database_name]
self.collections = {
'files': self.db.sa4cps_files,
'energy_data': self.db.sa4cps_energy_data,
'metadata': self.db.sa4cps_metadata,
'scanned_directories': self.db.sa4cps_scanned_directories
}
self._create_base_indexes()
logger.info(f"Connected to MongoDB database: {self.database_name}")
@@ -78,13 +78,12 @@ class DatabaseManager:
logger.error(f"MongoDB ping failed with error: {e}")
raise ConnectionFailure(f"Ping failed: {e}")
def _create_base_indexes(self):
"""Create indexes for base collections (not energy data collections)"""
try:
self.collections['files'].create_index("filename", unique=True)
self.collections['files'].create_index("processed_at")
self.collections['files'].create_index("directory_path")
self.collections['energy_data'].create_index([("filename", 1), ("timestamp", 1)])
self.collections['energy_data'].create_index("timestamp")
self.collections['scanned_directories'].create_index("directory_path", unique=True)
self.collections['scanned_directories'].create_index("last_scanned")
@@ -94,13 +93,97 @@ class DatabaseManager:
except Exception as e:
logger.warning(f"Failed to create indexes: {e}")
def _extract_level3_path(self, directory_path: str) -> Optional[str]:
"""Extract level 3 directory path (SLGs/Community/Building) from full path"""
# Expected structure: /SLGs/Community/Building/...
parts = directory_path.strip('/').split('/')
if len(parts) >= 3 and parts[0] == 'SLGs':
# Return SLGs/Community/Building
return '/'.join(parts[:3])
return None
def _sanitize_collection_name(self, level3_path: str) -> str:
"""Convert level 3 directory path to valid MongoDB collection name
Example: SLGs/CommunityA/Building1 -> energy_data__CommunityA_Building1
"""
parts = level3_path.strip('/').split('/')
if len(parts) >= 3 and parts[0] == 'SLGs':
# Use Community_Building as the collection suffix
collection_suffix = f"{parts[1]}_{parts[2]}"
collection_name = f"energy_data__{collection_suffix}"
return collection_name
# Fallback: sanitize the entire path
sanitized = level3_path.replace('/', '_').replace('.', '_').replace(' ', '_')
sanitized = sanitized.strip('_')
return f"energy_data__{sanitized}"
def _get_energy_collection(self, directory_path: str):
"""Get or create energy data collection for a specific level 3 directory path"""
level3_path = self._extract_level3_path(directory_path)
if not level3_path:
logger.warning(f"Could not extract level 3 path from: {directory_path}, using default collection")
# Fallback to a default collection for non-standard paths
collection_name = "energy_data__other"
else:
collection_name = self._sanitize_collection_name(level3_path)
# Check cache first
if collection_name in self.energy_collections_cache:
return self.energy_collections_cache[collection_name]
# Create/get collection
collection = self.db[collection_name]
# Create indexes for this energy collection
try:
collection.create_index([("filename", 1), ("timestamp", 1)])
collection.create_index("timestamp")
collection.create_index("meter_id")
logger.debug(f"Created indexes for collection: {collection_name}")
except Exception as e:
logger.warning(f"Failed to create indexes for {collection_name}: {e}")
# Cache the collection
self.energy_collections_cache[collection_name] = collection
logger.info(f"Initialized energy data collection: {collection_name} for path: {directory_path}")
return collection
def _list_energy_collections(self) -> List[str]:
"""List all energy data collections in the database"""
try:
all_collections = self.db.list_collection_names()
# Filter collections that start with 'energy_data__'
energy_collections = [c for c in all_collections if c.startswith('energy_data__')]
return energy_collections
except Exception as e:
logger.error(f"Error listing energy collections: {e}")
return []
async def store_file_data(self, filename: str, records: List[Dict[str, Any]], directory_path: str = None) -> bool:
try:
current_time = datetime.now()
# Determine which collection to use based on directory path
if directory_path:
energy_collection = self._get_energy_collection(directory_path)
level3_path = self._extract_level3_path(directory_path)
else:
logger.warning(f"No directory path provided for {filename}, using default collection")
energy_collection = self._get_energy_collection("/SLGs/unknown/unknown")
level3_path = None
# Store file metadata
file_metadata = {
"filename": filename,
"directory_path": directory_path,
"level3_path": level3_path,
"record_count": len(records),
"processed_at": current_time,
"file_size": sum(len(str(record)) for record in records),
@@ -118,12 +201,13 @@ class DatabaseManager:
for record in records:
record["filename"] = filename
record["processed_at"] = current_time
record["directory_path"] = directory_path
# Insert energy data records into the appropriate collection
if records:
result = energy_collection.insert_many(records)
inserted_count = len(result.inserted_ids)
logger.debug(f"Stored {inserted_count} records from {filename} to {energy_collection.name}")
return True
return False
@@ -134,6 +218,7 @@ class DatabaseManager:
# Store error metadata
error_metadata = {
"filename": filename,
"directory_path": directory_path,
"processed_at": current_time,
"status": "error",
"error_message": str(e)
@@ -178,8 +263,14 @@ class DatabaseManager:
return None
# Directory scanning tracking methods
# Note: Only level 4+ directories (/SLGs/Community/Building/SubDir) are tracked
# to avoid unnecessary caching of high-level organizational directories
async def is_directory_scanned(self, directory_path: str, since_timestamp: datetime = None) -> bool:
"""Check if directory has been scanned recently
Note: Only level 4+ directories are tracked in the database
"""
try:
query = {"directory_path": directory_path, "scan_status": "complete"}
if since_timestamp:
@@ -249,14 +340,14 @@ class DatabaseManager:
return False
async def get_stats(self) -> Dict[str, Any]:
"""Get database statistics including all energy collections"""
try:
stats = {
"database": self.database_name,
"timestamp": datetime.now().isoformat()
}
# Count documents in base collections
for name, collection in self.collections.items():
try:
count = collection.count_documents({})
@@ -264,12 +355,35 @@ class DatabaseManager:
except Exception as e:
stats[f"{name}_count"] = f"error: {e}"
# Get all energy collections and their counts
try:
energy_collections = self._list_energy_collections()
energy_stats = []
total_energy_records = 0
for collection_name in energy_collections:
collection = self.db[collection_name]
count = collection.count_documents({})
total_energy_records += count
energy_stats.append({
"collection": collection_name,
"record_count": count
})
stats["energy_collections"] = energy_stats
stats["total_energy_collections"] = len(energy_collections)
stats["total_energy_records"] = total_energy_records
except Exception as e:
stats["energy_collections"] = f"error: {e}"
# Get recent files
try:
recent_files = []
cursor = self.collections['files'].find(
{},
{"filename": 1, "processed_at": 1, "record_count": 1, "status": 1, "directory_path": 1, "level3_path": 1, "_id": 0}
).sort("processed_at", -1).limit(5)
for doc in cursor:
@@ -292,8 +406,17 @@ class DatabaseManager:
filename: Optional[str] = None,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
directory_path: Optional[str] = None,
limit: int = 100) -> List[Dict[str, Any]]:
"""Retrieve energy data with optional filtering
Args:
filename: Filter by specific filename
start_time: Filter by start timestamp
end_time: Filter by end timestamp
directory_path: Filter by specific directory path (level 3). If None, queries all collections
limit: Maximum number of records to return
"""
try:
query = {}
@@ -308,22 +431,48 @@ class DatabaseManager:
time_query["$lte"] = end_time
query["timestamp"] = time_query
cursor = self.collections['energy_data'].find(query).sort("timestamp", -1).limit(limit)
data = []
for doc in cursor:
# Convert ObjectId to string and datetime to ISO string
if "_id" in doc:
doc["_id"] = str(doc["_id"])
if "timestamp" in doc and hasattr(doc["timestamp"], "isoformat"):
doc["timestamp"] = doc["timestamp"].isoformat()
if "processed_at" in doc and hasattr(doc["processed_at"], "isoformat"):
doc["processed_at"] = doc["processed_at"].isoformat()
data.append(doc)
# If directory_path is specified, query only that collection
if directory_path:
collection = self._get_energy_collection(directory_path)
cursor = collection.find(query).sort("timestamp", -1).limit(limit)
for doc in cursor:
data.append(self._format_energy_document(doc))
else:
# Query across all energy collections
energy_collection_names = self._list_energy_collections()
# Collect data from all collections, then sort and limit
all_data = []
per_collection_limit = max(limit, 1000) # Get more from each to ensure we have enough after sorting
for collection_name in energy_collection_names:
collection = self.db[collection_name]
cursor = collection.find(query).sort("timestamp", -1).limit(per_collection_limit)
for doc in cursor:
all_data.append(self._format_energy_document(doc))
# Sort all data by timestamp and apply final limit
all_data.sort(key=lambda x: x.get("timestamp", ""), reverse=True)
data = all_data[:limit]
return data
except Exception as e:
logger.error(f"Error retrieving energy data: {e}")
return []
def _format_energy_document(self, doc: Dict[str, Any]) -> Dict[str, Any]:
"""Format energy document for API response"""
# Convert ObjectId to string and datetime to ISO string
if "_id" in doc:
doc["_id"] = str(doc["_id"])
if "timestamp" in doc and hasattr(doc["timestamp"], "isoformat"):
doc["timestamp"] = doc["timestamp"].isoformat()
if "processed_at" in doc and hasattr(doc["processed_at"], "isoformat"):
doc["processed_at"] = doc["processed_at"].isoformat()
return doc
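A usage sketch for the per-building collections, assuming the DatabaseManager above is importable and MONGO_CONFIG points at a reachable MongoDB; the directory path below is an example, not a real deployment value.

import asyncio
from datetime import datetime, timedelta

async def demo():
    db = DatabaseManager()
    await db.connect()
    # Query one building's collection via its level 3 directory path
    recent = await db.get_energy_data(
        directory_path="/SLGs/CommunityA/Building1",
        start_time=datetime.now() - timedelta(hours=24),
        limit=50,
    )
    # Or fan out across every energy_data__* collection
    everything = await db.get_energy_data(limit=20)
    print(len(recent), len(everything))

if __name__ == "__main__":
    asyncio.run(demo())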

View File

@@ -17,6 +17,7 @@ class FTPFileInfo:
path: str
name: str
size: int
directory_path: str # Directory containing the file
modified_time: Optional[datetime] = None
@@ -79,6 +80,7 @@ class FTPMonitor:
self.status = "stopped"
return
await asyncio.wait_for(self.check_for_new_files(), timeout=300.0) # 5 minute timeout
self.status = "running"
# Optionally skip initial scan and wait for first scheduled interval
@@ -202,9 +204,13 @@ class FTPMonitor:
visited_dirs.add(normalized_path)
# Determine directory depth (level 4 = /SLGs/Community/Building/SubDir)
path_parts = normalized_path.strip('/').split('/')
directory_level = len(path_parts)
# Check if directory should be skipped based on previous scans (only for level 4+)
if directory_level >= 4 and await self.db_manager.should_skip_directory(normalized_path):
logger.info(f"Skipping previously scanned level {directory_level} directory: {normalized_path}")
skipped_dirs += 1
continue
@@ -221,7 +227,7 @@ class FTPMonitor:
# Count files found in this directory
files_found_in_dir = 0
for line in dir_list:
parts = line.split()
if len(parts) >= 9:
@@ -257,7 +263,8 @@ class FTPMonitor:
files.append(FTPFileInfo(
path=full_path,
name=filename,
size=size,
directory_path=normalized_path
))
files_found_in_dir += 1
@@ -265,10 +272,13 @@ class FTPMonitor:
logger.warning(f"Could not parse file info for: {filename}")
ftp.cwd(original_dir)
# Mark directory as scanned (only for level 4+ directories)
if directory_level >= 4:
await self.db_manager.mark_directory_scanned(normalized_path, files_found_in_dir)
logger.debug(f"Completed scanning level {directory_level} directory: {normalized_path} ({files_found_in_dir} files found)")
else:
logger.debug(f"Completed scanning level {directory_level} directory (not saved to cache): {normalized_path} ({files_found_in_dir} files found)")
except Exception as e:
logger.warning(f"Error scanning directory {normalized_path}: {e}")
@@ -277,7 +287,7 @@ class FTPMonitor:
logger.info(f"Iterative scan completed. Scanned: {scanned_dirs} directories, Skipped: {skipped_dirs} directories (Total visited: {len(visited_dirs)})")
async def _process_file(self, ftp: FTP, file_info: FTPFileInfo) -> bool:
logger.debug(f"Processing file: {file_info.path} ({file_info.size} bytes) from directory: {file_info.directory_path}")
try:
with tempfile.NamedTemporaryFile(mode='wb', suffix='.slg_v2', delete=False) as temp_file:
@@ -289,8 +299,9 @@ class FTPMonitor:
records = await self.processor.process_file(temp_path, file_info.name)
if records:
# Pass directory path to store_file_data for collection selection
await self.db_manager.store_file_data(file_info.name, records, file_info.directory_path)
logger.debug(f"Stored {len(records)} records from {file_info.name} to collection for {file_info.directory_path}")
return True
else:
logger.warning(f"No valid records found in {file_info.name}")

View File

@@ -19,19 +19,12 @@ db_manager = None
async def lifespan(app: FastAPI):
global ftp_monitor, db_manager
logger.info("Starting SA4CPS Data Ingestion Service...")
db_manager = DatabaseManager()
await db_manager.connect()
logger.info("Database connection established")
ftp_monitor = FTPMonitor(db_manager)
logger.info("FTP monitor created")
monitoring_task = asyncio.create_task(ftp_monitor.start_monitoring())
logger.info("FTP monitoring task started in background")
logger.info("Service startup complete - HTTP server ready to accept requests")
yield
@@ -78,7 +71,8 @@ async def health_check():
global ftp_monitor, db_manager
health_status = {
"service": "data-ingestion-service",
"status": "healthy",
"timestamp": datetime.now().isoformat(),
"database": "unknown",
"ftp_monitor": "unknown"

View File

@@ -32,7 +32,7 @@ class MockDatabaseManager:
"""Mock get list of processed files""" """Mock get list of processed files"""
return list(self.processed_files) return list(self.processed_files)
async def store_file_data(self, filename: str, records: List) -> bool: async def store_file_data(self, filename: str, records: List, directory_path: str = None) -> bool:
"""Mock store file data""" """Mock store file data"""
self.processed_files.add(filename) self.processed_files.add(filename)
self.stored_files[filename] = records self.stored_files[filename] = records

View File

@@ -0,0 +1,26 @@
FROM python:3.9-slim
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y \
gcc \
curl \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements and install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY . .
# Expose port
EXPOSE 8003
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
CMD curl -f http://localhost:8003/health || exit 1
# Run the application
CMD ["python", "main.py"]

View File

@@ -0,0 +1,208 @@
"""
Database configuration and connection management for Demand Response Service
"""
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase
import redis.asyncio as redis
import logging
import os
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Configuration from environment variables
MONGO_URL = os.getenv("MONGO_URL", "mongodb://localhost:27017")
DATABASE_NAME = os.getenv("DATABASE_NAME", "energy_dashboard_demand_response")
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")
# Global database clients
_mongo_client: AsyncIOMotorClient = None
_database: AsyncIOMotorDatabase = None
_redis_client: redis.Redis = None
async def connect_to_mongo():
"""Initialize MongoDB connection and create indexes"""
global _mongo_client, _database
try:
logger.info(f"Connecting to MongoDB at {MONGO_URL}")
_mongo_client = AsyncIOMotorClient(MONGO_URL)
_database = _mongo_client[DATABASE_NAME]
# Test connection
await _database.command("ping")
logger.info(f"Successfully connected to MongoDB database: {DATABASE_NAME}")
# Create indexes
await create_indexes()
except Exception as e:
logger.error(f"Failed to connect to MongoDB: {e}")
raise
async def close_mongo_connection():
"""Close MongoDB connection"""
global _mongo_client
if _mongo_client:
_mongo_client.close()
logger.info("MongoDB connection closed")
async def get_database() -> AsyncIOMotorDatabase:
"""Get database instance"""
if _database is None:
await connect_to_mongo()
return _database
async def connect_to_redis():
"""Initialize Redis connection"""
global _redis_client
try:
logger.info(f"Connecting to Redis at {REDIS_URL}")
_redis_client = redis.from_url(REDIS_URL, decode_responses=True)
# Test connection
await _redis_client.ping()
logger.info("Successfully connected to Redis")
except Exception as e:
logger.error(f"Failed to connect to Redis: {e}")
raise
async def close_redis_connection():
"""Close Redis connection"""
global _redis_client
if _redis_client:
await _redis_client.close()
logger.info("Redis connection closed")
async def get_redis() -> redis.Redis:
"""Get Redis client instance"""
if _redis_client is None:
await connect_to_redis()
return _redis_client
async def create_indexes():
"""Create MongoDB indexes for optimal query performance"""
db = await get_database()
logger.info("Creating MongoDB indexes...")
try:
# Indexes for demand_response_invitations collection
await db.demand_response_invitations.create_index("event_id", unique=True)
await db.demand_response_invitations.create_index([("event_time", 1), ("status", 1)])
await db.demand_response_invitations.create_index("status")
await db.demand_response_invitations.create_index("created_at")
await db.demand_response_invitations.create_index("response")
logger.info("Created indexes for demand_response_invitations collection")
# Indexes for demand_response_events collection
await db.demand_response_events.create_index("event_id", unique=True)
await db.demand_response_events.create_index([("start_time", 1), ("status", 1)])
await db.demand_response_events.create_index([("status", 1), ("start_time", 1)])
await db.demand_response_events.create_index("status")
await db.demand_response_events.create_index("invitation_id")
logger.info("Created indexes for demand_response_events collection")
# Indexes for demand_response_responses collection
await db.demand_response_responses.create_index([("event_id", 1), ("device_id", 1)], unique=True)
await db.demand_response_responses.create_index("event_id")
await db.demand_response_responses.create_index("device_id")
await db.demand_response_responses.create_index("responded_at")
logger.info("Created indexes for demand_response_responses collection")
# Indexes for flexibility_snapshots collection (with TTL for auto-cleanup)
await db.flexibility_snapshots.create_index([("timestamp", -1)])
await db.flexibility_snapshots.create_index(
"timestamp",
expireAfterSeconds=7776000 # 90 days TTL
)
logger.info("Created indexes for flexibility_snapshots collection")
# Indexes for auto_response_config collection (singleton document)
await db.auto_response_config.create_index("config_id", unique=True)
logger.info("Created indexes for auto_response_config collection")
# Indexes for device_instructions collection
await db.device_instructions.create_index("device_id", unique=True)
await db.device_instructions.create_index("updated_at")
logger.info("Created indexes for device_instructions collection")
logger.info("All MongoDB indexes created successfully")
except Exception as e:
logger.error(f"Error creating indexes: {e}")
# Don't raise - indexes may already exist
async def initialize_default_config():
"""Initialize default auto-response configuration if it doesn't exist"""
db = await get_database()
try:
# Check if default config exists
existing_config = await db.auto_response_config.find_one({"config_id": "default"})
if not existing_config:
default_config = {
"config_id": "default",
"enabled": False,
"max_reduction_percentage": 20.0,
"response_delay_seconds": 300,
"min_notice_minutes": 60,
"created_at": None,
"updated_at": None
}
await db.auto_response_config.insert_one(default_config)
logger.info("Created default auto-response configuration")
else:
logger.info("Auto-response configuration already exists")
except Exception as e:
logger.error(f"Error initializing default config: {e}")
# Utility functions for common database operations
async def get_collection(collection_name: str):
"""Get a collection by name"""
db = await get_database()
return db[collection_name]
async def health_check() -> dict:
"""Check database connections health"""
status = {
"mongodb": False,
"redis": False
}
try:
# Check MongoDB
db = await get_database()
await db.command("ping")
status["mongodb"] = True
except Exception as e:
logger.error(f"MongoDB health check failed: {e}")
try:
# Check Redis
redis_client = await get_redis()
await redis_client.ping()
status["redis"] = True
except Exception as e:
logger.error(f"Redis health check failed: {e}")
return status
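A minimal startup/shutdown sketch for this module, assuming it is imported as the service's database module; the call order is illustrative rather than prescribed.

import asyncio

async def demo():
    await connect_to_mongo()            # also creates indexes
    await connect_to_redis()
    await initialize_default_config()
    print(await health_check())         # {'mongodb': True, 'redis': True} when both are reachable
    await close_redis_connection()
    await close_mongo_connection()

if __name__ == "__main__":
    asyncio.run(demo())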

View File

@@ -0,0 +1,747 @@
"""
Demand Response Service - Core Business Logic
Handles DR invitations, event execution, auto-response, and flexibility calculation
"""
import asyncio
import json
import uuid
from datetime import datetime, timedelta
from typing import List, Dict, Optional, Any
import logging
from motor.motor_asyncio import AsyncIOMotorDatabase
import redis.asyncio as redis
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DemandResponseService:
"""Core Demand Response service business logic"""
def __init__(self, db: AsyncIOMotorDatabase, redis_client: redis.Redis):
self.db = db
self.redis = redis_client
self.active_events: Dict[str, asyncio.Task] = {} # event_id -> task
self.device_power_cache: Dict[str, float] = {} # device_id -> power_kw (updated by Redis subscriber)
# ===== INVITATION MANAGEMENT =====
async def send_invitation(
self,
event_time: datetime,
load_kwh: float,
load_percentage: float,
iots: List[str],
duration_minutes: int = 59
) -> Dict[str, Any]:
"""
Create and send DR invitation
Returns: {"event_id": str, "response": str, "message": str}
"""
logger.info(f"Creating DR invitation for {len(iots)} devices at {event_time}")
# Generate unique event ID
event_id = str(uuid.uuid4())
# Check auto-response configuration
auto_config = await self.get_auto_response_config()
response = "YES" if auto_config.get("enabled", False) else "WAITING"
# Create invitation document
invitation = {
"event_id": event_id,
"created_at": datetime.utcnow(),
"event_time": event_time,
"load_kwh": load_kwh,
"load_percentage": load_percentage,
"iots": iots,
"duration_minutes": duration_minutes,
"response": response,
"status": "pending"
}
# Store in MongoDB
await self.db.demand_response_invitations.insert_one(invitation)
# Cache in Redis for fast access (24 hour TTL)
cache_key = f"dr:invitation:{event_id}"
await self.redis.setex(
cache_key,
86400,
json.dumps(invitation, default=str)
)
# Publish event to Redis pub/sub
await self.redis.publish("dr_events", json.dumps({
"event": "invitation_created",
"event_id": event_id,
"event_time": event_time.isoformat(),
"load_kwh": load_kwh,
"response": response
}))
logger.info(f"Invitation {event_id} created with response: {response}")
return {
"event_id": event_id,
"response": response,
"message": "Invitation created successfully"
}
async def answer_invitation(
self,
event_id: str,
iot_id: str,
response: str,
committed_reduction_kw: Optional[float] = None
) -> Dict[str, Any]:
"""
Record device response to invitation
Returns: {"success": bool, "message": str}
"""
logger.info(f"Recording response for invitation {event_id}, device {iot_id}: {response}")
# Validate invitation exists
invitation = await self.get_invitation(event_id)
if not invitation:
return {"success": False, "message": f"Invitation {event_id} not found"}
if iot_id not in invitation["iots"]:
return {"success": False, "message": f"Device {iot_id} not in invitation"}
# Check if already responded
existing = await self.db.demand_response_responses.find_one({
"event_id": event_id,
"device_id": iot_id
})
if existing:
return {"success": False, "message": f"Device {iot_id} has already responded"}
# Store response
response_doc = {
"event_id": event_id,
"device_id": iot_id,
"response": response,
"committed_reduction_kw": committed_reduction_kw,
"responded_at": datetime.utcnow()
}
await self.db.demand_response_responses.insert_one(response_doc)
# Check if all devices have responded
total_devices = len(invitation["iots"])
total_responses = await self.db.demand_response_responses.count_documents({"event_id": event_id})
if total_responses == total_devices:
# All devices responded - update invitation status
yes_count = await self.db.demand_response_responses.count_documents({
"event_id": event_id,
"response": "YES"
})
all_yes = yes_count == total_devices
new_response = "YES" if all_yes else "NO"
new_status = "scheduled" if all_yes else "cancelled"
await self.db.demand_response_invitations.update_one(
{"event_id": event_id},
{"$set": {"response": new_response, "status": new_status}}
)
logger.info(f"Invitation {event_id} final response: {new_response} (status: {new_status})")
# Clear cache
await self.redis.delete(f"dr:invitation:{event_id}")
# Publish event
await self.redis.publish("dr_events", json.dumps({
"event": "invitation_answered",
"event_id": event_id,
"device_id": iot_id,
"response": response
}))
return {"success": True, "message": "Response recorded successfully"}
async def get_invitation(self, event_id: str) -> Optional[Dict[str, Any]]:
"""
Get invitation by event_id (with Redis caching)
"""
# Try cache first
cache_key = f"dr:invitation:{event_id}"
cached = await self.redis.get(cache_key)
if cached:
invitation = json.loads(cached)
return invitation
# Fallback to MongoDB
invitation = await self.db.demand_response_invitations.find_one({"event_id": event_id})
if invitation:
invitation["_id"] = str(invitation["_id"])
# Cache for 24 hours
await self.redis.setex(
cache_key,
86400,
json.dumps(invitation, default=str)
)
return invitation
return None
async def get_unanswered_invitations(self) -> List[Dict[str, Any]]:
"""Get all pending invitations awaiting response"""
cursor = self.db.demand_response_invitations.find({
"response": "WAITING",
"status": "pending"
}).sort("created_at", -1)
invitations = []
async for inv in cursor:
inv["_id"] = str(inv["_id"])
invitations.append(inv)
return invitations
async def get_answered_invitations(self, hours: int = 24, limit: int = 50) -> List[Dict[str, Any]]:
"""Get recent answered invitations"""
start_time = datetime.utcnow() - timedelta(hours=hours)
cursor = self.db.demand_response_invitations.find({
"response": {"$ne": "WAITING"},
"created_at": {"$gte": start_time}
}).sort("created_at", -1).limit(limit)
invitations = []
async for inv in cursor:
inv["_id"] = str(inv["_id"])
invitations.append(inv)
return invitations
# ===== EVENT EXECUTION =====
async def schedule_event(
self,
event_time: datetime,
iots: List[str],
load_reduction_kw: float,
duration_minutes: int = 59
) -> Dict[str, Any]:
"""
Schedule a DR event for execution
Returns: {"event_id": str, "message": str}
"""
logger.info(f"Scheduling DR event for {len(iots)} devices at {event_time}")
# Create event document
event_id = str(uuid.uuid4())
end_time = event_time + timedelta(minutes=duration_minutes)
event = {
"event_id": event_id,
"start_time": event_time,
"end_time": end_time,
"status": "scheduled",
"participating_devices": iots,
"target_reduction_kw": load_reduction_kw,
"actual_reduction_kw": 0.0,
"power_samples": []
}
await self.db.demand_response_events.insert_one(event)
# Publish scheduled event
await self.redis.publish("dr_events", json.dumps({
"event": "event_scheduled",
"event_id": event_id,
"start_time": event_time.isoformat(),
"end_time": end_time.isoformat(),
"devices": iots
}))
logger.info(f"Event {event_id} scheduled successfully")
return {
"event_id": event_id,
"message": "Event scheduled successfully"
}
async def execute_event(self, event_id: str):
"""
Execute a DR event (spawns background task)
"""
logger.info(f"Executing DR event {event_id}")
# Get event details
event = await self.db.demand_response_events.find_one({"event_id": event_id})
if not event:
logger.error(f"Event {event_id} not found")
return
# Update status to active
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{"$set": {"status": "active", "actual_start_time": datetime.utcnow()}}
)
# Publish event started
await self.redis.publish("dr_events", json.dumps({
"event": "event_started",
"event_id": event_id,
"devices": event["participating_devices"]
}))
# Create and store async task for this event
task = asyncio.create_task(self._run_event_loop(event))
self.active_events[event_id] = task
logger.info(f"DR event {event_id} started successfully")
async def _run_event_loop(self, event: Dict[str, Any]):
"""
CRITICAL: Core event execution loop - runs for duration_minutes
Samples power every 5 seconds, accumulates reduction, handles cancellation
"""
event_id = event["event_id"]
end_time = event["end_time"]
devices = event["participating_devices"]
total_reduction_kwh = 0.0
sample_count = 0
logger.info(f"Starting event loop for {event_id}, ending at {end_time}")
try:
while datetime.utcnow() < end_time:
# Get current power for all participating devices from cache
device_powers = {
device_id: self.device_power_cache.get(device_id, 0.0)
for device_id in devices
}
# Calculate reduction for this 5-second interval
# interval_hours = 5.0 / 3600.0 = 0.00139 hours
interval_reduction_kwh = sum(device_powers.values()) * (5.0 / 3600.0)
total_reduction_kwh += interval_reduction_kwh
sample_count += 1
# Store sample in MongoDB (every sample to maintain accuracy)
sample = {
"timestamp": datetime.utcnow(),
"device_powers": device_powers,
"interval_reduction_kwh": interval_reduction_kwh
}
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$push": {"power_samples": sample},
"$set": {"actual_reduction_kw": total_reduction_kwh}
}
)
# Update Redis cache for fast access to current reduction
cache_key = f"dr:event:active:{event_id}"
await self.redis.setex(
cache_key,
300, # 5 minute TTL
json.dumps({
"event_id": event_id,
"current_reduction_kwh": total_reduction_kwh,
"devices": device_powers,
"last_update": datetime.utcnow().isoformat()
}, default=str)
)
# Publish progress every 10 samples (50 seconds)
if sample_count % 10 == 0:
await self.redis.publish("dr_events", json.dumps({
"event": "event_progress",
"event_id": event_id,
"total_reduction_kwh": round(total_reduction_kwh, 3),
"device_powers": device_powers,
"timestamp": datetime.utcnow().isoformat()
}))
logger.info(f"Event {event_id} progress: {total_reduction_kwh:.3f} kWh ({sample_count} samples)")
# Sleep for 5 seconds
await asyncio.sleep(5)
# Event completed successfully
logger.info(f"Event {event_id} completed with {total_reduction_kwh:.3f} kWh reduction")
await self._complete_event(event_id, total_reduction_kwh)
except asyncio.CancelledError:
logger.info(f"Event {event_id} cancelled by user")
await self._cancel_event(event_id)
raise
except Exception as e:
logger.error(f"Error in event {event_id}: {e}", exc_info=True)
await self._cancel_event(event_id)
async def _complete_event(self, event_id: str, total_reduction_kwh: float):
"""Mark event as completed"""
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$set": {
"status": "completed",
"actual_end_time": datetime.utcnow(),
"actual_reduction_kw": total_reduction_kwh
}
}
)
# Remove from active events
self.active_events.pop(event_id, None)
# Clear cache
await self.redis.delete(f"dr:event:active:{event_id}")
# Publish completion
await self.redis.publish("dr_events", json.dumps({
"event": "event_completed",
"event_id": event_id,
"total_reduction_kwh": total_reduction_kwh
}))
logger.info(f"DR event {event_id} marked as completed")
async def _cancel_event(self, event_id: str):
"""Internal method to cancel an event"""
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$set": {
"status": "cancelled",
"cancelled_at": datetime.utcnow()
}
}
)
self.active_events.pop(event_id, None)
await self.redis.delete(f"dr:event:active:{event_id}")
# Publish cancellation
await self.redis.publish("dr_events", json.dumps({
"event": "event_cancelled",
"event_id": event_id,
"timestamp": datetime.utcnow().isoformat()
}))
async def cancel_event(self, event_id: str):
"""
Public method to cancel a running DR event gracefully
"""
logger.info(f"Cancelling DR event {event_id}")
# Cancel the async task
task = self.active_events.get(event_id)
if task and not task.done():
task.cancel()
try:
await task
except asyncio.CancelledError:
# Expected - task cancelled successfully
pass
except Exception as e:
logger.error(f"Error cancelling event task {event_id}: {e}")
# Update database status (if not already done by _cancel_event)
event = await self.db.demand_response_events.find_one({"event_id": event_id})
if event and event.get("status") != "cancelled":
await self._cancel_event(event_id)
logger.info(f"DR event {event_id} cancelled successfully")
async def get_active_events(self) -> List[Dict[str, Any]]:
"""Get currently running events with real-time data"""
cursor = self.db.demand_response_events.find({
"status": "active"
}).sort("start_time", -1)
events = []
async for event in cursor:
event["_id"] = str(event["_id"])
# Add real-time data from cache
cache_key = f"dr:event:active:{event['event_id']}"
cached = await self.redis.get(cache_key)
if cached:
realtime_data = json.loads(cached)
event["current_reduction_kwh"] = realtime_data.get("current_reduction_kwh")
event["current_device_powers"] = realtime_data.get("devices")
events.append(event)
return events
# ===== DEVICE POWER INTEGRATION =====
def update_device_power_cache(self, device_id: str, power_kw: float):
"""
Update device power cache (called by Redis subscriber)
This is synchronous because it's just updating a dict
"""
self.device_power_cache[device_id] = power_kw
# No logging here to avoid spam (called every few seconds per device)
async def get_device_power(self, device_id: str) -> float:
"""Get current power for a device from cache"""
return self.device_power_cache.get(device_id, 0.0)
# ===== AUTO-RESPONSE CONFIGURATION =====
async def get_auto_response_config(self) -> Dict[str, Any]:
"""Get auto-response configuration"""
config = await self.db.auto_response_config.find_one({"config_id": "default"})
if not config:
# Create default config
default_config = {
"config_id": "default",
"enabled": False,
"max_reduction_percentage": 20.0,
"response_delay_seconds": 300,
"min_notice_minutes": 60,
"updated_at": datetime.utcnow()
}
await self.db.auto_response_config.insert_one(default_config)
return default_config
return config
async def set_auto_response_config(
self,
enabled: bool,
max_reduction_percentage: float = 20.0,
response_delay_seconds: int = 300,
min_notice_minutes: int = 60
) -> Dict[str, Any]:
"""Update auto-response configuration"""
await self.db.auto_response_config.update_one(
{"config_id": "default"},
{
"$set": {
"enabled": enabled,
"max_reduction_percentage": max_reduction_percentage,
"response_delay_seconds": response_delay_seconds,
"min_notice_minutes": min_notice_minutes,
"updated_at": datetime.utcnow()
}
},
upsert=True
)
# Clear cache
await self.redis.delete("dr:config:auto_response")
logger.info(f"Auto-response config updated: enabled={enabled}")
return await self.get_auto_response_config()
async def process_auto_responses(self):
"""
Process pending invitations with auto-response (called by background task)
"""
# Get auto-response configuration
auto_config = await self.get_auto_response_config()
if not auto_config.get("enabled"):
return
# Find unanswered invitations
invitations = await self.get_unanswered_invitations()
for invitation in invitations:
event_id = invitation["event_id"]
event_time = invitation["event_time"]
# Parse event_time (might be string from cache)
if isinstance(event_time, str):
event_time = datetime.fromisoformat(event_time.replace('Z', '+00:00'))
# Check if event is within auto-response criteria
time_until_event = (event_time - datetime.utcnow()).total_seconds() / 60 # minutes
min_notice = auto_config.get("min_notice_minutes", 60)
if time_until_event >= min_notice:
logger.info(f"Auto-responding to invitation {event_id}")
# Auto-accept for all devices
for device_id in invitation["iots"]:
# Check if already responded
existing = await self.db.demand_response_responses.find_one({
"event_id": event_id,
"device_id": device_id
})
if not existing:
# Get device current power
device_power = await self.get_device_power(device_id)
# Calculate committed reduction based on max_reduction_percentage
max_reduction_pct = auto_config.get("max_reduction_percentage", 20.0)
committed_reduction = device_power * (max_reduction_pct / 100) if device_power > 0 else 0.5
# Submit auto-response
try:
await self.answer_invitation(event_id, device_id, "YES", committed_reduction)
logger.info(f"Auto-accepted for device {device_id} with {committed_reduction:.2f} kW commitment")
except Exception as e:
logger.error(f"Error auto-responding for {device_id}: {e}")
else:
logger.debug(f"Invitation {event_id} too soon ({time_until_event:.0f}m < {min_notice}m)")
# ===== BACKGROUND TASK SUPPORT =====
async def check_scheduled_events(self):
"""
Check for events that need to be started (called by scheduler task)
"""
now = datetime.utcnow()
threshold = now + timedelta(minutes=1) # Start events within next minute
# Find scheduled events that should start
cursor = self.db.demand_response_events.find({
"status": "scheduled",
"start_time": {"$lte": threshold, "$gte": now}
})
async for event in cursor:
event_id = event["event_id"]
# Check if not already active
if event_id not in self.active_events:
logger.info(f"Starting scheduled DR event {event_id}")
await self.execute_event(event_id)
# ===== BASIC FLEXIBILITY CALCULATION =====
async def get_current_flexibility(self) -> Dict[str, Any]:
"""
Calculate current available flexibility from device power cache
"""
total_flexibility_kw = 0.0
devices = []
# Get all devices with instructions
cursor = self.db.device_instructions.find({})
current_hour = datetime.utcnow().hour
async for device_doc in cursor:
device_id = device_doc["device_id"]
instruction = device_doc["instructions"].get(str(current_hour), "off")
if instruction != "off":
# Get device current power from cache
device_power = self.device_power_cache.get(device_id, 0.0)
if instruction == "participation":
# Full flexibility (100%)
flexibility = device_power
elif instruction == "shifting":
# Partial flexibility (20%)
flexibility = device_power * 0.20
else:
flexibility = 0.0
if flexibility > 0:
devices.append({
"device_id": device_id,
"available_kw": round(flexibility, 2),
"instruction": instruction,
"current_power": round(device_power, 2)
})
total_flexibility_kw += flexibility
snapshot = {
"timestamp": datetime.utcnow(),
"total_flexibility_kw": round(total_flexibility_kw, 2),
"devices": devices
}
# Store snapshot
await self.db.flexibility_snapshots.insert_one(dict(snapshot))
# Cache for 5 minutes
await self.redis.setex(
"dr:flexibility:current",
300,
json.dumps(snapshot, default=str)
)
return snapshot
async def get_device_instructions(self, device_id: Optional[str] = None) -> Dict[str, Any]:
"""Get DR instructions for device(s)"""
if device_id:
doc = await self.db.device_instructions.find_one({"device_id": device_id})
return doc if doc else {"device_id": device_id, "instructions": {}}
else:
cursor = self.db.device_instructions.find({})
instructions = {}
async for doc in cursor:
instructions[doc["device_id"]] = doc["instructions"]
return instructions
async def update_device_instructions(self, device_id: str, instructions: Dict[str, str]):
"""Update hourly instructions for a device"""
await self.db.device_instructions.update_one(
{"device_id": device_id},
{
"$set": {
"instructions": instructions,
"updated_at": datetime.utcnow()
}
},
upsert=True
)
logger.info(f"Updated instructions for device {device_id}")
# ===== ANALYTICS =====
async def get_performance_analytics(self, days: int = 30) -> Dict[str, Any]:
"""Get DR performance analytics"""
start_date = datetime.utcnow() - timedelta(days=days)
# Query completed events
cursor = self.db.demand_response_events.find({
"status": "completed",
"start_time": {"$gte": start_date}
})
events = await cursor.to_list(length=None)
if not events:
return {
"period_days": days,
"total_events": 0,
"total_reduction_kwh": 0.0,
"total_target_kwh": 0.0,
"average_reduction_kwh": 0.0,
"achievement_rate": 0.0,
"average_event_duration_minutes": 59
}
total_reduction = sum(e.get("actual_reduction_kw", 0) for e in events)
total_target = sum(e.get("target_reduction_kw", 0) for e in events)
return {
"period_days": days,
"total_events": len(events),
"total_reduction_kwh": round(total_reduction, 2),
"total_target_kwh": round(total_target, 2),
"average_reduction_kwh": round(total_reduction / len(events), 2),
"achievement_rate": round((total_reduction / total_target * 100) if total_target > 0 else 0, 2),
"average_event_duration_minutes": 59
}
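A quick sanity check of the 5-second accumulation arithmetic used in _run_event_loop above, with two hypothetical devices holding a combined 4.0 kW for a full 59-minute event (the device IDs and power values are illustrative, not from the diff):

# Sanity check for the per-sample energy accumulation in _run_event_loop.
INTERVAL_HOURS = 5.0 / 3600.0                      # 0.001389 h per 5-second sample
device_powers = {"sensor_1": 2.5, "sensor_2": 1.5} # kW, example values only

samples = (59 * 60) // 5                           # 708 samples in a 59-minute event
per_sample_kwh = sum(device_powers.values()) * INTERVAL_HOURS
total_kwh = per_sample_kwh * samples

print(f"{per_sample_kwh:.5f} kWh per sample, {total_kwh:.2f} kWh per event")
# -> 0.00556 kWh per sample, ~3.93 kWh per event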

View File

@@ -23,22 +23,130 @@ from demand_response_service import DemandResponseService
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Background task functions
async def event_scheduler_task():
"""Background task for checking and executing scheduled events"""
logger.info("Starting event scheduler task")
while True:
try:
db = await get_database()
redis = await get_redis()
service = DemandResponseService(db, redis)
# Check for events that need to be executed
await service.check_scheduled_events()
# Sleep for 60 seconds between checks
await asyncio.sleep(60)
except asyncio.CancelledError:
logger.info("Event scheduler task cancelled")
raise
except Exception as e:
logger.error(f"Error in event scheduler task: {e}")
await asyncio.sleep(120) # Wait longer on error
async def auto_response_task():
"""Background task for automatic demand response"""
logger.info("Starting auto-response task")
while True:
try:
db = await get_database()
redis = await get_redis()
service = DemandResponseService(db, redis)
# Check for auto-response opportunities
await service.process_auto_responses()
# Sleep for 30 seconds between checks
await asyncio.sleep(30)
except asyncio.CancelledError:
logger.info("Auto-response task cancelled")
raise
except Exception as e:
logger.error(f"Error in auto-response task: {e}")
await asyncio.sleep(90) # Wait longer on error
async def energy_data_subscriber_task():
"""Subscribe to energy_data Redis channel for device power updates"""
logger.info("Starting energy data subscriber task")
try:
redis = await get_redis()
db = await get_database()
service = DemandResponseService(db, redis)
pubsub = redis.pubsub()
await pubsub.subscribe("energy_data")
logger.info("Subscribed to energy_data channel")
while True:
try:
message = await pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
if message and message.get('type') == 'message':
import json
data = json.loads(message['data'])
# Format: {"sensorId": "sensor_1", "timestamp": 123, "value": 3.5, "unit": "kWh"}
sensor_id = data.get("sensorId")
power_kw = data.get("value", 0.0)
# Update service cache
service.update_device_power_cache(sensor_id, power_kw)
except json.JSONDecodeError as e:
logger.warning(f"Invalid JSON in energy_data message: {e}")
except Exception as e:
logger.error(f"Error processing energy data message: {e}")
await asyncio.sleep(5)
except asyncio.CancelledError:
logger.info("Energy data subscriber task cancelled")
raise
except Exception as e:
logger.error(f"Energy data subscriber task failed: {e}")
@asynccontextmanager
async def lifespan(app: FastAPI):
"""Application lifespan manager"""
logger.info("Demand Response Service starting up...")
await connect_to_mongo()
await connect_to_redis()
# Create global service instance for shutdown cleanup
db = await get_database()
redis = await get_redis()
app.state.dr_service = DemandResponseService(db, redis)
# Start background tasks
asyncio.create_task(event_scheduler_task())
asyncio.create_task(auto_response_task())
asyncio.create_task(energy_data_subscriber_task())
logger.info("Demand Response Service startup complete") logger.info("Demand Response Service startup complete")
yield yield
logger.info("Demand Response Service shutting down...") logger.info("Demand Response Service shutting down...")
# Cancel all active DR events gracefully
if hasattr(app.state, 'dr_service'):
active_event_ids = list(app.state.dr_service.active_events.keys())
if active_event_ids:
logger.info(f"Cancelling {len(active_event_ids)} active events...")
for event_id in active_event_ids:
try:
await app.state.dr_service.cancel_event(event_id)
except Exception as e:
logger.error(f"Error cancelling event {event_id}: {e}")
await close_mongo_connection()
logger.info("Demand Response Service shutdown complete")

View File

@@ -0,0 +1,338 @@
"""
Pydantic models for Demand Response Service
"""
from datetime import datetime
from typing import List, Dict, Optional, Literal
from pydantic import BaseModel, Field
from enum import Enum
# Enums
class InvitationStatus(str, Enum):
"""Invitation status states"""
PENDING = "pending"
SCHEDULED = "scheduled"
ACTIVE = "active"
COMPLETED = "completed"
CANCELLED = "cancelled"
class ResponseType(str, Enum):
"""Device response types"""
WAITING = "WAITING"
YES = "YES"
NO = "NO"
class EventStatus(str, Enum):
"""DR event status states"""
SCHEDULED = "scheduled"
ACTIVE = "active"
COMPLETED = "completed"
CANCELLED = "cancelled"
class InstructionType(str, Enum):
"""Device participation instruction types"""
PARTICIPATION = "participation" # Full DR participation (100%)
SHIFTING = "shifting" # Partial participation (0-20%)
OFF = "off" # No DR participation
# Invitation Models
class EventRequest(BaseModel):
"""Request model for creating a DR event (alias for DRInvitationCreate)"""
event_time: datetime = Field(..., description="When the DR event should occur")
load_kwh: float = Field(..., description="Target load reduction in kWh", gt=0)
load_percentage: float = Field(..., description="Target reduction as percentage of total load", ge=0, le=100)
iots: List[str] = Field(..., description="List of device IDs to participate", min_items=1)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59
}
}
class DRInvitationCreate(BaseModel):
"""Request model for creating a DR invitation"""
event_time: datetime = Field(..., description="When the DR event should occur")
load_kwh: float = Field(..., description="Target load reduction in kWh", gt=0)
load_percentage: float = Field(..., description="Target reduction as percentage of total load", ge=0, le=100)
iots: List[str] = Field(..., description="List of device IDs to participate", min_items=1)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59
}
}
class DRInvitationResponse(BaseModel):
"""Response model for device answering invitation"""
event_id: str = Field(..., description="Event identifier")
iot_id: str = Field(..., description="Device identifier")
response: ResponseType = Field(..., description="Device response (YES/NO)")
committed_reduction_kw: Optional[float] = Field(None, description="Committed power reduction in kW", ge=0)
class Config:
json_schema_extra = {
"example": {
"event_id": "550e8400-e29b-41d4-a716-446655440000",
"iot_id": "sensor_1",
"response": "YES",
"committed_reduction_kw": 2.5
}
}
class DRInvitation(BaseModel):
"""Full DR invitation model"""
event_id: str = Field(..., description="Unique event identifier")
created_at: datetime = Field(..., description="Invitation creation time")
event_time: datetime = Field(..., description="Scheduled event start time")
load_kwh: float = Field(..., description="Target load reduction in kWh")
load_percentage: float = Field(..., description="Target reduction percentage")
iots: List[str] = Field(..., description="Participating device IDs")
duration_minutes: int = Field(..., description="Event duration in minutes")
response: str = Field(..., description="Overall response status")
status: str = Field(..., description="Invitation status")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"event_id": "550e8400-e29b-41d4-a716-446655440000",
"created_at": "2025-12-10T13:45:00",
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59,
"response": "WAITING",
"status": "pending"
}
}
# Event Models
class EventScheduleRequest(BaseModel):
"""Request model for scheduling a DR event"""
event_time: datetime = Field(..., description="Event start time")
iots: List[str] = Field(..., description="Participating device IDs", min_items=1)
load_reduction_kw: float = Field(..., description="Target reduction in kW", gt=0)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"iots": ["sensor_1", "sensor_2"],
"load_reduction_kw": 5.0,
"duration_minutes": 59
}
}
class PowerSample(BaseModel):
"""Individual power sample during event"""
timestamp: datetime = Field(..., description="Sample timestamp")
device_powers: Dict[str, float] = Field(..., description="Device power readings (device_id -> kW)")
interval_reduction_kwh: Optional[float] = Field(None, description="Reduction for this interval")
class DREvent(BaseModel):
"""DR event execution model"""
event_id: str = Field(..., description="Unique event identifier")
invitation_id: Optional[str] = Field(None, description="Source invitation ID if applicable")
start_time: datetime = Field(..., description="Event start time")
end_time: datetime = Field(..., description="Event end time")
status: EventStatus = Field(..., description="Event status")
participating_devices: List[str] = Field(..., description="Device IDs participating")
target_reduction_kw: float = Field(..., description="Target power reduction in kW")
actual_reduction_kw: float = Field(0.0, description="Actual achieved reduction in kWh")
power_samples: List[Dict] = Field(default_factory=list, description="Power samples during event")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class ActiveEventResponse(BaseModel):
"""Response model for active event with real-time data"""
event_id: str = Field(..., description="Event identifier")
status: EventStatus = Field(..., description="Current status")
start_time: datetime = Field(..., description="Event start time")
end_time: datetime = Field(..., description="Event end time")
participating_devices: List[str] = Field(..., description="Participating devices")
target_reduction_kw: float = Field(..., description="Target reduction")
actual_reduction_kw: float = Field(..., description="Current achieved reduction")
current_device_powers: Optional[Dict[str, float]] = Field(None, description="Current device power readings")
progress_percentage: Optional[float] = Field(None, description="Event progress (0-100%)")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class LoadReductionRequest(BaseModel):
"""Request model for executing load reduction"""
event_time: datetime = Field(..., description="Event start time")
iot: str = Field(..., description="Device ID")
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"iot": "sensor_1"
}
}
# Flexibility Models
class DeviceFlexibility(BaseModel):
"""Per-device flexibility information"""
device_id: str = Field(..., description="Device identifier")
available_kw: float = Field(..., description="Available flexibility in kW", ge=0)
instruction: str = Field(..., description="Current DR instruction")
current_power: float = Field(..., description="Current power consumption in kW", ge=0)
class FlexibilityResponse(BaseModel):
"""Response model for current flexibility"""
timestamp: datetime = Field(..., description="Calculation timestamp")
total_flexibility_kw: float = Field(..., description="Total available flexibility in kW", ge=0)
devices: List[DeviceFlexibility] = Field(..., description="Per-device breakdown")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"timestamp": "2025-12-10T13:45:00",
"total_flexibility_kw": 15.5,
"devices": [
{
"device_id": "sensor_1",
"available_kw": 3.5,
"instruction": "participation",
"current_power": 3.5
},
{
"device_id": "sensor_2",
"available_kw": 0.8,
"instruction": "shifting",
"current_power": 4.0
}
]
}
}
class DeviceInstructionUpdate(BaseModel):
"""Model for updating device instructions"""
device_id: str = Field(..., description="Device identifier")
instructions: Dict[str, str] = Field(..., description="Hourly instructions (hour -> instruction type)")
class Config:
json_schema_extra = {
"example": {
"device_id": "sensor_1",
"instructions": {
"0": "participation",
"1": "shifting",
"2": "off",
"3": "participation"
}
}
}
# Configuration Models
class AutoResponseConfig(BaseModel):
"""Auto-response configuration model"""
enabled: bool = Field(..., description="Whether auto-response is enabled")
max_reduction_percentage: float = Field(20.0, description="Maximum reduction percentage for auto-accept", ge=0, le=100)
response_delay_seconds: int = Field(300, description="Delay before auto-responding (seconds)", ge=0)
min_notice_minutes: int = Field(60, description="Minimum notice required for auto-accept (minutes)", ge=0)
class Config:
json_schema_extra = {
"example": {
"enabled": True,
"max_reduction_percentage": 20.0,
"response_delay_seconds": 300,
"min_notice_minutes": 60
}
}
# Response Models
class InvitationSendResponse(BaseModel):
"""Response for sending invitation"""
event_id: str = Field(..., description="Created event identifier")
response: str = Field(..., description="Initial response status")
message: str = Field(..., description="Status message")
class InvitationAnswerResponse(BaseModel):
"""Response for answering invitation"""
success: bool = Field(..., description="Whether answer was recorded")
message: str = Field(..., description="Status message")
class EventScheduleResponse(BaseModel):
"""Response for scheduling event"""
event_id: str = Field(..., description="Scheduled event identifier")
message: str = Field(..., description="Status message")
class PerformanceAnalytics(BaseModel):
"""Performance analytics response"""
period_days: int = Field(..., description="Analysis period in days")
total_events: int = Field(..., description="Total number of events")
total_reduction_kwh: float = Field(..., description="Total energy reduced")
total_target_kwh: float = Field(..., description="Total target reduction")
average_reduction_kwh: float = Field(..., description="Average reduction per event")
achievement_rate: float = Field(..., description="Achievement rate (%)")
average_event_duration_minutes: int = Field(..., description="Average event duration")
# Health Check Model
class HealthResponse(BaseModel):
"""Health check response model"""
service: str = Field(..., description="Service name")
status: str = Field(..., description="Service status")
timestamp: datetime = Field(..., description="Check timestamp")
version: str = Field(..., description="Service version")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"service": "demand-response-service",
"status": "healthy",
"timestamp": "2025-12-10T13:45:00",
"version": "1.0.0"
}
}
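A small usage sketch of the request models above, assuming Pydantic v2 as pinned in the requirements and that the file is importable as models.py; the values mirror the json_schema_extra examples:

# Validation sketch for the invitation request model defined above.
from datetime import datetime

from models import DRInvitationCreate  # assumes the file above is saved as models.py

invite = DRInvitationCreate(
    event_time=datetime(2025, 12, 10, 14, 0),
    load_kwh=5.0,
    load_percentage=15.0,
    iots=["sensor_1", "sensor_2"],
)
print(invite.model_dump_json())  # Pydantic v2 serializer; duration_minutes defaults to 59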

View File

@@ -0,0 +1,11 @@
fastapi>=0.104.0
uvicorn[standard]>=0.24.0
pymongo>=4.5.0
motor>=3.3.0
redis>=5.0.0
python-dotenv>=1.0.0
pydantic>=2.4.0
aiohttp>=3.9.0
pytest>=7.4.0
pytest-asyncio>=0.21.0
python-multipart

View File

@@ -0,0 +1,524 @@
"""
Unit tests for Demand Response Service
Run with: pytest test_demand_response.py -v
"""
import pytest
import asyncio
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, MagicMock, patch
import json
from demand_response_service import DemandResponseService
# Test fixtures
@pytest.fixture
def mock_db():
"""Mock MongoDB database"""
db = MagicMock()
# Mock collections
db.demand_response_invitations = MagicMock()
db.demand_response_events = MagicMock()
db.demand_response_responses = MagicMock()
db.auto_response_config = MagicMock()
db.device_instructions = MagicMock()
db.flexibility_snapshots = MagicMock()
return db
@pytest.fixture
def mock_redis():
"""Mock Redis client"""
redis = AsyncMock()
redis.get = AsyncMock(return_value=None)
redis.setex = AsyncMock()
redis.delete = AsyncMock()
redis.publish = AsyncMock()
return redis
@pytest.fixture
def dr_service(mock_db, mock_redis):
"""Create DemandResponseService instance with mocks"""
return DemandResponseService(mock_db, mock_redis)
# Test: Invitation Management
@pytest.mark.asyncio
async def test_send_invitation_with_auto_accept(dr_service, mock_db, mock_redis):
"""Test sending invitation with auto-accept enabled"""
# Mock auto-response config (enabled)
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": True
})
mock_db.demand_response_invitations.insert_one = AsyncMock()
event_time = datetime.utcnow() + timedelta(hours=2)
result = await dr_service.send_invitation(
event_time=event_time,
load_kwh=5.0,
load_percentage=15.0,
iots=["sensor_1", "sensor_2"],
duration_minutes=59
)
assert "event_id" in result
assert result["response"] == "YES"
assert result["message"] == "Invitation created successfully"
# Verify MongoDB insert was called
mock_db.demand_response_invitations.insert_one.assert_called_once()
# Verify Redis caching
mock_redis.setex.assert_called()
mock_redis.publish.assert_called()
@pytest.mark.asyncio
async def test_send_invitation_manual(dr_service, mock_db, mock_redis):
"""Test sending invitation with auto-accept disabled (manual mode)"""
# Mock auto-response config (disabled)
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": False
})
mock_db.demand_response_invitations.insert_one = AsyncMock()
event_time = datetime.utcnow() + timedelta(hours=2)
result = await dr_service.send_invitation(
event_time=event_time,
load_kwh=5.0,
load_percentage=15.0,
iots=["sensor_1", "sensor_2"],
duration_minutes=59
)
assert result["response"] == "WAITING"
@pytest.mark.asyncio
async def test_answer_invitation_success(dr_service, mock_db, mock_redis):
"""Test answering an invitation successfully"""
event_id = "test-event-123"
# Mock get_invitation to return a valid invitation
dr_service.get_invitation = AsyncMock(return_value={
"event_id": event_id,
"iots": ["sensor_1", "sensor_2"]
})
# Mock that device hasn't responded yet
mock_db.demand_response_responses.find_one = AsyncMock(return_value=None)
mock_db.demand_response_responses.insert_one = AsyncMock()
mock_db.demand_response_responses.count_documents = AsyncMock(return_value=1)
result = await dr_service.answer_invitation(
event_id=event_id,
iot_id="sensor_1",
response="YES",
committed_reduction_kw=2.5
)
assert result["success"] is True
assert result["message"] == "Response recorded successfully"
# Verify response was stored
mock_db.demand_response_responses.insert_one.assert_called_once()
mock_redis.delete.assert_called()
mock_redis.publish.assert_called()
@pytest.mark.asyncio
async def test_answer_invitation_device_not_in_list(dr_service, mock_db, mock_redis):
"""Test answering invitation for device not in invitation list"""
event_id = "test-event-123"
dr_service.get_invitation = AsyncMock(return_value={
"event_id": event_id,
"iots": ["sensor_1", "sensor_2"]
})
result = await dr_service.answer_invitation(
event_id=event_id,
iot_id="sensor_3", # Not in list
response="YES"
)
assert result["success"] is False
assert "not in invitation" in result["message"]
# Test: Event Execution
@pytest.mark.asyncio
async def test_schedule_event(dr_service, mock_db, mock_redis):
"""Test scheduling a DR event"""
mock_db.demand_response_events.insert_one = AsyncMock()
event_time = datetime.utcnow() + timedelta(hours=1)
result = await dr_service.schedule_event(
event_time=event_time,
iots=["sensor_1", "sensor_2"],
load_reduction_kw=5.0,
duration_minutes=59
)
assert "event_id" in result
assert result["message"] == "Event scheduled successfully"
mock_db.demand_response_events.insert_one.assert_called_once()
mock_redis.publish.assert_called()
@pytest.mark.asyncio
async def test_execute_event(dr_service, mock_db, mock_redis):
"""Test executing a DR event (spawns background task)"""
event_id = "test-event-456"
# Mock event document
event = {
"event_id": event_id,
"start_time": datetime.utcnow(),
"end_time": datetime.utcnow() + timedelta(minutes=59),
"participating_devices": ["sensor_1"],
"target_reduction_kw": 5.0
}
mock_db.demand_response_events.find_one = AsyncMock(return_value=event)
mock_db.demand_response_events.update_one = AsyncMock()
# Execute event (starts background task)
await dr_service.execute_event(event_id)
# Verify event status updated to active
mock_db.demand_response_events.update_one.assert_called()
mock_redis.publish.assert_called()
# Verify task was created and stored
assert event_id in dr_service.active_events
# Cancel the task to prevent it from running
task = dr_service.active_events[event_id]
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
@pytest.mark.asyncio
async def test_cancel_event(dr_service, mock_db, mock_redis):
"""Test cancelling a running DR event"""
event_id = "test-event-789"
# Create a mock task
mock_task = AsyncMock()
mock_task.done = MagicMock(return_value=False)
mock_task.cancel = MagicMock()
dr_service.active_events[event_id] = mock_task
# Mock database operations
mock_db.demand_response_events.find_one = AsyncMock(return_value={
"event_id": event_id,
"status": "active"
})
mock_db.demand_response_events.update_one = AsyncMock()
await dr_service.cancel_event(event_id)
# Verify task was cancelled
mock_task.cancel.assert_called_once()
# Verify database updated
mock_db.demand_response_events.update_one.assert_called()
mock_redis.delete.assert_called()
mock_redis.publish.assert_called()
# Test: Device Power Integration
@pytest.mark.asyncio
async def test_update_device_power_cache(dr_service):
"""Test updating device power cache"""
dr_service.update_device_power_cache("sensor_1", 2.5)
assert dr_service.device_power_cache["sensor_1"] == 2.5
dr_service.update_device_power_cache("sensor_1", 3.0)
assert dr_service.device_power_cache["sensor_1"] == 3.0
@pytest.mark.asyncio
async def test_get_device_power(dr_service):
"""Test getting device power from cache"""
dr_service.device_power_cache["sensor_1"] = 2.5
power = await dr_service.get_device_power("sensor_1")
assert power == 2.5
# Test non-existent device returns 0
power = await dr_service.get_device_power("sensor_999")
assert power == 0.0
# Test: Auto-Response Configuration
@pytest.mark.asyncio
async def test_get_auto_response_config_exists(dr_service, mock_db):
"""Test getting existing auto-response config"""
mock_config = {
"config_id": "default",
"enabled": True,
"max_reduction_percentage": 20.0
}
mock_db.auto_response_config.find_one = AsyncMock(return_value=mock_config)
config = await dr_service.get_auto_response_config()
assert config["enabled"] is True
assert config["max_reduction_percentage"] == 20.0
@pytest.mark.asyncio
async def test_get_auto_response_config_creates_default(dr_service, mock_db):
"""Test creating default config when none exists"""
mock_db.auto_response_config.find_one = AsyncMock(return_value=None)
mock_db.auto_response_config.insert_one = AsyncMock()
config = await dr_service.get_auto_response_config()
assert config["enabled"] is False
mock_db.auto_response_config.insert_one.assert_called_once()
@pytest.mark.asyncio
async def test_set_auto_response_config(dr_service, mock_db, mock_redis):
"""Test updating auto-response configuration"""
mock_db.auto_response_config.update_one = AsyncMock()
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": True,
"max_reduction_percentage": 25.0
})
config = await dr_service.set_auto_response_config(
enabled=True,
max_reduction_percentage=25.0
)
assert config["enabled"] is True
assert config["max_reduction_percentage"] == 25.0
mock_db.auto_response_config.update_one.assert_called_once()
mock_redis.delete.assert_called()
# Test: Auto-Response Processing
@pytest.mark.asyncio
async def test_process_auto_responses_disabled(dr_service, mock_db):
"""Test auto-response processing when disabled"""
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": False
})
# Should return early without processing
await dr_service.process_auto_responses()
# No invitations should be queried
mock_db.demand_response_invitations.find.assert_not_called()
@pytest.mark.asyncio
async def test_process_auto_responses_enabled(dr_service, mock_db, mock_redis):
"""Test auto-response processing when enabled"""
# Mock enabled config
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": True,
"max_reduction_percentage": 20.0,
"min_notice_minutes": 60
})
# Mock pending invitation
future_time = datetime.utcnow() + timedelta(hours=2)
mock_invitation = {
"event_id": "test-event-auto",
"event_time": future_time,
"iots": ["sensor_1"]
}
dr_service.get_unanswered_invitations = AsyncMock(return_value=[mock_invitation])
dr_service.get_device_power = AsyncMock(return_value=5.0)
dr_service.answer_invitation = AsyncMock(return_value={"success": True})
mock_db.demand_response_responses.find_one = AsyncMock(return_value=None)
await dr_service.process_auto_responses()
# Should have auto-responded
dr_service.answer_invitation.assert_called_once()
# Test: Flexibility Calculation
@pytest.mark.asyncio
async def test_get_current_flexibility(dr_service, mock_db, mock_redis):
"""Test calculating current flexibility"""
# Mock device with instructions
mock_device = {
"device_id": "sensor_1",
"instructions": {
str(datetime.utcnow().hour): "participation"
}
}
async def mock_cursor():
yield mock_device
mock_db.device_instructions.find = MagicMock(return_value=mock_cursor())
mock_db.flexibility_snapshots.insert_one = AsyncMock()
# Set device power in cache
dr_service.device_power_cache["sensor_1"] = 5.0
result = await dr_service.get_current_flexibility()
assert result["total_flexibility_kw"] == 5.0
assert len(result["devices"]) == 1
assert result["devices"][0]["device_id"] == "sensor_1"
mock_db.flexibility_snapshots.insert_one.assert_called_once()
mock_redis.setex.assert_called()
# Test: Device Instructions
@pytest.mark.asyncio
async def test_update_device_instructions(dr_service, mock_db):
"""Test updating device DR instructions"""
mock_db.device_instructions.update_one = AsyncMock()
instructions = {
"0": "participation",
"1": "shifting",
"2": "off"
}
await dr_service.update_device_instructions("sensor_1", instructions)
mock_db.device_instructions.update_one.assert_called_once()
@pytest.mark.asyncio
async def test_get_device_instructions_single(dr_service, mock_db):
"""Test getting instructions for single device"""
mock_instructions = {
"device_id": "sensor_1",
"instructions": {"0": "participation"}
}
mock_db.device_instructions.find_one = AsyncMock(return_value=mock_instructions)
result = await dr_service.get_device_instructions("sensor_1")
assert result["device_id"] == "sensor_1"
assert "instructions" in result
# Test: Analytics
@pytest.mark.asyncio
async def test_get_performance_analytics(dr_service, mock_db):
"""Test getting performance analytics"""
# Mock completed events
mock_events = [
{"actual_reduction_kw": 5.0, "target_reduction_kw": 6.0},
{"actual_reduction_kw": 4.5, "target_reduction_kw": 5.0}
]
mock_cursor = AsyncMock()
mock_cursor.to_list = AsyncMock(return_value=mock_events)
mock_db.demand_response_events.find = MagicMock(return_value=mock_cursor)
analytics = await dr_service.get_performance_analytics(days=30)
assert analytics["total_events"] == 2
assert analytics["total_reduction_kwh"] == 9.5
assert analytics["total_target_kwh"] == 11.0
assert analytics["achievement_rate"] > 0
@pytest.mark.asyncio
async def test_get_performance_analytics_no_events(dr_service, mock_db):
"""Test analytics with no completed events"""
mock_cursor = AsyncMock()
mock_cursor.to_list = AsyncMock(return_value=[])
mock_db.demand_response_events.find = MagicMock(return_value=mock_cursor)
analytics = await dr_service.get_performance_analytics(days=30)
assert analytics["total_events"] == 0
assert analytics["total_reduction_kwh"] == 0.0
assert analytics["achievement_rate"] == 0.0
# Integration-style tests
@pytest.mark.asyncio
async def test_full_invitation_workflow(dr_service, mock_db, mock_redis):
"""Test complete invitation workflow from creation to response"""
# Step 1: Create invitation
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": False
})
mock_db.demand_response_invitations.insert_one = AsyncMock()
event_time = datetime.utcnow() + timedelta(hours=2)
invite_result = await dr_service.send_invitation(
event_time=event_time,
load_kwh=5.0,
load_percentage=15.0,
iots=["sensor_1", "sensor_2"],
duration_minutes=59
)
event_id = invite_result["event_id"]
assert invite_result["response"] == "WAITING"
# Step 2: Answer invitation for device 1
dr_service.get_invitation = AsyncMock(return_value={
"event_id": event_id,
"iots": ["sensor_1", "sensor_2"]
})
mock_db.demand_response_responses.find_one = AsyncMock(return_value=None)
mock_db.demand_response_responses.insert_one = AsyncMock()
mock_db.demand_response_responses.count_documents = AsyncMock(side_effect=[1, 1, 2, 2])
mock_db.demand_response_invitations.update_one = AsyncMock()
answer1 = await dr_service.answer_invitation(event_id, "sensor_1", "YES", 2.5)
assert answer1["success"] is True
# Step 3: Answer invitation for device 2
answer2 = await dr_service.answer_invitation(event_id, "sensor_2", "YES", 2.5)
assert answer2["success"] is True
# Verify final invitation update was called (all devices responded)
assert mock_db.demand_response_invitations.update_one.call_count >= 1
if __name__ == "__main__":
pytest.main([__file__, "-v", "--tb=short"])
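While iterating, a single test can be selected by pytest node id rather than running the whole suite, for example:

pytest test_demand_response.py::test_answer_invitation_success -v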

View File

@@ -1,309 +0,0 @@
#!/bin/bash
# Energy Management Microservices Deployment Script
# This script handles deployment, startup, and management of all microservices
set -e # Exit on any error
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
COMPOSE_FILE="docker-compose.yml"
PROJECT_NAME="energy-dashboard"
# Function to print colored output
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Function to check if Docker and Docker Compose are installed
check_dependencies() {
print_status "Checking dependencies..."
if ! command -v docker &> /dev/null; then
print_error "Docker is not installed. Please install Docker first."
exit 1
fi
    if ! docker compose version &> /dev/null; then
print_error "Docker Compose is not installed. Please install Docker Compose first."
exit 1
fi
print_success "Dependencies check passed"
}
# Function to create necessary directories and files
setup_environment() {
print_status "Setting up environment..."
# Create nginx configuration directory
mkdir -p nginx/ssl
# Create init-mongo directory for database initialization
mkdir -p init-mongo
# Create a simple nginx configuration if it doesn't exist
if [ ! -f "nginx/nginx.conf" ]; then
cat > nginx/nginx.conf << 'EOF'
events {
worker_connections 1024;
}
http {
upstream api_gateway {
server api-gateway:8000;
}
server {
listen 80;
location / {
proxy_pass http://api_gateway;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location /ws {
proxy_pass http://api_gateway;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
}
}
}
EOF
print_success "Created nginx configuration"
fi
# Create MongoDB initialization script if it doesn't exist
if [ ! -f "init-mongo/init.js" ]; then
cat > init-mongo/init.js << 'EOF'
// MongoDB initialization script
db = db.getSiblingDB('energy_dashboard');
db.createUser({
user: 'dashboard_user',
pwd: 'dashboard_pass',
roles: [
{ role: 'readWrite', db: 'energy_dashboard' },
{ role: 'readWrite', db: 'energy_dashboard_tokens' },
{ role: 'readWrite', db: 'energy_dashboard_batteries' },
{ role: 'readWrite', db: 'energy_dashboard_demand_response' },
{ role: 'readWrite', db: 'energy_dashboard_p2p' },
{ role: 'readWrite', db: 'energy_dashboard_forecasting' },
{ role: 'readWrite', db: 'energy_dashboard_iot' }
]
});
// Create initial collections and indexes
db.sensors.createIndex({ "sensor_id": 1 }, { unique: true });
db.sensor_readings.createIndex({ "sensor_id": 1, "timestamp": -1 });
db.room_metrics.createIndex({ "room": 1, "timestamp": -1 });
print("MongoDB initialization completed");
EOF
print_success "Created MongoDB initialization script"
fi
print_success "Environment setup completed"
}
# Function to build all services
build_services() {
print_status "Building all microservices..."
docker compose -f $COMPOSE_FILE build
if [ $? -eq 0 ]; then
print_success "All services built successfully"
else
print_error "Failed to build services"
exit 1
fi
}
# Function to start all services
start_services() {
print_status "Starting all services..."
docker compose -f $COMPOSE_FILE up -d
if [ $? -eq 0 ]; then
print_success "All services started successfully"
else
print_error "Failed to start services"
exit 1
fi
}
# Function to stop all services
stop_services() {
print_status "Stopping all services..."
docker compose -f $COMPOSE_FILE down
print_success "All services stopped"
}
# Function to restart all services
restart_services() {
stop_services
start_services
}
# Function to show service status
show_status() {
print_status "Service status:"
docker compose -f $COMPOSE_FILE ps
print_status "Service health checks:"
# Wait a moment for services to start
sleep 5
# services=("api-gateway:8000" "token-service:8001" "battery-service:8002" "demand-response-service:8003")
services=("api-gateway:8000" "token-service:8001")
for service in "${services[@]}"; do
name="${service%:*}"
port="${service#*:}"
if curl -f -s "http://localhost:$port/health" > /dev/null; then
print_success "$name is healthy"
else
print_warning "$name is not responding to health checks"
fi
done
}
# Function to view logs
view_logs() {
if [ -z "$2" ]; then
print_status "Showing logs for all services..."
docker compose -f $COMPOSE_FILE logs -f
else
print_status "Showing logs for $2..."
docker compose -f $COMPOSE_FILE logs -f $2
fi
}
# Function to clean up everything
cleanup() {
print_warning "This will remove all containers, images, and volumes. Are you sure? (y/N)"
read -r response
if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]]; then
print_status "Cleaning up everything..."
docker compose -f $COMPOSE_FILE down -v --rmi all
docker system prune -f
print_success "Cleanup completed"
else
print_status "Cleanup cancelled"
fi
}
# Function to run database migrations or setup
setup_database() {
print_status "Setting up databases..."
# Wait for MongoDB to be ready
print_status "Waiting for MongoDB to be ready..."
sleep 10
# Run any additional setup scripts here
print_success "Database setup completed"
}
# Function to show help
show_help() {
echo "Energy Management Microservices Deployment Script"
echo ""
echo "Usage: $0 [COMMAND]"
echo ""
echo "Commands:"
echo " setup Setup environment and dependencies"
echo " build Build all microservices"
echo " start Start all services"
echo " stop Stop all services"
echo " restart Restart all services"
echo " status Show service status and health"
echo " logs Show logs for all services"
echo " logs <svc> Show logs for specific service"
echo " deploy Full deployment (setup + build + start)"
echo " db-setup Setup databases"
echo " cleanup Remove all containers, images, and volumes"
echo " help Show this help message"
echo ""
echo "Examples:"
echo " $0 deploy # Full deployment"
echo " $0 logs battery-service # Show battery service logs"
echo " $0 status # Check service health"
}
# Main script logic
case "${1:-help}" in
setup)
check_dependencies
setup_environment
;;
build)
check_dependencies
build_services
;;
start)
check_dependencies
start_services
;;
stop)
stop_services
;;
restart)
restart_services
;;
status)
show_status
;;
logs)
view_logs $@
;;
deploy)
check_dependencies
setup_environment
build_services
start_services
setup_database
show_status
;;
db-setup)
setup_database
;;
cleanup)
cleanup
;;
help|--help|-h)
show_help
;;
*)
print_error "Unknown command: $1"
show_help
exit 1
;;
esac

View File

@@ -1,33 +1,6 @@
version: "3.8" version: "3.8"
services: services:
# Database Services
mongodb:
image: mongo:5.0
container_name: mongodb
restart: unless-stopped
environment:
MONGO_INITDB_ROOT_USERNAME: admin
MONGO_INITDB_ROOT_PASSWORD: password123
ports:
- "27017:27017"
volumes:
- mongodb_data:/data/db
- ./init-mongo:/docker-entrypoint-initdb.d
networks:
- energy-network
redis:
image: redis:7-alpine
container_name: redis
restart: unless-stopped
ports:
- "6379:6379"
volumes:
- redis_data:/data
networks:
- energy-network
# API Gateway
api-gateway:
build:
@@ -48,33 +21,10 @@ services:
- FORECASTING_SERVICE_URL=http://forecasting-service:8005
- IOT_CONTROL_SERVICE_URL=http://iot-control-service:8006
- DATA_INGESTION_SERVICE_URL=http://data-ingestion-service:8008
depends_on:
- mongodb
- redis
# - token-service
- sensor-service
- data-ingestion-service
# - battery-service
# - demand-response-service
networks:
- energy-network
# Token Management Service
# token-service:
# build:
# context: ./token-service
# dockerfile: Dockerfile
# container_name: token-service
# restart: unless-stopped
# ports:
# - "8001:8001"
# environment:
# - MONGO_URL=mongodb://admin:password123@localhost:27017/energy_dashboard_tokens?authSource=admin
# - JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production
# depends_on:
# - mongodb
# networks:
# - energy-network
# Battery Management Service
# battery-service:
@@ -88,30 +38,24 @@ services:
# environment:
# - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_batteries?authSource=admin
# - REDIS_URL=redis://redis:6379
# depends_on:
# - mongodb
# - redis
# networks:
# - energy-network
# Demand Response Service
# demand-response-service:
# build:
# context: ./demand-response-service
# dockerfile: Dockerfile
# container_name: demand-response-service
# restart: unless-stopped
# ports:
# - "8003:8003"
# environment:
# - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_demand_response?authSource=admin
# - REDIS_URL=redis://redis:6379
# - IOT_CONTROL_SERVICE_URL=http://iot-control-service:8006
# depends_on:
# - mongodb
# - redis
# networks:
# - energy-network
demand-response-service:
build:
context: ./demand-response-service
dockerfile: Dockerfile
container_name: demand-response-service
restart: unless-stopped
ports:
- "8003:8003"
environment:
- MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_demand_response?authSource=admin
- REDIS_URL=redis://redis:6379
- SENSOR_SERVICE_URL=http://sensor-service:8007
networks:
- energy-network
# P2P Trading Service
# p2p-trading-service:
@@ -125,9 +69,6 @@ services:
# environment:
# - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_p2p?authSource=admin
# - REDIS_URL=redis://redis:6379
# depends_on:
# - mongodb
# - redis
# networks:
# - energy-network
@@ -143,9 +84,6 @@ services:
# environment:
# - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_forecasting?authSource=admin
# - REDIS_URL=redis://redis:6379
# depends_on:
# - mongodb
# - redis
# networks:
# - energy-network
@@ -163,13 +101,9 @@ services:
# - REDIS_URL=redis://redis:6379
# - BATTERY_SERVICE_URL=http://battery-service:8002
# - DEMAND_RESPONSE_SERVICE_URL=http://demand-response-service:8003
# depends_on:
# - mongodb
# - redis
# networks:
# - energy-network
# Data Ingestion Service (FTP Monitoring & SA4CPS Integration)
data-ingestion-service:
build:
context: ./data-ingestion-service
@@ -185,8 +119,7 @@ services:
- FTP_SA4CPS_USERNAME=curvascarga@sa4cps.pt
- FTP_SA4CPS_REMOTE_PATH=/SLGs/
- FTP_CHECK_INTERVAL=21600
- FTP_SKIP_INITIAL_SCAN=true
depends_on:
- mongodb
networks: networks:
- energy-network - energy-network
@@ -202,26 +135,7 @@ services:
environment:
- MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_sensors?authSource=admin
- REDIS_URL=redis://redis:6379
- TOKEN_SERVICE_URL=http://token-service:8001
# - TOKEN_SERVICE_URL=http://token-service:8001
depends_on:
- mongodb
- redis
networks:
- energy-network
# Monitoring and Management
nginx:
image: nginx:alpine
container_name: nginx
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf
- ./nginx/ssl:/etc/nginx/ssl
depends_on:
- api-gateway
networks:
- energy-network
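The services above are configured entirely through environment variables; a minimal connection sketch using motor and redis.asyncio (the helper names and local-run defaults are assumptions, not taken from this diff):

# Connection sketch driven by the env vars defined in docker-compose.
import os

import redis.asyncio as aioredis
from motor.motor_asyncio import AsyncIOMotorClient

# Defaults are assumptions for running outside Docker; inside compose the
# MONGO_URL / REDIS_URL values above are injected into the container.
MONGO_URL = os.getenv("MONGO_URL", "mongodb://localhost:27017/energy_dashboard_demand_response")
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")

mongo_client = AsyncIOMotorClient(MONGO_URL)
db = mongo_client.get_default_database()  # database named in the connection URL
redis_client = aioredis.from_url(REDIS_URL, decode_responses=True)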

View File

@@ -1,29 +0,0 @@
events {
worker_connections 1024;
}
http {
upstream api_gateway {
server api-gateway:8000;
}
server {
listen 80;
location / {
proxy_pass http://api_gateway;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location /ws {
proxy_pass http://api_gateway;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
}
}
}

microservices/openapi.yaml (new file, 1748 lines)

File diff suppressed because it is too large

View File

@@ -0,0 +1,377 @@
"""
Analytics service for processing sensor data and generating insights
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
logger = logging.getLogger(__name__)
class AnalyticsService:
"""Service for analytics and data processing"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def query_data(self, query_params) -> Dict[str, Any]:
"""Execute advanced data query"""
try:
# Build query
query = {}
if hasattr(query_params, 'sensor_ids') and query_params.sensor_ids:
query["sensor_id"] = {"$in": query_params.sensor_ids}
if hasattr(query_params, 'start_time') and query_params.start_time:
query.setdefault("timestamp", {})["$gte"] = query_params.start_time
if hasattr(query_params, 'end_time') and query_params.end_time:
query.setdefault("timestamp", {})["$lte"] = query_params.end_time
# Execute query
cursor = self.db.sensor_readings.find(query)
if hasattr(query_params, 'limit') and query_params.limit:
cursor = cursor.limit(query_params.limit)
if hasattr(query_params, 'offset') and query_params.offset:
cursor = cursor.skip(query_params.offset)
cursor = cursor.sort("timestamp", -1)
# Get results
results = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
results.append(reading)
# Get total count
total_count = await self.db.sensor_readings.count_documents(query)
return {
"data": results,
"total_count": total_count,
"query": query_params.__dict__ if hasattr(query_params, '__dict__') else {},
"execution_time_ms": 0 # Placeholder
}
except Exception as e:
logger.error(f"Error executing data query: {e}")
raise
async def get_analytics_summary(self, hours: int = 24) -> Dict[str, Any]:
"""Get comprehensive analytics summary"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Get basic statistics
pipeline = [
{
"$match": {
"created_at": {"$gte": start_time}
}
},
{
"$group": {
"_id": None,
"total_readings": {"$sum": 1},
"average_value": {"$avg": "$value"},
"min_value": {"$min": "$value"},
"max_value": {"$max": "$value"},
"unique_sensors": {"$addToSet": "$sensor_id"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
stats = await cursor.to_list(length=1)
base_stats = stats[0] if stats else {
"total_readings": 0,
"average_value": 0,
"min_value": 0,
"max_value": 0,
"unique_sensors": []
}
# Get room-level statistics
room_stats = await self._get_room_analytics(hours)
# Get energy trends
energy_trends = await self._get_energy_trends(hours)
return {
"period_hours": hours,
"timestamp": datetime.utcnow().isoformat(),
"total_readings": base_stats["total_readings"],
"unique_sensors": len(base_stats["unique_sensors"]),
"value_statistics": {
"average": round(base_stats["average_value"], 2) if base_stats["average_value"] else 0,
"minimum": base_stats["min_value"],
"maximum": base_stats["max_value"]
},
"room_statistics": room_stats,
"energy_trends": energy_trends
}
except Exception as e:
logger.error(f"Error getting analytics summary: {e}")
raise
async def get_energy_analytics(self, hours: int = 24, room: Optional[str] = None) -> Dict[str, Any]:
"""Get energy-specific analytics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Build query
query = {"created_at": {"$gte": start_time}}
if room:
query["room"] = room
# Energy consumption over time
pipeline = [
{"$match": query},
{
"$group": {
"_id": {
"hour": {"$hour": "$created_at"},
"date": {"$dateToString": {"format": "%Y-%m-%d", "date": "$created_at"}}
},
"total_energy": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{"$sort": {"_id.date": 1, "_id.hour": 1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
hourly_data = []
async for data in cursor:
hourly_data.append({
"hour": data["_id"]["hour"],
"date": data["_id"]["date"],
"total_energy": data["total_energy"],
"reading_count": data["reading_count"]
})
# Peak consumption analysis
peak_analysis = await self._get_peak_consumption_analysis(query)
# Energy efficiency metrics
efficiency_metrics = await self._get_efficiency_metrics(query)
return {
"period_hours": hours,
"room": room,
"timestamp": datetime.utcnow().isoformat(),
"hourly_consumption": hourly_data,
"peak_analysis": peak_analysis,
"efficiency_metrics": efficiency_metrics,
"total_consumption": sum(item["total_energy"] for item in hourly_data)
}
except Exception as e:
logger.error(f"Error getting energy analytics: {e}")
raise
async def _get_room_analytics(self, hours: int) -> Dict[str, Any]:
"""Get room-level analytics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
pipeline = [
{
"$match": {
"created_at": {"$gte": start_time},
"room": {"$ne": None}
}
},
{
"$group": {
"_id": "$room",
"total_readings": {"$sum": 1},
"total_energy": {"$sum": "$value"},
"average_energy": {"$avg": "$value"},
"unique_sensors": {"$addToSet": "$sensor_id"}
}
},
{"$sort": {"total_energy": -1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
room_data = []
async for room in cursor:
room_data.append({
"room": room["_id"],
"total_readings": room["total_readings"],
"total_energy": room["total_energy"],
"average_energy": round(room["average_energy"], 2),
"sensor_count": len(room["unique_sensors"])
})
return {
"by_room": room_data,
"total_rooms": len(room_data)
}
except Exception as e:
logger.error(f"Error getting room analytics: {e}")
return {"by_room": [], "total_rooms": 0}
async def _get_energy_trends(self, hours: int) -> Dict[str, Any]:
"""Get energy consumption trends"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Get current period data
current_query = {"created_at": {"$gte": start_time}}
current_cursor = self.db.sensor_readings.aggregate([
{"$match": current_query},
{"$group": {"_id": None, "total": {"$sum": "$value"}, "count": {"$sum": 1}}}
])
current_data = await current_cursor.to_list(length=1)
current_total = current_data[0]["total"] if current_data else 0
current_count = current_data[0]["count"] if current_data else 0
# Get previous period for comparison
previous_start = start_time - timedelta(hours=hours)
previous_query = {
"created_at": {"$gte": previous_start, "$lt": start_time}
}
previous_cursor = self.db.sensor_readings.aggregate([
{"$match": previous_query},
{"$group": {"_id": None, "total": {"$sum": "$value"}, "count": {"$sum": 1}}}
])
previous_data = await previous_cursor.to_list(length=1)
previous_total = previous_data[0]["total"] if previous_data else 0
# Calculate trend
if previous_total > 0:
trend_percentage = ((current_total - previous_total) / previous_total) * 100
else:
trend_percentage = 0
return {
"current_period": {
"total_energy": current_total,
"reading_count": current_count,
"average_per_reading": current_total / current_count if current_count > 0 else 0
},
"previous_period": {
"total_energy": previous_total
},
"trend": {
"percentage_change": round(trend_percentage, 2),
"direction": "up" if trend_percentage > 0 else "down" if trend_percentage < 0 else "stable"
}
}
except Exception as e:
logger.error(f"Error getting energy trends: {e}")
return {}
async def _get_peak_consumption_analysis(self, base_query: Dict[str, Any]) -> Dict[str, Any]:
"""Analyze peak consumption patterns"""
try:
pipeline = [
{"$match": base_query},
{
"$group": {
"_id": {"$hour": "$created_at"},
"total_consumption": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{"$sort": {"total_consumption": -1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
hourly_consumption = await cursor.to_list(length=None)
if not hourly_consumption:
return {"peak_hour": None, "peak_consumption": 0, "hourly_pattern": []}
peak_data = hourly_consumption[0]
return {
"peak_hour": peak_data["_id"],
"peak_consumption": peak_data["total_consumption"],
"hourly_pattern": [
{
"hour": item["_id"],
"consumption": item["total_consumption"],
"reading_count": item["reading_count"]
}
for item in hourly_consumption
]
}
except Exception as e:
logger.error(f"Error analyzing peak consumption: {e}")
return {"peak_hour": None, "peak_consumption": 0, "hourly_pattern": []}
async def _get_efficiency_metrics(self, base_query: Dict[str, Any]) -> Dict[str, Any]:
"""Calculate energy efficiency metrics"""
try:
# Average consumption per sensor
pipeline = [
{"$match": base_query},
{
"$group": {
"_id": "$sensor_id",
"total_consumption": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{
"$group": {
"_id": None,
"average_per_sensor": {"$avg": "$total_consumption"},
"sensor_count": {"$sum": 1},
"min_consumption": {"$min": "$total_consumption"},
"max_consumption": {"$max": "$total_consumption"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
efficiency_data = await cursor.to_list(length=1)
if not efficiency_data:
return {
"average_per_sensor": 0,
"sensor_count": 0,
"efficiency_score": 0,
"variation_coefficient": 0
}
data = efficiency_data[0]
# Calculate efficiency score (lower variation = higher efficiency)
if data["average_per_sensor"] > 0:
variation_coefficient = (data["max_consumption"] - data["min_consumption"]) / data["average_per_sensor"]
efficiency_score = max(0, 100 - (variation_coefficient * 10)) # Scale to 0-100
else:
variation_coefficient = 0
efficiency_score = 100
return {
"average_per_sensor": round(data["average_per_sensor"], 2),
"sensor_count": data["sensor_count"],
"efficiency_score": round(efficiency_score, 1),
"variation_coefficient": round(variation_coefficient, 2)
}
except Exception as e:
logger.error(f"Error calculating efficiency metrics: {e}")
return {
"average_per_sensor": 0,
"sensor_count": 0,
"efficiency_score": 0,
"variation_coefficient": 0
}

View File

@@ -0,0 +1,60 @@
"""
Database connection and management for sensor service
"""
import asyncio
import logging
from motor.motor_asyncio import AsyncIOMotorClient
import redis.asyncio as redis
from typing import Optional
import os
logger = logging.getLogger(__name__)
# Global database connections
mongo_client: Optional[AsyncIOMotorClient] = None
redis_client: Optional[redis.Redis] = None
database = None
async def connect_to_mongo():
global mongo_client, database
try:
mongo_url = os.getenv("MONGO_URL", "mongodb://admin:password123@mongodb:27017/energy_dashboard_sensors?authSource=admin")
mongo_client = AsyncIOMotorClient(mongo_url)
database = mongo_client.energy_dashboard_sensors
await mongo_client.admin.command('ping')
logger.info("Connected to MongoDB successfully")
except Exception as e:
logger.error(f"Failed to connect to MongoDB: {e}")
raise
async def close_mongo_connection():
global mongo_client
if mongo_client:
mongo_client.close()
logger.info("Closed MongoDB connection")
async def connect_to_redis():
global redis_client
try:
redis_url = os.getenv("REDIS_URL", "redis://redis:6379")
redis_client = redis.from_url(redis_url, decode_responses=True)
# Test connection
await redis_client.ping()
logger.info("Connected to Redis successfully")
except Exception as e:
logger.error(f"Failed to connect to Redis: {e}")
raise
async def get_database():
return database
async def get_redis():
return redis_client

View File

@@ -16,7 +16,8 @@ import json
from models import (
SensorReading, SensorMetadata, RoomMetrics, SystemEvent, DataQuery, DataResponse,
SensorType, SensorStatus, CO2Status, OccupancyLevel, HealthResponse SensorType, SensorStatus, CO2Status, OccupancyLevel, HealthResponse,
Room, RoomCreate, RoomUpdate, RoomInfo
)
from database import connect_to_mongo, close_mongo_connection, get_database, connect_to_redis, get_redis
from sensor_service import SensorService
@@ -24,20 +25,22 @@ from room_service import RoomService
from analytics_service import AnalyticsService
from websocket_manager import WebSocketManager
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# WebSocket manager for real-time updates
websocket_manager = WebSocketManager()
@asynccontextmanager
async def lifespan(app: FastAPI):
"""Application lifespan manager"""
logger.info("Sensor Service starting up...")
await connect_to_mongo()
await connect_to_redis()
db = await get_database()
redis_client = await get_redis()
room_service = RoomService(db, redis_client)
await room_service.initialize_default_rooms()
# Start background tasks
asyncio.create_task(redis_subscriber_task())
asyncio.create_task(room_metrics_aggregation_task())
@@ -117,7 +120,7 @@ async def websocket_endpoint(websocket: WebSocket):
# Original Dashboard API Endpoints
# Sensor Management
@app.get("/sensors") @app.get("/sensors/get")
async def get_sensors(
room: Optional[str] = Query(None, description="Filter by room"),
sensor_type: Optional[SensorType] = Query(None, description="Filter by sensor type"),
@@ -250,6 +253,19 @@ async def delete_sensor(
raise HTTPException(status_code=500, detail="Internal server error")
# Room Management
@app.get("/rooms/names")
async def get_room_names(service: RoomService = Depends(get_room_service)):
"""Get simple list of room names for dropdowns"""
try:
room_names = await service.get_all_room_names()
return {
"rooms": room_names,
"count": len(room_names)
}
except Exception as e:
logger.error(f"Error getting room names: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@app.get("/rooms")
async def get_rooms(service: RoomService = Depends(get_room_service)):
"""Get all rooms with sensor counts and metrics"""
@@ -265,16 +281,16 @@ async def get_rooms(service: RoomService = Depends(get_room_service)):
@app.post("/rooms")
async def create_room(
room_data: dict, room_data: RoomCreate,
service: RoomService = Depends(get_room_service)
):
"""Create a new room"""
try:
result = await service.create_room(room_data) result = await service.create_room(room_data.dict())
return {
"message": "Room created successfully",
"room": room_data.get("name"), "room": result["name"],
"created_at": result.get("created_at") "created_at": result["created_at"]
}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
@@ -282,6 +298,40 @@ async def create_room(
logger.error(f"Error creating room: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@app.put("/rooms/{room_name}")
async def update_room(
room_name: str,
room_data: RoomUpdate,
service: RoomService = Depends(get_room_service)
):
"""Update an existing room"""
try:
result = await service.update_room(room_name, room_data.dict(exclude_unset=True))
return {
"message": "Room updated successfully",
"room": result["name"],
"updated_at": result["updated_at"],
"modified": result["modified"]
}
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Error updating room {room_name}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@app.delete("/rooms/{room_name}")
async def delete_room(room_name: str, service: RoomService = Depends(get_room_service)):
"""Delete a room"""
try:
result = await service.delete_room(room_name)
return {
"message": "Room deleted successfully",
**result
}
except Exception as e:
logger.error(f"Error deleting room {room_name}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@app.get("/rooms/{room_name}")
async def get_room(room_name: str, service: RoomService = Depends(get_room_service)):
"""Get detailed room information"""

View File

@@ -296,19 +296,82 @@ class AnalyticsSummary(BaseModel):
datetime: lambda v: v.isoformat()
}
# Room Management Models
class Room(BaseModel):
"""Room model for database storage and API responses"""
name: str = Field(..., description="Unique room name")
description: Optional[str] = Field(None, description="Room description")
floor: Optional[str] = Field(None, description="Floor designation")
building: Optional[str] = Field(None, description="Building name")
area: Optional[float] = Field(None, description="Room area in square meters")
capacity: Optional[int] = Field(None, description="Maximum occupancy")
room_type: Optional[str] = Field(None, description="Room type (office, meeting, storage, etc.)")
# Metadata
created_at: datetime = Field(default_factory=datetime.utcnow, description="Room creation timestamp")
updated_at: datetime = Field(default_factory=datetime.utcnow, description="Room update timestamp")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class RoomCreate(BaseModel):
"""Model for creating new rooms"""
name: str = Field(..., description="Unique room name", min_length=1, max_length=100)
description: Optional[str] = Field(None, description="Room description", max_length=500)
floor: Optional[str] = Field(None, description="Floor designation", max_length=50)
building: Optional[str] = Field(None, description="Building name", max_length=100)
area: Optional[float] = Field(None, description="Room area in square meters", gt=0)
capacity: Optional[int] = Field(None, description="Maximum occupancy", gt=0)
room_type: Optional[str] = Field(None, description="Room type", max_length=50)
class RoomUpdate(BaseModel):
"""Model for updating existing rooms"""
description: Optional[str] = Field(None, description="Room description", max_length=500)
floor: Optional[str] = Field(None, description="Floor designation", max_length=50)
building: Optional[str] = Field(None, description="Building name", max_length=100)
area: Optional[float] = Field(None, description="Room area in square meters", gt=0)
capacity: Optional[int] = Field(None, description="Maximum occupancy", gt=0)
room_type: Optional[str] = Field(None, description="Room type", max_length=50)
class RoomInfo(BaseModel):
"""Comprehensive room information for API responses"""
name: str = Field(..., description="Room name")
description: Optional[str] = Field(None, description="Room description")
floor: Optional[str] = Field(None, description="Floor designation")
building: Optional[str] = Field(None, description="Building name")
area: Optional[float] = Field(None, description="Room area in square meters")
capacity: Optional[int] = Field(None, description="Maximum occupancy")
room_type: Optional[str] = Field(None, description="Room type")
# Runtime information
sensor_count: int = Field(0, description="Number of sensors in room")
active_sensors: int = Field(0, description="Number of active sensors")
last_updated: Optional[datetime] = Field(None, description="Last metrics update")
# Timestamps
created_at: datetime = Field(..., description="Room creation timestamp")
updated_at: datetime = Field(..., description="Room update timestamp")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class HealthResponse(BaseModel):
"""Health check response"""
service: str
status: str
timestamp: datetime
version: str
# Additional service-specific health metrics
total_sensors: Optional[int] = None
active_sensors: Optional[int] = None
total_rooms: Optional[int] = None
websocket_connections: Optional[int] = None
class Config:
json_encoders = {
datetime: lambda v: v.isoformat()

View File

@@ -0,0 +1,467 @@
"""
Room service for managing rooms and room-level metrics
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
logger = logging.getLogger(__name__)
class RoomService:
"""Service for managing rooms and room-level analytics"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def get_all_room_names(self) -> List[str]:
"""Get a simple list of all room names for dropdowns/selections"""
try:
# Get rooms from the rooms collection
room_cursor = self.db.rooms.find({}, {"name": 1})
room_names = set()
async for room in room_cursor:
room_names.add(room["name"])
# Also get rooms that exist only in sensor data (legacy support)
sensor_cursor = self.db.sensors.find(
{"room": {"$ne": None, "$exists": True}},
{"room": 1}
)
async for sensor in sensor_cursor:
if sensor.get("room"):
room_names.add(sensor["room"])
# Convert to sorted list
return sorted(list(room_names))
except Exception as e:
logger.error(f"Error getting room names: {e}")
raise
async def initialize_default_rooms(self) -> None:
"""Initialize default rooms if none exist"""
try:
# Check if any rooms exist
room_count = await self.db.rooms.count_documents({})
if room_count == 0:
# Create default rooms
default_rooms = [
{"name": "Conference Room A", "description": "Main conference room", "room_type": "meeting"},
{"name": "Conference Room B", "description": "Secondary conference room", "room_type": "meeting"},
{"name": "Office Floor 1", "description": "First floor office space", "room_type": "office"},
{"name": "Office Floor 2", "description": "Second floor office space", "room_type": "office"},
{"name": "Kitchen", "description": "Employee kitchen and break room", "room_type": "common"},
{"name": "Lobby", "description": "Main entrance and reception", "room_type": "common"},
{"name": "Server Room", "description": "IT equipment room", "room_type": "technical"},
{"name": "Storage Room", "description": "General storage", "room_type": "storage"},
{"name": "Meeting Room 1", "description": "Small meeting room", "room_type": "meeting"},
{"name": "Meeting Room 2", "description": "Small meeting room", "room_type": "meeting"}
]
for room_data in default_rooms:
room_doc = {
**room_data,
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
await self.db.rooms.insert_one(room_doc)
logger.info(f"Initialized {len(default_rooms)} default rooms")
except Exception as e:
logger.error(f"Error initializing default rooms: {e}")
raise
async def get_rooms(self) -> List[Dict[str, Any]]:
"""Get all rooms with sensor counts and metrics"""
try:
# Get unique rooms from sensors
pipeline = [
{"$group": {"_id": "$room", "sensor_count": {"$sum": 1}}},
{"$match": {"_id": {"$ne": None}}}
]
cursor = self.db.sensors.aggregate(pipeline)
rooms = []
async for room_data in cursor:
room_name = room_data["_id"]
# Get latest room metrics
latest_metrics = await self._get_latest_room_metrics(room_name)
room_info = {
"name": room_name,
"sensor_count": room_data["sensor_count"],
"latest_metrics": latest_metrics,
"last_updated": latest_metrics.get("timestamp") if latest_metrics else None
}
rooms.append(room_info)
return rooms
except Exception as e:
logger.error(f"Error getting rooms: {e}")
raise
async def create_room(self, room_data: Dict[str, Any]) -> Dict[str, Any]:
"""Create a new room"""
try:
room_doc = {
"name": room_data.get("name"),
"description": room_data.get("description", ""),
"floor": room_data.get("floor"),
"building": room_data.get("building"),
"area": room_data.get("area"),
"capacity": room_data.get("capacity"),
"room_type": room_data.get("room_type"),
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
# Validate required fields
if not room_doc["name"] or not room_doc["name"].strip():
raise ValueError("Room name is required")
# Check if room already exists
existing = await self.db.rooms.find_one({"name": room_doc["name"]})
if existing:
raise ValueError(f"Room {room_doc['name']} already exists")
result = await self.db.rooms.insert_one(room_doc)
return {
"id": str(result.inserted_id),
"name": room_doc["name"],
"created_at": room_doc["created_at"]
}
except Exception as e:
logger.error(f"Error creating room: {e}")
raise
async def update_room(self, room_name: str, room_data: Dict[str, Any]) -> Dict[str, Any]:
"""Update an existing room"""
try:
# Check if room exists
existing = await self.db.rooms.find_one({"name": room_name})
if not existing:
raise ValueError(f"Room {room_name} not found")
# Prepare update document
update_doc = {
"updated_at": datetime.utcnow()
}
# Update only provided fields
for field in ["description", "floor", "building", "area", "capacity", "room_type"]:
if field in room_data and room_data[field] is not None:
update_doc[field] = room_data[field]
# Perform update
result = await self.db.rooms.update_one(
{"name": room_name},
{"$set": update_doc}
)
if result.modified_count == 0:
logger.warning(f"No changes made to room {room_name}")
return {
"name": room_name,
"updated_at": update_doc["updated_at"],
"modified": result.modified_count > 0
}
except Exception as e:
logger.error(f"Error updating room: {e}")
raise
async def delete_room(self, room_name: str) -> Dict[str, Any]:
"""Delete a room and optionally reassign sensors"""
try:
# Check if room exists
existing = await self.db.rooms.find_one({"name": room_name})
# Check for sensors in this room
sensors_in_room = await self.db.sensors.find({"room": room_name}).to_list(None)
if sensors_in_room:
# Update sensors to have null room (don't delete sensors)
await self.db.sensors.update_many(
{"room": room_name},
{"$unset": {"room": ""}}
)
# Delete room from rooms collection if it exists
room_deleted = False
if existing:
result = await self.db.rooms.delete_one({"name": room_name})
room_deleted = result.deleted_count > 0
# Delete room metrics
metrics_result = await self.db.room_metrics.delete_many({"room": room_name})
return {
"room": room_name,
"room_deleted": room_deleted,
"sensors_updated": len(sensors_in_room),
"metrics_deleted": metrics_result.deleted_count
}
except Exception as e:
logger.error(f"Error deleting room: {e}")
raise
async def get_room_details(self, room_name: str) -> Optional[Dict[str, Any]]:
"""Get detailed room information"""
try:
# Get room info
room = await self.db.rooms.find_one({"name": room_name})
if not room:
# Create basic room info from sensor data
sensors = await self.db.sensors.find({"room": room_name}).to_list(None)
if not sensors:
return None
room = {
"name": room_name,
"description": f"Room with {len(sensors)} sensors",
"sensor_count": len(sensors)
}
else:
room["_id"] = str(room["_id"])
# Get sensor count
sensor_count = await self.db.sensors.count_documents({"room": room_name})
room["sensor_count"] = sensor_count
# Get sensors in this room
cursor = self.db.sensors.find({"room": room_name})
sensors = []
async for sensor in cursor:
sensor["_id"] = str(sensor["_id"])
sensors.append(sensor)
room["sensors"] = sensors
# Get recent room metrics
room["recent_metrics"] = await self._get_recent_room_metrics(room_name, hours=24)
return room
except Exception as e:
logger.error(f"Error getting room details: {e}")
raise
async def get_room_data(self, room_name: str, start_time: Optional[int] = None,
end_time: Optional[int] = None, limit: int = 100) -> Dict[str, Any]:
"""Get historical data for a room"""
try:
# Get room metrics
room_query = {"room": room_name}
if start_time or end_time:
room_query["timestamp"] = {}
if start_time:
room_query["timestamp"]["$gte"] = start_time
if end_time:
room_query["timestamp"]["$lte"] = end_time
room_metrics_cursor = self.db.room_metrics.find(room_query).sort("timestamp", -1).limit(limit)
room_metrics = []
async for metric in room_metrics_cursor:
metric["_id"] = str(metric["_id"])
room_metrics.append(metric)
# Get sensor readings for this room
sensor_query = {"room": room_name}
if start_time or end_time:
sensor_query["timestamp"] = {}
if start_time:
sensor_query["timestamp"]["$gte"] = start_time
if end_time:
sensor_query["timestamp"]["$lte"] = end_time
sensor_readings_cursor = self.db.sensor_readings.find(sensor_query).sort("timestamp", -1).limit(limit)
sensor_readings = []
async for reading in sensor_readings_cursor:
reading["_id"] = str(reading["_id"])
sensor_readings.append(reading)
return {
"room_metrics": room_metrics,
"sensor_readings": sensor_readings
}
except Exception as e:
logger.error(f"Error getting room data: {e}")
raise
async def update_room_metrics(self, sensor_data):
"""Update room-level metrics when sensor data is received"""
try:
if not sensor_data.room:
return
# Calculate room-level aggregates
room_metrics = await self._calculate_room_metrics(sensor_data.room)
if room_metrics:
# Store room metrics
metrics_doc = {
"room": sensor_data.room,
"timestamp": sensor_data.timestamp,
"total_energy": room_metrics.get("total_energy", 0),
"average_temperature": room_metrics.get("avg_temperature"),
"co2_level": room_metrics.get("co2_level"),
"occupancy_estimate": room_metrics.get("occupancy_estimate"),
"sensor_count": room_metrics.get("sensor_count", 0),
"created_at": datetime.utcnow()
}
await self.db.room_metrics.insert_one(metrics_doc)
# Cache latest metrics
if self.redis:
cache_key = f"room:{sensor_data.room}:latest_metrics"
await self.redis.setex(cache_key, 3600, json.dumps(metrics_doc, default=str))
except Exception as e:
logger.error(f"Error updating room metrics: {e}")
async def aggregate_all_room_metrics(self):
"""Aggregate metrics for all rooms"""
try:
# Get all unique rooms
pipeline = [{"$group": {"_id": "$room"}}]
cursor = self.db.sensors.aggregate(pipeline)
async for room_data in cursor:
room_name = room_data["_id"]
if room_name:
await self._calculate_room_metrics(room_name)
except Exception as e:
logger.error(f"Error aggregating room metrics: {e}")
async def _get_latest_room_metrics(self, room_name: str) -> Optional[Dict[str, Any]]:
"""Get latest room metrics"""
try:
# Try Redis cache first
if self.redis:
cache_key = f"room:{room_name}:latest_metrics"
cached = await self.redis.get(cache_key)
if cached:
return json.loads(cached)
# Fall back to database
latest = await self.db.room_metrics.find_one(
{"room": room_name},
sort=[("timestamp", -1)]
)
if latest:
latest["_id"] = str(latest["_id"])
return latest
return None
except Exception as e:
logger.error(f"Error getting latest room metrics: {e}")
return None
async def _get_recent_room_metrics(self, room_name: str, hours: int = 24) -> List[Dict[str, Any]]:
"""Get recent room metrics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
cursor = self.db.room_metrics.find({
"room": room_name,
"created_at": {"$gte": start_time}
}).sort("timestamp", -1)
metrics = []
async for metric in cursor:
metric["_id"] = str(metric["_id"])
metrics.append(metric)
return metrics
except Exception as e:
logger.error(f"Error getting recent room metrics: {e}")
return []
async def _calculate_room_metrics(self, room_name: str) -> Dict[str, Any]:
"""Calculate aggregated metrics for a room"""
try:
# Get recent sensor readings (last 5 minutes)
five_minutes_ago = datetime.utcnow() - timedelta(minutes=5)
pipeline = [
{
"$match": {
"room": room_name,
"created_at": {"$gte": five_minutes_ago}
}
},
{
"$group": {
"_id": "$sensor_id",
"latest_value": {"$last": "$value"},
"sensor_type": {"$last": "$sensor_type"},
"unit": {"$last": "$unit"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
total_energy = 0
temperatures = []
co2_levels = []
sensor_count = 0
async for sensor_data in cursor:
sensor_count += 1
value = sensor_data.get("latest_value", 0)
sensor_type = sensor_data.get("sensor_type", "energy")
if sensor_type == "energy" or "energy" in str(sensor_data.get("unit", "")).lower():
total_energy += value
elif sensor_type == "temperature":
temperatures.append(value)
elif sensor_type == "co2":
co2_levels.append(value)
metrics = {
"total_energy": total_energy,
"sensor_count": sensor_count,
"avg_temperature": sum(temperatures) / len(temperatures) if temperatures else None,
"co2_level": sum(co2_levels) / len(co2_levels) if co2_levels else None,
"occupancy_estimate": self._estimate_occupancy(sensor_count, total_energy)
}
return metrics
except Exception as e:
logger.error(f"Error calculating room metrics: {e}")
return {}
def _estimate_occupancy(self, sensor_count: int, total_energy: float) -> Optional[str]:
"""Estimate occupancy level based on energy consumption"""
if total_energy == 0:
return "vacant"
elif total_energy < sensor_count * 50: # Low threshold
return "low"
elif total_energy < sensor_count * 150: # Medium threshold
return "medium"
else:
return "high"

View File

@@ -0,0 +1,251 @@
"""
Sensor service business logic
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
logger = logging.getLogger(__name__)
class SensorService:
"""Service for managing sensors and sensor data"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def get_sensors(self, room: Optional[str] = None, sensor_type: Optional[str] = None, status: Optional[str] = None) -> List[Dict[str, Any]]:
"""Get sensors with optional filtering"""
try:
query = {}
if room:
query["room"] = room
if sensor_type:
query["sensor_type"] = sensor_type
if status:
query["status"] = status
cursor = self.db.sensors.find(query)
sensors = []
async for sensor in cursor:
sensor["_id"] = str(sensor["_id"])
sensors.append(sensor)
return sensors
except Exception as e:
logger.error(f"Error getting sensors: {e}")
raise
async def get_sensor_details(self, sensor_id: str) -> Optional[Dict[str, Any]]:
"""Get detailed sensor information"""
try:
sensor = await self.db.sensors.find_one({"sensor_id": sensor_id})
if sensor:
sensor["_id"] = str(sensor["_id"])
# Get recent readings
recent_readings = await self.get_sensor_data(sensor_id, limit=10)
sensor["recent_readings"] = recent_readings.get("readings", [])
return sensor
return None
except Exception as e:
logger.error(f"Error getting sensor details: {e}")
raise
async def get_sensor_data(self, sensor_id: str, start_time: Optional[int] = None,
end_time: Optional[int] = None, limit: int = 100, offset: int = 0) -> Dict[str, Any]:
"""Get historical sensor data"""
try:
query = {"sensor_id": sensor_id}
if start_time or end_time:
query["timestamp"] = {}
if start_time:
query["timestamp"]["$gte"] = start_time
if end_time:
query["timestamp"]["$lte"] = end_time
# Get total count
total_count = await self.db.sensor_readings.count_documents(query)
# Get readings
cursor = self.db.sensor_readings.find(query).sort("timestamp", -1).skip(offset).limit(limit)
readings = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
readings.append(reading)
return {
"readings": readings,
"total_count": total_count,
"execution_time_ms": 0 # Placeholder
}
except Exception as e:
logger.error(f"Error getting sensor data: {e}")
raise
async def create_sensor(self, sensor_data) -> Dict[str, Any]:
"""Create a new sensor"""
try:
# Check if sensor already exists
existing = await self.db.sensors.find_one({"sensor_id": sensor_data.sensor_id})
if existing:
raise ValueError(f"Sensor {sensor_data.sensor_id} already exists")
# Create sensor document
sensor_doc = {
"sensor_id": sensor_data.sensor_id,
"name": sensor_data.name,
"sensor_type": sensor_data.sensor_type.value if hasattr(sensor_data.sensor_type, 'value') else str(sensor_data.sensor_type),
"room": sensor_data.room,
"location": sensor_data.location if hasattr(sensor_data, 'location') else None,
"status": "active",
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
result = await self.db.sensors.insert_one(sensor_doc)
return {"created_at": sensor_doc["created_at"]}
except Exception as e:
logger.error(f"Error creating sensor: {e}")
raise
async def update_sensor(self, sensor_id: str, update_data: Dict[str, Any]) -> bool:
"""Update sensor metadata"""
try:
update_data["updated_at"] = datetime.utcnow()
result = await self.db.sensors.update_one(
{"sensor_id": sensor_id},
{"$set": update_data}
)
return result.modified_count > 0
except Exception as e:
logger.error(f"Error updating sensor: {e}")
raise
async def delete_sensor(self, sensor_id: str) -> Dict[str, Any]:
"""Delete a sensor and its data"""
try:
# Delete readings
readings_result = await self.db.sensor_readings.delete_many({"sensor_id": sensor_id})
# Delete sensor
await self.db.sensors.delete_one({"sensor_id": sensor_id})
return {"readings_deleted": readings_result.deleted_count}
except Exception as e:
logger.error(f"Error deleting sensor: {e}")
raise
async def ingest_sensor_data(self, sensor_data) -> Dict[str, Any]:
"""Ingest real-time sensor data"""
try:
# Create reading document
reading_doc = {
"sensor_id": sensor_data.sensor_id,
"timestamp": sensor_data.timestamp,
"value": sensor_data.value,
"unit": sensor_data.unit if hasattr(sensor_data, 'unit') else None,
"room": sensor_data.room if hasattr(sensor_data, 'room') else None,
"created_at": datetime.utcnow()
}
# Store in database
await self.db.sensor_readings.insert_one(reading_doc)
# Cache recent value in Redis
if self.redis:
cache_key = f"sensor:{sensor_data.sensor_id}:latest"
await self.redis.setex(cache_key, 3600, json.dumps(reading_doc, default=str))
return {"success": True}
except Exception as e:
logger.error(f"Error ingesting sensor data: {e}")
raise
async def export_data(self, start_time: int, end_time: int, sensor_ids: Optional[str] = None,
format: str = "json") -> Dict[str, Any]:
"""Export sensor data"""
try:
query = {
"timestamp": {"$gte": start_time, "$lte": end_time}
}
if sensor_ids:
sensor_list = [s.strip() for s in sensor_ids.split(",")]
query["sensor_id"] = {"$in": sensor_list}
cursor = self.db.sensor_readings.find(query).sort("timestamp", 1)
readings = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
readings.append(reading)
return {
"format": format,
"data": readings,
"total_records": len(readings),
"period": {"start": start_time, "end": end_time}
}
except Exception as e:
logger.error(f"Error exporting data: {e}")
raise
async def get_events(self, severity: Optional[str] = None, event_type: Optional[str] = None,
hours: int = 24, limit: int = 50) -> List[Dict[str, Any]]:
"""Get system events"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
query = {"timestamp": {"$gte": start_time}}
if severity:
query["severity"] = severity
if event_type:
query["event_type"] = event_type
cursor = self.db.system_events.find(query).sort("timestamp", -1).limit(limit)
events = []
async for event in cursor:
event["_id"] = str(event["_id"])
events.append(event)
return events
except Exception as e:
logger.error(f"Error getting events: {e}")
return []
async def cleanup_old_data(self, cutoff_date: datetime):
"""Clean up old sensor data"""
try:
result = await self.db.sensor_readings.delete_many({
"created_at": {"$lt": cutoff_date}
})
logger.info(f"Cleaned up {result.deleted_count} old sensor readings")
except Exception as e:
logger.error(f"Error cleaning up old data: {e}")
raise

View File

@@ -14,8 +14,8 @@ import logging
from typing import List, Optional
from models import (
TokenGenerateRequest, TokenResponse, TokenValidationResponse,
TokenListResponse, HealthResponse TokenListResponse, HealthResponse, Token
)
from database import connect_to_mongo, close_mongo_connection, get_database
from token_service import TokenService
@@ -32,9 +32,9 @@ async def lifespan(app: FastAPI):
logger.info("Token Service starting up...")
await connect_to_mongo()
logger.info("Token Service startup complete")
yield
logger.info("Token Service shutting down...")
await close_mongo_connection()
logger.info("Token Service shutdown complete")
@@ -64,7 +64,7 @@ async def health_check():
try:
db = await get_database()
await db.command("ping")
return HealthResponse(
service="token-service",
status="healthy",
@@ -81,7 +81,7 @@ async def get_tokens(db=Depends(get_db)):
try:
token_service = TokenService(db)
tokens = await token_service.get_tokens()
return TokenListResponse(
tokens=tokens,
count=len(tokens)
@@ -95,6 +95,16 @@ async def generate_token(request: TokenGenerateRequest, db=Depends(get_db)):
"""Generate a new JWT token"""
try:
token_service = TokenService(db)
if not request.data_aggregation:
request.data_aggregation = False
if not request.time_aggregation:
request.time_aggregation = False
if not request.embargo:
request.embargo = False
if not request.exp_hours:
request.exp_hours = 24
token = token_service.generate_token(
name=request.name,
list_of_resources=request.list_of_resources,
@@ -103,23 +113,23 @@ async def generate_token(request: TokenGenerateRequest, db=Depends(get_db)):
embargo=request.embargo,
exp_hours=request.exp_hours
)
return TokenResponse(token=token)
except Exception as e:
logger.error(f"Error generating token: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@app.post("/tokens/validate", response_model=TokenValidationResponse)
async def validate_token(token: str, db=Depends(get_db)): async def validate_token(token: Token, db=Depends(get_db)):
"""Validate and decode a JWT token"""
try:
token_service = TokenService(db)
is_valid = await token_service.is_token_valid(token) is_valid = await token_service.is_token_valid(token.token)
decoded = token_service.decode_token(token) if is_valid else None decoded = token_service.decode_token(token.token) if is_valid else None
return TokenValidationResponse(
valid=is_valid,
token=token, token=token.token,
decoded=decoded if is_valid and "error" not in (decoded or {}) else None,
error=decoded.get("error") if decoded and "error" in decoded else None
)
@@ -128,11 +138,11 @@ async def validate_token(token: str, db=Depends(get_db)):
raise HTTPException(status_code=500, detail="Internal server error")
@app.post("/tokens/save")
async def save_token(token: str, db=Depends(get_db)): async def save_token(token: Token, db=Depends(get_db)):
"""Save a token to database"""
try:
token_service = TokenService(db)
result = await token_service.insert_token(token) result = await token_service.insert_token(token.token)
return result
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
@@ -159,7 +169,7 @@ async def get_token_permissions(token: str, db=Depends(get_db)):
try:
token_service = TokenService(db)
permissions = await token_service.get_token_permissions(token)
if permissions:
return {"permissions": permissions}
else:
@@ -176,7 +186,7 @@ async def cleanup_expired_tokens(db=Depends(get_db)):
try:
token_service = TokenService(db)
expired_count = await token_service.cleanup_expired_tokens()
return {
"message": "Expired tokens cleaned up",
"expired_tokens_removed": expired_count
@@ -187,4 +197,4 @@ async def cleanup_expired_tokens(db=Depends(get_db)):
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8001)

View File

@@ -6,6 +6,9 @@ from pydantic import BaseModel, Field
from typing import List, Optional, Dict, Any
from datetime import datetime
class Token(BaseModel):
token: str = Field(..., description="JWT token")
class TokenGenerateRequest(BaseModel):
"""Request model for token generation"""
name: str = Field(..., description="Token owner name")
@@ -48,8 +51,8 @@ class HealthResponse(BaseModel):
status: str
timestamp: datetime
version: str
class Config:
json_encoders = {
datetime: lambda v: v.isoformat()
}

View File

@@ -11,20 +11,20 @@ import os
class TokenService:
"""Service for managing JWT tokens and authentication"""
def __init__(self, db: AsyncIOMotorDatabase, secret_key: str = None):
self.db = db
self.secret_key = secret_key or os.getenv("JWT_SECRET_KEY", "energy-dashboard-secret-key")
self.tokens_collection = db.tokens
def generate_token(self, name: str, list_of_resources: List[str],
data_aggregation: bool = False, time_aggregation: bool = False,
embargo: int = 0, exp_hours: int = 24) -> str:
"""Generate a new JWT token with specified permissions"""
# Calculate expiration time
exp_timestamp = int((datetime.utcnow() + timedelta(hours=exp_hours)).timestamp())
# Create token payload
payload = {
"name": name,
@@ -36,11 +36,11 @@ class TokenService:
"iat": int(datetime.utcnow().timestamp()),
"jti": str(uuid.uuid4()) # unique token ID
}
# Generate JWT token
token = jwt.encode(payload, self.secret_key, algorithm="HS256")
return token
def decode_token(self, token: str) -> Optional[Dict[str, Any]]:
"""Decode and verify JWT token"""
try:
@@ -50,11 +50,11 @@ class TokenService:
return {"error": "Token has expired"}
except jwt.InvalidTokenError:
return {"error": "Invalid token"}
async def insert_token(self, token: str) -> Dict[str, Any]:
"""Save token to database"""
now = datetime.utcnow()
# Decode token to verify it's valid
decoded = self.decode_token(token)
if decoded and "error" not in decoded:
@@ -68,14 +68,14 @@ class TokenService:
"resources": decoded.get("list_of_resources", []),
"expires_at": datetime.fromtimestamp(decoded.get("exp", 0))
}
# Upsert token (update if exists, insert if not)
await self.tokens_collection.replace_one(
{"token": token},
token_record,
upsert=True
)
return {
"token": token,
"datetime": now.isoformat(),
@@ -83,11 +83,11 @@ class TokenService:
}
else:
raise ValueError("Invalid token cannot be saved")
async def revoke_token(self, token: str) -> Dict[str, Any]:
"""Revoke a token by marking it as inactive"""
now = datetime.utcnow()
result = await self.tokens_collection.update_one(
{"token": token},
{
@@ -98,7 +98,7 @@ class TokenService:
}
}
)
if result.matched_count > 0:
return {
"token": token,
@@ -107,51 +107,49 @@ class TokenService:
}
else:
raise ValueError("Token not found")
async def get_tokens(self) -> List[Dict[str, Any]]:
"""Get all tokens from database"""
cursor = self.tokens_collection.find({})
tokens = []
async for token_record in cursor:
# Convert ObjectId to string and datetime to ISO format
token_record["_id"] = str(token_record["_id"])
for field in ["datetime", "created_at", "updated_at", "expires_at", "revoked_at"]:
if field in token_record and token_record[field]:
token_record[field] = token_record[field].isoformat()
tokens.append(token_record)
return tokens
async def is_token_valid(self, token: str) -> bool:
"""Check if token is valid and active"""
# Check if token exists and is active in database
token_record = await self.tokens_collection.find_one({
"token": token,
"active": True
})
if not token_record:
return False
# Verify JWT signature and expiration
decoded = self.decode_token(token)
return decoded is not None and "error" not in decoded
async def get_token_permissions(self, token: str) -> Optional[Dict[str, Any]]:
"""Get permissions for a valid token"""
if await self.is_token_valid(token):
return self.decode_token(token)
return None
async def cleanup_expired_tokens(self) -> int:
"""Remove expired tokens from database"""
now = datetime.utcnow() now = datetime.now()
# Delete tokens that have expired
result = await self.tokens_collection.delete_many({
"expires_at": {"$lt": now}
})
return result.deleted_count

monolith/.env.example (new file, 14 lines)
View File

@@ -0,0 +1,14 @@
# MongoDB Configuration
# Update with your MongoDB connection string
MONGO_URL=mongodb://admin:password123@localhost:27017/?authSource=admin
# HTTP Poller Configuration
# IoT device polling settings
HTTP_POLL_INTERVAL=60
HTTP_TIMEOUT=10
HTTP_MAX_CONCURRENT=5
# Application Settings
DEBUG=false
HOST=0.0.0.0
PORT=8000

monolith/Dockerfile (new file, 28 lines)
View File

@@ -0,0 +1,28 @@
FROM python:3.11-slim
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y \
gcc \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements and install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY src/ ./src/
# Set Python path
ENV PYTHONPATH=/app
# Expose port
EXPOSE 8000
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD python -c "import requests; requests.get('http://localhost:8000/health')"
# Run the application
CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000"]

monolith/MIGRATION.md (new file, 480 lines)
View File

@@ -0,0 +1,480 @@
# Migration Guide: Microservices to Modular Monolith
This guide explains the transformation from the microservices architecture to the modular monolithic architecture.
## Overview of Changes
### Architecture Transformation
**Before (Microservices)**:
- Multiple independent services (8+ services)
- HTTP-based inter-service communication
- Redis pub/sub for events
- API Gateway for routing
- Service discovery and health checking
- Separate Docker containers per service
**After (Modular Monolith)**:
- Single application with modular structure
- Direct function calls via dependency injection
- In-process event bus
- Integrated routing in main application
- Single Docker container
- Separate databases per module (preserved isolation)
## Key Architectural Differences
### 1. Service Communication
#### Microservices Approach
```python
# HTTP call to another service
async with aiohttp.ClientSession() as session:
url = f"{SENSOR_SERVICE_URL}/sensors/{sensor_id}"
async with session.get(url) as response:
data = await response.json()
```
#### Modular Monolith Approach
```python
# Direct function call with dependency injection
from modules.sensors import SensorService
from core.dependencies import get_sensors_db
sensor_service = SensorService(db=await get_sensors_db(), redis=None)
data = await sensor_service.get_sensor_details(sensor_id)
```
### 2. Event Communication
#### Microservices Approach (Redis Pub/Sub)
```python
# Publishing
await redis.publish("energy_data", json.dumps(data))
# Subscribing
pubsub = redis.pubsub()
await pubsub.subscribe("energy_data")
message = await pubsub.get_message()
```
#### Modular Monolith Approach (Event Bus)
```python
# Publishing
from core.events import event_bus, EventTopics
await event_bus.publish(EventTopics.ENERGY_DATA, data)
# Subscribing
def handle_energy_data(data):
# Process data
pass
event_bus.subscribe(EventTopics.ENERGY_DATA, handle_energy_data)
```
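The `core/events.py` implementation itself is not included in this diff; the following is a minimal sketch, assuming only the interface used above (an awaitable `publish` and a synchronous `subscribe`):

```python
# Minimal sketch of an in-process event bus matching the calls above.
# The real core/events.py is not shown in this diff; names are assumed from usage.
import asyncio
from collections import defaultdict
from typing import Any, Callable, Dict, List


class EventBus:
    def __init__(self) -> None:
        self._handlers: Dict[str, List[Callable[[Any], Any]]] = defaultdict(list)

    def subscribe(self, topic: str, handler: Callable[[Any], Any]) -> None:
        self._handlers[topic].append(handler)

    async def publish(self, topic: str, data: Any) -> None:
        # Call handlers directly; await any coroutine handlers.
        for handler in list(self._handlers[topic]):
            result = handler(data)
            if asyncio.iscoroutine(result):
                await result


class EventTopics:
    ENERGY_DATA = "energy_data"


event_bus = EventBus()
```

Because handlers run in the same process, a failing subscriber surfaces directly in the publisher's stack trace instead of disappearing into a Redis channel.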
### 3. Database Access
#### Microservices Approach
```python
# Each service has its own database connection
from database import get_database
db = await get_database() # Returns service-specific database
```
#### Modular Monolith Approach
```python
# Centralized database manager with module-specific databases
from core.database import db_manager
sensors_db = db_manager.sensors_db
demand_response_db = db_manager.demand_response_db
```
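The shape of `core/database.py` is not part of this diff either; a minimal sketch, assuming one shared Motor client and the database names listed in Phase 3 below:

```python
# Minimal sketch of the shared database manager assumed above.
# One AsyncIOMotorClient (one connection pool) serves every module;
# database names follow the Phase 3 checklist below.
import os

from motor.motor_asyncio import AsyncIOMotorClient


class DatabaseManager:
    def __init__(self) -> None:
        mongo_url = os.getenv(
            "MONGO_URL",
            "mongodb://admin:password123@localhost:27017/?authSource=admin",
        )
        self.client = AsyncIOMotorClient(mongo_url)
        self.sensors_db = self.client["energy_dashboard_sensors"]
        self.demand_response_db = self.client["energy_dashboard_demand_response"]
        self.ingestion_db = self.client["digitalmente_ingestion"]


db_manager = DatabaseManager()
```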
### 4. Application Structure
#### Microservices Structure
```
microservices/
├── api-gateway/
│ └── main.py (port 8000)
├── sensor-service/
│ └── main.py (port 8007)
├── demand-response-service/
│ └── main.py (port 8003)
├── data-ingestion-service/
│ └── main.py (port 8008)
└── docker-compose.yml (8+ containers)
```
#### Modular Monolith Structure
```
monolith/
├── src/
│ ├── main.py (single entry point)
│ ├── core/ (shared infrastructure)
│ └── modules/
│ ├── sensors/
│ ├── demand_response/
│ └── data_ingestion/
└── docker-compose.yml (1 container)
```
## Migration Steps
### Phase 1: Preparation
1. **Backup existing data**:
```bash
# Backup all MongoDB databases
mongodump --uri="mongodb://admin:password123@localhost:27017" --out=/backup/microservices
```
2. **Document current API endpoints**:
- List all endpoints from each microservice
- Document inter-service communication patterns
- Identify Redis pub/sub channels in use
3. **Review environment variables**:
- Consolidate environment variables
- Update connection strings for external MongoDB and Redis
### Phase 2: Deploy Modular Monolith
1. **Configure environment**:
```bash
cd /path/to/monolith
cp .env.example .env
# Edit .env with MongoDB and Redis connection strings
```
2. **Build and deploy**:
```bash
docker-compose up --build -d
```
3. **Verify health**:
```bash
curl http://localhost:8000/health
curl http://localhost:8000/api/v1/overview
```
### Phase 3: Data Migration (if needed)
The modular monolith uses the **same database structure** as the microservices, so typically no data migration is needed. However, verify:
1. **Database names match**:
- `energy_dashboard_sensors`
- `energy_dashboard_demand_response`
- `digitalmente_ingestion`
2. **Collections are accessible**:
```bash
# Connect to MongoDB
mongosh mongodb://admin:password123@mongodb-host:27017/?authSource=admin
# Check databases
show dbs
# Verify collections in each database
use energy_dashboard_sensors
show collections
```
### Phase 4: API Client Migration
Update API clients to point to the new monolith endpoint:
**Before**:
- Sensor API: `http://api-gateway:8000/api/v1/sensors/*`
- DR API: `http://api-gateway:8000/api/v1/demand-response/*`
**After**:
- All APIs: `http://monolith:8000/api/v1/*`
The API paths remain the same; only the host changes.
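For a typical Python client this is a one-line configuration change (the variable names below are hypothetical, not part of the codebase):
```python
# Hypothetical client configuration -- only the base URL changes.
# Before (microservices, via the API Gateway):
# BASE_URL = "http://api-gateway:8000/api/v1"
# After (modular monolith):
BASE_URL = "http://monolith:8000/api/v1"

SENSORS_ENDPOINT = f"{BASE_URL}/sensors/get"
```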
### Phase 5: Decommission Microservices
Once the monolith is stable:
1. **Stop microservices**:
```bash
cd /path/to/microservices
docker-compose down
```
2. **Keep backups** for at least 30 days
3. **Archive microservices code** for reference
## Benefits of the Migration
### Operational Simplification
| Aspect | Microservices | Modular Monolith | Improvement |
|--------|---------------|------------------|-------------|
| **Containers** | 8+ containers | 1 container | 87% reduction |
| **Network calls** | HTTP between services | In-process calls | ~100x faster |
| **Deployment complexity** | Coordinate 8+ services | Single deployment | Much simpler |
| **Monitoring** | 8+ health endpoints | 1 health endpoint | Easier |
| **Log aggregation** | 8+ log sources | 1 log source | Simpler |
### Performance Improvements
1. **Reduced latency**:
- Inter-service HTTP calls: ~10-50ms
- Direct function calls: ~0.01-0.1ms
- **Improvement**: 100-1000x faster
2. **Reduced network overhead**:
- No HTTP serialization/deserialization
- No network round-trips
- No service discovery delays
3. **Shared resources**:
- Single database connection pool
- Shared Redis connection (if enabled)
- Shared in-memory caches
### Development Benefits
1. **Easier debugging**:
- Single process to debug
- Direct stack traces across modules
- No distributed tracing needed
2. **Simpler testing**:
- Test entire flow in one process
- No need to mock HTTP calls
- Integration tests run faster
3. **Faster development**:
- Single application to run locally
- Immediate code changes (with reload)
- No service orchestration needed
## Preserved Benefits from Microservices
### Module Isolation
Each module maintains clear boundaries:
- Separate directory structure
- Own models and business logic
- Dedicated database (data isolation)
- Clear public interfaces
### Independent Scaling (Future)
If needed, modules can be extracted back into microservices:
- Clean module boundaries make extraction easy
- Database per module already separated
- Event bus can switch to Redis pub/sub
- Direct calls can switch to HTTP calls
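For example, an energy-data publisher could be written against a small wrapper so the in-process bus can later be swapped back to the original Redis channel without touching call sites (a sketch, assuming the `energy_data` channel name used by the microservices):
```python
import json
from typing import Optional

import redis.asyncio as aioredis

from core.events import event_bus, EventTopics

USE_REDIS = False  # would be flipped once the module runs as a separate service again

async def publish_energy_data(data: dict, redis_client: Optional[aioredis.Redis] = None):
    if USE_REDIS and redis_client is not None:
        # Former microservices path: Redis pub/sub channel
        await redis_client.publish("energy_data", json.dumps(data))
    else:
        # Modular monolith path: in-process event bus
        await event_bus.publish(EventTopics.ENERGY_DATA, data)
```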
### Team Organization
Teams can still own modules:
- Sensors team owns `modules/sensors/`
- DR team owns `modules/demand_response/`
- Clear ownership and responsibilities
## Rollback Strategy
If you need to rollback to microservices:
1. **Keep microservices code** in the repository
2. **Database unchanged**: Both architectures use the same databases
3. **Redeploy microservices**:
```bash
cd /path/to/microservices
docker-compose up -d
```
4. **Update API clients** to point back to API Gateway
## Monitoring and Observability
### Health Checks
**Single health endpoint**:
```bash
curl http://localhost:8000/health
```
Returns:
```json
{
  "service": "Energy Dashboard Monolith",
  "status": "healthy",
  "components": {
    "database": "healthy",
    "redis": "healthy",
    "event_bus": "healthy"
  },
  "modules": {
    "sensors": "loaded",
    "demand_response": "loaded",
    "data_ingestion": "loaded"
  }
}
```
### Logging
All logs in one place:
```bash
# Docker logs
docker-compose logs -f monolith
# Application logs
docker-compose logs -f monolith | grep "ERROR"
```
### Metrics
System overview endpoint:
```bash
curl http://localhost:8000/api/v1/overview
```
## Common Migration Issues
### Issue: Module Import Errors
**Problem**: `ModuleNotFoundError: No module named 'src.modules'`
**Solution**:
```bash
# Set PYTHONPATH
export PYTHONPATH=/app
# Or in docker-compose.yml
environment:
  - PYTHONPATH=/app
```
### Issue: Database Connection Errors
**Problem**: Cannot connect to MongoDB
**Solution**:
1. Verify MongoDB is accessible:
```bash
docker-compose exec monolith ping mongodb-host
```
2. Check connection string in `.env`
3. Ensure network connectivity
### Issue: Redis Connection Errors
**Problem**: The Redis connection fails, but the application should still run
**Solution**:
Redis is optional. Set in `.env`:
```
REDIS_ENABLED=false
```
### Issue: Event Subscribers Not Receiving Events
**Problem**: Events published but subscribers not called
**Solution**:
Ensure subscribers are registered before events are published:
```python
# Register subscriber in lifespan startup
from contextlib import asynccontextmanager
from fastapi import FastAPI
from core.events import event_bus, EventTopics

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Subscribe before publishing
    event_bus.subscribe(EventTopics.ENERGY_DATA, handle_energy)
    yield
```
## Testing the Migration
### 1. Functional Testing
Test each module's endpoints:
```bash
# Sensors
curl http://localhost:8000/api/v1/sensors/get
curl http://localhost:8000/api/v1/rooms
# Analytics
curl http://localhost:8000/api/v1/analytics/summary
# Health
curl http://localhost:8000/health
```
### 2. Load Testing
Compare performance:
```bash
# Microservices
ab -n 1000 -c 10 http://localhost:8000/api/v1/sensors/get
# Modular Monolith
ab -n 1000 -c 10 http://localhost:8000/api/v1/sensors/get
```
Expected: Modular monolith should be significantly faster.
### 3. WebSocket Testing
Test real-time features:
```javascript
const ws = new WebSocket('ws://localhost:8000/api/v1/ws');
ws.onmessage = (event) => console.log('Received:', event.data);
```
## FAQ
### Q: Do I need to migrate the database?
**A**: No, the modular monolith uses the same database structure as the microservices.
### Q: Can I scale individual modules?
**A**: Not independently. The entire monolith scales together. If you need independent scaling, consider keeping the microservices architecture or using horizontal scaling with load balancers.
### Q: What happens to Redis pub/sub?
**A**: Replaced with an in-process event bus. Redis can still be used for caching if `REDIS_ENABLED=true`.
### Q: Are the API endpoints the same?
**A**: Yes, the API paths remain identical. Only the host changes.
### Q: Can I extract modules back to microservices later?
**A**: Yes, the modular structure makes it easy to extract modules back into separate services if needed.
### Q: How do I add a new module?
**A**: See the "Adding a New Module" section in README.md.
### Q: Is this suitable for production?
**A**: Yes, modular monoliths are production-ready and often more reliable than microservices for small-to-medium scale applications.
## Next Steps
1. **Deploy to staging** and run full test suite
2. **Monitor performance** and compare with microservices
3. **Gradual rollout** to production (canary or blue-green deployment)
4. **Decommission microservices** after 30 days of stable operation
5. **Update documentation** and team training
## Support
For issues or questions about the migration:
1. Check this guide and README.md
2. Review application logs: `docker-compose logs monolith`
3. Test health endpoint: `curl http://localhost:8000/health`
4. Contact the development team

monolith/README.md Normal file

@@ -0,0 +1,453 @@
# Energy Dashboard - Modular Monolith
This is the modular monolithic architecture version of the Energy Dashboard, refactored from the original microservices architecture.
## Architecture Overview
The application is structured as a **modular monolith**, combining the benefits of:
- **Monolithic deployment**: Single application, simpler operations
- **Modular design**: Clear module boundaries, maintainability
### Key Architectural Decisions
1. **Single Application**: All modules run in one process
2. **Module Isolation**: Each module has its own directory and clear interfaces
3. **Separate Databases**: Each module maintains its own database for data isolation
4. **In-Process Event Bus**: Replaces Redis pub/sub for inter-module communication
5. **Direct Dependency Injection**: Modules communicate directly via function calls
6. **Shared Core**: Common infrastructure (database, events, config) shared across modules
## Project Structure
```
monolith/
├── src/
│ ├── main.py # Main FastAPI application
│ ├── core/ # Shared core infrastructure
│ │ ├── config.py # Centralized configuration
│ │ ├── database.py # Database connection manager
│ │ ├── events.py # In-process event bus
│ │ ├── redis.py # Optional Redis cache
│ │ ├── dependencies.py # FastAPI dependencies
│ │ └── logging_config.py # Logging setup
│ ├── modules/ # Business modules
│ │ ├── sensors/ # Sensor management module
│ │ │ ├── __init__.py
│ │ │ ├── router.py # API routes
│ │ │ ├── models.py # Data models
│ │ │ ├── sensor_service.py # Business logic
│ │ │ ├── room_service.py
│ │ │ ├── analytics_service.py
│ │ │ └── websocket_manager.py
│ │ ├── demand_response/ # Demand response module
│ │ │ ├── __init__.py
│ │ │ ├── models.py
│ │ │ └── demand_response_service.py
│ │ └── data_ingestion/ # Data ingestion module
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── ftp_monitor.py
│ │ ├── slg_processor.py
│ │ └── database.py
│ └── api/ # API layer (if needed)
├── config/ # Configuration files
├── tests/ # Test files
├── requirements.txt # Python dependencies
├── Dockerfile # Docker build file
├── docker-compose.yml # Docker Compose configuration
└── README.md # This file
```
## Modules
### 1. Sensors Module (`src/modules/sensors`)
**Responsibility**: Sensor management, room management, real-time data, and analytics
**Key Features**:
- Sensor CRUD operations
- Room management
- Real-time data ingestion
- Analytics and reporting
- WebSocket support for live data streaming
**Database**: `energy_dashboard_sensors`
**API Endpoints**: `/api/v1/sensors/*`, `/api/v1/rooms/*`, `/api/v1/analytics/*`
### 2. Demand Response Module (`src/modules/demand_response`)
**Responsibility**: Grid interaction, demand response events, and load management
**Key Features**:
- Demand response event management
- Device flexibility calculation
- Auto-response configuration
- Load reduction requests
**Database**: `energy_dashboard_demand_response`
**API Endpoints**: `/api/v1/demand-response/*`
### 3. Data Ingestion Module (`src/modules/data_ingestion`)
**Responsibility**: FTP monitoring and SA4CPS data processing
**Key Features**:
- FTP file monitoring
- .slg_v2 file processing
- Dynamic collection management
- Duplicate detection
**Database**: `digitalmente_ingestion`
**API Endpoints**: `/api/v1/ingestion/*`
## Core Components
### Event Bus (`src/core/events.py`)
Replaces Redis pub/sub with an in-process event bus for inter-module communication.
**Standard Event Topics**:
- `energy_data`: Energy consumption updates
- `dr_events`: Demand response events
- `sensor_events`: Sensor-related events
- `system_events`: System-level events
- `data_ingestion`: Data ingestion events
**Usage Example**:
```python
from core.events import event_bus, EventTopics
# Publish event
await event_bus.publish(EventTopics.ENERGY_DATA, {"sensor_id": "sensor_1", "value": 3.5})
# Subscribe to events
def handle_energy_data(data):
print(f"Received energy data: {data}")
event_bus.subscribe(EventTopics.ENERGY_DATA, handle_energy_data)
```
### Database Manager (`src/core/database.py`)
Centralized database connection management with separate databases per module.
**Available Databases**:
- `main_db`: Main application database
- `sensors_db`: Sensors module database
- `demand_response_db`: Demand response module database
- `data_ingestion_db`: Data ingestion module database
**Usage Example**:
```python
from core.dependencies import get_sensors_db
from fastapi import Depends
async def my_endpoint(db=Depends(get_sensors_db)):
    result = await db.sensors.find_one({"sensor_id": "sensor_1"})
```
### Configuration (`src/core/config.py`)
Centralized configuration using Pydantic Settings.
**Configuration Sources**:
1. Environment variables
2. `.env` file (if present)
3. Default values
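Any module can read these settings directly; the fields below exist in `Settings`, and the values shown are simply the defaults:
```python
from core.config import settings

print(settings.app_name)   # "Energy Dashboard Monolith"
print(settings.mongo_url)  # overridden by MONGO_URL (environment or .env)
print(settings.debug)      # False unless DEBUG=true is set
```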
## Getting Started
### Prerequisites
- Python 3.11+
- MongoDB 7.0+ (deployed separately)
- Redis 7+ (optional, for caching - deployed separately)
- Docker and Docker Compose (for containerized deployment)
### Local Development
1. **Install dependencies**:
```bash
cd monolith
pip install -r requirements.txt
```
2. **Configure environment**:
```bash
cp .env.example .env
# Edit .env with your MongoDB and Redis connection strings
```
3. **Ensure MongoDB and Redis are accessible**:
- MongoDB should be running and accessible at the URL specified in `MONGO_URL`
- Redis (optional) should be accessible at the URL specified in `REDIS_URL`
4. **Run the application**:
```bash
cd src
uvicorn main:app --reload --host 0.0.0.0 --port 8000
```
5. **Access the application**:
- API: http://localhost:8000
- Health Check: http://localhost:8000/health
- API Docs: http://localhost:8000/docs
### Docker Deployment
**Note**: MongoDB and Redis are deployed separately and must be accessible before starting the application.
1. **Configure environment variables**:
```bash
cp .env.example .env
# Edit .env with your MongoDB and Redis connection strings
```
2. **Build and start the application**:
```bash
cd monolith
docker-compose up --build -d
```
3. **View logs**:
```bash
docker-compose logs -f monolith
```
4. **Stop the application**:
```bash
docker-compose down
```
## API Endpoints
### Global Endpoints
- `GET /`: Root endpoint
- `GET /health`: Global health check
- `GET /api/v1/overview`: System overview
### Sensors Module
- `GET /api/v1/sensors/get`: Get sensors with filters
- `GET /api/v1/sensors/{sensor_id}`: Get sensor details
- `GET /api/v1/sensors/{sensor_id}/data`: Get sensor data
- `POST /api/v1/sensors`: Create sensor
- `PUT /api/v1/sensors/{sensor_id}`: Update sensor
- `DELETE /api/v1/sensors/{sensor_id}`: Delete sensor
- `GET /api/v1/rooms`: Get all rooms
- `GET /api/v1/rooms/names`: Get room names
- `POST /api/v1/rooms`: Create room
- `GET /api/v1/rooms/{room_name}`: Get room details
- `PUT /api/v1/rooms/{room_name}`: Update room
- `DELETE /api/v1/rooms/{room_name}`: Delete room
- `GET /api/v1/analytics/summary`: Analytics summary
- `GET /api/v1/analytics/energy`: Energy analytics
- `POST /api/v1/data/query`: Advanced data query
- `WS /api/v1/ws`: WebSocket for real-time data
### Demand Response Module
- Endpoints for demand response events, invitations, and device management
- (To be fully documented when router is added)
### Data Ingestion Module
- Endpoints for FTP monitoring status and manual triggers
- (To be fully documented when router is added)
## Inter-Module Communication
Modules communicate in two ways:
### 1. Direct Dependency Injection
For synchronous operations, modules directly import and call each other's services:
```python
from modules.sensors import SensorService
from core.dependencies import get_sensors_db
sensor_service = SensorService(db=await get_sensors_db(), redis=None)
sensors = await sensor_service.get_sensors()
```
### 2. Event-Driven Communication
For asynchronous operations, modules use the event bus:
```python
from core.events import event_bus, EventTopics
# Publisher
await event_bus.publish(EventTopics.ENERGY_DATA, {
"sensor_id": "sensor_1",
"value": 3.5,
"timestamp": 1234567890
})
# Subscriber
async def handle_energy_update(data):
print(f"Energy update: {data}")
event_bus.subscribe(EventTopics.ENERGY_DATA, handle_energy_update)
```
## Background Tasks
The application runs several background tasks:
1. **Room Metrics Aggregation** (every 5 minutes)
- Aggregates sensor data into room-level metrics
2. **Data Cleanup** (daily)
- Removes sensor data older than 90 days
3. **Event Scheduler** (every 60 seconds)
- Checks and executes scheduled demand response events
4. **Auto Response** (every 30 seconds)
- Processes automatic demand response opportunities
5. **FTP Monitoring** (every 6 hours, configurable)
- Monitors FTP server for new SA4CPS data files
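Each of these tasks follows the same pattern used in `src/main.py`: an `async` loop started with `asyncio.create_task()` during application startup. A minimal sketch (the interval and error back-off values here are illustrative):
```python
import asyncio
import logging

logger = logging.getLogger(__name__)

async def periodic_task(interval_seconds: int = 300):
    """Run one unit of work, then sleep -- repeated forever."""
    while True:
        try:
            # ... do the work (e.g. aggregate room metrics) ...
            await asyncio.sleep(interval_seconds)
        except Exception as e:
            logger.error(f"Error in background task: {e}")
            await asyncio.sleep(interval_seconds * 2)  # back off after an error

# Started from the FastAPI lifespan handler:
# asyncio.create_task(periodic_task(300))
```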
## Configuration Options
Key environment variables:
### Database
- `MONGO_URL`: MongoDB connection string
- `REDIS_URL`: Redis connection string
- `REDIS_ENABLED`: Enable/disable Redis (true/false)
### Application
- `DEBUG`: Enable debug mode (true/false)
- `HOST`: Application host (default: 0.0.0.0)
- `PORT`: Application port (default: 8000)
### FTP
- `FTP_SA4CPS_HOST`: FTP server host
- `FTP_SA4CPS_PORT`: FTP server port
- `FTP_SA4CPS_USERNAME`: FTP username
- `FTP_SA4CPS_PASSWORD`: FTP password
- `FTP_SA4CPS_REMOTE_PATH`: Remote directory path
- `FTP_CHECK_INTERVAL`: Check interval in seconds
- `FTP_SKIP_INITIAL_SCAN`: Skip initial FTP scan (true/false)
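A minimal `.env` illustrating these variables (every value below is a placeholder; use your own connection strings and credentials):
```
MONGO_URL=mongodb://admin:password123@mongodb-host:27017/?authSource=admin
REDIS_URL=redis://redis-host:6379
REDIS_ENABLED=false
DEBUG=false
HOST=0.0.0.0
PORT=8000
FTP_SA4CPS_HOST=ftp.sa4cps.pt
FTP_SA4CPS_PORT=21
FTP_SA4CPS_USERNAME=your-username
FTP_SA4CPS_PASSWORD=your-password
FTP_SA4CPS_REMOTE_PATH=/SLGs/
FTP_CHECK_INTERVAL=21600
FTP_SKIP_INITIAL_SCAN=true
```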
## Migration from Microservices
See [MIGRATION.md](MIGRATION.md) for detailed migration guide.
## Development Guidelines
### Adding a New Module
1. Create module directory: `src/modules/new_module/`
2. Add module files:
- `__init__.py`: Module exports
- `models.py`: Pydantic models
- `service.py`: Business logic
- `router.py`: API routes
3. Register module in main application:
```python
from modules.new_module.router import router as new_module_router
app.include_router(new_module_router, prefix="/api/v1/new-module", tags=["new-module"])
```
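A minimal `router.py` for such a module might look like this (the module name, endpoint, and collection are illustrative only):
```python
"""Hypothetical router for a new module (names are placeholders)."""
from fastapi import APIRouter, Depends
from core.dependencies import get_main_db

router = APIRouter()

@router.get("/items")
async def list_items(db=Depends(get_main_db)):
    # "items" is a placeholder collection name for the new module's data
    docs = await db.items.find().to_list(length=100)
    for doc in docs:
        doc["_id"] = str(doc["_id"])  # make ObjectId values JSON-serializable
    return {"items": docs}
```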
### Adding an Event Topic
1. Add topic to `EventTopics` class in `src/core/events.py`:
```python
class EventTopics:
NEW_TOPIC = "new_topic"
```
2. Use in your module:
```python
from core.events import event_bus, EventTopics
await event_bus.publish(EventTopics.NEW_TOPIC, data)
```
## Testing
```bash
# Run all tests
pytest
# Run with coverage
pytest --cov=src --cov-report=html
# Run specific module tests
pytest tests/modules/sensors/
```
## Monitoring and Logging
- **Logs**: Application logs to stdout
- **Log Level**: Controlled by `DEBUG` environment variable
- **Health Checks**: Available at `/health` endpoint
- **Metrics**: System overview at `/api/v1/overview`
## Performance Considerations
- **Database Indexing**: Ensure proper indexes on frequently queried fields
- **Redis Caching**: Enable Redis for improved performance (optional)
- **Connection Pooling**: Motor (MongoDB) and Redis clients handle connection pooling
- **Async Operations**: All I/O operations are asynchronous
- **Background Tasks**: Long-running operations don't block request handling
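For example, indexes covering the fields the sensors module filters on most often could be created once at startup (the index choices below are an assumption, based on the readings written by the HTTP poller):
```python
from core.database import db_manager

async def ensure_indexes():
    """Create indexes on the sensor_readings collection (illustrative choices)."""
    readings = db_manager.sensors_db.sensor_readings
    await readings.create_index([("sensor_id", 1), ("timestamp", -1)])
    await readings.create_index("room")
```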
## Security
- **CORS**: Configured in main application
- **Environment Variables**: Use `.env` file, never commit secrets
- **Database Authentication**: MongoDB requires authentication
- **Input Validation**: Pydantic models validate all inputs
- **Error Handling**: Sensitive information not exposed in error messages
## Troubleshooting
### Database Connection Issues
```bash
# Test MongoDB connection (update with your connection string)
mongosh mongodb://admin:password123@mongodb-host:27017/?authSource=admin
# Check if MongoDB is accessible from the container
docker-compose exec monolith ping mongodb-host
```
### Redis Connection Issues
```bash
# Test Redis connection (update with your connection string)
redis-cli -h redis-host ping
# Check if Redis is accessible from the container
docker-compose exec monolith ping redis-host
```
### Application Won't Start
```bash
# Check logs
docker-compose logs monolith
# Verify environment variables
docker-compose exec monolith env | grep MONGO
```
## License
[Your License Here]
## Contributing
[Your Contributing Guidelines Here]

monolith/docker-compose.yml Normal file

@@ -0,0 +1,41 @@
version: "3.8"
services:
# Modular Monolith Application
monolith:
build:
context: .
dockerfile: Dockerfile
container_name: energy-dashboard-monolith
restart: unless-stopped
ports:
- "8000:8000"
environment:
# MongoDB Configuration (external deployment)
- MONGO_URL=${MONGO_URL}
# Redis Configuration (external deployment, optional)
- REDIS_URL=${REDIS_URL}
- REDIS_ENABLED=${REDIS_ENABLED:-false}
# FTP Configuration
- FTP_SA4CPS_HOST=${FTP_SA4CPS_HOST:-ftp.sa4cps.pt}
- FTP_SA4CPS_PORT=${FTP_SA4CPS_PORT:-21}
- FTP_SA4CPS_USERNAME=${FTP_SA4CPS_USERNAME}
- FTP_SA4CPS_PASSWORD=${FTP_SA4CPS_PASSWORD}
- FTP_SA4CPS_REMOTE_PATH=${FTP_SA4CPS_REMOTE_PATH:-/SLGs/}
- FTP_CHECK_INTERVAL=${FTP_CHECK_INTERVAL:-21600}
- FTP_SKIP_INITIAL_SCAN=${FTP_SKIP_INITIAL_SCAN:-true}
# Application Settings
- DEBUG=${DEBUG:-false}
networks:
- energy-network
volumes:
- ./src:/app/src # Mount source code for development
networks:
energy-network:
driver: bridge
name: energy-network

monolith/iots-left.json Executable file

File diff suppressed because it is too large

monolith/iots-right.json Executable file

@@ -0,0 +1,616 @@
{
"iots": {
"battery": [
{
"name": "Battery_1",
"type": "battery",
"uri": "192.168.2.54",
"sensors": [
{
"type": "energy",
"tag": [
"battery",
"stored_energy"
],
"data": "DOUBLE"
},
{
"type": "charging_rate",
"tag": [
"battery",
"charging_rate"
],
"data": "DOUBLE"
}
]
},
{
"name": "Battery_2",
"type": "battery",
"uri": "192.168.2.55",
"sensors": [
{
"type": "energy",
"tag": [
"battery",
"stored_energy"
],
"data": "DOUBLE"
},
{
"type": "charging_rate",
"tag": [
"battery",
"charging_rate"
],
"data": "DOUBLE"
}
]
},
{
"name": "Battery_3",
"type": "battery",
"uri": "192.168.2.56",
"sensors": [
{
"type": "energy",
"tag": [
"battery",
"stored_energy"
],
"data": "DOUBLE"
},
{
"type": "charging_rate",
"tag": [
"battery",
"charging_rate"
],
"data": "DOUBLE"
}
]
}
],
"refrigerator": [
{
"name": "Fridge",
"type": "refrigerator",
"uri": "http://192.168.2.5:8520/enaplug/read/170307001",
"sensors": [
{
"type": "power",
"tag": [
"enaplug_170307001",
"act1"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"enaplug_170307001",
"volt1"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"enaplug_170307001",
"curr1"
],
"data": "DOUBLE"
},
{
"type": "doorOpen",
"tag": [
"enaplug_170307001",
"doorOpened"
],
"data": "BOOLEAN"
},
{
"type": "state",
"tag": [
"enaplug_170307001",
"state"
],
"data": "DOUBLE"
},
{
"type": "internal Temperature",
"tag": [
"enaplug_170307001",
"temp2"
],
"data": "DOUBLE"
},
{
"type": "external Temperature",
"tag": [
"enaplug_170307001",
"temp1"
],
"data": "DOUBLE"
},
{
"type": "humidity",
"tag": [
"enaplug_170307001",
"hum1"
],
"data": "DOUBLE"
}
]
}
],
"waterheater": [
{
"name": "Water Heater",
"type": "water heater",
"uri": "http://192.168.2.5:8520/enaplug/read/180717001",
"sensors": [
{
"type": "power",
"tag": [
"enaplug_180717001",
"act"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"enaplug_180717001",
"volt"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"enaplug_180717001",
"curr"
],
"data": "DOUBLE"
},
{
"type": "state",
"tag": [
"enaplug_180717001",
"state"
],
"data": "BOOLEAN"
},
{
"type": "temperature",
"tag": [
"enaplug_180717001",
"temp"
],
"data": "DOUBLE"
}
]
}
],
"microwave": [
{
"name": "Microwave",
"type": "microwave",
"uri": "http://192.168.2.5:8520/resource/AnalyzerKitHall_V2",
"sensors": [
{
"type": "power",
"tag": [
"AnalyzerKitHall_V2",
"microwave_active"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"AnalyzerKitHall_V2",
"microwave_voltage"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"AnalyzerKitHall_V2",
"microwave_current_x10"
],
"data": "DOUBLE"
}
]
}
],
"dishwasher": [
{
"name": "Dishwasher",
"type": "dishwasher",
"uri": "http://192.168.2.5:8520/resource/AnalyzerKitHall_V2",
"sensors": [
{
"type": "power",
"tag": [
"AnalyzerKitHall_V2",
"dishwasher_active"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"AnalyzerKitHall_V2",
"dishwasher_voltage"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"AnalyzerKitHall_V2",
"dishwasher_current_x10"
],
"data": "DOUBLE"
}
]
}
],
"kettle": [
{
"name": "Kettle",
"type": "kettle",
"uri": "http://192.168.2.5:8520/resource/AnalyzerKitHall_V2",
"sensors": [
{
"type": "power",
"tag": [
"AnalyzerKitHall_V2",
"kettle_active"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"AnalyzerKitHall_V2",
"kettle_voltage"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"AnalyzerKitHall_V2",
"kettle_current_x10"
],
"data": "DOUBLE"
}
]
}
],
"hvac": [
{
"name": "Air Conditioner Kitchen",
"type": "hvac",
"uri": "http://192.168.2.5:8520/resource/AnalyzerKitHall_V2",
"sensors": [
{
"type": "power",
"tag": [
"AnalyzerKitHall_V2",
"kitchen_ac_activePower"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"AnalyzerKitHall_V2",
"kitchen_ac_voltage"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"AnalyzerKitHall_V2",
"kitchen_ac_current_x10"
],
"data": "DOUBLE"
}
]
},
{
"name": "Air Conditioner Hallway",
"type": "hvac",
"uri": "http://192.168.2.5:8520/resource/AnalyzerKitHall_V2",
"sensors": [
{
"type": "power",
"tag": [
"AnalyzerKitHall_V2",
"hallway_ac_activePower"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"AnalyzerKitHall_V2",
"hallway_ac_voltage"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"AnalyzerKitHall_V2",
"hallway_ac_current_x10"
],
"data": "DOUBLE"
}
]
},
{
"name": "Air Conditioner 112_115",
"type": "hvac",
"uri": "http://192.168.2.5:8520/resource/Analyzer115_V1",
"sensors": [
{
"type": "power",
"tag": [
"Analyzer115_V1",
"P2_W"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"Analyzer115_V1",
"U2N_Vx10"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"Analyzer115_V1",
"Curr2_mA"
],
"data": "DOUBLE"
}
]
},
{
"name": "Air Conditioner 111_116",
"type": "hvac",
"uri": "http://192.168.2.5:8520/resource/Analyzer116_V1",
"sensors": [
{
"type": "power",
"tag": [
"Analyzer116_V1",
"P2_W"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"Analyzer116_V1",
"U2N_V"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"Analyzer116_V1",
"Curr2_A"
],
"data": "DOUBLE"
}
]
}
],
"sockets": [
{
"name": "Sockets 112_115",
"type": "sockets",
"uri": "http://192.168.2.5:8520/resource/Analyzer115_V1",
"sensors": [
{
"type": "power",
"tag": [
"Analyzer115_V1",
"P1_W"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"Analyzer115_V1",
"U1N_Vx10"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"Analyzer115_V1",
"Curr1_mA"
],
"data": "DOUBLE"
}
]
},
{
"name": "Sockets 111_116",
"type": "sockets",
"uri": "http://192.168.2.5:8520/resource/Analyzer116_V1",
"sensors": [
{
"type": "power",
"tag": [
"Analyzer116_V1",
"P3_W"
],
"data": "DOUBLE"
},
{
"type": "voltage",
"tag": [
"Analyzer116_V1",
"U3N_V"
],
"data": "DOUBLE"
},
{
"type": "current",
"tag": [
"Analyzer116_V1",
"Curr3_A"
],
"data": "DOUBLE"
}
]
}
],
"lamp": [
{
"name": "Lamp 1_111",
"type": "lamp",
"uri": "http://192.168.2.68:8089/desenrasca/lamp/111/1",
"sensors": [
{
"type": "power",
"tag": [
"consumption_w"
],
"data": "DOUBLE"
}
]
},
{
"name": "Lamp 1_112",
"type": "lamp",
"uri": "http://192.168.2.68:8089/desenrasca/lamp/112/1",
"sensors": [
{
"type": "power",
"tag": [
"consumption_w"
],
"data": "DOUBLE"
}
]
},
{
"name": "Lamp 2_112",
"type": "lamp",
"uri": "http://192.168.2.68:8089/desenrasca/lamp/112/2",
"sensors": [
{
"type": "power",
"tag": [
"consumption_w"
],
"data": "DOUBLE"
}
]
},
{
"name": "Lamp 3_112",
"type": "lamp",
"uri": "http://192.168.2.68:8089/desenrasca/lamp/112/3",
"sensors": [
{
"type": "power",
"tag": [
"consumption_w"
],
"data": "DOUBLE"
}
]
},
{
"name": "Lamp 1_115",
"type": "lamp",
"uri": "http://192.168.2.68:8089/desenrasca/lamp/115/1",
"sensors": [
{
"type": "power",
"tag": [
"consumption_w"
],
"data": "DOUBLE"
}
]
},
{
"name": "Lamp 2_115",
"type": "lamp",
"uri": "http://192.168.2.68:8089/desenrasca/lamp/115/2",
"sensors": [
{
"type": "power",
"tag": [
"consumption_w"
],
"data": "DOUBLE"
}
]
},
{
"name": "Lamp 3_115",
"type": "lamp",
"uri": "http://192.168.2.68:8089/desenrasca/lamp/115/3",
"sensors": [
{
"type": "power",
"tag": [
"consumption_w"
],
"data": "DOUBLE"
}
]
},
{
"name": "Lamp 1_116",
"type": "lamp",
"uri": "http://192.168.2.68:8089/desenrasca/lamp/116/1",
"sensors": [
{
"type": "power",
"tag": [
"consumption_w"
],
"data": "DOUBLE"
}
]
}
],
"generation": [
{
"name": "Generation",
"type": "generation",
"uri": "http://192.168.2.68:8089/desenrasca/generation/3750",
"sensors": [
{
"type": "generation",
"tag": [
"generation_w"
],
"data": "DOUBLE"
}
]
}
]
}
}

monolith/requirements.txt Normal file

@@ -0,0 +1,28 @@
# FastAPI and ASGI server
fastapi==0.104.1
uvicorn[standard]==0.24.0
python-multipart==0.0.6
# Database drivers
motor==3.3.2 # Async MongoDB
redis[hiredis]==5.0.1 # Redis with hiredis for better performance
# Data validation and settings
pydantic==2.5.0
pydantic-settings==2.1.0
# Async HTTP client
aiohttp==3.9.1
# WebSockets
websockets==12.0
# Data processing
pandas==2.1.4
numpy==1.26.2
# FTP support
ftputil==5.0.4
# Utilities
python-dateutil==2.8.2

monolith/src/core/__init__.py Normal file

@@ -0,0 +1 @@
"""Core infrastructure components for the modular monolith."""

monolith/src/core/config.py Normal file

@@ -0,0 +1,43 @@
"""Centralized configuration management."""
import os
from typing import Dict, Optional
from pydantic_settings import BaseSettings
class Settings(BaseSettings):
"""Application settings."""
# Application
app_name: str = "Energy Dashboard Monolith"
app_version: str = "1.0.0"
debug: bool = False
host: str = "0.0.0.0"
port: int = 8000
# MongoDB
mongo_url: str = os.getenv("MONGO_URL", "mongodb://admin:password123@localhost:27017/?authSource=admin")
# Module-specific databases (preserving isolation)
sensors_db_name: str = "energy_dashboard_sensors"
demand_response_db_name: str = "energy_dashboard_demand_response"
data_ingestion_db_name: str = "digitalmente_ingestion"
main_db_name: str = "energy_dashboard"
# Redis (optional cache)
redis_url: str = os.getenv("REDIS_URL", "redis://localhost:6379")
redis_enabled: bool = os.getenv("REDIS_ENABLED", "false").lower() == "true"
# HTTP Poller Configuration (for IoT devices)
http_poll_interval: int = int(os.getenv("HTTP_POLL_INTERVAL", "60")) # 60 seconds
http_timeout: int = int(os.getenv("HTTP_TIMEOUT", "10")) # 10 seconds
http_max_concurrent: int = int(os.getenv("HTTP_MAX_CONCURRENT", "5")) # 5 concurrent requests
# CORS
cors_origins: list = ["*"]
cors_allow_credentials: bool = True
cors_allow_methods: list = ["*"]
cors_allow_headers: list = ["*"]
class Config:
env_file = ".env"
case_sensitive = False
# Global settings instance
settings = Settings()

monolith/src/core/database.py Normal file

@@ -0,0 +1,85 @@
"""Database connection management for all modules."""
import logging
from typing import Optional, Dict
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase
from .config import settings
logger = logging.getLogger(__name__)
class DatabaseManager:
"""Manages MongoDB connections for all modules."""
def __init__(self):
self._client: Optional[AsyncIOMotorClient] = None
self._databases: Dict[str, AsyncIOMotorDatabase] = {}
async def connect(self):
"""Establish connection to MongoDB."""
try:
logger.info(f"Connecting to MongoDB: {settings.mongo_url}")
self._client = AsyncIOMotorClient(settings.mongo_url)
# Test connection
await self._client.admin.command('ping')
logger.info("Successfully connected to MongoDB")
# Initialize database references
self._databases = {
"main": self._client[settings.main_db_name],
"sensors": self._client[settings.sensors_db_name],
"demand_response": self._client[settings.demand_response_db_name],
"data_ingestion": self._client[settings.data_ingestion_db_name],
}
except Exception as e:
logger.error(f"Failed to connect to MongoDB: {e}")
raise
async def disconnect(self):
"""Close MongoDB connection."""
if self._client:
self._client.close()
logger.info("Disconnected from MongoDB")
def get_database(self, name: str) -> AsyncIOMotorDatabase:
"""Get database by name."""
if name not in self._databases:
raise ValueError(f"Database '{name}' not configured")
return self._databases[name]
@property
def client(self) -> AsyncIOMotorClient:
"""Get the MongoDB client."""
if not self._client:
raise RuntimeError("Database not connected. Call connect() first.")
return self._client
@property
def main_db(self) -> AsyncIOMotorDatabase:
"""Get main database."""
return self.get_database("main")
@property
def sensors_db(self) -> AsyncIOMotorDatabase:
"""Get sensors database."""
return self.get_database("sensors")
@property
def demand_response_db(self) -> AsyncIOMotorDatabase:
"""Get demand response database."""
return self.get_database("demand_response")
@property
def data_ingestion_db(self) -> AsyncIOMotorDatabase:
"""Get data ingestion database."""
return self.get_database("data_ingestion")
# Global database manager instance
db_manager = DatabaseManager()
async def get_database(name: str = "main") -> AsyncIOMotorDatabase:
"""Dependency injection function for database access."""
return db_manager.get_database(name)

monolith/src/core/dependencies.py Normal file

@@ -0,0 +1,39 @@
"""FastAPI dependency injection utilities."""
from typing import Optional
from fastapi import Depends, HTTPException, status
from motor.motor_asyncio import AsyncIOMotorDatabase
import redis.asyncio as aioredis
from .database import db_manager
from .redis import redis_manager
from .events import event_bus, EventBus
async def get_main_db() -> AsyncIOMotorDatabase:
"""Get main database dependency."""
return db_manager.main_db
async def get_sensors_db() -> AsyncIOMotorDatabase:
"""Get sensors database dependency."""
return db_manager.sensors_db
async def get_demand_response_db() -> AsyncIOMotorDatabase:
"""Get demand response database dependency."""
return db_manager.demand_response_db
async def get_data_ingestion_db() -> AsyncIOMotorDatabase:
"""Get data ingestion database dependency."""
return db_manager.data_ingestion_db
async def get_redis() -> Optional[aioredis.Redis]:
"""Get Redis client dependency."""
return redis_manager.client
def get_event_bus() -> EventBus:
"""Get event bus dependency."""
return event_bus

monolith/src/core/events.py Normal file

@@ -0,0 +1,137 @@
"""In-process event bus for inter-module communication."""
import asyncio
import logging
from typing import Dict, List, Callable, Any, Set
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime
import json
logger = logging.getLogger(__name__)
@dataclass
class Event:
"""Event data structure."""
topic: str
data: Any
timestamp: datetime
source: str = "system"
def to_dict(self) -> dict:
"""Convert to dictionary."""
return {
"topic": self.topic,
"data": self.data,
"timestamp": self.timestamp.isoformat(),
"source": self.source
}
class EventBus:
"""
In-process event bus for replacing Redis pub/sub.
Provides asynchronous event publishing and subscription.
"""
def __init__(self):
self._subscribers: Dict[str, List[Callable]] = defaultdict(list)
self._event_history: List[Event] = []
self._max_history: int = 1000
self._lock = asyncio.Lock()
async def publish(self, topic: str, data: Any, source: str = "system"):
"""
Publish an event to a topic.
Args:
topic: Event topic/channel name
data: Event data (will be JSON serialized if dict)
source: Event source identifier
"""
event = Event(
topic=topic,
data=data,
timestamp=datetime.utcnow(),
source=source
)
# Store in history
async with self._lock:
self._event_history.append(event)
if len(self._event_history) > self._max_history:
self._event_history.pop(0)
# Notify subscribers
if topic in self._subscribers:
logger.debug(f"Publishing event to topic '{topic}': {len(self._subscribers[topic])} subscribers")
# Create tasks for all subscribers
tasks = []
for callback in self._subscribers[topic]:
tasks.append(self._call_subscriber(callback, event))
# Execute all callbacks concurrently
if tasks:
await asyncio.gather(*tasks, return_exceptions=True)
else:
logger.debug(f"No subscribers for topic '{topic}'")
async def _call_subscriber(self, callback: Callable, event: Event):
"""Call a subscriber callback with error handling."""
try:
if asyncio.iscoroutinefunction(callback):
await callback(event.data)
else:
callback(event.data)
except Exception as e:
logger.error(f"Error in event subscriber: {e}", exc_info=True)
def subscribe(self, topic: str, callback: Callable):
"""
Subscribe to events on a topic.
Args:
topic: Event topic/channel name
callback: Async or sync callback function that receives event data
"""
self._subscribers[topic].append(callback)
logger.info(f"Subscribed to topic '{topic}'. Total subscribers: {len(self._subscribers[topic])}")
def unsubscribe(self, topic: str, callback: Callable):
"""Unsubscribe from a topic."""
if topic in self._subscribers and callback in self._subscribers[topic]:
self._subscribers[topic].remove(callback)
logger.info(f"Unsubscribed from topic '{topic}'")
def get_topics(self) -> List[str]:
"""Get list of all topics with subscribers."""
return list(self._subscribers.keys())
def get_subscriber_count(self, topic: str) -> int:
"""Get number of subscribers for a topic."""
return len(self._subscribers.get(topic, []))
async def get_event_history(self, topic: str = None, limit: int = 100) -> List[Event]:
"""Get event history, optionally filtered by topic."""
async with self._lock:
if topic:
events = [e for e in self._event_history if e.topic == topic]
else:
events = self._event_history.copy()
return events[-limit:]
# Global event bus instance
event_bus = EventBus()
# Common event topics (replaces Redis channels)
class EventTopics:
"""Standard event topic names."""
ENERGY_DATA = "energy_data"
DR_EVENTS = "dr_events"
SENSOR_EVENTS = "sensor_events"
SYSTEM_EVENTS = "system_events"
DATA_INGESTION = "data_ingestion"

monolith/src/core/logging_config.py Normal file

@@ -0,0 +1,25 @@
"""Logging configuration."""
import logging
import sys
from .config import settings
def setup_logging():
"""Configure application logging."""
log_level = logging.DEBUG if settings.debug else logging.INFO
logging.basicConfig(
level=log_level,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[
logging.StreamHandler(sys.stdout)
]
)
# Set third-party loggers to WARNING
logging.getLogger("uvicorn").setLevel(logging.WARNING)
logging.getLogger("motor").setLevel(logging.WARNING)
logging.getLogger("redis").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
logger.info(f"Logging configured. Level: {log_level}")

monolith/src/core/redis.py Normal file

@@ -0,0 +1,61 @@
"""Redis connection management (optional, for caching)."""
import logging
from typing import Optional
import redis.asyncio as aioredis
from .config import settings
logger = logging.getLogger(__name__)
class RedisManager:
"""Manages Redis connection for caching."""
def __init__(self):
self._client: Optional[aioredis.Redis] = None
async def connect(self):
"""Establish connection to Redis."""
if not settings.redis_enabled:
logger.info("Redis is disabled in settings")
return
try:
logger.info(f"Connecting to Redis: {settings.redis_url}")
self._client = await aioredis.from_url(
settings.redis_url,
encoding="utf-8",
decode_responses=True
)
# Test connection
await self._client.ping()
logger.info("Successfully connected to Redis")
except Exception as e:
logger.warning(f"Failed to connect to Redis: {e}. Continuing without Redis cache.")
self._client = None
async def disconnect(self):
"""Close Redis connection."""
if self._client:
await self._client.close()
logger.info("Disconnected from Redis")
@property
def client(self) -> Optional[aioredis.Redis]:
"""Get the Redis client."""
return self._client
@property
def is_available(self) -> bool:
"""Check if Redis is available."""
return self._client is not None
# Global Redis manager instance
redis_manager = RedisManager()
async def get_redis() -> Optional[aioredis.Redis]:
"""Dependency injection function for Redis access."""
return redis_manager.client

monolith/src/main.py Normal file

@@ -0,0 +1,305 @@
"""
Main FastAPI application for the Energy Dashboard Modular Monolith.
Integrates all modules: sensors, demand-response, and data-ingestion.
"""
import asyncio
import logging
from contextlib import asynccontextmanager
from datetime import datetime, timedelta
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
# Core imports
from core.config import settings
from core.logging_config import setup_logging
from core.database import db_manager
from core.events import event_bus, EventTopics
# Module imports
from modules.sensors.router import router as sensors_router
from modules.sensors.room_service import RoomService
from modules.sensors import WebSocketManager
# TEMPORARILY DISABLED: from modules.demand_response import DemandResponseService
# Setup logging
setup_logging()
logger = logging.getLogger(__name__)
# Background tasks
async def room_metrics_aggregation_task():
"""Periodically aggregate room-level metrics"""
logger.info("Starting room metrics aggregation task")
while True:
try:
room_service = RoomService(db_manager.sensors_db, None)
await room_service.aggregate_all_room_metrics()
await asyncio.sleep(300) # 5 minutes
except Exception as e:
logger.error(f"Error in room metrics aggregation: {e}")
await asyncio.sleep(600)
async def data_cleanup_task():
"""Periodic cleanup of old data"""
logger.info("Starting data cleanup task")
while True:
try:
from modules.sensors import SensorService
service = SensorService(db_manager.sensors_db, None)
cleanup_date = datetime.utcnow() - timedelta(days=90)
await service.cleanup_old_data(cleanup_date)
await asyncio.sleep(86400) # 24 hours
except Exception as e:
logger.error(f"Error in data cleanup task: {e}")
await asyncio.sleep(7200)
# TEMPORARILY DISABLED: Demand Response background tasks
# async def event_scheduler_task():
# """Background task for checking and executing scheduled DR events"""
# logger.info("Starting event scheduler task")
#
# while True:
# try:
# service = DemandResponseService(db_manager.demand_response_db, redis_manager.client)
# await service.check_scheduled_events()
# await asyncio.sleep(settings.event_scheduler_interval)
#
# except Exception as e:
# logger.error(f"Error in event scheduler task: {e}")
# await asyncio.sleep(120)
# async def auto_response_task():
# """Background task for automatic demand response"""
# logger.info("Starting auto-response task")
#
# while True:
# try:
# service = DemandResponseService(db_manager.demand_response_db, redis_manager.client)
# await service.process_auto_responses()
# await asyncio.sleep(settings.auto_response_interval)
#
# except Exception as e:
# logger.error(f"Error in auto-response task: {e}")
# await asyncio.sleep(90)
# async def energy_data_event_subscriber():
# """Subscribe to internal event bus for energy data events"""
# logger.info("Starting energy data event subscriber")
#
# async def handle_energy_data(data):
# """Handle energy data events"""
# try:
# service = DemandResponseService(db_manager.demand_response_db, redis_manager.client)
# sensor_id = data.get("sensorId") or data.get("sensor_id")
# power_kw = data.get("value", 0.0)
#
# if sensor_id:
# service.update_device_power_cache(sensor_id, power_kw)
#
# except Exception as e:
# logger.error(f"Error processing energy data event: {e}")
#
# # Subscribe to energy data events
# event_bus.subscribe(EventTopics.ENERGY_DATA, handle_energy_data)
async def http_polling_task(http_poller):
"""Background task for HTTP IoT device polling"""
logger.info("Starting HTTP polling task")
try:
await http_poller.run()
except Exception as e:
logger.error(f"Error in HTTP polling task: {e}")
# Application lifespan
@asynccontextmanager
async def lifespan(app: FastAPI):
"""Manage application lifespan"""
logger.info(f"Starting {settings.app_name} v{settings.app_version}...")
# Connect to databases
await db_manager.connect()
# Initialize default rooms
room_service = RoomService(db_manager.sensors_db, None)
await room_service.initialize_default_rooms()
# Initialize HTTP poller for IoT devices
from modules.data_ingestion import HttpPoller, set_http_poller
http_poller = HttpPoller(
sensors_db=db_manager.sensors_db,
poll_interval=settings.http_poll_interval,
timeout=10,
max_concurrent=5
)
set_http_poller(http_poller)
# Subscribe to internal events
# TEMPORARILY DISABLED: await energy_data_event_subscriber()
# Start background tasks
asyncio.create_task(room_metrics_aggregation_task())
asyncio.create_task(data_cleanup_task())
# TEMPORARILY DISABLED: asyncio.create_task(event_scheduler_task())
# TEMPORARILY DISABLED: asyncio.create_task(auto_response_task())
# Start HTTP polling task
asyncio.create_task(http_polling_task(http_poller))
logger.info("Application startup complete")
yield
logger.info("Shutting down application...")
# Disconnect from databases
await db_manager.disconnect()
logger.info("Application shutdown complete")
# Create FastAPI application
app = FastAPI(
title=settings.app_name,
description="Modular monolithic architecture for Energy Dashboard",
version=settings.app_version,
lifespan=lifespan
)
# Add CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=settings.cors_origins,
allow_credentials=settings.cors_allow_credentials,
allow_methods=settings.cors_allow_methods,
allow_headers=settings.cors_allow_headers,
)
# Root endpoint
@app.get("/")
async def root():
"""Root endpoint"""
return {
"service": settings.app_name,
"version": settings.app_version,
"status": "running",
"timestamp": datetime.utcnow().isoformat()
}
# Health check endpoint
@app.get("/health")
async def health_check():
"""Global health check"""
try:
# Check database connection
await db_manager.main_db.command("ping")
return {
"service": settings.app_name,
"version": settings.app_version,
"status": "healthy",
"timestamp": datetime.utcnow().isoformat(),
"components": {
"database": "healthy",
"event_bus": "healthy"
},
"modules": {
"sensors": "loaded",
"demand_response": "disabled",
"data_ingestion": "loaded"
}
}
except Exception as e:
logger.error(f"Health check failed: {e}")
raise HTTPException(status_code=503, detail="Service Unavailable")
# System overview endpoint
@app.get("/api/v1/overview")
async def system_overview():
"""Get system overview"""
try:
from modules.sensors import SensorService
# TEMPORARILY DISABLED: from modules.demand_response import DemandResponseService
sensor_service = SensorService(db_manager.sensors_db, None)
# TEMPORARILY DISABLED: dr_service = DemandResponseService(db_manager.demand_response_db, None)
# Get sensor counts
all_sensors = await sensor_service.get_sensors()
active_sensors = [s for s in all_sensors if s.get("status") == "online"]
# Get room counts
room_service = RoomService(db_manager.sensors_db, None)
all_rooms = await room_service.get_rooms()
# Get DR event counts
# TEMPORARILY DISABLED: active_events = len(dr_service.active_events) if hasattr(dr_service, 'active_events') else 0
return {
"timestamp": datetime.utcnow().isoformat(),
"sensors": {
"total": len(all_sensors),
"active": len(active_sensors),
"offline": len(all_sensors) - len(active_sensors)
},
"rooms": {
"total": len(all_rooms)
},
"demand_response": {
"status": "disabled"
},
"event_bus": {
"topics": event_bus.get_topics(),
"total_topics": len(event_bus.get_topics())
}
}
except Exception as e:
logger.error(f"Error getting system overview: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
# Include module routers with prefixes
app.include_router(
sensors_router,
prefix="/api/v1",
tags=["sensors"]
)
# Data Ingestion router
from modules.data_ingestion import router as data_ingestion_router
app.include_router(
data_ingestion_router,
prefix="/api/v1/ingestion",
tags=["data-ingestion"]
)
# Note: Demand Response router would be added here (currently disabled)
# app.include_router(demand_response_router, prefix="/api/v1/demand-response", tags=["demand-response"])
if __name__ == "__main__":
import uvicorn
uvicorn.run(
"main:app",
host=settings.host,
port=settings.port,
reload=settings.debug
)

monolith/src/modules/data_ingestion/__init__.py Normal file

@@ -0,0 +1,31 @@
"""Data Ingestion module - handles HTTP/MQTT IoT device polling."""
from .http_poller import HttpPoller
from .iot_config import IoTConfiguration, IoTDevice, get_iot_config, get_config_loader
from .models import (
DataSourceType, PollingStatus, DeviceStatus,
DataSourceSummary, PollingMetrics
)
from .router import router, set_http_poller
__all__ = [
# HTTP Poller
"HttpPoller",
"set_http_poller",
# Configuration
"IoTConfiguration",
"IoTDevice",
"get_iot_config",
"get_config_loader",
# Models
"DataSourceType",
"PollingStatus",
"DeviceStatus",
"DataSourceSummary",
"PollingMetrics",
# Router
"router",
]

monolith/src/modules/data_ingestion/http_poller.py Normal file

@@ -0,0 +1,353 @@
"""
HTTP Poller Service
Polls IoT devices via HTTP and ingests sensor data
"""
import asyncio
import logging
import time
from datetime import datetime
from typing import Dict, Optional, Any, List
import aiohttp
from motor.motor_asyncio import AsyncIOMotorDatabase
from .iot_config import IoTConfiguration, IoTDevice, SensorConfig, get_iot_config
from .models import DeviceStatus, PollingStatus, PollingMetrics
from modules.sensors.models import SensorReading, SensorType
from core.events import event_bus, EventTopics
logger = logging.getLogger(__name__)
class HttpPoller:
"""HTTP-based IoT device poller"""
def __init__(
self,
sensors_db: AsyncIOMotorDatabase,
config: Optional[IoTConfiguration] = None,
poll_interval: int = 60,
timeout: int = 10,
max_concurrent: int = 5
):
"""
Initialize HTTP poller
Args:
sensors_db: Motor database for sensor data storage
config: IoT configuration (loads from file if None)
poll_interval: Seconds between polls
timeout: HTTP request timeout in seconds
max_concurrent: Maximum concurrent HTTP requests
"""
self.sensors_db = sensors_db
self.config = config or get_iot_config()
self.poll_interval = poll_interval
self.timeout = timeout
self.max_concurrent = max_concurrent
# Metrics tracking
self.device_status: Dict[str, DeviceStatus] = {}
self.total_polls = 0
self.successful_polls = 0
self.failed_polls = 0
self.poll_times: List[float] = []
# Control flags
self.running = False
self._semaphore = asyncio.Semaphore(max_concurrent)
# Initialize device status
self._initialize_device_status()
def _initialize_device_status(self):
"""Initialize status tracking for all devices"""
for device in self.config.get_all_devices():
self.device_status[device.name] = DeviceStatus(
device_name=device.name,
device_type=device.type,
uri=device.uri,
status=PollingStatus.INACTIVE,
sensors_count=len(device.sensors)
)
async def poll_device(self, device: IoTDevice) -> bool:
"""
Poll a single device and ingest its data
Args:
device: IoT device configuration
Returns:
True if successful, False otherwise
"""
async with self._semaphore:
status = self.device_status[device.name]
start_time = time.time()
try:
# Update status
status.last_poll = datetime.utcnow()
status.total_polls += 1
self.total_polls += 1
# Make HTTP request
async with aiohttp.ClientSession() as session:
async with session.get(
device.uri,
timeout=aiohttp.ClientTimeout(total=self.timeout)
) as response:
if response.status != 200:
raise Exception(f"HTTP {response.status}: {await response.text()}")
data = await response.json()
# Extract and store sensor readings
timestamp = int(datetime.utcnow().timestamp())
readings_stored = 0
for sensor in device.sensors:
try:
value = self._extract_value(data, sensor.tag)
if value is not None:
await self._store_reading(
device=device,
sensor=sensor,
value=value,
timestamp=timestamp
)
readings_stored += 1
except Exception as e:
logger.warning(
f"Failed to extract sensor {sensor.type} from {device.name}: {e}"
)
# Update success metrics
poll_time = (time.time() - start_time) * 1000
self.poll_times.append(poll_time)
if len(self.poll_times) > 100:
self.poll_times.pop(0)
status.successful_polls += 1
status.last_success = datetime.utcnow()
status.status = PollingStatus.ACTIVE
status.last_error = None
self.successful_polls += 1
logger.debug(
f"Polled {device.name}: {readings_stored} readings in {poll_time:.1f}ms"
)
return True
except asyncio.TimeoutError:
error_msg = f"Timeout after {self.timeout}s"
status.failed_polls += 1
status.status = PollingStatus.ERROR
status.last_error = error_msg
self.failed_polls += 1
logger.error(f"Timeout polling {device.name}")
return False
except Exception as e:
error_msg = str(e)
status.failed_polls += 1
status.status = PollingStatus.ERROR
status.last_error = error_msg
self.failed_polls += 1
logger.error(f"Error polling {device.name}: {e}")
return False
def _extract_value(self, data: Dict[str, Any], tag_path: List[str]) -> Optional[Any]:
"""
Extract value from nested JSON using tag path
Args:
data: JSON response data
tag_path: List of keys to traverse
Returns:
Extracted value or None
"""
current = data
for key in tag_path:
if isinstance(current, dict):
current = current.get(key)
if current is None:
return None
else:
return None
return current
async def _store_reading(
self,
device: IoTDevice,
sensor: SensorConfig,
value: Any,
timestamp: int
):
"""
Store sensor reading in database and publish event
Args:
device: Device configuration
sensor: Sensor configuration
value: Sensor value
timestamp: Unix timestamp
"""
# Map sensor type to SensorType enum (with fallback to ENERGY)
sensor_type_map = {
"power": SensorType.ENERGY,
"voltage": SensorType.ENERGY,
"current": SensorType.ENERGY,
"energy": SensorType.ENERGY,
"temperature": SensorType.TEMPERATURE,
"humidity": SensorType.HUMIDITY,
"co2": SensorType.CO2,
"generation": SensorType.ENERGY,
"charging_rate": SensorType.ENERGY,
"doorOpen": SensorType.OCCUPANCY,
"state": SensorType.OCCUPANCY,
}
sensor_type = sensor_type_map.get(sensor.type, SensorType.ENERGY)
# Create sensor ID from device name and sensor type
sensor_id = f"{device.name}_{sensor.type}".replace(" ", "_")
# Convert value based on data type
if sensor.data == "BOOLEAN":
numeric_value = 1.0 if value else 0.0
else:
numeric_value = float(value)
# Create sensor reading
reading_data = {
"sensor_id": sensor_id,
"sensor_type": sensor_type.value,
"timestamp": timestamp,
"room": device.type, # Use device type as room (battery, refrigerator, etc.)
"metadata": {
"device_name": device.name,
"device_type": device.type,
"sensor_type": sensor.type,
"data_type": sensor.data,
"source": "http_poller"
}
}
# Add specific sensor data fields
if sensor.type == "power":
reading_data["power"] = numeric_value
elif sensor.type == "voltage":
reading_data["voltage"] = numeric_value
elif sensor.type == "current":
reading_data["current"] = numeric_value
elif sensor.type == "temperature":
reading_data["temperature"] = numeric_value
elif sensor.type == "humidity":
reading_data["humidity"] = numeric_value
elif sensor.type == "energy":
reading_data["energy"] = numeric_value
elif sensor.type == "generation":
reading_data["generation"] = numeric_value
else:
reading_data["energy"] = numeric_value # Default field
# Store in database
await self.sensors_db.sensor_readings.insert_one(reading_data)
# Publish to event bus
await event_bus.publish(EventTopics.ENERGY_DATA, {
"sensor_id": sensor_id,
"value": numeric_value,
"timestamp": timestamp,
"device_name": device.name,
"sensor_type": sensor.type
})
async def poll_all_devices(self):
"""Poll all configured devices concurrently"""
devices = self.config.get_all_devices()
if not devices:
logger.warning("No devices configured for polling")
return
logger.info(f"Polling {len(devices)} devices...")
# Poll all devices concurrently
tasks = [self.poll_device(device) for device in devices]
results = await asyncio.gather(*tasks, return_exceptions=True)
# Count successes
successes = sum(1 for r in results if r is True)
logger.info(f"Polling complete: {successes}/{len(devices)} successful")
async def run(self):
"""Run continuous polling loop"""
self.running = True
logger.info(
f"Starting HTTP poller: {len(self.config.get_all_devices())} devices, "
f"interval={self.poll_interval}s"
)
while self.running:
try:
await self.poll_all_devices()
await asyncio.sleep(self.poll_interval)
except Exception as e:
logger.error(f"Error in polling loop: {e}")
await asyncio.sleep(10) # Brief pause on error
def stop(self):
"""Stop the polling loop"""
logger.info("Stopping HTTP poller...")
self.running = False
def get_metrics(self) -> PollingMetrics:
"""Get current polling metrics"""
active_count = sum(
1 for s in self.device_status.values()
if s.status == PollingStatus.ACTIVE
)
inactive_count = sum(
1 for s in self.device_status.values()
if s.status == PollingStatus.INACTIVE
)
error_count = sum(
1 for s in self.device_status.values()
if s.status == PollingStatus.ERROR
)
# Count devices by type
devices_by_type: Dict[str, int] = {}
for device in self.config.get_all_devices():
devices_by_type[device.type] = devices_by_type.get(device.type, 0) + 1
# Calculate success rate
success_rate = 0.0
if self.total_polls > 0:
success_rate = (self.successful_polls / self.total_polls) * 100
# Calculate average poll time
avg_poll_time = 0.0
if self.poll_times:
avg_poll_time = sum(self.poll_times) / len(self.poll_times)
return PollingMetrics(
timestamp=datetime.utcnow(),
total_devices=len(self.device_status),
active_devices=active_count,
inactive_devices=inactive_count,
error_devices=error_count,
total_polls=self.total_polls,
successful_polls=self.successful_polls,
failed_polls=self.failed_polls,
success_rate=success_rate,
average_poll_time_ms=avg_poll_time,
devices_by_type=devices_by_type
)
def get_device_statuses(self) -> List[DeviceStatus]:
"""Get status for all devices"""
return list(self.device_status.values())
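
Editor's note: a minimal sketch of how this poller could be wired into the application lifecycle; the routes module further below notes that main.py is expected to call set_http_poller. Module paths, the HttpPoller constructor arguments, and the router prefix are assumptions for illustration, not the project's actual main.py.

from contextlib import asynccontextmanager
import asyncio
from fastapi import FastAPI

from modules.data_ingestion.http_poller import HttpPoller            # assumed import path
from modules.data_ingestion.routes import router, set_http_poller    # assumed import path
from modules.data_ingestion.iot_config import get_iot_config         # assumed import path

@asynccontextmanager
async def lifespan(app: FastAPI):
    config = get_iot_config()
    poller = HttpPoller(config)          # constructor signature is an assumption
    set_http_poller(poller)              # make the instance visible to the API routes
    task = asyncio.create_task(poller.run())
    yield
    poller.stop()
    task.cancel()

app = FastAPI(lifespan=lifespan)
app.include_router(router, prefix="/api/v1/ingestion")  # prefix is an assumption
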

View File

@@ -0,0 +1,187 @@
"""
IoT Configuration Loader
Loads and validates IoT device configuration from iots-right.json
"""
import json
import logging
from pathlib import Path
from typing import Dict, List, Optional, Any
from pydantic import BaseModel, Field, validator
logger = logging.getLogger(__name__)
class SensorConfig(BaseModel):
"""Individual sensor configuration within a device"""
type: str = Field(..., description="Sensor type (power, voltage, temperature, etc.)")
tag: List[str] = Field(..., description="JSON path tags to extract value")
data: str = Field(..., description="Data type: DOUBLE, BOOLEAN, etc.")
class IoTDevice(BaseModel):
"""IoT device configuration"""
name: str = Field(..., description="Device name")
type: str = Field(..., description="Device type (battery, refrigerator, hvac, etc.)")
uri: str = Field(..., description="HTTP endpoint URI")
sensors: List[SensorConfig] = Field(..., description="List of sensors")
@validator('uri')
def validate_uri(cls, v):
"""Ensure URI is valid"""
if not v.startswith(('http://', 'https://')):
raise ValueError(f"Invalid URI: {v}")
return v
class IoTConfiguration(BaseModel):
"""Complete IoT configuration"""
iots: Dict[str, List[IoTDevice]] = Field(..., description="IoT devices grouped by category")
def get_all_devices(self) -> List[IoTDevice]:
"""Get flat list of all devices"""
devices = []
for device_list in self.iots.values():
devices.extend(device_list)
return devices
def get_devices_by_type(self, device_type: str) -> List[IoTDevice]:
"""Get devices by category type"""
return self.iots.get(device_type, [])
def get_device_by_name(self, name: str) -> Optional[IoTDevice]:
"""Find device by name"""
for device in self.get_all_devices():
if device.name == name:
return device
return None
class IoTConfigLoader:
"""Loads and manages IoT configuration"""
def __init__(self, config_path: Optional[Path] = None):
"""
Initialize config loader
Args:
config_path: Path to iots-right.json. If None, looks in monolith root
"""
self.config_path = config_path or self._find_config_file()
self.config: Optional[IoTConfiguration] = None
def _find_config_file(self) -> Path:
"""Find iots-right.json in monolith directory"""
# Start from current file location and go up to find monolith root
current = Path(__file__).parent
while current.name != 'monolith' and current.parent != current:
current = current.parent
config_file = current / 'iots-right.json'
if not config_file.exists():
logger.warning(f"Config file not found at {config_file}")
return config_file
def load(self) -> IoTConfiguration:
"""
Load configuration from file
Returns:
IoTConfiguration object
Raises:
FileNotFoundError: If config file doesn't exist
ValueError: If config is invalid
"""
if not self.config_path.exists():
raise FileNotFoundError(f"IoT config not found: {self.config_path}")
try:
with open(self.config_path, 'r') as f:
data = json.load(f)
self.config = IoTConfiguration(**data)
device_count = len(self.config.get_all_devices())
logger.info(f"Loaded IoT config: {device_count} devices from {self.config_path}")
# Log summary by type
for device_type, devices in self.config.iots.items():
logger.info(f" - {device_type}: {len(devices)} devices")
return self.config
except json.JSONDecodeError as e:
logger.error(f"Invalid JSON in config file: {e}")
raise ValueError(f"Invalid JSON in {self.config_path}: {e}")
except Exception as e:
logger.error(f"Failed to load config: {e}")
raise
def reload(self) -> IoTConfiguration:
"""Reload configuration from file"""
return self.load()
def get_config(self) -> IoTConfiguration:
"""
Get current configuration, loading if necessary
Returns:
IoTConfiguration object
"""
if self.config is None:
self.load()
return self.config
def get_device_summary(self) -> Dict[str, Any]:
"""Get summary statistics of devices"""
config = self.get_config()
total_devices = 0
total_sensors = 0
summary = {}
for device_type, devices in config.iots.items():
device_count = len(devices)
sensor_count = sum(len(d.sensors) for d in devices)
total_devices += device_count
total_sensors += sensor_count
summary[device_type] = {
"device_count": device_count,
"sensor_count": sensor_count,
"devices": [
{
"name": d.name,
"uri": d.uri,
"sensor_count": len(d.sensors)
}
for d in devices
]
}
return {
"total_devices": total_devices,
"total_sensors": total_sensors,
"by_type": summary,
"config_file": str(self.config_path)
}
# Global config loader instance
_config_loader: Optional[IoTConfigLoader] = None
def get_config_loader() -> IoTConfigLoader:
"""Get global config loader instance"""
global _config_loader
if _config_loader is None:
_config_loader = IoTConfigLoader()
return _config_loader
def get_iot_config() -> IoTConfiguration:
"""Get IoT configuration"""
return get_config_loader().get_config()
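
Editor's note: as a sanity check on the schema above, a small sketch of a configuration dict that validates against IoTConfiguration; the device name, URI, and tag paths are illustrative and are not taken from iots-right.json.

# Hypothetical configuration matching the SensorConfig/IoTDevice/IoTConfiguration models above
sample = {
    "iots": {
        "battery": [
            {
                "name": "battery_1",
                "type": "battery",
                "uri": "http://192.168.1.50/status",
                "sensors": [
                    {"type": "power", "tag": ["battery", "power_w"], "data": "DOUBLE"},
                    {"type": "voltage", "tag": ["battery", "voltage_v"], "data": "DOUBLE"},
                ],
            }
        ]
    }
}
config = IoTConfiguration(**sample)
assert config.get_device_by_name("battery_1").type == "battery"
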

View File

@@ -0,0 +1,112 @@
"""
Pydantic models for Data Ingestion module
"""
from datetime import datetime
from typing import Optional, Dict, Any, List
from pydantic import BaseModel, Field
from enum import Enum
class PollingStatus(str, Enum):
"""Device polling status"""
ACTIVE = "active"
INACTIVE = "inactive"
ERROR = "error"
DISABLED = "disabled"
class DataSourceType(str, Enum):
"""Data source type"""
HTTP = "http"
MQTT = "mqtt"
class DeviceStatus(BaseModel):
"""Device polling status information"""
device_name: str
device_type: str
uri: str
status: PollingStatus
last_poll: Optional[datetime] = None
last_success: Optional[datetime] = None
total_polls: int = 0
successful_polls: int = 0
failed_polls: int = 0
last_error: Optional[str] = None
sensors_count: int = 0
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class DataSourceSummary(BaseModel):
"""Summary of data sources"""
source_type: DataSourceType
enabled: bool
total_devices: int
active_devices: int
total_sensors: int
devices: List[DeviceStatus]
class PollingMetrics(BaseModel):
"""Polling performance metrics"""
timestamp: datetime
total_devices: int
active_devices: int
inactive_devices: int
error_devices: int
total_polls: int
successful_polls: int
failed_polls: int
success_rate: float
average_poll_time_ms: float
devices_by_type: Dict[str, int]
class Config:
json_encoders = {
datetime: lambda v: v.isoformat()
}
class HttpPollerConfig(BaseModel):
"""HTTP poller configuration"""
enabled: bool = True
poll_interval_seconds: int = Field(60, ge=5, le=3600)
timeout_seconds: int = Field(10, ge=1, le=60)
max_retries: int = Field(3, ge=0, le=10)
concurrent_requests: int = Field(5, ge=1, le=50)
class MqttSubscriberConfig(BaseModel):
"""MQTT subscriber configuration (future)"""
enabled: bool = False
broker_host: str = "localhost"
broker_port: int = 1883
username: Optional[str] = None
password: Optional[str] = None
topics: List[str] = []
class DataIngestionConfig(BaseModel):
"""Complete data ingestion configuration"""
http: HttpPollerConfig = HttpPollerConfig()
mqtt: MqttSubscriberConfig = MqttSubscriberConfig()
class HealthResponse(BaseModel):
"""Health check response"""
service: str
status: str
timestamp: datetime
version: str
http_poller: Optional[Dict[str, Any]] = None
mqtt_subscriber: Optional[Dict[str, Any]] = None
class Config:
json_encoders = {
datetime: lambda v: v.isoformat()
}
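
Editor's note: a brief usage sketch of the configuration models above, showing the defaults and one of the declared bounds; the values are illustrative.

# Nested config with explicit HTTP settings; MQTT keeps its defaults
cfg = DataIngestionConfig(http=HttpPollerConfig(poll_interval_seconds=30, concurrent_requests=10))
print(cfg.http.timeout_seconds)   # 10 (default)
print(cfg.mqtt.enabled)           # False (MQTT subscriber is reserved for future use)
# HttpPollerConfig(poll_interval_seconds=2) would fail validation because the field requires ge=5
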

View File

@@ -0,0 +1,194 @@
"""Data Ingestion module API routes."""
import logging
from datetime import datetime
from fastapi import APIRouter, HTTPException, Depends
from typing import Optional
from .models import (
HealthResponse, DataSourceSummary, PollingMetrics,
DeviceStatus, DataSourceType
)
from .iot_config import get_config_loader
from .http_poller import HttpPoller
from core.dependencies import get_sensors_db
logger = logging.getLogger(__name__)
# Create router
router = APIRouter()
# Global HTTP poller instance (will be set by main.py)
_http_poller: Optional[HttpPoller] = None
def set_http_poller(poller: HttpPoller):
"""Set the global HTTP poller instance"""
global _http_poller
_http_poller = poller
def get_http_poller() -> HttpPoller:
"""Get the HTTP poller instance"""
if _http_poller is None:
raise HTTPException(status_code=503, detail="HTTP poller not initialized")
return _http_poller
# Health check
@router.get("/health", response_model=HealthResponse)
async def health_check(db=Depends(get_sensors_db)):
"""Health check endpoint for data ingestion module"""
try:
await db.command("ping")
http_status = None
if _http_poller:
metrics = _http_poller.get_metrics()
http_status = {
"enabled": True,
"running": _http_poller.running,
"total_devices": metrics.total_devices,
"active_devices": metrics.active_devices,
"success_rate": metrics.success_rate
}
return HealthResponse(
service="data-ingestion-module",
status="healthy",
timestamp=datetime.utcnow(),
version="1.0.0",
http_poller=http_status,
mqtt_subscriber={"enabled": False}
)
except Exception as e:
logger.error(f"Health check failed: {e}")
raise HTTPException(status_code=503, detail="Service Unavailable")
@router.get("/sources", response_model=DataSourceSummary)
async def get_data_sources():
"""
Get all data ingestion sources
Implements OpenAPI endpoint: GET /api/v1/sources
"""
try:
poller = get_http_poller()

device_statuses = poller.get_device_statuses()
active_count = sum(1 for d in device_statuses if d.status.value == "active")
return DataSourceSummary(
source_type=DataSourceType.HTTP,
enabled=True,
total_devices=len(device_statuses),
active_devices=active_count,
total_sensors=sum(d.sensors_count for d in device_statuses),
devices=device_statuses
)
except Exception as e:
logger.error(f"Error getting data sources: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/sources/summary")
async def get_sources_summary():
"""Get summary of configured data sources"""
try:
config_loader = get_config_loader()
summary = config_loader.get_device_summary()
return summary
except Exception as e:
logger.error(f"Error getting sources summary: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/sources/devices")
async def get_device_list():
"""Get list of all configured devices"""
try:
poller = get_http_poller()
devices = poller.get_device_statuses()
return {
"devices": devices,
"count": len(devices),
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error getting device list: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/sources/devices/{device_name}", response_model=DeviceStatus)
async def get_device_status(device_name: str):
"""Get status of a specific device"""
try:
poller = get_http_poller()
status = poller.device_status.get(device_name)
if not status:
raise HTTPException(status_code=404, detail=f"Device '{device_name}' not found")
return status
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting device status: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/metrics", response_model=PollingMetrics)
async def get_polling_metrics():
"""Get HTTP polling performance metrics"""
try:
poller = get_http_poller()
return poller.get_metrics()
except Exception as e:
logger.error(f"Error getting metrics: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/poll/trigger")
async def trigger_manual_poll():
"""Manually trigger a polling cycle for all devices"""
try:
poller = get_http_poller()
# Trigger poll in background
import asyncio
asyncio.create_task(poller.poll_all_devices())
return {
"message": "Manual poll triggered",
"timestamp": datetime.utcnow().isoformat(),
"devices": len(poller.device_status)
}
except Exception as e:
logger.error(f"Error triggering poll: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/config/reload")
async def reload_configuration():
"""Reload IoT configuration from iots-right.json"""
try:
config_loader = get_config_loader()
config = config_loader.reload()
return {
"message": "Configuration reloaded successfully",
"total_devices": len(config.get_all_devices()),
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error reloading config: {e}")
raise HTTPException(status_code=500, detail=str(e))
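
Editor's note: a quick sketch of exercising these endpoints from a client; the /api/v1/ingestion prefix, host, and port are assumptions about how the router is mounted, not values taken from the code.

import asyncio
import httpx

async def main():
    base = "http://localhost:8000/api/v1/ingestion"   # host, port, and prefix are assumptions
    async with httpx.AsyncClient() as client:
        print((await client.get(f"{base}/health")).json())
        print((await client.get(f"{base}/metrics")).json())
        print((await client.post(f"{base}/poll/trigger")).json())

asyncio.run(main())
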

View File

@@ -0,0 +1,21 @@
"""Demand Response module - handles grid interaction and load management."""
from .models import (
DRInvitation,
DRInvitationResponse,
EventRequest,
EventStatus,
LoadReductionRequest,
FlexibilityResponse
)
from .demand_response_service import DemandResponseService
__all__ = [
"DRInvitation",
"DRInvitationResponse",
"EventRequest",
"EventStatus",
"LoadReductionRequest",
"FlexibilityResponse",
"DemandResponseService",
]

View File

@@ -0,0 +1,747 @@
"""
Demand Response Service - Core Business Logic
Handles DR invitations, event execution, auto-response, and flexibility calculation
"""
import asyncio
import json
import uuid
from datetime import datetime, timedelta
from typing import List, Dict, Optional, Any
import logging
from motor.motor_asyncio import AsyncIOMotorDatabase
import redis.asyncio as redis
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DemandResponseService:
"""Core Demand Response service business logic"""
def __init__(self, db: AsyncIOMotorDatabase, redis_client: redis.Redis):
self.db = db
self.redis = redis_client
self.active_events: Dict[str, asyncio.Task] = {} # event_id -> task
self.device_power_cache: Dict[str, float] = {} # device_id -> power_kw (updated by Redis subscriber)
# ===== INVITATION MANAGEMENT =====
async def send_invitation(
self,
event_time: datetime,
load_kwh: float,
load_percentage: float,
iots: List[str],
duration_minutes: int = 59
) -> Dict[str, Any]:
"""
Create and send DR invitation
Returns: {"event_id": str, "response": str, "message": str}
"""
logger.info(f"Creating DR invitation for {len(iots)} devices at {event_time}")
# Generate unique event ID
event_id = str(uuid.uuid4())
# Check auto-response configuration
auto_config = await self.get_auto_response_config()
response = "YES" if auto_config.get("enabled", False) else "WAITING"
# Create invitation document
invitation = {
"event_id": event_id,
"created_at": datetime.utcnow(),
"event_time": event_time,
"load_kwh": load_kwh,
"load_percentage": load_percentage,
"iots": iots,
"duration_minutes": duration_minutes,
"response": response,
"status": "pending"
}
# Store in MongoDB
await self.db.demand_response_invitations.insert_one(invitation)
# Cache in Redis for fast access (24 hour TTL)
cache_key = f"dr:invitation:{event_id}"
await self.redis.setex(
cache_key,
86400,
json.dumps(invitation, default=str)
)
# Publish event to Redis pub/sub
await self.redis.publish("dr_events", json.dumps({
"event": "invitation_created",
"event_id": event_id,
"event_time": event_time.isoformat(),
"load_kwh": load_kwh,
"response": response
}))
logger.info(f"Invitation {event_id} created with response: {response}")
return {
"event_id": event_id,
"response": response,
"message": "Invitation created successfully"
}
async def answer_invitation(
self,
event_id: str,
iot_id: str,
response: str,
committed_reduction_kw: Optional[float] = None
) -> Dict[str, Any]:
"""
Record device response to invitation
Returns: {"success": bool, "message": str}
"""
logger.info(f"Recording response for invitation {event_id}, device {iot_id}: {response}")
# Validate invitation exists
invitation = await self.get_invitation(event_id)
if not invitation:
return {"success": False, "message": f"Invitation {event_id} not found"}
if iot_id not in invitation["iots"]:
return {"success": False, "message": f"Device {iot_id} not in invitation"}
# Check if already responded
existing = await self.db.demand_response_responses.find_one({
"event_id": event_id,
"device_id": iot_id
})
if existing:
return {"success": False, "message": f"Device {iot_id} has already responded"}
# Store response
response_doc = {
"event_id": event_id,
"device_id": iot_id,
"response": response,
"committed_reduction_kw": committed_reduction_kw,
"responded_at": datetime.utcnow()
}
await self.db.demand_response_responses.insert_one(response_doc)
# Check if all devices have responded
total_devices = len(invitation["iots"])
total_responses = await self.db.demand_response_responses.count_documents({"event_id": event_id})
if total_responses == total_devices:
# All devices responded - update invitation status
yes_count = await self.db.demand_response_responses.count_documents({
"event_id": event_id,
"response": "YES"
})
all_yes = yes_count == total_devices
new_response = "YES" if all_yes else "NO"
new_status = "scheduled" if all_yes else "cancelled"
await self.db.demand_response_invitations.update_one(
{"event_id": event_id},
{"$set": {"response": new_response, "status": new_status}}
)
logger.info(f"Invitation {event_id} final response: {new_response} (status: {new_status})")
# Clear cache
await self.redis.delete(f"dr:invitation:{event_id}")
# Publish event
await self.redis.publish("dr_events", json.dumps({
"event": "invitation_answered",
"event_id": event_id,
"device_id": iot_id,
"response": response
}))
return {"success": True, "message": "Response recorded successfully"}
async def get_invitation(self, event_id: str) -> Optional[Dict[str, Any]]:
"""
Get invitation by event_id (with Redis caching)
"""
# Try cache first
cache_key = f"dr:invitation:{event_id}"
cached = await self.redis.get(cache_key)
if cached:
invitation = json.loads(cached)
return invitation
# Fallback to MongoDB
invitation = await self.db.demand_response_invitations.find_one({"event_id": event_id})
if invitation:
invitation["_id"] = str(invitation["_id"])
# Cache for 24 hours
await self.redis.setex(
cache_key,
86400,
json.dumps(invitation, default=str)
)
return invitation
return None
async def get_unanswered_invitations(self) -> List[Dict[str, Any]]:
"""Get all pending invitations awaiting response"""
cursor = self.db.demand_response_invitations.find({
"response": "WAITING",
"status": "pending"
}).sort("created_at", -1)
invitations = []
async for inv in cursor:
inv["_id"] = str(inv["_id"])
invitations.append(inv)
return invitations
async def get_answered_invitations(self, hours: int = 24, limit: int = 50) -> List[Dict[str, Any]]:
"""Get recent answered invitations"""
start_time = datetime.utcnow() - timedelta(hours=hours)
cursor = self.db.demand_response_invitations.find({
"response": {"$ne": "WAITING"},
"created_at": {"$gte": start_time}
}).sort("created_at", -1).limit(limit)
invitations = []
async for inv in cursor:
inv["_id"] = str(inv["_id"])
invitations.append(inv)
return invitations
# ===== EVENT EXECUTION =====
async def schedule_event(
self,
event_time: datetime,
iots: List[str],
load_reduction_kw: float,
duration_minutes: int = 59
) -> Dict[str, Any]:
"""
Schedule a DR event for execution
Returns: {"event_id": str, "message": str}
"""
logger.info(f"Scheduling DR event for {len(iots)} devices at {event_time}")
# Create event document
event_id = str(uuid.uuid4())
end_time = event_time + timedelta(minutes=duration_minutes)
event = {
"event_id": event_id,
"start_time": event_time,
"end_time": end_time,
"status": "scheduled",
"participating_devices": iots,
"target_reduction_kw": load_reduction_kw,
"actual_reduction_kw": 0.0,
"power_samples": []
}
await self.db.demand_response_events.insert_one(event)
# Publish scheduled event
await self.redis.publish("dr_events", json.dumps({
"event": "event_scheduled",
"event_id": event_id,
"start_time": event_time.isoformat(),
"end_time": end_time.isoformat(),
"devices": iots
}))
logger.info(f"Event {event_id} scheduled successfully")
return {
"event_id": event_id,
"message": "Event scheduled successfully"
}
async def execute_event(self, event_id: str):
"""
Execute a DR event (spawns background task)
"""
logger.info(f"Executing DR event {event_id}")
# Get event details
event = await self.db.demand_response_events.find_one({"event_id": event_id})
if not event:
logger.error(f"Event {event_id} not found")
return
# Update status to active
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{"$set": {"status": "active", "actual_start_time": datetime.utcnow()}}
)
# Publish event started
await self.redis.publish("dr_events", json.dumps({
"event": "event_started",
"event_id": event_id,
"devices": event["participating_devices"]
}))
# Create and store async task for this event
task = asyncio.create_task(self._run_event_loop(event))
self.active_events[event_id] = task
logger.info(f"DR event {event_id} started successfully")
async def _run_event_loop(self, event: Dict[str, Any]):
"""
CRITICAL: Core event execution loop - runs for duration_minutes
Samples power every 5 seconds, accumulates reduction, handles cancellation
"""
event_id = event["event_id"]
end_time = event["end_time"]
devices = event["participating_devices"]
total_reduction_kwh = 0.0
sample_count = 0
logger.info(f"Starting event loop for {event_id}, ending at {end_time}")
try:
while datetime.utcnow() < end_time:
# Get current power for all participating devices from cache
device_powers = {
device_id: self.device_power_cache.get(device_id, 0.0)
for device_id in devices
}
# Calculate reduction for this 5-second interval
# interval_hours = 5.0 / 3600.0 = 0.00139 hours
interval_reduction_kwh = sum(device_powers.values()) * (5.0 / 3600.0)
total_reduction_kwh += interval_reduction_kwh
sample_count += 1
# Store sample in MongoDB (every sample to maintain accuracy)
sample = {
"timestamp": datetime.utcnow(),
"device_powers": device_powers,
"interval_reduction_kwh": interval_reduction_kwh
}
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$push": {"power_samples": sample},
"$set": {"actual_reduction_kw": total_reduction_kwh}
}
)
# Update Redis cache for fast access to current reduction
cache_key = f"dr:event:active:{event_id}"
await self.redis.setex(
cache_key,
300, # 5 minute TTL
json.dumps({
"event_id": event_id,
"current_reduction_kwh": total_reduction_kwh,
"devices": device_powers,
"last_update": datetime.utcnow().isoformat()
}, default=str)
)
# Publish progress every 10 samples (50 seconds)
if sample_count % 10 == 0:
await self.redis.publish("dr_events", json.dumps({
"event": "event_progress",
"event_id": event_id,
"total_reduction_kwh": round(total_reduction_kwh, 3),
"device_powers": device_powers,
"timestamp": datetime.utcnow().isoformat()
}))
logger.info(f"Event {event_id} progress: {total_reduction_kwh:.3f} kWh ({sample_count} samples)")
# Sleep for 5 seconds
await asyncio.sleep(5)
# Event completed successfully
logger.info(f"Event {event_id} completed with {total_reduction_kwh:.3f} kWh reduction")
await self._complete_event(event_id, total_reduction_kwh)
except asyncio.CancelledError:
logger.info(f"Event {event_id} cancelled by user")
await self._cancel_event(event_id)
raise
except Exception as e:
logger.error(f"Error in event {event_id}: {e}", exc_info=True)
await self._cancel_event(event_id)
async def _complete_event(self, event_id: str, total_reduction_kwh: float):
"""Mark event as completed"""
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$set": {
"status": "completed",
"actual_end_time": datetime.utcnow(),
"actual_reduction_kw": total_reduction_kwh
}
}
)
# Remove from active events
self.active_events.pop(event_id, None)
# Clear cache
await self.redis.delete(f"dr:event:active:{event_id}")
# Publish completion
await self.redis.publish("dr_events", json.dumps({
"event": "event_completed",
"event_id": event_id,
"total_reduction_kwh": total_reduction_kwh
}))
logger.info(f"DR event {event_id} marked as completed")
async def _cancel_event(self, event_id: str):
"""Internal method to cancel an event"""
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$set": {
"status": "cancelled",
"cancelled_at": datetime.utcnow()
}
}
)
self.active_events.pop(event_id, None)
await self.redis.delete(f"dr:event:active:{event_id}")
# Publish cancellation
await self.redis.publish("dr_events", json.dumps({
"event": "event_cancelled",
"event_id": event_id,
"timestamp": datetime.utcnow().isoformat()
}))
async def cancel_event(self, event_id: str):
"""
Public method to cancel a running DR event gracefully
"""
logger.info(f"Cancelling DR event {event_id}")
# Cancel the async task
task = self.active_events.get(event_id)
if task and not task.done():
task.cancel()
try:
await task
except asyncio.CancelledError:
# Expected - task cancelled successfully
pass
except Exception as e:
logger.error(f"Error cancelling event task {event_id}: {e}")
# Update database status (if not already done by _cancel_event)
event = await self.db.demand_response_events.find_one({"event_id": event_id})
if event and event.get("status") != "cancelled":
await self._cancel_event(event_id)
logger.info(f"DR event {event_id} cancelled successfully")
async def get_active_events(self) -> List[Dict[str, Any]]:
"""Get currently running events with real-time data"""
cursor = self.db.demand_response_events.find({
"status": "active"
}).sort("start_time", -1)
events = []
async for event in cursor:
event["_id"] = str(event["_id"])
# Add real-time data from cache
cache_key = f"dr:event:active:{event['event_id']}"
cached = await self.redis.get(cache_key)
if cached:
realtime_data = json.loads(cached)
event["current_reduction_kwh"] = realtime_data.get("current_reduction_kwh")
event["current_device_powers"] = realtime_data.get("devices")
events.append(event)
return events
# ===== DEVICE POWER INTEGRATION =====
def update_device_power_cache(self, device_id: str, power_kw: float):
"""
Update device power cache (called by Redis subscriber)
This is synchronous because it's just updating a dict
"""
self.device_power_cache[device_id] = power_kw
# No logging here to avoid spam (called every few seconds per device)
async def get_device_power(self, device_id: str) -> float:
"""Get current power for a device from cache"""
return self.device_power_cache.get(device_id, 0.0)
# ===== AUTO-RESPONSE CONFIGURATION =====
async def get_auto_response_config(self) -> Dict[str, Any]:
"""Get auto-response configuration"""
config = await self.db.auto_response_config.find_one({"config_id": "default"})
if not config:
# Create default config
default_config = {
"config_id": "default",
"enabled": False,
"max_reduction_percentage": 20.0,
"response_delay_seconds": 300,
"min_notice_minutes": 60,
"updated_at": datetime.utcnow()
}
await self.db.auto_response_config.insert_one(default_config)
return default_config
return config
async def set_auto_response_config(
self,
enabled: bool,
max_reduction_percentage: float = 20.0,
response_delay_seconds: int = 300,
min_notice_minutes: int = 60
) -> Dict[str, Any]:
"""Update auto-response configuration"""
await self.db.auto_response_config.update_one(
{"config_id": "default"},
{
"$set": {
"enabled": enabled,
"max_reduction_percentage": max_reduction_percentage,
"response_delay_seconds": response_delay_seconds,
"min_notice_minutes": min_notice_minutes,
"updated_at": datetime.utcnow()
}
},
upsert=True
)
# Clear cache
await self.redis.delete("dr:config:auto_response")
logger.info(f"Auto-response config updated: enabled={enabled}")
return await self.get_auto_response_config()
async def process_auto_responses(self):
"""
Process pending invitations with auto-response (called by background task)
"""
# Get auto-response configuration
auto_config = await self.get_auto_response_config()
if not auto_config.get("enabled"):
return
# Find unanswered invitations
invitations = await self.get_unanswered_invitations()
for invitation in invitations:
event_id = invitation["event_id"]
event_time = invitation["event_time"]
# Parse event_time (may be an ISO string when it comes from the Redis cache)
if isinstance(event_time, str):
# Normalise to a naive UTC datetime so it can be compared with datetime.utcnow() below
event_time = datetime.fromisoformat(event_time.replace('Z', '+00:00')).replace(tzinfo=None)
# Check if event is within auto-response criteria
time_until_event = (event_time - datetime.utcnow()).total_seconds() / 60 # minutes
min_notice = auto_config.get("min_notice_minutes", 60)
if time_until_event >= min_notice:
logger.info(f"Auto-responding to invitation {event_id}")
# Auto-accept for all devices
for device_id in invitation["iots"]:
# Check if already responded
existing = await self.db.demand_response_responses.find_one({
"event_id": event_id,
"device_id": device_id
})
if not existing:
# Get device current power
device_power = await self.get_device_power(device_id)
# Calculate committed reduction based on max_reduction_percentage
max_reduction_pct = auto_config.get("max_reduction_percentage", 20.0)
committed_reduction = device_power * (max_reduction_pct / 100) if device_power > 0 else 0.5
# Submit auto-response
try:
await self.answer_invitation(event_id, device_id, "YES", committed_reduction)
logger.info(f"Auto-accepted for device {device_id} with {committed_reduction:.2f} kW commitment")
except Exception as e:
logger.error(f"Error auto-responding for {device_id}: {e}")
else:
logger.debug(f"Invitation {event_id} too soon ({time_until_event:.0f}m < {min_notice}m)")
# ===== BACKGROUND TASK SUPPORT =====
async def check_scheduled_events(self):
"""
Check for events that need to be started (called by scheduler task)
"""
now = datetime.utcnow()
threshold = now + timedelta(minutes=1) # Start events within next minute
# Find scheduled events that should start
cursor = self.db.demand_response_events.find({
"status": "scheduled",
"start_time": {"$lte": threshold, "$gte": now}
})
async for event in cursor:
event_id = event["event_id"]
# Check if not already active
if event_id not in self.active_events:
logger.info(f"Starting scheduled DR event {event_id}")
await self.execute_event(event_id)
# ===== BASIC FLEXIBILITY CALCULATION =====
async def get_current_flexibility(self) -> Dict[str, Any]:
"""
Calculate current available flexibility from device power cache
"""
total_flexibility_kw = 0.0
devices = []
# Get all devices with instructions
cursor = self.db.device_instructions.find({})
current_hour = datetime.utcnow().hour
async for device_doc in cursor:
device_id = device_doc["device_id"]
instruction = device_doc["instructions"].get(str(current_hour), "off")
if instruction != "off":
# Get device current power from cache
device_power = self.device_power_cache.get(device_id, 0.0)
if instruction == "participation":
# Full flexibility (100%)
flexibility = device_power
elif instruction == "shifting":
# Partial flexibility (20%)
flexibility = device_power * 0.20
else:
flexibility = 0.0
if flexibility > 0:
devices.append({
"device_id": device_id,
"available_kw": round(flexibility, 2),
"instruction": instruction,
"current_power": round(device_power, 2)
})
total_flexibility_kw += flexibility
snapshot = {
"timestamp": datetime.utcnow(),
"total_flexibility_kw": round(total_flexibility_kw, 2),
"devices": devices
}
# Store snapshot
await self.db.flexibility_snapshots.insert_one(dict(snapshot))
# Cache for 5 minutes
await self.redis.setex(
"dr:flexibility:current",
300,
json.dumps(snapshot, default=str)
)
return snapshot
async def get_device_instructions(self, device_id: Optional[str] = None) -> Dict[str, Any]:
"""Get DR instructions for device(s)"""
if device_id:
doc = await self.db.device_instructions.find_one({"device_id": device_id})
return doc if doc else {"device_id": device_id, "instructions": {}}
else:
cursor = self.db.device_instructions.find({})
instructions = {}
async for doc in cursor:
instructions[doc["device_id"]] = doc["instructions"]
return instructions
async def update_device_instructions(self, device_id: str, instructions: Dict[str, str]):
"""Update hourly instructions for a device"""
await self.db.device_instructions.update_one(
{"device_id": device_id},
{
"$set": {
"instructions": instructions,
"updated_at": datetime.utcnow()
}
},
upsert=True
)
logger.info(f"Updated instructions for device {device_id}")
# ===== ANALYTICS =====
async def get_performance_analytics(self, days: int = 30) -> Dict[str, Any]:
"""Get DR performance analytics"""
start_date = datetime.utcnow() - timedelta(days=days)
# Query completed events
cursor = self.db.demand_response_events.find({
"status": "completed",
"start_time": {"$gte": start_date}
})
events = await cursor.to_list(length=None)
if not events:
return {
"period_days": days,
"total_events": 0,
"total_reduction_kwh": 0.0,
"total_target_kwh": 0.0,
"average_reduction_kwh": 0.0,
"achievement_rate": 0.0,
"average_event_duration_minutes": 59
}
total_reduction = sum(e.get("actual_reduction_kw", 0) for e in events)
total_target = sum(e.get("target_reduction_kw", 0) for e in events)
return {
"period_days": days,
"total_events": len(events),
"total_reduction_kwh": round(total_reduction, 2),
"total_target_kwh": round(total_target, 2),
"average_reduction_kwh": round(total_reduction / len(events), 2),
"achievement_rate": round((total_reduction / total_target * 100) if total_target > 0 else 0, 2),
"average_event_duration_minutes": 59
}
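
Editor's note: the service relies on device_power_cache being kept up to date by a Redis subscriber (see update_device_power_cache). A minimal sketch of such a listener follows; the channel name and message shape are assumptions.

import json
import redis.asyncio as redis

async def listen_device_power(service: DemandResponseService, redis_client: redis.Redis):
    pubsub = redis_client.pubsub()
    await pubsub.subscribe("device_power")        # assumed channel name
    async for message in pubsub.listen():
        if message["type"] != "message":
            continue
        payload = json.loads(message["data"])     # assumed shape: {"device_id": ..., "power_kw": ...}
        service.update_device_power_cache(payload["device_id"], float(payload["power_kw"]))
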

View File

@@ -0,0 +1,338 @@
"""
Pydantic models for Demand Response Service
"""
from datetime import datetime
from typing import List, Dict, Optional, Literal
from pydantic import BaseModel, Field
from enum import Enum
# Enums
class InvitationStatus(str, Enum):
"""Invitation status states"""
PENDING = "pending"
SCHEDULED = "scheduled"
ACTIVE = "active"
COMPLETED = "completed"
CANCELLED = "cancelled"
class ResponseType(str, Enum):
"""Device response types"""
WAITING = "WAITING"
YES = "YES"
NO = "NO"
class EventStatus(str, Enum):
"""DR event status states"""
SCHEDULED = "scheduled"
ACTIVE = "active"
COMPLETED = "completed"
CANCELLED = "cancelled"
class InstructionType(str, Enum):
"""Device participation instruction types"""
PARTICIPATION = "participation" # Full DR participation (100%)
SHIFTING = "shifting" # Partial participation (0-20%)
OFF = "off" # No DR participation
# Invitation Models
class EventRequest(BaseModel):
"""Request model for creating a DR event (alias for DRInvitationCreate)"""
event_time: datetime = Field(..., description="When the DR event should occur")
load_kwh: float = Field(..., description="Target load reduction in kWh", gt=0)
load_percentage: float = Field(..., description="Target reduction as percentage of total load", ge=0, le=100)
iots: List[str] = Field(..., description="List of device IDs to participate", min_items=1)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59
}
}
class DRInvitationCreate(BaseModel):
"""Request model for creating a DR invitation"""
event_time: datetime = Field(..., description="When the DR event should occur")
load_kwh: float = Field(..., description="Target load reduction in kWh", gt=0)
load_percentage: float = Field(..., description="Target reduction as percentage of total load", ge=0, le=100)
iots: List[str] = Field(..., description="List of device IDs to participate", min_items=1)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59
}
}
class DRInvitationResponse(BaseModel):
"""Response model for device answering invitation"""
event_id: str = Field(..., description="Event identifier")
iot_id: str = Field(..., description="Device identifier")
response: ResponseType = Field(..., description="Device response (YES/NO)")
committed_reduction_kw: Optional[float] = Field(None, description="Committed power reduction in kW", ge=0)
class Config:
json_schema_extra = {
"example": {
"event_id": "550e8400-e29b-41d4-a716-446655440000",
"iot_id": "sensor_1",
"response": "YES",
"committed_reduction_kw": 2.5
}
}
class DRInvitation(BaseModel):
"""Full DR invitation model"""
event_id: str = Field(..., description="Unique event identifier")
created_at: datetime = Field(..., description="Invitation creation time")
event_time: datetime = Field(..., description="Scheduled event start time")
load_kwh: float = Field(..., description="Target load reduction in kWh")
load_percentage: float = Field(..., description="Target reduction percentage")
iots: List[str] = Field(..., description="Participating device IDs")
duration_minutes: int = Field(..., description="Event duration in minutes")
response: str = Field(..., description="Overall response status")
status: str = Field(..., description="Invitation status")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"event_id": "550e8400-e29b-41d4-a716-446655440000",
"created_at": "2025-12-10T13:45:00",
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59,
"response": "WAITING",
"status": "pending"
}
}
# Event Models
class EventScheduleRequest(BaseModel):
"""Request model for scheduling a DR event"""
event_time: datetime = Field(..., description="Event start time")
iots: List[str] = Field(..., description="Participating device IDs", min_items=1)
load_reduction_kw: float = Field(..., description="Target reduction in kW", gt=0)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"iots": ["sensor_1", "sensor_2"],
"load_reduction_kw": 5.0,
"duration_minutes": 59
}
}
class PowerSample(BaseModel):
"""Individual power sample during event"""
timestamp: datetime = Field(..., description="Sample timestamp")
device_powers: Dict[str, float] = Field(..., description="Device power readings (device_id -> kW)")
interval_reduction_kwh: Optional[float] = Field(None, description="Reduction for this interval")
class DREvent(BaseModel):
"""DR event execution model"""
event_id: str = Field(..., description="Unique event identifier")
invitation_id: Optional[str] = Field(None, description="Source invitation ID if applicable")
start_time: datetime = Field(..., description="Event start time")
end_time: datetime = Field(..., description="Event end time")
status: EventStatus = Field(..., description="Event status")
participating_devices: List[str] = Field(..., description="Device IDs participating")
target_reduction_kw: float = Field(..., description="Target power reduction in kW")
actual_reduction_kw: float = Field(0.0, description="Actual achieved reduction in kWh")
power_samples: List[Dict] = Field(default_factory=list, description="Power samples during event")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class ActiveEventResponse(BaseModel):
"""Response model for active event with real-time data"""
event_id: str = Field(..., description="Event identifier")
status: EventStatus = Field(..., description="Current status")
start_time: datetime = Field(..., description="Event start time")
end_time: datetime = Field(..., description="Event end time")
participating_devices: List[str] = Field(..., description="Participating devices")
target_reduction_kw: float = Field(..., description="Target reduction")
actual_reduction_kw: float = Field(..., description="Current achieved reduction")
current_device_powers: Optional[Dict[str, float]] = Field(None, description="Current device power readings")
progress_percentage: Optional[float] = Field(None, description="Event progress (0-100%)")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class LoadReductionRequest(BaseModel):
"""Request model for executing load reduction"""
event_time: datetime = Field(..., description="Event start time")
iot: str = Field(..., description="Device ID")
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"iot": "sensor_1"
}
}
# Flexibility Models
class DeviceFlexibility(BaseModel):
"""Per-device flexibility information"""
device_id: str = Field(..., description="Device identifier")
available_kw: float = Field(..., description="Available flexibility in kW", ge=0)
instruction: str = Field(..., description="Current DR instruction")
current_power: float = Field(..., description="Current power consumption in kW", ge=0)
class FlexibilityResponse(BaseModel):
"""Response model for current flexibility"""
timestamp: datetime = Field(..., description="Calculation timestamp")
total_flexibility_kw: float = Field(..., description="Total available flexibility in kW", ge=0)
devices: List[DeviceFlexibility] = Field(..., description="Per-device breakdown")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"timestamp": "2025-12-10T13:45:00",
"total_flexibility_kw": 15.5,
"devices": [
{
"device_id": "sensor_1",
"available_kw": 3.5,
"instruction": "participation",
"current_power": 3.5
},
{
"device_id": "sensor_2",
"available_kw": 0.8,
"instruction": "shifting",
"current_power": 4.0
}
]
}
}
class DeviceInstructionUpdate(BaseModel):
"""Model for updating device instructions"""
device_id: str = Field(..., description="Device identifier")
instructions: Dict[str, str] = Field(..., description="Hourly instructions (hour -> instruction type)")
class Config:
json_schema_extra = {
"example": {
"device_id": "sensor_1",
"instructions": {
"0": "participation",
"1": "shifting",
"2": "off",
"3": "participation"
}
}
}
# Configuration Models
class AutoResponseConfig(BaseModel):
"""Auto-response configuration model"""
enabled: bool = Field(..., description="Whether auto-response is enabled")
max_reduction_percentage: float = Field(20.0, description="Maximum reduction percentage for auto-accept", ge=0, le=100)
response_delay_seconds: int = Field(300, description="Delay before auto-responding (seconds)", ge=0)
min_notice_minutes: int = Field(60, description="Minimum notice required for auto-accept (minutes)", ge=0)
class Config:
json_schema_extra = {
"example": {
"enabled": True,
"max_reduction_percentage": 20.0,
"response_delay_seconds": 300,
"min_notice_minutes": 60
}
}
# Response Models
class InvitationSendResponse(BaseModel):
"""Response for sending invitation"""
event_id: str = Field(..., description="Created event identifier")
response: str = Field(..., description="Initial response status")
message: str = Field(..., description="Status message")
class InvitationAnswerResponse(BaseModel):
"""Response for answering invitation"""
success: bool = Field(..., description="Whether answer was recorded")
message: str = Field(..., description="Status message")
class EventScheduleResponse(BaseModel):
"""Response for scheduling event"""
event_id: str = Field(..., description="Scheduled event identifier")
message: str = Field(..., description="Status message")
class PerformanceAnalytics(BaseModel):
"""Performance analytics response"""
period_days: int = Field(..., description="Analysis period in days")
total_events: int = Field(..., description="Total number of events")
total_reduction_kwh: float = Field(..., description="Total energy reduced")
total_target_kwh: float = Field(..., description="Total target reduction")
average_reduction_kwh: float = Field(..., description="Average reduction per event")
achievement_rate: float = Field(..., description="Achievement rate (%)")
average_event_duration_minutes: int = Field(..., description="Average event duration")
# Health Check Model
class HealthResponse(BaseModel):
"""Health check response model"""
service: str = Field(..., description="Service name")
status: str = Field(..., description="Service status")
timestamp: datetime = Field(..., description="Check timestamp")
version: str = Field(..., description="Service version")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"service": "demand-response-service",
"status": "healthy",
"timestamp": "2025-12-10T13:45:00",
"version": "1.0.0"
}
}
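
Editor's note: a short sketch showing how the invitation-answer model above pairs with DemandResponseService.answer_invitation; the identifiers reuse the example values from the schema, and the service call is commented out because it needs a live database connection.

answer = DRInvitationResponse(
    event_id="550e8400-e29b-41d4-a716-446655440000",
    iot_id="sensor_1",
    response=ResponseType.YES,
    committed_reduction_kw=2.5,
)
# result = await dr_service.answer_invitation(
#     answer.event_id, answer.iot_id, answer.response.value, answer.committed_reduction_kw
# )
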

View File

@@ -0,0 +1,39 @@
"""Sensors module - handles sensor management, rooms, and analytics."""
from .models import (
SensorReading,
SensorMetadata,
RoomMetrics,
SystemEvent,
Room,
RoomCreate,
RoomUpdate,
RoomInfo,
SensorType,
SensorStatus,
CO2Status,
OccupancyLevel
)
from .sensor_service import SensorService
from .room_service import RoomService
from .analytics_service import AnalyticsService
from .websocket_manager import WebSocketManager
__all__ = [
"SensorReading",
"SensorMetadata",
"RoomMetrics",
"SystemEvent",
"Room",
"RoomCreate",
"RoomUpdate",
"RoomInfo",
"SensorType",
"SensorStatus",
"CO2Status",
"OccupancyLevel",
"SensorService",
"RoomService",
"AnalyticsService",
"WebSocketManager",
]

View File

@@ -0,0 +1,377 @@
"""
Analytics service for processing sensor data and generating insights
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
logger = logging.getLogger(__name__)
class AnalyticsService:
"""Service for analytics and data processing"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def query_data(self, query_params) -> Dict[str, Any]:
"""Execute advanced data query"""
try:
# Build query
query = {}
if hasattr(query_params, 'sensor_ids') and query_params.sensor_ids:
query["sensor_id"] = {"$in": query_params.sensor_ids}
if hasattr(query_params, 'start_time') and query_params.start_time:
query.setdefault("timestamp", {})["$gte"] = query_params.start_time
if hasattr(query_params, 'end_time') and query_params.end_time:
query.setdefault("timestamp", {})["$lte"] = query_params.end_time
# Execute query
cursor = self.db.sensor_readings.find(query)
if hasattr(query_params, 'limit') and query_params.limit:
cursor = cursor.limit(query_params.limit)
if hasattr(query_params, 'offset') and query_params.offset:
cursor = cursor.skip(query_params.offset)
cursor = cursor.sort("timestamp", -1)
# Get results
results = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
results.append(reading)
# Get total count
total_count = await self.db.sensor_readings.count_documents(query)
return {
"data": results,
"total_count": total_count,
"query": query_params.__dict__ if hasattr(query_params, '__dict__') else {},
"execution_time_ms": 0 # Placeholder
}
except Exception as e:
logger.error(f"Error executing data query: {e}")
raise
async def get_analytics_summary(self, hours: int = 24) -> Dict[str, Any]:
"""Get comprehensive analytics summary"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Get basic statistics
pipeline = [
{
"$match": {
"created_at": {"$gte": start_time}
}
},
{
"$group": {
"_id": None,
"total_readings": {"$sum": 1},
"average_value": {"$avg": "$value"},
"min_value": {"$min": "$value"},
"max_value": {"$max": "$value"},
"unique_sensors": {"$addToSet": "$sensor_id"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
stats = await cursor.to_list(length=1)
base_stats = stats[0] if stats else {
"total_readings": 0,
"average_value": 0,
"min_value": 0,
"max_value": 0,
"unique_sensors": []
}
# Get room-level statistics
room_stats = await self._get_room_analytics(hours)
# Get energy trends
energy_trends = await self._get_energy_trends(hours)
return {
"period_hours": hours,
"timestamp": datetime.utcnow().isoformat(),
"total_readings": base_stats["total_readings"],
"unique_sensors": len(base_stats["unique_sensors"]),
"value_statistics": {
"average": round(base_stats["average_value"], 2) if base_stats["average_value"] else 0,
"minimum": base_stats["min_value"],
"maximum": base_stats["max_value"]
},
"room_statistics": room_stats,
"energy_trends": energy_trends
}
except Exception as e:
logger.error(f"Error getting analytics summary: {e}")
raise
async def get_energy_analytics(self, hours: int = 24, room: Optional[str] = None) -> Dict[str, Any]:
"""Get energy-specific analytics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Build query
query = {"created_at": {"$gte": start_time}}
if room:
query["room"] = room
# Energy consumption over time
pipeline = [
{"$match": query},
{
"$group": {
"_id": {
"hour": {"$hour": "$created_at"},
"date": {"$dateToString": {"format": "%Y-%m-%d", "date": "$created_at"}}
},
"total_energy": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{"$sort": {"_id.date": 1, "_id.hour": 1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
hourly_data = []
async for data in cursor:
hourly_data.append({
"hour": data["_id"]["hour"],
"date": data["_id"]["date"],
"total_energy": data["total_energy"],
"reading_count": data["reading_count"]
})
# Peak consumption analysis
peak_analysis = await self._get_peak_consumption_analysis(query)
# Energy efficiency metrics
efficiency_metrics = await self._get_efficiency_metrics(query)
return {
"period_hours": hours,
"room": room,
"timestamp": datetime.utcnow().isoformat(),
"hourly_consumption": hourly_data,
"peak_analysis": peak_analysis,
"efficiency_metrics": efficiency_metrics,
"total_consumption": sum(item["total_energy"] for item in hourly_data)
}
except Exception as e:
logger.error(f"Error getting energy analytics: {e}")
raise
async def _get_room_analytics(self, hours: int) -> Dict[str, Any]:
"""Get room-level analytics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
pipeline = [
{
"$match": {
"created_at": {"$gte": start_time},
"room": {"$ne": None}
}
},
{
"$group": {
"_id": "$room",
"total_readings": {"$sum": 1},
"total_energy": {"$sum": "$value"},
"average_energy": {"$avg": "$value"},
"unique_sensors": {"$addToSet": "$sensor_id"}
}
},
{"$sort": {"total_energy": -1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
room_data = []
async for room in cursor:
room_data.append({
"room": room["_id"],
"total_readings": room["total_readings"],
"total_energy": room["total_energy"],
"average_energy": round(room["average_energy"], 2),
"sensor_count": len(room["unique_sensors"])
})
return {
"by_room": room_data,
"total_rooms": len(room_data)
}
except Exception as e:
logger.error(f"Error getting room analytics: {e}")
return {"by_room": [], "total_rooms": 0}
async def _get_energy_trends(self, hours: int) -> Dict[str, Any]:
"""Get energy consumption trends"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Get current period data
current_query = {"created_at": {"$gte": start_time}}
current_cursor = self.db.sensor_readings.aggregate([
{"$match": current_query},
{"$group": {"_id": None, "total": {"$sum": "$value"}, "count": {"$sum": 1}}}
])
current_data = await current_cursor.to_list(length=1)
current_total = current_data[0]["total"] if current_data else 0
current_count = current_data[0]["count"] if current_data else 0
# Get previous period for comparison
previous_start = start_time - timedelta(hours=hours)
previous_query = {
"created_at": {"$gte": previous_start, "$lt": start_time}
}
previous_cursor = self.db.sensor_readings.aggregate([
{"$match": previous_query},
{"$group": {"_id": None, "total": {"$sum": "$value"}, "count": {"$sum": 1}}}
])
previous_data = await previous_cursor.to_list(length=1)
previous_total = previous_data[0]["total"] if previous_data else 0
# Calculate trend
if previous_total > 0:
trend_percentage = ((current_total - previous_total) / previous_total) * 100
else:
trend_percentage = 0
return {
"current_period": {
"total_energy": current_total,
"reading_count": current_count,
"average_per_reading": current_total / current_count if current_count > 0 else 0
},
"previous_period": {
"total_energy": previous_total
},
"trend": {
"percentage_change": round(trend_percentage, 2),
"direction": "up" if trend_percentage > 0 else "down" if trend_percentage < 0 else "stable"
}
}
except Exception as e:
logger.error(f"Error getting energy trends: {e}")
return {}
async def _get_peak_consumption_analysis(self, base_query: Dict[str, Any]) -> Dict[str, Any]:
"""Analyze peak consumption patterns"""
try:
pipeline = [
{"$match": base_query},
{
"$group": {
"_id": {"$hour": "$created_at"},
"total_consumption": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{"$sort": {"total_consumption": -1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
hourly_consumption = await cursor.to_list(length=None)
if not hourly_consumption:
return {"peak_hour": None, "peak_consumption": 0, "hourly_pattern": []}
peak_data = hourly_consumption[0]
return {
"peak_hour": peak_data["_id"],
"peak_consumption": peak_data["total_consumption"],
"hourly_pattern": [
{
"hour": item["_id"],
"consumption": item["total_consumption"],
"reading_count": item["reading_count"]
}
for item in hourly_consumption
]
}
except Exception as e:
logger.error(f"Error analyzing peak consumption: {e}")
return {"peak_hour": None, "peak_consumption": 0, "hourly_pattern": []}
async def _get_efficiency_metrics(self, base_query: Dict[str, Any]) -> Dict[str, Any]:
"""Calculate energy efficiency metrics"""
try:
# Average consumption per sensor
pipeline = [
{"$match": base_query},
{
"$group": {
"_id": "$sensor_id",
"total_consumption": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{
"$group": {
"_id": None,
"average_per_sensor": {"$avg": "$total_consumption"},
"sensor_count": {"$sum": 1},
"min_consumption": {"$min": "$total_consumption"},
"max_consumption": {"$max": "$total_consumption"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
efficiency_data = await cursor.to_list(length=1)
if not efficiency_data:
return {
"average_per_sensor": 0,
"sensor_count": 0,
"efficiency_score": 0,
"variation_coefficient": 0
}
data = efficiency_data[0]
# Calculate efficiency score (lower variation = higher efficiency).
# Note: "variation_coefficient" here is the per-sensor consumption range divided by the mean,
# not the statistical coefficient of variation.
if data["average_per_sensor"] > 0:
variation_coefficient = (data["max_consumption"] - data["min_consumption"]) / data["average_per_sensor"]
efficiency_score = max(0, 100 - (variation_coefficient * 10)) # Scale to 0-100
else:
variation_coefficient = 0
efficiency_score = 100
return {
"average_per_sensor": round(data["average_per_sensor"], 2),
"sensor_count": data["sensor_count"],
"efficiency_score": round(efficiency_score, 1),
"variation_coefficient": round(variation_coefficient, 2)
}
except Exception as e:
logger.error(f"Error calculating efficiency metrics: {e}")
return {
"average_per_sensor": 0,
"sensor_count": 0,
"efficiency_score": 0,
"variation_coefficient": 0
}
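
Editor's note: query_data only inspects attributes via hasattr, so any object exposing the expected fields can serve as query_params. A minimal sketch using SimpleNamespace follows; the field values are illustrative.

from datetime import datetime, timedelta
from types import SimpleNamespace

params = SimpleNamespace(
    sensor_ids=["sensor_1", "sensor_2"],
    start_time=datetime.utcnow() - timedelta(hours=6),
    end_time=datetime.utcnow(),
    limit=100,
    offset=0,
)
# result = await analytics_service.query_data(params)
# result["total_count"], result["data"], result["execution_time_ms"]
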

View File

@@ -0,0 +1,378 @@
"""
Models for Sensor Management Service - integrating all original dashboard functionality
"""
from pydantic import BaseModel, Field
from typing import Optional, List, Dict, Any, Literal
from datetime import datetime
from enum import Enum
class SensorType(str, Enum):
ENERGY = "energy"
CO2 = "co2"
TEMPERATURE = "temperature"
HUMIDITY = "humidity"
HVAC = "hvac"
LIGHTING = "lighting"
SECURITY = "security"
MOTION = "motion"
class SensorStatus(str, Enum):
ONLINE = "online"
OFFLINE = "offline"
ERROR = "error"
MAINTENANCE = "maintenance"
class CO2Status(str, Enum):
GOOD = "good"
MODERATE = "moderate"
POOR = "poor"
CRITICAL = "critical"
class OccupancyLevel(str, Enum):
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
# Base Models from original dashboard
class SensorReading(BaseModel):
"""Individual sensor reading model - enhanced from original"""
sensor_id: str = Field(..., description="Unique sensor identifier")
room: Optional[str] = Field(None, description="Room where sensor is located")
sensor_type: SensorType = Field(..., description="Type of sensor")
timestamp: int = Field(..., description="Unix timestamp of reading")
created_at: datetime = Field(default_factory=datetime.utcnow, description="Record creation timestamp")
# Sensor values with enhanced structure
energy: Optional[Dict[str, Any]] = Field(None, description="Energy reading with value and unit")
co2: Optional[Dict[str, Any]] = Field(None, description="CO2 reading with value and unit")
temperature: Optional[Dict[str, Any]] = Field(None, description="Temperature reading with value and unit")
humidity: Optional[Dict[str, Any]] = Field(None, description="Humidity reading with value and unit")
motion: Optional[Dict[str, Any]] = Field(None, description="Motion detection reading")
# Additional sensor types from tiocps
power: Optional[Dict[str, Any]] = Field(None, description="Power consumption reading")
voltage: Optional[Dict[str, Any]] = Field(None, description="Voltage reading")
current: Optional[Dict[str, Any]] = Field(None, description="Current reading")
generation: Optional[Dict[str, Any]] = Field(None, description="Energy generation reading")
# Metadata
metadata: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Additional sensor metadata")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat()
}
class LegacySensorReading(BaseModel):
"""Legacy sensor reading format for backward compatibility"""
sensor_id: str = Field(..., alias="sensorId")
timestamp: int
value: float
unit: str
created_at: datetime = Field(default_factory=datetime.utcnow)
class Config:
allow_population_by_field_name = True
class SensorMetadata(BaseModel):
"""Enhanced sensor metadata from original dashboard"""
sensor_id: str = Field(..., description="Unique sensor identifier")
name: str = Field(..., description="Human-readable sensor name")
sensor_type: SensorType = Field(..., description="Type of sensor")
room: Optional[str] = Field(None, description="Room assignment")
status: SensorStatus = Field(default=SensorStatus.OFFLINE, description="Current sensor status")
# Physical location and installation details
location: Optional[str] = Field(None, description="Physical location description")
floor: Optional[str] = Field(None, description="Floor level")
building: Optional[str] = Field(None, description="Building identifier")
# Technical specifications
model: Optional[str] = Field(None, description="Sensor model")
manufacturer: Optional[str] = Field(None, description="Sensor manufacturer")
firmware_version: Optional[str] = Field(None, description="Firmware version")
hardware_version: Optional[str] = Field(None, description="Hardware version")
# Network and connectivity
ip_address: Optional[str] = Field(None, description="IP address if network connected")
mac_address: Optional[str] = Field(None, description="MAC address")
connection_type: Optional[str] = Field(None, description="Connection type (wifi, ethernet, zigbee, etc.)")
# Power and maintenance
battery_level: Optional[float] = Field(None, description="Battery level percentage")
last_maintenance: Optional[datetime] = Field(None, description="Last maintenance date")
next_maintenance: Optional[datetime] = Field(None, description="Next scheduled maintenance")
# Operational settings
sampling_rate: Optional[int] = Field(None, description="Data sampling rate in seconds")
calibration_date: Optional[datetime] = Field(None, description="Last calibration date")
# Capabilities from tiocps integration
monitoring_capabilities: List[str] = Field(default_factory=list, description="List of monitoring capabilities")
control_capabilities: List[str] = Field(default_factory=list, description="List of control capabilities")
demand_response_enabled: bool = Field(default=False, description="Demand response participation")
# Timestamps
installed_at: Optional[datetime] = Field(None, description="Installation timestamp")
last_seen: Optional[datetime] = Field(None, description="Last communication timestamp")
created_at: datetime = Field(default_factory=datetime.utcnow, description="Record creation timestamp")
updated_at: datetime = Field(default_factory=datetime.utcnow, description="Record update timestamp")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class RoomMetrics(BaseModel):
"""Enhanced room metrics from original dashboard"""
room: str = Field(..., description="Room identifier")
timestamp: int = Field(..., description="Metrics calculation timestamp")
created_at: datetime = Field(default_factory=datetime.utcnow, description="Record creation timestamp")
# Sensor inventory
sensor_count: int = Field(0, description="Total number of sensors in room")
active_sensors: List[str] = Field(default_factory=list, description="List of active sensor IDs")
sensor_types: List[SensorType] = Field(default_factory=list, description="Types of sensors present")
# Energy metrics (enhanced from tiocps)
energy: Optional[Dict[str, Any]] = Field(None, description="Energy consumption metrics")
power: Optional[Dict[str, Any]] = Field(None, description="Power consumption metrics")
generation: Optional[Dict[str, Any]] = Field(None, description="Energy generation metrics")
flexibility: Optional[Dict[str, Any]] = Field(None, description="Energy flexibility metrics")
# Environmental metrics
co2: Optional[Dict[str, Any]] = Field(None, description="CO2 level metrics")
temperature: Optional[Dict[str, Any]] = Field(None, description="Temperature metrics")
humidity: Optional[Dict[str, Any]] = Field(None, description="Humidity metrics")
# Occupancy and usage
occupancy_estimate: OccupancyLevel = Field(default=OccupancyLevel.LOW, description="Estimated occupancy level")
motion_detected: bool = Field(default=False, description="Recent motion detection status")
# Time-based metrics
last_activity: Optional[datetime] = Field(None, description="Last detected activity timestamp")
daily_usage_hours: Optional[float] = Field(None, description="Estimated daily usage in hours")
# Economic metrics from tiocps
energy_cost: Optional[float] = Field(None, description="Estimated energy cost")
savings_potential: Optional[float] = Field(None, description="Potential savings from optimization")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class Room(BaseModel):
"""Room definition model"""
name: str = Field(..., description="Room name/identifier")
display_name: Optional[str] = Field(None, description="Human-readable room name")
floor: Optional[str] = Field(None, description="Floor level")
building: Optional[str] = Field(None, description="Building identifier")
area_m2: Optional[float] = Field(None, description="Room area in square meters")
capacity: Optional[int] = Field(None, description="Room capacity (people)")
room_type: Optional[str] = Field(None, description="Room type (office, meeting, etc.)")
# Configuration
target_temperature: Optional[float] = Field(None, description="Target temperature")
target_co2: Optional[float] = Field(None, description="Target CO2 level")
operating_hours: Optional[Dict[str, Any]] = Field(None, description="Operating hours schedule")
# Status
active: bool = Field(default=True, description="Whether room is active")
created_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow)
class Config:
json_encoders = {
datetime: lambda v: v.isoformat()
}
class SystemEvent(BaseModel):
"""Enhanced system events from original dashboard"""
event_id: str = Field(..., description="Unique event identifier")
event_type: str = Field(..., description="Type of event")
severity: Literal["info", "warning", "error", "critical"] = Field(..., description="Event severity")
timestamp: int = Field(..., description="Event timestamp")
created_at: datetime = Field(default_factory=datetime.utcnow, description="Record creation timestamp")
# Event details
title: str = Field(..., description="Event title")
description: str = Field(..., description="Event description")
source: Optional[str] = Field(None, description="Event source (sensor_id, system component, etc.)")
# Context
sensor_id: Optional[str] = Field(None, description="Related sensor ID")
room: Optional[str] = Field(None, description="Related room")
# Event data
data: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Additional event data")
# Status tracking
acknowledged: bool = Field(default=False, description="Whether event has been acknowledged")
resolved: bool = Field(default=False, description="Whether event has been resolved")
acknowledged_by: Optional[str] = Field(None, description="Who acknowledged the event")
resolved_by: Optional[str] = Field(None, description="Who resolved the event")
acknowledged_at: Optional[datetime] = Field(None, description="Acknowledgment timestamp")
resolved_at: Optional[datetime] = Field(None, description="Resolution timestamp")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class DataQuery(BaseModel):
"""Enhanced data query parameters from original dashboard"""
sensor_ids: Optional[List[str]] = Field(None, description="Filter by sensor IDs")
rooms: Optional[List[str]] = Field(None, description="Filter by rooms")
sensor_types: Optional[List[SensorType]] = Field(None, description="Filter by sensor types")
# Time range
start_time: Optional[int] = Field(None, description="Start timestamp (Unix)")
end_time: Optional[int] = Field(None, description="End timestamp (Unix)")
# Aggregation
aggregate: Optional[str] = Field(None, description="Aggregation method (avg, sum, min, max)")
interval: Optional[str] = Field(None, description="Aggregation interval (1m, 5m, 1h, 1d)")
# Pagination
limit: int = Field(default=100, description="Maximum number of records to return")
offset: int = Field(default=0, description="Number of records to skip")
# Sorting
sort_by: str = Field(default="timestamp", description="Field to sort by")
sort_order: Literal["asc", "desc"] = Field(default="desc", description="Sort order")
# Additional filters from tiocps
energy_threshold: Optional[float] = Field(None, description="Filter by energy threshold")
co2_threshold: Optional[float] = Field(None, description="Filter by CO2 threshold")
include_metadata: bool = Field(default=False, description="Include sensor metadata in response")
class DataResponse(BaseModel):
"""Enhanced response model for data queries"""
data: List[Dict[str, Any]] = Field(default_factory=list, description="Query results")
total_count: int = Field(0, description="Total number of matching records")
query: DataQuery = Field(..., description="Original query parameters")
execution_time_ms: float = Field(..., description="Query execution time in milliseconds")
# Additional metadata
aggregation_applied: bool = Field(default=False, description="Whether data was aggregated")
cache_hit: bool = Field(default=False, description="Whether result was served from cache")
class AnalyticsSummary(BaseModel):
"""Comprehensive analytics summary"""
period_hours: int
start_time: datetime
end_time: datetime
# Sensor analytics
total_sensors: int
active_sensors: int
sensor_types_summary: Dict[str, int]
# Room analytics
total_rooms: int
active_rooms: int
room_occupancy_summary: Dict[str, int]
# Energy analytics
total_energy_consumption: float
total_energy_generation: float
net_energy_consumption: float
energy_efficiency: float
# Environmental analytics
average_co2: float
average_temperature: float
average_humidity: float
# System health
system_events_count: int
critical_events_count: int
sensor_errors_count: int
class Config:
json_encoders = {
datetime: lambda v: v.isoformat()
}
# Room Management Models
# NOTE: the Room definition below shadows the earlier Room model in this module;
# only this later definition is effective at import time.
class Room(BaseModel):
"""Room model for database storage and API responses"""
name: str = Field(..., description="Unique room name")
description: Optional[str] = Field(None, description="Room description")
floor: Optional[str] = Field(None, description="Floor designation")
building: Optional[str] = Field(None, description="Building name")
area: Optional[float] = Field(None, description="Room area in square meters")
capacity: Optional[int] = Field(None, description="Maximum occupancy")
room_type: Optional[str] = Field(None, description="Room type (office, meeting, storage, etc.)")
# Metadata
created_at: datetime = Field(default_factory=datetime.utcnow, description="Room creation timestamp")
updated_at: datetime = Field(default_factory=datetime.utcnow, description="Room update timestamp")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class RoomCreate(BaseModel):
"""Model for creating new rooms"""
name: str = Field(..., description="Unique room name", min_length=1, max_length=100)
description: Optional[str] = Field(None, description="Room description", max_length=500)
floor: Optional[str] = Field(None, description="Floor designation", max_length=50)
building: Optional[str] = Field(None, description="Building name", max_length=100)
area: Optional[float] = Field(None, description="Room area in square meters", gt=0)
capacity: Optional[int] = Field(None, description="Maximum occupancy", gt=0)
room_type: Optional[str] = Field(None, description="Room type", max_length=50)
class RoomUpdate(BaseModel):
"""Model for updating existing rooms"""
description: Optional[str] = Field(None, description="Room description", max_length=500)
floor: Optional[str] = Field(None, description="Floor designation", max_length=50)
building: Optional[str] = Field(None, description="Building name", max_length=100)
area: Optional[float] = Field(None, description="Room area in square meters", gt=0)
capacity: Optional[int] = Field(None, description="Maximum occupancy", gt=0)
room_type: Optional[str] = Field(None, description="Room type", max_length=50)
class RoomInfo(BaseModel):
"""Comprehensive room information for API responses"""
name: str = Field(..., description="Room name")
description: Optional[str] = Field(None, description="Room description")
floor: Optional[str] = Field(None, description="Floor designation")
building: Optional[str] = Field(None, description="Building name")
area: Optional[float] = Field(None, description="Room area in square meters")
capacity: Optional[int] = Field(None, description="Maximum occupancy")
room_type: Optional[str] = Field(None, description="Room type")
# Runtime information
sensor_count: int = Field(0, description="Number of sensors in room")
active_sensors: int = Field(0, description="Number of active sensors")
last_updated: Optional[datetime] = Field(None, description="Last metrics update")
# Timestamps
created_at: datetime = Field(..., description="Room creation timestamp")
updated_at: datetime = Field(..., description="Room update timestamp")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class HealthResponse(BaseModel):
"""Health check response"""
service: str
status: str
timestamp: datetime
version: str
# Additional service-specific health metrics
total_sensors: Optional[int] = None
active_sensors: Optional[int] = None
total_rooms: Optional[int] = None
websocket_connections: Optional[int] = None
class Config:
json_encoders = {
datetime: lambda v: v.isoformat()
}
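A minimal usage sketch for the models above; identifiers and values are illustrative, and it assumes the classes are imported from this module:

from time import time

# Enhanced reading using the per-type dict structure defined by SensorReading
reading = SensorReading(
    sensor_id="sensor-001",
    room="Conference Room A",
    sensor_type=SensorType.ENERGY,
    timestamp=int(time()),
    energy={"value": 1.25, "unit": "kWh"},
)

# Query the last hour of energy readings, newest first (DataQuery defaults cover the rest)
query = DataQuery(
    sensor_types=[SensorType.ENERGY],
    start_time=int(time()) - 3600,
    limit=50,
)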

View File

@@ -0,0 +1,467 @@
"""
Room service for managing rooms and room-level metrics
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
logger = logging.getLogger(__name__)
class RoomService:
"""Service for managing rooms and room-level analytics"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def get_all_room_names(self) -> List[str]:
"""Get a simple list of all room names for dropdowns/selections"""
try:
# Get rooms from the rooms collection
room_cursor = self.db.rooms.find({}, {"name": 1})
room_names = set()
async for room in room_cursor:
room_names.add(room["name"])
# Also get rooms that exist only in sensor data (legacy support)
sensor_cursor = self.db.sensors.find(
{"room": {"$ne": None, "$exists": True}},
{"room": 1}
)
async for sensor in sensor_cursor:
if sensor.get("room"):
room_names.add(sensor["room"])
# Convert to sorted list
return sorted(list(room_names))
except Exception as e:
logger.error(f"Error getting room names: {e}")
raise
async def initialize_default_rooms(self) -> None:
"""Initialize default rooms if none exist"""
try:
# Check if any rooms exist
room_count = await self.db.rooms.count_documents({})
if room_count == 0:
# Create default rooms
default_rooms = [
{"name": "Conference Room A", "description": "Main conference room", "room_type": "meeting"},
{"name": "Conference Room B", "description": "Secondary conference room", "room_type": "meeting"},
{"name": "Office Floor 1", "description": "First floor office space", "room_type": "office"},
{"name": "Office Floor 2", "description": "Second floor office space", "room_type": "office"},
{"name": "Kitchen", "description": "Employee kitchen and break room", "room_type": "common"},
{"name": "Lobby", "description": "Main entrance and reception", "room_type": "common"},
{"name": "Server Room", "description": "IT equipment room", "room_type": "technical"},
{"name": "Storage Room", "description": "General storage", "room_type": "storage"},
{"name": "Meeting Room 1", "description": "Small meeting room", "room_type": "meeting"},
{"name": "Meeting Room 2", "description": "Small meeting room", "room_type": "meeting"}
]
for room_data in default_rooms:
room_doc = {
**room_data,
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
await self.db.rooms.insert_one(room_doc)
logger.info(f"Initialized {len(default_rooms)} default rooms")
except Exception as e:
logger.error(f"Error initializing default rooms: {e}")
raise
async def get_rooms(self) -> List[Dict[str, Any]]:
"""Get all rooms with sensor counts and metrics"""
try:
# Get unique rooms from sensors
pipeline = [
{"$group": {"_id": "$room", "sensor_count": {"$sum": 1}}},
{"$match": {"_id": {"$ne": None}}}
]
cursor = self.db.sensors.aggregate(pipeline)
rooms = []
async for room_data in cursor:
room_name = room_data["_id"]
# Get latest room metrics
latest_metrics = await self._get_latest_room_metrics(room_name)
room_info = {
"name": room_name,
"sensor_count": room_data["sensor_count"],
"latest_metrics": latest_metrics,
"last_updated": latest_metrics.get("timestamp") if latest_metrics else None
}
rooms.append(room_info)
return rooms
except Exception as e:
logger.error(f"Error getting rooms: {e}")
raise
async def create_room(self, room_data: Dict[str, Any]) -> Dict[str, Any]:
"""Create a new room"""
try:
room_doc = {
"name": room_data.get("name"),
"description": room_data.get("description", ""),
"floor": room_data.get("floor"),
"building": room_data.get("building"),
"area": room_data.get("area"),
"capacity": room_data.get("capacity"),
"room_type": room_data.get("room_type"),
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
# Validate required fields
if not room_doc["name"] or not room_doc["name"].strip():
raise ValueError("Room name is required")
# Check if room already exists
existing = await self.db.rooms.find_one({"name": room_doc["name"]})
if existing:
raise ValueError(f"Room {room_doc['name']} already exists")
result = await self.db.rooms.insert_one(room_doc)
return {
"id": str(result.inserted_id),
"name": room_doc["name"],
"created_at": room_doc["created_at"]
}
except Exception as e:
logger.error(f"Error creating room: {e}")
raise
async def update_room(self, room_name: str, room_data: Dict[str, Any]) -> Dict[str, Any]:
"""Update an existing room"""
try:
# Check if room exists
existing = await self.db.rooms.find_one({"name": room_name})
if not existing:
raise ValueError(f"Room {room_name} not found")
# Prepare update document
update_doc = {
"updated_at": datetime.utcnow()
}
# Update only provided fields
for field in ["description", "floor", "building", "area", "capacity", "room_type"]:
if field in room_data and room_data[field] is not None:
update_doc[field] = room_data[field]
# Perform update
result = await self.db.rooms.update_one(
{"name": room_name},
{"$set": update_doc}
)
if result.modified_count == 0:
logger.warning(f"No changes made to room {room_name}")
return {
"name": room_name,
"updated_at": update_doc["updated_at"],
"modified": result.modified_count > 0
}
except Exception as e:
logger.error(f"Error updating room: {e}")
raise
async def delete_room(self, room_name: str) -> Dict[str, Any]:
"""Delete a room and optionally reassign sensors"""
try:
# Check if room exists
existing = await self.db.rooms.find_one({"name": room_name})
# Check for sensors in this room
sensors_in_room = await self.db.sensors.find({"room": room_name}).to_list(None)
if sensors_in_room:
# Update sensors to have null room (don't delete sensors)
await self.db.sensors.update_many(
{"room": room_name},
{"$unset": {"room": ""}}
)
# Delete room from rooms collection if it exists
room_deleted = False
if existing:
result = await self.db.rooms.delete_one({"name": room_name})
room_deleted = result.deleted_count > 0
# Delete room metrics
metrics_result = await self.db.room_metrics.delete_many({"room": room_name})
return {
"room": room_name,
"room_deleted": room_deleted,
"sensors_updated": len(sensors_in_room),
"metrics_deleted": metrics_result.deleted_count
}
except Exception as e:
logger.error(f"Error deleting room: {e}")
raise
async def get_room_details(self, room_name: str) -> Optional[Dict[str, Any]]:
"""Get detailed room information"""
try:
# Get room info
room = await self.db.rooms.find_one({"name": room_name})
if not room:
# Create basic room info from sensor data
sensors = await self.db.sensors.find({"room": room_name}).to_list(None)
if not sensors:
return None
room = {
"name": room_name,
"description": f"Room with {len(sensors)} sensors",
"sensor_count": len(sensors)
}
else:
room["_id"] = str(room["_id"])
# Get sensor count
sensor_count = await self.db.sensors.count_documents({"room": room_name})
room["sensor_count"] = sensor_count
# Get sensors in this room
cursor = self.db.sensors.find({"room": room_name})
sensors = []
async for sensor in cursor:
sensor["_id"] = str(sensor["_id"])
sensors.append(sensor)
room["sensors"] = sensors
# Get recent room metrics
room["recent_metrics"] = await self._get_recent_room_metrics(room_name, hours=24)
return room
except Exception as e:
logger.error(f"Error getting room details: {e}")
raise
async def get_room_data(self, room_name: str, start_time: Optional[int] = None,
end_time: Optional[int] = None, limit: int = 100) -> Dict[str, Any]:
"""Get historical data for a room"""
try:
# Get room metrics
room_query = {"room": room_name}
if start_time or end_time:
room_query["timestamp"] = {}
if start_time:
room_query["timestamp"]["$gte"] = start_time
if end_time:
room_query["timestamp"]["$lte"] = end_time
room_metrics_cursor = self.db.room_metrics.find(room_query).sort("timestamp", -1).limit(limit)
room_metrics = []
async for metric in room_metrics_cursor:
metric["_id"] = str(metric["_id"])
room_metrics.append(metric)
# Get sensor readings for this room
sensor_query = {"room": room_name}
if start_time or end_time:
sensor_query["timestamp"] = {}
if start_time:
sensor_query["timestamp"]["$gte"] = start_time
if end_time:
sensor_query["timestamp"]["$lte"] = end_time
sensor_readings_cursor = self.db.sensor_readings.find(sensor_query).sort("timestamp", -1).limit(limit)
sensor_readings = []
async for reading in sensor_readings_cursor:
reading["_id"] = str(reading["_id"])
sensor_readings.append(reading)
return {
"room_metrics": room_metrics,
"sensor_readings": sensor_readings
}
except Exception as e:
logger.error(f"Error getting room data: {e}")
raise
async def update_room_metrics(self, sensor_data):
"""Update room-level metrics when sensor data is received"""
try:
if not sensor_data.room:
return
# Calculate room-level aggregates
room_metrics = await self._calculate_room_metrics(sensor_data.room)
if room_metrics:
# Store room metrics
metrics_doc = {
"room": sensor_data.room,
"timestamp": sensor_data.timestamp,
"total_energy": room_metrics.get("total_energy", 0),
"average_temperature": room_metrics.get("avg_temperature"),
"co2_level": room_metrics.get("co2_level"),
"occupancy_estimate": room_metrics.get("occupancy_estimate"),
"sensor_count": room_metrics.get("sensor_count", 0),
"created_at": datetime.utcnow()
}
await self.db.room_metrics.insert_one(metrics_doc)
# Cache latest metrics
if self.redis:
cache_key = f"room:{sensor_data.room}:latest_metrics"
await self.redis.setex(cache_key, 3600, json.dumps(metrics_doc, default=str))
except Exception as e:
logger.error(f"Error updating room metrics: {e}")
async def aggregate_all_room_metrics(self):
"""Aggregate metrics for all rooms"""
try:
# Get all unique rooms
pipeline = [{"$group": {"_id": "$room"}}]
cursor = self.db.sensors.aggregate(pipeline)
async for room_data in cursor:
room_name = room_data["_id"]
if room_name:
await self._calculate_room_metrics(room_name)
except Exception as e:
logger.error(f"Error aggregating room metrics: {e}")
async def _get_latest_room_metrics(self, room_name: str) -> Optional[Dict[str, Any]]:
"""Get latest room metrics"""
try:
# Try Redis cache first
if self.redis:
cache_key = f"room:{room_name}:latest_metrics"
cached = await self.redis.get(cache_key)
if cached:
return json.loads(cached)
# Fall back to database
latest = await self.db.room_metrics.find_one(
{"room": room_name},
sort=[("timestamp", -1)]
)
if latest:
latest["_id"] = str(latest["_id"])
return latest
return None
except Exception as e:
logger.error(f"Error getting latest room metrics: {e}")
return None
async def _get_recent_room_metrics(self, room_name: str, hours: int = 24) -> List[Dict[str, Any]]:
"""Get recent room metrics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
cursor = self.db.room_metrics.find({
"room": room_name,
"created_at": {"$gte": start_time}
}).sort("timestamp", -1)
metrics = []
async for metric in cursor:
metric["_id"] = str(metric["_id"])
metrics.append(metric)
return metrics
except Exception as e:
logger.error(f"Error getting recent room metrics: {e}")
return []
async def _calculate_room_metrics(self, room_name: str) -> Dict[str, Any]:
"""Calculate aggregated metrics for a room"""
try:
# Get recent sensor readings (last 5 minutes)
five_minutes_ago = datetime.utcnow() - timedelta(minutes=5)
pipeline = [
{
"$match": {
"room": room_name,
"created_at": {"$gte": five_minutes_ago}
}
},
{
"$group": {
"_id": "$sensor_id",
"latest_value": {"$last": "$value"},
"sensor_type": {"$last": "$sensor_type"} if "sensor_type" in ["$first", "$last"] else {"$first": "energy"},
"unit": {"$last": "$unit"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
total_energy = 0
temperatures = []
co2_levels = []
sensor_count = 0
async for sensor_data in cursor:
sensor_count += 1
value = sensor_data.get("latest_value", 0)
sensor_type = sensor_data.get("sensor_type", "energy")
if sensor_type == "energy" or "energy" in str(sensor_data.get("unit", "")).lower():
total_energy += value
elif sensor_type == "temperature":
temperatures.append(value)
elif sensor_type == "co2":
co2_levels.append(value)
metrics = {
"total_energy": total_energy,
"sensor_count": sensor_count,
"avg_temperature": sum(temperatures) / len(temperatures) if temperatures else None,
"co2_level": sum(co2_levels) / len(co2_levels) if co2_levels else None,
"occupancy_estimate": self._estimate_occupancy(sensor_count, total_energy)
}
return metrics
except Exception as e:
logger.error(f"Error calculating room metrics: {e}")
return {}
def _estimate_occupancy(self, sensor_count: int, total_energy: float) -> Optional[str]:
"""Estimate occupancy level based on energy consumption"""
if total_energy == 0:
return "vacant"
elif total_energy < sensor_count * 50: # Low threshold
return "low"
elif total_energy < sensor_count * 150: # Medium threshold
return "medium"
else:
return "high"

View File

@@ -0,0 +1,475 @@
"""Sensors module API routes."""
import logging
from datetime import datetime
from fastapi import APIRouter, HTTPException, Depends, WebSocket, WebSocketDisconnect, Query, BackgroundTasks
from typing import Optional
from .models import (
SensorReading, SensorMetadata, RoomCreate, RoomUpdate, DataQuery, DataResponse,
SensorType, SensorStatus, HealthResponse
)
from .sensor_service import SensorService
from .room_service import RoomService
from .analytics_service import AnalyticsService
from .websocket_manager import WebSocketManager
from core.dependencies import get_sensors_db, get_redis
logger = logging.getLogger(__name__)
# Create router
router = APIRouter()
# WebSocket manager (shared across all route handlers)
websocket_manager = WebSocketManager()
# Dependency functions
async def get_sensor_service(db=Depends(get_sensors_db), redis=Depends(get_redis)):
return SensorService(db, redis)
async def get_room_service(db=Depends(get_sensors_db), redis=Depends(get_redis)):
return RoomService(db, redis)
async def get_analytics_service(db=Depends(get_sensors_db), redis=Depends(get_redis)):
return AnalyticsService(db, redis)
# Health check
@router.get("/health", response_model=HealthResponse)
async def health_check(db=Depends(get_sensors_db)):
"""Health check endpoint for sensors module"""
try:
await db.command("ping")
return HealthResponse(
service="sensors-module",
status="healthy",
timestamp=datetime.utcnow(),
version="1.0.0"
)
except Exception as e:
logger.error(f"Health check failed: {e}")
raise HTTPException(status_code=503, detail="Service Unavailable")
# WebSocket endpoint for real-time data
@router.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
"""WebSocket endpoint for real-time sensor data"""
await websocket_manager.connect(websocket)
try:
while True:
await websocket.receive_text()
except WebSocketDisconnect:
await websocket_manager.disconnect(websocket)
# Sensor Management Routes
@router.get("/sensors/get")
async def get_sensors(
room: Optional[str] = Query(None, description="Filter by room"),
sensor_type: Optional[SensorType] = Query(None, description="Filter by sensor type"),
status: Optional[SensorStatus] = Query(None, description="Filter by status"),
service: SensorService = Depends(get_sensor_service)
):
"""Get all sensors with optional filtering"""
try:
sensors = await service.get_sensors(room=room, sensor_type=sensor_type, status=status)
return {
"sensors": sensors,
"count": len(sensors),
"filters": {
"room": room,
"sensor_type": sensor_type.value if sensor_type else None,
"status": status.value if status else None
}
}
except Exception as e:
logger.error(f"Error getting sensors: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/sensors/{sensor_id}")
async def get_sensor(sensor_id: str, service: SensorService = Depends(get_sensor_service)):
"""Get detailed sensor information"""
try:
sensor = await service.get_sensor_details(sensor_id)
if not sensor:
raise HTTPException(status_code=404, detail="Sensor not found")
return sensor
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting sensor {sensor_id}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/sensors/{sensor_id}/data")
async def get_sensor_data(
sensor_id: str,
start_time: Optional[int] = Query(None, description="Start timestamp (Unix)"),
end_time: Optional[int] = Query(None, description="End timestamp (Unix)"),
limit: int = Query(100, description="Maximum records to return"),
offset: int = Query(0, description="Records to skip"),
service: SensorService = Depends(get_sensor_service)
):
"""Get historical data for a specific sensor"""
try:
data = await service.get_sensor_data(
sensor_id=sensor_id,
start_time=start_time,
end_time=end_time,
limit=limit,
offset=offset
)
return DataResponse(
data=data["readings"],
total_count=data["total_count"],
query=DataQuery(
sensor_ids=[sensor_id],
start_time=start_time,
end_time=end_time,
limit=limit,
offset=offset
),
execution_time_ms=data.get("execution_time_ms", 0)
)
except Exception as e:
logger.error(f"Error getting sensor data for {sensor_id}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/sensors")
async def create_sensor(
sensor_data: SensorMetadata,
service: SensorService = Depends(get_sensor_service)
):
"""Register a new sensor"""
try:
result = await service.create_sensor(sensor_data)
return {
"message": "Sensor created successfully",
"sensor_id": sensor_data.sensor_id,
"created_at": result.get("created_at")
}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Error creating sensor: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.put("/sensors/{sensor_id}")
async def update_sensor(
sensor_id: str,
update_data: dict,
service: SensorService = Depends(get_sensor_service)
):
"""Update sensor metadata"""
try:
result = await service.update_sensor(sensor_id, update_data)
if not result:
raise HTTPException(status_code=404, detail="Sensor not found")
return {
"message": "Sensor updated successfully",
"sensor_id": sensor_id,
"updated_at": datetime.utcnow().isoformat()
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error updating sensor {sensor_id}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.delete("/sensors/{sensor_id}")
async def delete_sensor(
sensor_id: str,
service: SensorService = Depends(get_sensor_service)
):
"""Delete a sensor and all its data"""
try:
result = await service.delete_sensor(sensor_id)
return {
"message": "Sensor deleted successfully",
"sensor_id": sensor_id,
"readings_deleted": result.get("readings_deleted", 0)
}
except Exception as e:
logger.error(f"Error deleting sensor {sensor_id}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
# Room Management Routes
@router.get("/rooms/names")
async def get_room_names(service: RoomService = Depends(get_room_service)):
"""Get simple list of room names for dropdowns"""
try:
room_names = await service.get_all_room_names()
return {
"rooms": room_names,
"count": len(room_names)
}
except Exception as e:
logger.error(f"Error getting room names: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/rooms")
async def get_rooms(service: RoomService = Depends(get_room_service)):
"""Get all rooms with sensor counts and metrics"""
try:
rooms = await service.get_rooms()
return {
"rooms": rooms,
"count": len(rooms)
}
except Exception as e:
logger.error(f"Error getting rooms: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/rooms")
async def create_room(
room_data: RoomCreate,
service: RoomService = Depends(get_room_service)
):
"""Create a new room"""
try:
result = await service.create_room(room_data.dict())
return {
"message": "Room created successfully",
"room": result["name"],
"created_at": result["created_at"]
}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Error creating room: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.put("/rooms/{room_name}")
async def update_room(
room_name: str,
room_data: RoomUpdate,
service: RoomService = Depends(get_room_service)
):
"""Update an existing room"""
try:
result = await service.update_room(room_name, room_data.dict(exclude_unset=True))
return {
"message": "Room updated successfully",
"room": result["name"],
"updated_at": result["updated_at"],
"modified": result["modified"]
}
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Error updating room {room_name}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.delete("/rooms/{room_name}")
async def delete_room(room_name: str, service: RoomService = Depends(get_room_service)):
"""Delete a room"""
try:
result = await service.delete_room(room_name)
return {
"message": "Room deleted successfully",
**result
}
except Exception as e:
logger.error(f"Error deleting room {room_name}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/rooms/{room_name}")
async def get_room(room_name: str, service: RoomService = Depends(get_room_service)):
"""Get detailed room information"""
try:
room = await service.get_room_details(room_name)
if not room:
raise HTTPException(status_code=404, detail="Room not found")
return room
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting room {room_name}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/rooms/{room_name}/data")
async def get_room_data(
room_name: str,
start_time: Optional[int] = Query(None, description="Start timestamp (Unix)"),
end_time: Optional[int] = Query(None, description="End timestamp (Unix)"),
limit: int = Query(100, description="Maximum records to return"),
service: RoomService = Depends(get_room_service)
):
"""Get historical data for a specific room"""
try:
data = await service.get_room_data(
room_name=room_name,
start_time=start_time,
end_time=end_time,
limit=limit
)
return {
"room": room_name,
"room_metrics": data.get("room_metrics", []),
"sensor_readings": data.get("sensor_readings", []),
"period": {
"start_time": start_time,
"end_time": end_time
}
}
except Exception as e:
logger.error(f"Error getting room data for {room_name}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
# Analytics Routes
@router.post("/data/query")
async def query_data(
query_params: DataQuery,
service: AnalyticsService = Depends(get_analytics_service)
):
"""Advanced data querying with multiple filters"""
try:
result = await service.query_data(query_params)
return result
except Exception as e:
logger.error(f"Error executing data query: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/analytics/summary")
async def get_analytics_summary(
hours: int = Query(24, description="Hours of data to analyze"),
service: AnalyticsService = Depends(get_analytics_service)
):
"""Get comprehensive analytics summary"""
try:
analytics = await service.get_analytics_summary(hours)
return analytics
except Exception as e:
logger.error(f"Error getting analytics summary: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/analytics/energy")
async def get_energy_analytics(
hours: int = Query(24),
room: Optional[str] = Query(None),
service: AnalyticsService = Depends(get_analytics_service)
):
"""Get energy-specific analytics"""
try:
analytics = await service.get_energy_analytics(hours, room)
return analytics
except Exception as e:
logger.error(f"Error getting energy analytics: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
# Data Export
@router.get("/export")
async def export_data(
start_time: int = Query(..., description="Start timestamp (Unix)"),
end_time: int = Query(..., description="End timestamp (Unix)"),
sensor_ids: Optional[str] = Query(None, description="Comma-separated sensor IDs"),
format: str = Query("json", description="Export format (json, csv)"),
service: SensorService = Depends(get_sensor_service)
):
"""Export sensor data"""
try:
export_data_result = await service.export_data(
start_time=start_time,
end_time=end_time,
sensor_ids=sensor_ids,
format=format
)
return export_data_result
except Exception as e:
logger.error(f"Error exporting data: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
# System Events
@router.get("/events")
async def get_events(
severity: Optional[str] = Query(None, description="Filter by severity"),
event_type: Optional[str] = Query(None, description="Filter by event type"),
hours: int = Query(24, description="Hours of events to retrieve"),
limit: int = Query(50, description="Maximum events to return"),
service: SensorService = Depends(get_sensor_service)
):
"""Get system events and alerts"""
try:
events = await service.get_events(
severity=severity,
event_type=event_type,
hours=hours,
limit=limit
)
return {
"events": events,
"count": len(events),
"period_hours": hours
}
except Exception as e:
logger.error(f"Error getting events: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
# Real-time data ingestion endpoint
@router.post("/data/ingest")
async def ingest_sensor_data(
sensor_data: SensorReading,
background_tasks: BackgroundTasks,
service: SensorService = Depends(get_sensor_service),
room_service: RoomService = Depends(get_room_service)
):
"""Ingest real-time sensor data"""
try:
result = await service.ingest_sensor_data(sensor_data)
# Schedule background tasks
if sensor_data.room:
background_tasks.add_task(_update_room_metrics, room_service, sensor_data)
background_tasks.add_task(_broadcast_sensor_data, sensor_data)
return {
"message": "Sensor data ingested successfully",
"sensor_id": sensor_data.sensor_id,
"timestamp": sensor_data.timestamp
}
except Exception as e:
logger.error(f"Error ingesting sensor data: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
# Background task helper functions
async def _update_room_metrics(room_service: RoomService, sensor_data: SensorReading):
"""Update room-level metrics when sensor data is received"""
try:
await room_service.update_room_metrics(sensor_data)
except Exception as e:
logger.error(f"Error updating room metrics: {e}")
async def _broadcast_sensor_data(sensor_data: SensorReading):
"""Broadcast sensor data to WebSocket clients"""
try:
await websocket_manager.broadcast_sensor_data(sensor_data)
except Exception as e:
logger.error(f"Error broadcasting sensor data: {e}")

View File

@@ -0,0 +1,251 @@
"""
Sensor service business logic
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
logger = logging.getLogger(__name__)
class SensorService:
"""Service for managing sensors and sensor data"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def get_sensors(self, room: Optional[str] = None, sensor_type: Optional[str] = None, status: Optional[str] = None) -> List[Dict[str, Any]]:
"""Get sensors with optional filtering"""
try:
query = {}
if room:
query["room"] = room
if sensor_type:
query["sensor_type"] = sensor_type
if status:
query["status"] = status
cursor = self.db.sensors.find(query)
sensors = []
async for sensor in cursor:
sensor["_id"] = str(sensor["_id"])
sensors.append(sensor)
return sensors
except Exception as e:
logger.error(f"Error getting sensors: {e}")
raise
async def get_sensor_details(self, sensor_id: str) -> Optional[Dict[str, Any]]:
"""Get detailed sensor information"""
try:
sensor = await self.db.sensors.find_one({"sensor_id": sensor_id})
if sensor:
sensor["_id"] = str(sensor["_id"])
# Get recent readings
recent_readings = await self.get_sensor_data(sensor_id, limit=10)
sensor["recent_readings"] = recent_readings.get("readings", [])
return sensor
return None
except Exception as e:
logger.error(f"Error getting sensor details: {e}")
raise
async def get_sensor_data(self, sensor_id: str, start_time: Optional[int] = None,
end_time: Optional[int] = None, limit: int = 100, offset: int = 0) -> Dict[str, Any]:
"""Get historical sensor data"""
try:
query = {"sensor_id": sensor_id}
if start_time or end_time:
query["timestamp"] = {}
if start_time:
query["timestamp"]["$gte"] = start_time
if end_time:
query["timestamp"]["$lte"] = end_time
# Get total count
total_count = await self.db.sensor_readings.count_documents(query)
# Get readings
cursor = self.db.sensor_readings.find(query).sort("timestamp", -1).skip(offset).limit(limit)
readings = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
readings.append(reading)
return {
"readings": readings,
"total_count": total_count,
"execution_time_ms": 0 # Placeholder
}
except Exception as e:
logger.error(f"Error getting sensor data: {e}")
raise
async def create_sensor(self, sensor_data) -> Dict[str, Any]:
"""Create a new sensor"""
try:
# Check if sensor already exists
existing = await self.db.sensors.find_one({"sensor_id": sensor_data.sensor_id})
if existing:
raise ValueError(f"Sensor {sensor_data.sensor_id} already exists")
# Create sensor document
sensor_doc = {
"sensor_id": sensor_data.sensor_id,
"name": sensor_data.name,
"sensor_type": sensor_data.sensor_type.value if hasattr(sensor_data.sensor_type, 'value') else str(sensor_data.sensor_type),
"room": sensor_data.room,
"location": sensor_data.location if hasattr(sensor_data, 'location') else None,
"status": "active",
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
result = await self.db.sensors.insert_one(sensor_doc)
return {"id": str(result.inserted_id), "created_at": sensor_doc["created_at"]}
except Exception as e:
logger.error(f"Error creating sensor: {e}")
raise
async def update_sensor(self, sensor_id: str, update_data: Dict[str, Any]) -> bool:
"""Update sensor metadata"""
try:
update_data["updated_at"] = datetime.utcnow()
result = await self.db.sensors.update_one(
{"sensor_id": sensor_id},
{"$set": update_data}
)
return result.modified_count > 0
except Exception as e:
logger.error(f"Error updating sensor: {e}")
raise
async def delete_sensor(self, sensor_id: str) -> Dict[str, Any]:
"""Delete a sensor and its data"""
try:
# Delete readings
readings_result = await self.db.sensor_readings.delete_many({"sensor_id": sensor_id})
# Delete sensor
await self.db.sensors.delete_one({"sensor_id": sensor_id})
return {"readings_deleted": readings_result.deleted_count}
except Exception as e:
logger.error(f"Error deleting sensor: {e}")
raise
async def ingest_sensor_data(self, sensor_data) -> Dict[str, Any]:
"""Ingest real-time sensor data"""
try:
# Create reading document
reading_doc = {
"sensor_id": sensor_data.sensor_id,
"timestamp": sensor_data.timestamp,
"value": sensor_data.value,
"unit": sensor_data.unit if hasattr(sensor_data, 'unit') else None,
"room": sensor_data.room if hasattr(sensor_data, 'room') else None,
"created_at": datetime.utcnow()
}
# Store in database
await self.db.sensor_readings.insert_one(reading_doc)
# Cache recent value in Redis
if self.redis:
cache_key = f"sensor:{sensor_data.sensor_id}:latest"
await self.redis.setex(cache_key, 3600, json.dumps(reading_doc, default=str))
return {"success": True}
except Exception as e:
logger.error(f"Error ingesting sensor data: {e}")
raise
async def export_data(self, start_time: int, end_time: int, sensor_ids: Optional[str] = None,
format: str = "json") -> Dict[str, Any]:
"""Export sensor data"""
try:
query = {
"timestamp": {"$gte": start_time, "$lte": end_time}
}
if sensor_ids:
sensor_list = [s.strip() for s in sensor_ids.split(",")]
query["sensor_id"] = {"$in": sensor_list}
cursor = self.db.sensor_readings.find(query).sort("timestamp", 1)
readings = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
readings.append(reading)
return {
"format": format,
"data": readings,
"total_records": len(readings),
"period": {"start": start_time, "end": end_time}
}
except Exception as e:
logger.error(f"Error exporting data: {e}")
raise
async def get_events(self, severity: Optional[str] = None, event_type: Optional[str] = None,
hours: int = 24, limit: int = 50) -> List[Dict[str, Any]]:
"""Get system events"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# System events store created_at as a datetime; the "timestamp" field is a Unix int
query = {"created_at": {"$gte": start_time}}
if severity:
query["severity"] = severity
if event_type:
query["event_type"] = event_type
cursor = self.db.system_events.find(query).sort("timestamp", -1).limit(limit)
events = []
async for event in cursor:
event["_id"] = str(event["_id"])
events.append(event)
return events
except Exception as e:
logger.error(f"Error getting events: {e}")
return []
async def cleanup_old_data(self, cutoff_date: datetime):
"""Clean up old sensor data"""
try:
result = await self.db.sensor_readings.delete_many({
"created_at": {"$lt": cutoff_date}
})
logger.info(f"Cleaned up {result.deleted_count} old sensor readings")
except Exception as e:
logger.error(f"Error cleaning up old data: {e}")
raise
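A minimal sketch of wiring SensorService to a Motor client and exporting a day of readings; the connection string and database name are assumptions:

import asyncio
from time import time
from motor.motor_asyncio import AsyncIOMotorClient

async def main():
    db = AsyncIOMotorClient("mongodb://localhost:27017")["sensors"]
    service = SensorService(db, redis_client=None)  # Redis cache is optional here
    end = int(time())
    start = end - 24 * 3600
    result = await service.export_data(start_time=start, end_time=end, sensor_ids="sensor-001,sensor-002")
    print(result["total_records"], "records exported")

asyncio.run(main())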

View File

@@ -0,0 +1,288 @@
"""
WebSocket manager for real-time sensor data broadcasting
"""
import asyncio
import json
from typing import List, Set, Dict, Any, Optional
from fastapi import WebSocket, WebSocketDisconnect
import logging
from .models import SensorReading
logger = logging.getLogger(__name__)
class WebSocketManager:
"""Manages WebSocket connections for real-time data broadcasting"""
def __init__(self):
self.active_connections: List[WebSocket] = []
self.room_subscriptions: Dict[str, Set[WebSocket]] = {}
self.sensor_subscriptions: Dict[str, Set[WebSocket]] = {}
self.connection_metadata: Dict[WebSocket, Dict[str, Any]] = {}
async def connect(self, websocket: WebSocket, room: Optional[str] = None, sensor_id: Optional[str] = None):
"""Accept a WebSocket connection and handle subscriptions"""
await websocket.accept()
self.active_connections.append(websocket)
# Store connection metadata
self.connection_metadata[websocket] = {
"connected_at": asyncio.get_event_loop().time(),
"room": room,
"sensor_id": sensor_id,
"message_count": 0
}
# Handle room subscription
if room:
if room not in self.room_subscriptions:
self.room_subscriptions[room] = set()
self.room_subscriptions[room].add(websocket)
# Handle sensor subscription
if sensor_id:
if sensor_id not in self.sensor_subscriptions:
self.sensor_subscriptions[sensor_id] = set()
self.sensor_subscriptions[sensor_id].add(websocket)
logger.info(f"WebSocket client connected. Total connections: {len(self.active_connections)}")
# Send initial connection confirmation
await self.send_to_connection(websocket, {
"type": "connection_established",
"timestamp": asyncio.get_event_loop().time(),
"subscriptions": {
"room": room,
"sensor_id": sensor_id
},
"total_connections": len(self.active_connections)
})
async def disconnect(self, websocket: WebSocket):
"""Remove a WebSocket connection and clean up subscriptions"""
if websocket in self.active_connections:
self.active_connections.remove(websocket)
# Clean up room subscriptions
for room_connections in self.room_subscriptions.values():
room_connections.discard(websocket)
# Clean up sensor subscriptions
for sensor_connections in self.sensor_subscriptions.values():
sensor_connections.discard(websocket)
# Clean up metadata
self.connection_metadata.pop(websocket, None)
logger.info(f"WebSocket client disconnected. Total connections: {len(self.active_connections)}")
async def send_to_connection(self, websocket: WebSocket, data: Dict[str, Any]):
"""Send data to a specific WebSocket connection"""
try:
await websocket.send_text(json.dumps(data))
# Update message count
if websocket in self.connection_metadata:
self.connection_metadata[websocket]["message_count"] += 1
except Exception as e:
logger.error(f"Error sending data to WebSocket: {e}")
await self.disconnect(websocket)
async def broadcast_to_all(self, data: Dict[str, Any]):
"""Broadcast data to all connected WebSocket clients"""
if not self.active_connections:
return
message = json.dumps(data)
disconnected = []
for websocket in self.active_connections:
try:
await websocket.send_text(message)
# Update message count
if websocket in self.connection_metadata:
self.connection_metadata[websocket]["message_count"] += 1
except Exception as e:
logger.error(f"Error broadcasting to WebSocket: {e}")
disconnected.append(websocket)
# Clean up disconnected connections
for websocket in disconnected:
await self.disconnect(websocket)
async def broadcast_to_room(self, room: str, data: Dict[str, Any]):
"""Broadcast data to all clients subscribed to a specific room"""
if room not in self.room_subscriptions:
return
room_connections = self.room_subscriptions[room].copy()
if not room_connections:
return
message = json.dumps(data)
disconnected = []
for websocket in room_connections:
try:
await websocket.send_text(message)
# Update message count
if websocket in self.connection_metadata:
self.connection_metadata[websocket]["message_count"] += 1
except Exception as e:
logger.error(f"Error broadcasting to room {room}: {e}")
disconnected.append(websocket)
# Clean up disconnected connections
for websocket in disconnected:
await self.disconnect(websocket)
async def broadcast_to_sensor(self, sensor_id: str, data: Dict[str, Any]):
"""Broadcast data to all clients subscribed to a specific sensor"""
if sensor_id not in self.sensor_subscriptions:
return
sensor_connections = self.sensor_subscriptions[sensor_id].copy()
if not sensor_connections:
return
message = json.dumps(data)
disconnected = []
for websocket in sensor_connections:
try:
await websocket.send_text(message)
# Update message count
if websocket in self.connection_metadata:
self.connection_metadata[websocket]["message_count"] += 1
except Exception as e:
logger.error(f"Error broadcasting to sensor {sensor_id}: {e}")
disconnected.append(websocket)
# Clean up disconnected connections
for websocket in disconnected:
await self.disconnect(websocket)
async def broadcast_sensor_data(self, sensor_reading: SensorReading):
"""Broadcast sensor reading data to appropriate subscribers"""
data = {
"type": "sensor_data",
"sensor_id": sensor_reading.sensor_id,
"room": sensor_reading.room,
"sensor_type": sensor_reading.sensor_type.value,
"timestamp": sensor_reading.timestamp,
"data": {
"energy": sensor_reading.energy,
"co2": sensor_reading.co2,
"temperature": sensor_reading.temperature,
"humidity": sensor_reading.humidity,
"motion": sensor_reading.motion,
"power": sensor_reading.power,
"voltage": sensor_reading.voltage,
"current": sensor_reading.current,
"generation": sensor_reading.generation
},
"metadata": sensor_reading.metadata
}
# Broadcast to all connections
await self.broadcast_to_all(data)
# Broadcast to room-specific subscribers
if sensor_reading.room:
await self.broadcast_to_room(sensor_reading.room, data)
# Broadcast to sensor-specific subscribers
await self.broadcast_to_sensor(sensor_reading.sensor_id, data)
async def broadcast_room_metrics(self, room: str, metrics: Dict[str, Any]):
"""Broadcast room-level metrics to subscribers"""
data = {
"type": "room_metrics",
"room": room,
"timestamp": asyncio.get_event_loop().time(),
"metrics": metrics
}
# Broadcast to all connections
await self.broadcast_to_all(data)
# Broadcast to room-specific subscribers
await self.broadcast_to_room(room, data)
async def broadcast_system_event(self, event: Dict[str, Any]):
"""Broadcast system events to all subscribers"""
data = {
"type": "system_event",
"timestamp": asyncio.get_event_loop().time(),
"event": event
}
await self.broadcast_to_all(data)
async def broadcast_raw_data(self, raw_data: str):
"""Broadcast raw data from Redis or other sources"""
try:
# Try to parse as JSON
data = json.loads(raw_data)
# Add type if not present
if "type" not in data:
data["type"] = "raw_data"
await self.broadcast_to_all(data)
except json.JSONDecodeError:
# Send as raw string if not JSON
data = {
"type": "raw_data",
"data": raw_data,
"timestamp": asyncio.get_event_loop().time()
}
await self.broadcast_to_all(data)
def get_connection_stats(self) -> Dict[str, Any]:
"""Get statistics about current WebSocket connections"""
total_connections = len(self.active_connections)
room_stats = {room: len(connections) for room, connections in self.room_subscriptions.items()}
sensor_stats = {sensor: len(connections) for sensor, connections in self.sensor_subscriptions.items()}
# Calculate message statistics
total_messages = sum(
metadata.get("message_count", 0)
for metadata in self.connection_metadata.values()
)
return {
"total_connections": total_connections,
"room_subscriptions": room_stats,
"sensor_subscriptions": sensor_stats,
"total_messages_sent": total_messages,
"active_rooms": len([room for room, connections in self.room_subscriptions.items() if connections]),
"active_sensors": len([sensor for sensor, connections in self.sensor_subscriptions.items() if connections])
}
async def send_connection_stats(self):
"""Send connection statistics to all clients"""
stats = self.get_connection_stats()
data = {
"type": "connection_stats",
"timestamp": asyncio.get_event_loop().time(),
"stats": stats
}
await self.broadcast_to_all(data)
async def ping_all_connections(self):
"""Send ping to all connections to keep them alive"""
data = {
"type": "ping",
"timestamp": asyncio.get_event_loop().time()
}
await self.broadcast_to_all(data)
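A hedged client sketch for consuming the broadcasts above, assuming the sensors router (and its /ws endpoint) is mounted under /sensors on localhost:8000 and the third-party websockets package is available:

import asyncio
import json
import websockets

async def listen():
    async with websockets.connect("ws://localhost:8000/sensors/ws") as ws:
        async for message in ws:
            event = json.loads(message)
            # Broadcasts carry a "type" field: connection_established, sensor_data, room_metrics, ...
            print(event.get("type"), event)

asyncio.run(listen())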