Compare commits

...

10 Commits

Author SHA1 Message Date
rafaeldpsilva
7547e6b229 demand response 2025-12-10 15:26:34 +00:00
rafaeldpsilva
b54999f7a7 Add rb option to deploy script and update sensors endpoint path 2025-10-08 14:05:16 +01:00
rafaeldpsilva
da1fb2a058 Add Token model and update endpoints to use it
Refactor token validation and saving endpoints to accept a Token model
instead of a raw string. Set default values for token generation request
fields. Update TokenService cleanup to use datetime.now().
2025-10-08 14:05:11 +01:00
rafaeldpsilva
38fb3e6e96 Add dynamic energy data collections per building
Energy data is now stored in separate collections for each
SLGs/Community/Building directory. Collections are created and indexed
dynamically. Metadata and API queries have been updated to support this
structure.
2025-10-08 14:05:00 +01:00
rafaeldpsilva
8ff20935fc Refactor auth middleware and update service config usage
- Remove redundant comments and docstrings in auth_middleware.py
- Use TOKEN_SERVICE_URL env variable for AuthMiddleware
- Fix sensor service proxy path in main.py
- Use datetime.now() for request_stats start_time
2025-10-08 14:04:50 +01:00
rafaeldpsilva
863e0161b0 Implement dynamic energy data collections per building
- Store energy data in separate MongoDB collections for each SLGs/Community/Building directory
- Update FTP monitor and database manager to track directory paths and select appropriate collections
- Add collection stats to database statistics API
- Update sensor and token services for improved API consistency
- Add 'rb' (rebuild and restart) option to deploy.sh script
2025-10-08 14:03:57 +01:00
rafaeldpsilva
ba99b09e08 Remove comments and verbose logging from services 2025-09-23 15:05:59 +01:00
rafaeldpsilva
78151feb86 Documentation files 2025-09-22 15:25:52 +01:00
rafaeldpsilva
02a0d54e14 Add room and analytics services with CRUD API endpoints
- Implement RoomService for room management and metrics
- Add AnalyticsService for sensor data analytics and trends
- Extend models with Room, RoomCreate, RoomUpdate, RoomInfo
- Add room CRUD endpoints to FastAPI app
- Add database connection logic for MongoDB and Redis
- Refactor sensor service logic into SensorService class
2025-09-22 15:13:14 +01:00
rafaeldpsilva
2008ea0e70 Refactor service registry and load balancer integration
- Pass service registry to load balancer for dependency injection
- Remove dynamic imports of service registry in load balancer
- Update service registration and health check logic
- Enable token-service in docker-compose and service config
- Add room names and rooms proxy endpoints
- Improve logging for proxy requests and health checks
- Update deploy script project name to sa4cps
- Add test script for coroutine fix
- Minor code cleanup and formatting
2025-09-22 15:13:06 +01:00
34 changed files with 7318 additions and 602 deletions

559
bootstrap_sensors.py Normal file
View File

@@ -0,0 +1,559 @@
#!/usr/bin/env python3
"""
Bootstrap script to populate the database with sensors and room configurations.
This script creates a realistic smart building sensor setup for testing and development.
"""
import asyncio
import json
import sys
import logging
from datetime import datetime
from typing import List, Dict, Any
import aiohttp
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Base URLs
API_BASE_URL = "http://localhost:8000"
SENSOR_SERVICE_URL = "http://localhost:8007"
# Example sensor definitions (demo data; not used by the bootstrap flow below)
DEMO_SENSORS = [
    {
        'id': 'sensor_1',
        'name': 'Energy Monitor 1',
        'type': 'energy',
        'room': 'Conference Room A',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['energy'],
            'actions': [],
        },
        'metadata': {
            'location': 'Wall mounted',
            'model': 'EM-100',
            'firmware': '2.1.0',
        },
    },
    {
        'id': 'sensor_2',
        'name': 'HVAC Controller 1',
        'type': 'hvac',
        'room': 'Conference Room A',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['temperature', 'co2'],
            'actions': [
                {
                    'id': 'temp_adjust',
                    'name': 'Adjust Temperature',
                    'type': 'adjust',
                    'icon': '🌡️',
                    'parameters': {'min': 18, 'max': 28, 'step': 0.5},
                },
                {
                    'id': 'fan_speed',
                    'name': 'Fan Speed',
                    'type': 'adjust',
                    'icon': '💨',
                    'parameters': {'min': 0, 'max': 5, 'step': 1},
                },
                {'id': 'power_toggle', 'name': 'Power', 'type': 'toggle', 'icon': ''},
            ],
        },
        'metadata': {
            'location': 'Ceiling mounted',
            'model': 'HVAC-200',
            'firmware': '3.2.1',
        },
    },
    {
        'id': 'sensor_3',
        'name': 'Smart Light Controller',
        'type': 'lighting',
        'room': 'Office Floor 1',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['energy'],
            'actions': [
                {
                    'id': 'brightness',
                    'name': 'Brightness',
                    'type': 'adjust',
                    'icon': '💡',
                    'parameters': {'min': 0, 'max': 100, 'step': 5},
                },
                {'id': 'power_toggle', 'name': 'Power', 'type': 'toggle', 'icon': ''},
                {
                    'id': 'scene',
                    'name': 'Scene',
                    'type': 'adjust',
                    'icon': '🎨',
                    'parameters': {'options': ['Work', 'Meeting', 'Presentation', 'Relax']},
                },
            ],
        },
        'metadata': {
            'location': 'Ceiling grid',
            'model': 'SL-300',
            'firmware': '1.5.2',
        },
    },
    {
        'id': 'sensor_4',
        'name': 'CO2 Sensor',
        'type': 'co2',
        'room': 'Meeting Room 1',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['co2', 'temperature', 'humidity'],
            'actions': [{'id': 'calibrate', 'name': 'Calibrate', 'type': 'trigger', 'icon': '⚙️'}],
        },
        'metadata': {
            'location': 'Wall mounted',
            'model': 'CO2-150',
            'firmware': '2.0.3',
            'battery': 85,
        },
    },
    {
        'id': 'sensor_5',
        'name': 'Security Camera',
        'type': 'security',
        'room': 'Lobby',
        'status': 'online',
        'lastSeen': datetime.now().timestamp(),
        'capabilities': {
            'monitoring': ['motion'],
            'actions': [
                {'id': 'record_toggle', 'name': 'Recording', 'type': 'toggle', 'icon': '📹'},
                {'id': 'ptz_control', 'name': 'Pan/Tilt/Zoom', 'type': 'trigger', 'icon': '🎥'},
                {'id': 'night_mode', 'name': 'Night Mode', 'type': 'toggle', 'icon': '🌙'},
            ],
        },
        'metadata': {
            'location': 'Corner ceiling',
            'model': 'SEC-400',
            'firmware': '4.1.0',
        },
    },
]
# Bootstrap data configuration
BOOTSTRAP_SENSORS = [
# Living Room Sensors
{
"sensor_id": "lr_energy_001",
"name": "Living Room Main Energy Monitor",
"sensor_type": "energy",
"room": "living_room",
"location": "Main electrical panel - Living Room circuit",
"floor": "1",
"manufacturer": "SmartMeter Co",
"model": "SM-E300",
"status": "online"
},
{
"sensor_id": "lr_co2_001",
"name": "Living Room Air Quality Monitor",
"sensor_type": "co2",
"room": "living_room",
"location": "Wall mounted near seating area",
"floor": "1",
"manufacturer": "AirSense",
"model": "AS-CO2-Pro",
"status": "online"
},
{
"sensor_id": "lr_temp_001",
"name": "Living Room Temperature Sensor",
"sensor_type": "temperature",
"room": "living_room",
"location": "Central wall position",
"floor": "1",
"manufacturer": "TempTech",
"model": "TT-T200",
"status": "online"
},
# Kitchen Sensors
{
"sensor_id": "kt_energy_001",
"name": "Kitchen Appliances Energy Monitor",
"sensor_type": "energy",
"room": "kitchen",
"location": "Kitchen appliance circuit",
"floor": "1",
"manufacturer": "SmartMeter Co",
"model": "SM-E300",
"status": "online"
},
{
"sensor_id": "kt_humidity_001",
"name": "Kitchen Humidity Sensor",
"sensor_type": "humidity",
"room": "kitchen",
"location": "Above sink area",
"floor": "1",
"manufacturer": "HumidSense",
"model": "HS-H150",
"status": "online"
},
{
"sensor_id": "kt_temp_001",
"name": "Kitchen Temperature Monitor",
"sensor_type": "temperature",
"room": "kitchen",
"location": "Central kitchen position",
"floor": "1",
"manufacturer": "TempTech",
"model": "TT-T200",
"status": "online"
},
# Bedroom Sensors
{
"sensor_id": "br_energy_001",
"name": "Bedroom Energy Monitor",
"sensor_type": "energy",
"room": "bedroom",
"location": "Bedroom electrical circuit",
"floor": "1",
"manufacturer": "SmartMeter Co",
"model": "SM-E200",
"status": "online"
},
{
"sensor_id": "br_co2_001",
"name": "Bedroom Air Quality Monitor",
"sensor_type": "co2",
"room": "bedroom",
"location": "Bedside wall mount",
"floor": "1",
"manufacturer": "AirSense",
"model": "AS-CO2-Basic",
"status": "online"
},
{
"sensor_id": "br_temp_001",
"name": "Bedroom Temperature Sensor",
"sensor_type": "temperature",
"room": "bedroom",
"location": "Opposite wall from bed",
"floor": "1",
"manufacturer": "TempTech",
"model": "TT-T100",
"status": "online"
},
# Office Sensors
{
"sensor_id": "of_energy_001",
"name": "Office Equipment Energy Monitor",
"sensor_type": "energy",
"room": "office",
"location": "Office equipment circuit",
"floor": "1",
"manufacturer": "SmartMeter Co",
"model": "SM-E300",
"status": "online"
},
{
"sensor_id": "of_co2_001",
"name": "Office Air Quality Monitor",
"sensor_type": "co2",
"room": "office",
"location": "Desk area wall mount",
"floor": "1",
"manufacturer": "AirSense",
"model": "AS-CO2-Pro",
"status": "online"
},
{
"sensor_id": "of_motion_001",
"name": "Office Motion Detector",
"sensor_type": "motion",
"room": "office",
"location": "Ceiling mounted - center",
"floor": "1",
"manufacturer": "MotionTech",
"model": "MT-M100",
"status": "online"
},
# Bathroom Sensors
{
"sensor_id": "bt_humidity_001",
"name": "Bathroom Humidity Monitor",
"sensor_type": "humidity",
"room": "bathroom",
"location": "Ceiling mounted",
"floor": "1",
"manufacturer": "HumidSense",
"model": "HS-H200",
"status": "online"
},
{
"sensor_id": "bt_temp_001",
"name": "Bathroom Temperature Sensor",
"sensor_type": "temperature",
"room": "bathroom",
"location": "Wall mounted near mirror",
"floor": "1",
"manufacturer": "TempTech",
"model": "TT-T150",
"status": "online"
},
# Garage Sensors
{
"sensor_id": "gr_energy_001",
"name": "Garage Energy Monitor",
"sensor_type": "energy",
"room": "garage",
"location": "Garage main circuit",
"floor": "0",
"manufacturer": "SmartMeter Co",
"model": "SM-E100",
"status": "online"
},
{
"sensor_id": "gr_motion_001",
"name": "Garage Motion Detector",
"sensor_type": "motion",
"room": "garage",
"location": "Ceiling mounted - entrance",
"floor": "0",
"manufacturer": "MotionTech",
"model": "MT-M200",
"status": "online"
}
]
BOOTSTRAP_ROOMS = [
{
"name": "living_room",
"display_name": "Living Room",
"description": "Main living area with entertainment center",
"floor": "1",
"area_sqm": 35.5,
"room_type": "living"
},
{
"name": "kitchen",
"display_name": "Kitchen",
"description": "Main kitchen with appliances",
"floor": "1",
"area_sqm": 15.2,
"room_type": "kitchen"
},
{
"name": "bedroom",
"display_name": "Master Bedroom",
"description": "Primary bedroom",
"floor": "1",
"area_sqm": 20.1,
"room_type": "bedroom"
},
{
"name": "office",
"display_name": "Home Office",
"description": "Work from home office space",
"floor": "1",
"area_sqm": 12.8,
"room_type": "office"
},
{
"name": "bathroom",
"display_name": "Main Bathroom",
"description": "Primary bathroom",
"floor": "1",
"area_sqm": 8.5,
"room_type": "bathroom"
},
{
"name": "garage",
"display_name": "Garage",
"description": "Two-car garage with workshop area",
"floor": "0",
"area_sqm": 42.0,
"room_type": "garage"
}
]
async def generate_auth_token() -> str:
"""Generate authentication token for API calls"""
token_payload = {
"name": "bootstrap_user",
"list_of_resources": ["sensors", "rooms", "analytics", "health", "data", "export", "events"],
"data_aggregation": True,
"time_aggregation": True,
"embargo": 0,
"exp_hours": 24
}
async with aiohttp.ClientSession() as session:
async with session.post(
f"{API_BASE_URL}/api/v1/tokens/generate",
json=token_payload,
headers={"Content-Type": "application/json"}
) as response:
if response.status == 200:
data = await response.json()
logger.info("Successfully generated authentication token")
return data["token"]
else:
error_text = await response.text()
raise Exception(f"Failed to generate token: {response.status} - {error_text}")
async def create_rooms(auth_token: str) -> bool:
"""Create rooms in the database"""
logger.info("Creating bootstrap rooms...")
headers = {
"Authorization": f"Bearer {auth_token}",
"Content-Type": "application/json"
}
success_count = 0
async with aiohttp.ClientSession() as session:
for room in BOOTSTRAP_ROOMS:
try:
async with session.post(
f"{SENSOR_SERVICE_URL}/rooms",
json=room,
headers=headers
) as response:
if response.status in [200, 201]:
success_count += 1
logger.info(f"✓ Created room: {room['display_name']}")
elif response.status == 400:
# Room might already exist
error_data = await response.json()
if "already exists" in error_data.get("detail", ""):
logger.info(f"○ Room already exists: {room['display_name']}")
success_count += 1
else:
logger.error(f"✗ Failed to create room {room['name']}: {error_data}")
else:
error_text = await response.text()
logger.error(f"✗ Failed to create room {room['name']}: {response.status} - {error_text}")
except Exception as e:
logger.error(f"✗ Exception creating room {room['name']}: {e}")
logger.info(f"Rooms created: {success_count}/{len(BOOTSTRAP_ROOMS)}")
return success_count > 0
async def create_sensors(auth_token: str) -> bool:
"""Create sensors in the database"""
logger.info("Creating bootstrap sensors...")
headers = {
"Authorization": f"Bearer {auth_token}",
"Content-Type": "application/json"
}
success_count = 0
async with aiohttp.ClientSession() as session:
for sensor in BOOTSTRAP_SENSORS:
try:
async with session.post(
f"{SENSOR_SERVICE_URL}/sensors",
json=sensor,
headers=headers
) as response:
if response.status in [200, 201]:
success_count += 1
logger.info(f"✓ Created sensor: {sensor['name']} ({sensor['sensor_id']})")
elif response.status == 400:
# Sensor might already exist
error_data = await response.json()
if "already exists" in error_data.get("detail", ""):
logger.info(f"○ Sensor already exists: {sensor['sensor_id']}")
success_count += 1
else:
logger.error(f"✗ Failed to create sensor {sensor['sensor_id']}: {error_data}")
else:
error_text = await response.text()
logger.error(f"✗ Failed to create sensor {sensor['sensor_id']}: {response.status} - {error_text}")
except Exception as e:
logger.error(f"✗ Exception creating sensor {sensor['sensor_id']}: {e}")
logger.info(f"Sensors created: {success_count}/{len(BOOTSTRAP_SENSORS)}")
return success_count > 0
async def verify_bootstrap() -> None:
"""Verify that sensors were created successfully"""
logger.info("Verifying bootstrap results...")
try:
# Check sensors directly on sensor service (no auth required for health checks)
async with aiohttp.ClientSession() as session:
async with session.get(f"{SENSOR_SERVICE_URL}/sensors/get") as response:
if response.status == 200:
data = await response.json()
logger.info(f"✓ Total sensors in database: {data['count']}")
# Group by room
rooms = {}
for sensor in data.get('sensors', []):
room = sensor.get('room', 'unknown')
if room not in rooms:
rooms[room] = []
rooms[room].append(sensor['sensor_id'])
for room, sensors in rooms.items():
logger.info(f" - {room}: {len(sensors)} sensors")
else:
logger.error(f"Failed to verify sensors: {response.status}")
async with session.get(f"{SENSOR_SERVICE_URL}/rooms") as response:
if response.status == 200:
data = await response.json()
logger.info(f"✓ Total rooms in database: {data.get('count', 0)}")
else:
logger.error(f"Failed to verify rooms: {response.status}")
except Exception as e:
logger.error(f"✗ Exception during verification: {e}")
async def main():
"""Main bootstrap function"""
logger.info("=== Starting Sensor Bootstrap Process ===")
try:
# Step 1: Generate authentication token
logger.info("Step 1: Generating authentication token...")
auth_token = await generate_auth_token()
# Step 2: Create rooms
logger.info("Step 2: Creating rooms...")
rooms_success = await create_rooms(auth_token)
# Step 3: Create sensors
logger.info("Step 3: Creating sensors...")
sensors_success = await create_sensors(auth_token)
# Step 4: Verify results
logger.info("Step 4: Verifying bootstrap...")
await verify_bootstrap()
if rooms_success and sensors_success:
logger.info("=== Bootstrap Complete! ===")
logger.info("You can now run the data simulator to generate real-time sensor data.")
logger.info("Run: python data_simulator_enhanced.py")
return True
else:
logger.error("=== Bootstrap Failed ===")
return False
except Exception as e:
logger.error(f"Bootstrap failed with error: {e}")
return False
if __name__ == "__main__":
# Run the bootstrap
success = asyncio.run(main())
sys.exit(0 if success else 1)

354
data_simulator_enhanced.py Normal file
View File

@@ -0,0 +1,354 @@
#!/usr/bin/env python3
"""
Enhanced Data Simulator for Bootstrap Sensors
Generates realistic real-time sensor data for the bootstrap sensors created by bootstrap_sensors.py
"""
import redis
import time
import random
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any
import math
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Redis configuration
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_CHANNEL = "energy_data"
# Bootstrap sensor IDs (must match bootstrap_sensors.py)
BOOTSTRAP_SENSORS = {
# Living Room Sensors
"lr_energy_001": {"type": "energy", "room": "living_room", "base_value": 2.5, "variance": 1.2},
"lr_co2_001": {"type": "co2", "room": "living_room", "base_value": 420, "variance": 80},
"lr_temp_001": {"type": "temperature", "room": "living_room", "base_value": 22.0, "variance": 2.0},
# Kitchen Sensors
"kt_energy_001": {"type": "energy", "room": "kitchen", "base_value": 3.8, "variance": 2.1},
"kt_humidity_001": {"type": "humidity", "room": "kitchen", "base_value": 45.0, "variance": 15.0},
"kt_temp_001": {"type": "temperature", "room": "kitchen", "base_value": 24.0, "variance": 3.0},
# Bedroom Sensors
"br_energy_001": {"type": "energy", "room": "bedroom", "base_value": 1.2, "variance": 0.8},
"br_co2_001": {"type": "co2", "room": "bedroom", "base_value": 480, "variance": 120},
"br_temp_001": {"type": "temperature", "room": "bedroom", "base_value": 20.5, "variance": 1.5},
# Office Sensors
"of_energy_001": {"type": "energy", "room": "office", "base_value": 2.1, "variance": 1.5},
"of_co2_001": {"type": "co2", "room": "office", "base_value": 450, "variance": 100},
"of_motion_001": {"type": "motion", "room": "office", "base_value": 0, "variance": 1},
# Bathroom Sensors
"bt_humidity_001": {"type": "humidity", "room": "bathroom", "base_value": 65.0, "variance": 20.0},
"bt_temp_001": {"type": "temperature", "room": "bathroom", "base_value": 23.0, "variance": 2.5},
# Garage Sensors
"gr_energy_001": {"type": "energy", "room": "garage", "base_value": 0.8, "variance": 0.5},
"gr_motion_001": {"type": "motion", "room": "garage", "base_value": 0, "variance": 1}
}
class SensorDataGenerator:
"""Generates realistic sensor data with time-based patterns"""
def __init__(self):
self.start_time = time.time()
self.motion_states = {} # Track motion sensor states
# Initialize motion states
for sensor_id, config in BOOTSTRAP_SENSORS.items():
if config["type"] == "motion":
self.motion_states[sensor_id] = {"active": False, "last_change": time.time()}
def get_time_factor(self) -> float:
"""Get time-based multiplier for realistic daily patterns"""
current_hour = datetime.now().hour
# Energy usage patterns (higher during day, lower at night)
if 6 <= current_hour <= 22: # Daytime
return 1.0 + 0.3 * math.sin((current_hour - 6) * math.pi / 16)
else: # Nighttime
return 0.3 + 0.2 * random.random()
def get_occupancy_factor(self, room: str) -> float:
"""Get occupancy-based multiplier for different rooms"""
current_hour = datetime.now().hour
occupancy_patterns = {
"living_room": 1.2 if 18 <= current_hour <= 23 else 0.8,
"kitchen": 1.5 if 7 <= current_hour <= 9 or 17 <= current_hour <= 20 else 0.6,
"bedroom": 1.3 if 22 <= current_hour or current_hour <= 7 else 0.4,
"office": 1.4 if 9 <= current_hour <= 17 else 0.3,
"bathroom": 1.0, # Consistent usage
"garage": 0.8 if 7 <= current_hour <= 9 or 17 <= current_hour <= 19 else 0.2
}
return occupancy_patterns.get(room, 1.0)
def generate_energy_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic energy consumption reading"""
base_value = config["base_value"]
variance = config["variance"]
room = config["room"]
# Apply time and occupancy factors
time_factor = self.get_time_factor()
occupancy_factor = self.get_occupancy_factor(room)
# Add some randomness
random_factor = 1.0 + (random.random() - 0.5) * 0.4
# Calculate final value
value = base_value * time_factor * occupancy_factor * random_factor
value = max(0.1, value) # Ensure minimum consumption
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "energy",
"timestamp": int(time.time()),
"energy": {
"value": round(value, 3),
"unit": "kWh"
},
"metadata": {
"time_factor": round(time_factor, 2),
"occupancy_factor": round(occupancy_factor, 2)
}
}
def generate_co2_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic CO2 level reading"""
base_value = config["base_value"]
variance = config["variance"]
room = config["room"]
# CO2 increases with occupancy
occupancy_factor = self.get_occupancy_factor(room)
co2_increase = (occupancy_factor - 0.5) * 150
# Add random fluctuation
random_variation = (random.random() - 0.5) * variance
value = base_value + co2_increase + random_variation
value = max(350, min(2000, value)) # Realistic CO2 range
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "co2",
"timestamp": int(time.time()),
"co2": {
"value": round(value, 1),
"unit": "ppm"
},
"metadata": {
"quality_level": "good" if value < 600 else "moderate" if value < 1000 else "poor"
}
}
def generate_temperature_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic temperature reading"""
base_value = config["base_value"]
variance = config["variance"]
room = config["room"]
# Temperature varies with time of day and occupancy
current_hour = datetime.now().hour
daily_variation = 2 * math.sin((current_hour - 6) * math.pi / 12)
occupancy_factor = self.get_occupancy_factor(room)
occupancy_heat = (occupancy_factor - 0.5) * 1.5
random_variation = (random.random() - 0.5) * variance
value = base_value + daily_variation + occupancy_heat + random_variation
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "temperature",
"timestamp": int(time.time()),
"temperature": {
"value": round(value, 1),
"unit": "°C"
}
}
def generate_humidity_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic humidity reading"""
base_value = config["base_value"]
variance = config["variance"]
room = config["room"]
# Humidity patterns based on room usage
if room == "bathroom":
# Higher spikes during usage times
current_hour = datetime.now().hour
if 7 <= current_hour <= 9 or 19 <= current_hour <= 22:
usage_spike = random.uniform(10, 25)
else:
usage_spike = 0
elif room == "kitchen":
# Cooking increases humidity
current_hour = datetime.now().hour
if 17 <= current_hour <= 20:
usage_spike = random.uniform(5, 15)
else:
usage_spike = 0
else:
usage_spike = 0
random_variation = (random.random() - 0.5) * variance
value = base_value + usage_spike + random_variation
value = max(20, min(95, value)) # Realistic humidity range
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "humidity",
"timestamp": int(time.time()),
"humidity": {
"value": round(value, 1),
"unit": "%"
}
}
def generate_motion_reading(self, sensor_id: str, config: Dict) -> Dict[str, Any]:
"""Generate realistic motion detection reading"""
room = config["room"]
current_time = time.time()
# Get current state
if sensor_id not in self.motion_states:
self.motion_states[sensor_id] = {"active": False, "last_change": current_time}
state = self.motion_states[sensor_id]
# Determine if motion should be detected based on occupancy patterns
occupancy_factor = self.get_occupancy_factor(room)
motion_probability = occupancy_factor * 0.3 # 30% chance when occupied
# Change state based on probability and time since last change
time_since_change = current_time - state["last_change"]
if state["active"]:
# If motion is active, chance to stop after some time
if time_since_change > 30: # At least 30 seconds of motion
if random.random() < 0.4: # 40% chance to stop
state["active"] = False
state["last_change"] = current_time
else:
# If no motion, chance to start based on occupancy
if time_since_change > 10: # At least 10 seconds of no motion
if random.random() < motion_probability:
state["active"] = True
state["last_change"] = current_time
return {
"sensor_id": sensor_id,
"room": room,
"sensor_type": "motion",
"timestamp": int(time.time()),
"motion": {
"value": 1 if state["active"] else 0,
"unit": "detected"
},
"metadata": {
"duration_seconds": int(time_since_change) if state["active"] else 0
}
}
def generate_sensor_reading(self, sensor_id: str) -> Dict[str, Any]:
"""Generate appropriate reading based on sensor type"""
if sensor_id not in BOOTSTRAP_SENSORS:
logger.warning(f"Unknown sensor ID: {sensor_id}")
return None
config = BOOTSTRAP_SENSORS[sensor_id]
sensor_type = config["type"]
if sensor_type == "energy":
return self.generate_energy_reading(sensor_id, config)
elif sensor_type == "co2":
return self.generate_co2_reading(sensor_id, config)
elif sensor_type == "temperature":
return self.generate_temperature_reading(sensor_id, config)
elif sensor_type == "humidity":
return self.generate_humidity_reading(sensor_id, config)
elif sensor_type == "motion":
return self.generate_motion_reading(sensor_id, config)
else:
logger.warning(f"Unknown sensor type: {sensor_type}")
return None
def main():
"""Main simulation loop"""
logger.info("=== Starting Enhanced Data Simulator ===")
# Connect to Redis
try:
redis_client = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=0, decode_responses=True)
redis_client.ping()
logger.info(f"Successfully connected to Redis at {REDIS_HOST}:{REDIS_PORT}")
except redis.exceptions.ConnectionError as e:
logger.error(f"Could not connect to Redis: {e}")
return
# Initialize data generator
generator = SensorDataGenerator()
logger.info(f"Loaded {len(BOOTSTRAP_SENSORS)} bootstrap sensors")
logger.info(f"Publishing to Redis channel: '{REDIS_CHANNEL}'")
logger.info("Press Ctrl+C to stop simulation")
sensor_ids = list(BOOTSTRAP_SENSORS.keys())
try:
while True:
            for _ in range(5):
                # Generate data for a random sensor
                sensor_id = random.choice(sensor_ids)
reading = generator.generate_sensor_reading(sensor_id)
if reading:
# Publish to Redis
payload = json.dumps(reading)
redis_client.publish(REDIS_CHANNEL, payload)
# Log the reading
sensor_type = reading["sensor_type"]
room = reading["room"]
value_info = ""
if "energy" in reading:
value_info = f"{reading['energy']['value']} {reading['energy']['unit']}"
elif "co2" in reading:
value_info = f"{reading['co2']['value']} {reading['co2']['unit']}"
elif "temperature" in reading:
value_info = f"{reading['temperature']['value']} {reading['temperature']['unit']}"
elif "humidity" in reading:
value_info = f"{reading['humidity']['value']} {reading['humidity']['unit']}"
elif "motion" in reading:
value_info = f"{'DETECTED' if reading['motion']['value'] else 'CLEAR'}"
logger.info(f"📊 {sensor_id} ({room}/{sensor_type}): {value_info}")
# Random interval between readings (1-5 seconds)
time.sleep(random.uniform(1, 5))
except KeyboardInterrupt:
logger.info("Stopping data simulation...")
except Exception as e:
logger.error(f"Simulation error: {e}")
if __name__ == "__main__":
main()
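
A quick way to verify the simulator end to end is a small subscriber on the same channel. A minimal sketch (hypothetical helper script, not part of the repo, using the defaults from `data_simulator_enhanced.py`):

```python
#!/usr/bin/env python3
"""Subscribe to the simulator's Redis channel and print incoming readings."""
import json

import redis

r = redis.Redis(host="localhost", port=6379, decode_responses=True)
pubsub = r.pubsub()
pubsub.subscribe("energy_data")

for message in pubsub.listen():
    if message["type"] != "message":
        continue  # skip the initial subscribe confirmation
    reading = json.loads(message["data"])
    print(reading["sensor_id"], reading["sensor_type"], reading["timestamp"])
```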

View File

@@ -0,0 +1,976 @@
# Demand Response System - Architecture & Logic Documentation
## Table of Contents
1. [System Overview](#system-overview)
2. [Component Locations](#component-locations)
3. [Architecture & Data Flow](#architecture--data-flow)
4. [Key Components](#key-components)
5. [Invitation Lifecycle](#invitation-lifecycle)
6. [Integration Points](#integration-points)
7. [API Reference](#api-reference)
8. [Complete Event Flow Example](#complete-event-flow-example)
---
## System Overview
The IoT Building Monitoring system includes a comprehensive Demand Response (DR) management system that enables buildings to participate in grid flexibility programs by reducing power consumption during peak demand periods.
**Key Capabilities:**
- Create and manage DR invitations with target load reductions
- Auto-accept or manual approval of DR events
- Track power reduction in real-time during events
- Calculate financial benefits from DR participation
- Forecast available flexibility by device and time
- Configure device-specific DR participation instructions
---
## Component Locations
### Core Components
| Component | Path | Purpose |
|-----------|------|---------|
| **Service Layer** | `services/DemandResponseService.py` | Business logic for DR operations |
| **Database Layer** | `database/DemandResponseRepository.py` | MongoDB data access for DR |
| **Execution Engine** | `core/DemandResponseAtuator.py` | Runs DR events, tracks power reduction |
| **Main Orchestrator** | `core/Core.py` | Coordinates DR events and accumulates reduction |
| **IoT Model** | `model/IoT.py` | Device configuration with DR capabilities |
| **API Endpoints** | `api/main.py:230-329` | REST endpoints for DR operations |
| **Configuration** | `config/f.json` | System and device configuration |
---
## Architecture & Data Flow
### High-Level Architecture
```
┌─────────────────────────────────────┐
│ REST API Endpoints │
│ (Flask: api/main.py) │
│ - /invitation/* │
│ - /event/check │
│ - /dr/benefit │
└──────────────┬──────────────────────┘
┌─────────────────────────────────────┐
│ DemandResponseService │
│ (Business Logic Layer) │
│ - Invitation management │
│ - Auto-answer configuration │
│ - Response tracking │
└──────────────┬──────────────────────┘
┌─────────────────────────────────────┐
│ DemandResponseRepository │
│ (Data Access Layer) │
│ - MongoDB operations │
│ - Query optimization │
└──────────────┬──────────────────────┘
┌─────────────────────────────────────┐
│ MongoDB Collections │
│ - demand_response_invitations │
│ - config (auto_answer) │
│ - benefit (financial tracking) │
│ - instructions (hourly rules) │
└─────────────────────────────────────┘
```
### Execution Architecture
```
┌──────────────────────────────────────┐
│ Core.py (Main Thread) │
│ - Manages IoT device fleet │
│ - Tracks dr_reduced_power │
│ - Calculates total flexibility │
└──────────────┬───────────────────────┘
│ schedule_event(time, iot)
┌──────────────────────────────────────┐
│ DemandResponseAtuator (New Thread) │
│ - Spawned per device per event │
│  - Runs for 59 minutes (~1 hour)    │
│ - Updates core.dr_reduced_power │
│ - Auto-terminates at event end │
└──────────────────────────────────────┘
```
### Data Models
#### MongoDB Collection: `demand_response_invitations`
```json
{
"_id": "ObjectId",
"datetime": "2025-12-10 13:45:32", // Invitation creation time
"event_time": "2025-12-10 14:00:00", // When DR event occurs
"load_kwh": 5.2, // Target reduction in kWh
"load_percentage": 15.0, // Reduction as % of total load
"iots": ["AC1", "AC2", "Lighting"], // Participating devices
"response": "WAITING|YES|NO" // Participant decision
}
```
#### MongoDB Collection: `config`
```json
{
"config": "config",
"auto_answer": true // Auto-accept DR invitations
}
```
#### MongoDB Collection: `benefit`
```json
{
"source": "dr", // "dr" or "p2p"
"product": "AC1", // Device name
"value": 5.50, // Financial benefit (€)
"datetime": "2025-12-10 14:00:00"
}
```
#### MongoDB Collection: `instructions`
```json
{
"AC1": {
"0": "participation", // Hour 0: full DR participation
"1": "shifting", // Hour 1: 0-20% participation
"2": "off", // Hour 2: no DR participation
"3": "participation",
// ... hours 4-23
},
"AC2": { /* ... */ }
}
```
---
## Key Components
### 1. DemandResponseService
**Location:** `services/DemandResponseService.py`
**Responsibilities:**
- Manages DR invitation lifecycle
- Handles participant responses
- Configures auto-accept behavior
- Queries invitation status
**Key Methods:**
```python
def invitation(event_time, load_kwh, load_percentage, iots):
"""Create new DR invitation"""
# Checks auto_answer config
# Sets response to YES if auto-enabled, else WAITING
# Stores in MongoDB via repository
def answer_invitation(event_time, iot, response):
"""Record YES/NO response for specific device"""
# Updates invitation response field
# Used for manual acceptance workflow
def get_unanswered_invitations():
"""Get all pending invitations awaiting response"""
# Returns invitations with response="WAITING"
def get_answered_invitations():
"""Get last 5 completed invitations"""
# Returns historical invitations (YES/NO)
def get_auto_answer_config():
"""Check if auto-accept is enabled"""
# Returns boolean from config collection
def set_auto_answer_config(auto_answer):
"""Enable/disable auto-accept"""
# Updates MongoDB config collection
```
**Auto-Accept Logic:**
```python
# Line 35-38 in DemandResponseService.py
if self.get_auto_answer_config():
response = "YES" # Auto-accept enabled
else:
response = "WAITING" # Require manual approval
```
---
### 2. DemandResponseRepository
**Location:** `database/DemandResponseRepository.py`
**Responsibilities:**
- Direct MongoDB operations
- Query optimization and filtering
- Data persistence
**Key Methods:**
```python
def insert_invitation(datetime, event_time, load_kwh, load_percentage, iots, response):
"""Store new DR invitation in MongoDB"""
def answer_invitation(event_time, iot, response):
"""Update invitation response status"""
# Updates document where event_time matches and iot in iots array
def get_unanswered_invitations():
"""Query: {response: "WAITING"}"""
def get_answered_invitations():
"""Query: {response: {$ne: "WAITING"}}, limit 5, sort by datetime desc"""
def get_accepted_upcoming_invitations():
"""Query: {response: "YES", event_time: {$gte: now}}"""
def get_invitation(event_time):
"""Find specific invitation by event time"""
```
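These methods map directly onto pymongo calls. A minimal sketch of the three queries (the connection string is an assumption; database and collection names follow the Configuration Reference below):
```python
from datetime import datetime

from pymongo import DESCENDING, MongoClient

# Assumed local MongoDB; H01/demand_response_invitations per config/f.json
coll = MongoClient("mongodb://localhost:27017")["H01"]["demand_response_invitations"]

unanswered = list(coll.find({"response": "WAITING"}))

answered = list(
    coll.find({"response": {"$ne": "WAITING"}}).sort("datetime", DESCENDING).limit(5)
)

# event_time is stored as "YYYY-MM-DD HH:MM:SS", so lexicographic $gte works
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
upcoming = list(coll.find({"response": "YES", "event_time": {"$gte": now}}))
```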
---
### 3. DemandResponseAtuator
**Location:** `core/DemandResponseAtuator.py`
**Responsibilities:**
- Executes DR event for a single device
- Runs as separate thread during event
- Accumulates power reduction in real-time
- Auto-terminates after 59 minutes
**Architecture:**
```python
class DemandResponseAtuator(Thread):
def __init__(self, core, iot):
self.core = core # Reference to Core instance
self.iot = iot # IoT device participating in DR
self.event_on = True # Event active flag
def run(self):
# Schedule event end at 59 minutes from now
end_time = (datetime.now() + timedelta(minutes=59))
end_time_formatted = end_time.strftime('%H:%M:%S')
schedule.every().day.at(end_time_formatted).do(self.end_event)
# Main loop: accumulate power reduction every second
while self.event_on:
# Add device's current power to reduction accumulator
self.core.dr_reduced_power += self.iot.get_power()
schedule.run_pending()
time.sleep(1)
def end_event(self):
"""Called automatically at event end"""
self.event_on = False
return schedule.CancelJob
```
**Key Characteristics:**
- **Threading Model:** One thread per device per event
- **Update Frequency:** Every 1 second
- **Duration:** Exactly 59 minutes (scheduled termination)
- **Power Tracking:** Cumulative reduction added to `core.dr_reduced_power`
---
### 4. Core (Main Orchestrator)
**Location:** `core/Core.py`
**DR-Related Attributes:**
```python
class Core(Thread):
def __init__(self):
self.dr_reduced_power = 0.0 # Accumulator for power reduction
self.iots_consumption = [] # List of controllable devices
self.iots = [] # All IoT devices
```
**Key DR Methods:**
```python
def schedule_event(self, event_time, iot_name):
"""Initiate DR event for specified device"""
# Find device by name
iot = [i for i in self.iots if i.name == iot_name][0]
# Create and start DemandResponseAtuator thread
dr = DemandResponseAtuator(self, iot)
dr.start()
def get_total_consumption(self):
"""Returns consumption MINUS DR reductions"""
# Sum all device power
totalPower = sum(iot.get_power() for iot in self.iots_consumption)
# Subtract DR reduction
reduce = self.dr_reduced_power
self.dr_reduced_power = 0 # Reset accumulator
return totalPower - reduce
def get_total_flexibility(self):
"""Calculate available flexibility for DR"""
# Sum power of devices with demandresponse=true
return sum(iot.get_power() for iot in self.iots_consumption
if iot.demandresponse)
```
**How Power Reduction Works:**
1. During DR event, `DemandResponseAtuator` continuously adds to `dr_reduced_power`
2. When `get_total_consumption()` is called, reduction is subtracted from total
3. `dr_reduced_power` is reset to 0 after each reading
4. This creates effective "virtual" power reduction in reported consumption
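A toy illustration of this accumulate-and-reset cycle (a hypothetical standalone sketch, not the repository's code):
```python
class ToyCore:
    """Hypothetical stand-in for Core, showing the accumulator mechanics."""

    def __init__(self, device_powers):
        self.device_powers = device_powers  # current draw per device (kW)
        self.dr_reduced_power = 0.0         # filled by actuator threads

    def get_total_consumption(self):
        total = sum(self.device_powers.values())
        reduced = self.dr_reduced_power
        self.dr_reduced_power = 0.0         # step 3: reset after each reading
        return total - reduced


core = ToyCore({"AC1": 3.0, "AC2": 5.0})
core.dr_reduced_power += 2.0          # step 1: an actuator thread accumulates
print(core.get_total_consumption())   # 6.0 -> reduction subtracted (step 2)
print(core.get_total_consumption())   # 8.0 -> accumulator was reset
```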
---
### 5. IoT Model
**Location:** `model/IoT.py`
**DR-Related Attributes:**
```python
class IoT:
def __init__(self, config):
self.name = config['name']
self.demandresponse = config['control'].get('demandresponse', False)
self.instructions = {} # Hourly DR instructions
```
**Configuration Example (config/f.json):**
```json
{
"resources": {
"iots": [
{
"name": "AC1",
"type": "hvac",
"uri": "http://192.168.2.91:30000/api/realtime/H_01/AC1",
"control": {
"demandresponse": true // Device can participate in DR
}
}
]
}
}
```
**DR-Capable Devices:**
- AC1, AC2, AC3, AC4 (HVAC systems)
- Water Heater
- Lighting
- Refrigerator
**Instruction Types:**
- `"participation"` - Full DR participation (100% reduction if needed)
- `"shifting"` - Partial participation (0-20% reduction)
- `"off"` - No DR participation for that hour
---
## Invitation Lifecycle
### 1. Create Invitation
**Endpoint:** `POST /invitation/send`
**Request:**
```json
{
"event_time": "2025-12-10 14:00:00",
"kwh": 5.2,
"percentage": 15,
"iots": ["AC1", "AC2", "Lighting"]
}
```
**Response:**
```json
{
"event_time": "2025-12-10 14:00:00"
}
```
**Logic Flow:**
1. Validates event_time format
2. Checks auto_answer configuration
3. Sets response = "YES" if auto-enabled, else "WAITING"
4. Stores invitation in MongoDB
5. Returns event_time as confirmation
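A client-side sketch of this call (the gateway host and port are assumptions; the payload mirrors the request above):
```python
import requests

BASE_URL = "http://localhost:5000"  # assumed Flask host/port

resp = requests.post(
    f"{BASE_URL}/invitation/send",
    json={
        "event_time": "2025-12-10 14:00:00",
        "kwh": 5.2,
        "percentage": 15,
        "iots": ["AC1", "AC2", "Lighting"],
    },
    timeout=5,
)
print(resp.json())  # {"event_time": "2025-12-10 14:00:00"}
```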
---
### 2. Check Invitation Status
**Endpoint:** `POST /invitation/get`
**Request:**
```json
{
"event_time": "2025-12-10 14:00:00"
}
```
**Response:**
```json
{
"datetime": "2025-12-10 13:45:32",
"event_time": "2025-12-10 14:00:00",
"load_kwh": 5.2,
"load_percentage": 15,
"iots": ["AC1", "AC2", "Lighting"],
"response": "WAITING"
}
```
---
### 3. Get Pending Invitations
**Endpoint:** `GET /invitation/unanswered`
**Response:**
```json
[
{
"datetime": "2025-12-10 13:45:32",
"event_time": "2025-12-10 14:00:00",
"load_kwh": 5.2,
"load_percentage": 15,
"iots": ["AC1", "AC2"],
"response": "WAITING"
},
{
"datetime": "2025-12-10 14:20:15",
"event_time": "2025-12-10 16:00:00",
"load_kwh": 3.8,
"load_percentage": 10,
"iots": ["Water Heater"],
"response": "WAITING"
}
]
```
**Use Case:** Display pending DR invitations requiring participant decision
---
### 4. Answer Invitation
**Endpoint:** `POST /invitation/answer`
**Request:**
```json
{
"event_time": "2025-12-10 14:00:00",
"iot": "AC1",
"response": "YES"
}
```
**Response:**
```json
{
"message": "answered"
}
```
**Logic:**
- Updates invitation document in MongoDB
- Sets response field to "YES" or "NO"
- Filters by event_time and iot in iots array
- Enables manual approval workflow
---
### 5. Execute DR Event
**Endpoint:** `POST /event/check`
**Request:**
```json
{
"event_time": "2025-12-10 14:00:00",
"iot": "AC1"
}
```
**Logic Flow:**
```python
1. Receives event_time and iot name
2. Calls core.schedule_event(event_time, iot)
3. Core finds IoT device by name
4. Creates new DemandResponseAtuator(core, iot)
5. Starts the thread, which begins power reduction tracking
6. Thread runs for 59 minutes, accumulating reduction every second
7. Auto-terminates at scheduled end time
```
---
### 6. Configure Auto-Accept
**Get Config:** `GET /invitation/auto`
**Response:**
```json
{
"auto_answer": true
}
```
**Set Config:** `POST /invitation/auto`
**Request:**
```json
{
"auto_answer": true
}
```
**Response:**
```json
{
"auto_answer": true
}
```
**Effect:**
- When enabled: New invitations automatically set to response="YES"
- When disabled: New invitations set to response="WAITING" (require manual approval)
---
## Integration Points
### 1. Energy Management
**ForecastService** (`services/ForecastService.py`)
- Calculates `forecast_flexibility()` based on historical data
- Predicts available DR capacity for future periods
- Uses flexibility data stored with hourly consumption/generation
**Core.get_total_flexibility()**
- Returns sum of power from DR-capable devices
- Indicates current available flexibility
- Accessible via `GET /energy/flexibility`
```python
def get_total_flexibility(self):
return sum(iot.get_power() for iot in self.iots_consumption
if iot.demandresponse)
```
---
### 2. Building Management
**StoringManager** (`model/StoringManager.py`)
- Stores hourly aggregates including flexibility
- MongoDB collection: `TOTALPOWERHOUR`
- Fields: `{datetime, consumption, generation, flexibility}`
**BuildingRepository** (`database/BuildingRepository.py`)
- `insert_hour()` stores flexibility alongside consumption/generation
- Flexibility calculated as: `power * random(0-20%)`
- Provides historical baseline for forecasting
---
### 3. Financial Tracking
**EnergyService** (`services/EnergyService.py`)
```python
def add_benefit(source, product, value):
"""Record financial benefit from DR or P2P"""
# source: "dr" or "p2p"
# product: device name
# value: financial reward amount
```
**Record DR Benefit:** `POST /dr/benefit`
**Request:**
```json
{
"iot": "AC1",
"value": 5.50
}
```
**Storage:**
```json
{
"source": "dr",
"product": "AC1",
"value": 5.50,
"datetime": "2025-12-10 14:00:00"
}
```
**Monthly Benefits:** `GET /benefits/monthly`
**Response:**
```json
{
"dr": 150.00,
"p2p": 50.00
}
```
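One way these totals could be computed (an assumed pymongo aggregation, not necessarily the repository's implementation; `datetime` is stored as a string, so a month-prefix match works):
```python
from pymongo import MongoClient

# BuildingRightSide/benefit per the Configuration Reference
coll = MongoClient("mongodb://localhost:27017")["BuildingRightSide"]["benefit"]

pipeline = [
    {"$match": {"datetime": {"$regex": "^2025-12"}}},             # current month
    {"$group": {"_id": "$source", "total": {"$sum": "$value"}}},  # dr vs p2p
]
totals = {row["_id"]: row["total"] for row in coll.aggregate(pipeline)}
print(totals)  # e.g. {"dr": 150.0, "p2p": 50.0}
```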
---
### 4. IoT Device Control
**IotService** (`services/IotService.py`)
```python
def change_dr_enable(iot, enable):
"""Enable or disable DR capability for device"""
iot.demandresponse = enable
def update_instructions(instructions):
"""Set hourly DR participation instructions"""
# Format: {iot_name: {hour: "participation|shifting|off"}}
def get_instructions():
"""Retrieve current DR instructions"""
return {iot.name: iot.instructions for iot in iots}
```
**Update Instructions:** `POST /iot/instructions`
**Request:**
```json
{
"AC1": {
"0": "participation",
"1": "shifting",
"2": "off",
"3": "participation"
// ... hours 4-23
}
}
```
**Forecasted Flexibility by Hour:** `POST /iots/forecast/flexibility`
**Request:**
```json
{
"hour": 14
}
```
**Response:**
```json
{
"shifting": [["AC1", 50], ["AC2", 75]], // 0-20% participation
"reducing": [["Water Heater", 100]] // Full participation
}
```
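A hypothetical sketch of how such a response could be assembled from the hourly instructions and current device power (function and variable names are assumptions, not the repository's code):
```python
def flexibility_by_hour(instructions, device_power, hour):
    """Bucket DR-capable devices by their instruction for the given hour."""
    shifting, reducing = [], []
    for name, hours in instructions.items():
        mode = hours.get(str(hour), "off")
        if mode == "shifting":           # partial (0-20%) participation
            shifting.append([name, device_power[name]])
        elif mode == "participation":    # full participation
            reducing.append([name, device_power[name]])
    return {"shifting": shifting, "reducing": reducing}


print(flexibility_by_hour(
    {"AC1": {"14": "shifting"}, "Water Heater": {"14": "participation"}},
    {"AC1": 50, "Water Heater": 100},
    hour=14,
))  # {'shifting': [['AC1', 50]], 'reducing': [['Water Heater', 100]]}
```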
---
## API Reference
### Demand Response Endpoints
| Method | Endpoint | Description | Request Body | Response |
|--------|----------|-------------|--------------|----------|
| POST | `/invitation/send` | Create DR invitation | `{event_time, kwh, percentage, iots}` | `{event_time}` |
| POST | `/invitation/get` | Get specific invitation | `{event_time}` | Invitation object |
| GET | `/invitation/unanswered` | Get pending invitations | None | Array of invitations |
| GET | `/invitation/answered` | Get last 5 completed | None | Array of invitations |
| POST | `/invitation/answer` | Submit response | `{event_time, iot, response}` | `{message: "answered"}` |
| GET | `/invitation/auto` | Get auto-accept config | None | `{auto_answer: boolean}` |
| POST | `/invitation/auto` | Set auto-accept config | `{auto_answer: boolean}` | `{auto_answer: boolean}` |
| POST | `/event/check` | Execute DR event | `{event_time, iot}` | Success status |
| POST | `/dr/benefit` | Record DR benefit | `{iot, value}` | `{message: "ok"}` |
### Related Flexibility/Energy Endpoints
| Method | Endpoint | Description |
|--------|----------|-------------|
| GET | `/energy/now` | Current consumption, generation, flexibility |
| GET | `/energy/flexibility` | Available flexibility for DR |
| GET | `/forecast/flexibility` | Forecasted flexibility |
| POST | `/iots/forecast/flexibility` | Flexibility by hour and device |
| POST | `/iot/demandresponse/enable` | Enable/disable device DR |
| POST | `/iot/instructions` | Update DR instructions |
| GET | `/iot/instructions` | Get current instructions |
| GET | `/benefits/monthly` | Monthly DR benefits |
---
## Complete Event Flow Example
### Scenario: 10 kWh Reduction Event at 2:00 PM
```
┌─────────────────────────────────────────────────────────────┐
│ STEP 1: Create Invitation (1:45 PM) │
└─────────────────────────────────────────────────────────────┘
POST /invitation/send
{
"event_time": "2025-12-10 14:00:00",
"kwh": 10,
"percentage": 20,
"iots": ["AC1", "AC2", "Water Heater"]
}
Flow:
├─ DemandResponseService.invitation()
├─ Checks auto_answer config → disabled (false)
├─ Sets response = "WAITING"
├─ DemandResponseRepository.insert_invitation()
└─ MongoDB: Creates invitation document
Result: Invitation stored, awaiting participant approval
┌─────────────────────────────────────────────────────────────┐
│ STEP 2: Check Pending Invitations (1:50 PM) │
└─────────────────────────────────────────────────────────────┘
GET /invitation/unanswered
Response:
[
{
"datetime": "2025-12-10 13:45:32",
"event_time": "2025-12-10 14:00:00",
"load_kwh": 10,
"load_percentage": 20,
"iots": ["AC1", "AC2", "Water Heater"],
"response": "WAITING"
}
]
┌─────────────────────────────────────────────────────────────┐
│ STEP 3: Answer Invitation for Each Device (1:55 PM) │
└─────────────────────────────────────────────────────────────┘
POST /invitation/answer
{"event_time": "2025-12-10 14:00:00", "iot": "AC1", "response": "YES"}
POST /invitation/answer
{"event_time": "2025-12-10 14:00:00", "iot": "AC2", "response": "YES"}
POST /invitation/answer
{"event_time": "2025-12-10 14:00:00", "iot": "Water Heater", "response": "NO"}
Flow per request:
├─ DemandResponseService.answer_invitation()
├─ DemandResponseRepository.answer_invitation()
└─ MongoDB: Updates invitation.response for specified iot
Result: AC1 and AC2 accepted, Water Heater declined
┌─────────────────────────────────────────────────────────────┐
│ STEP 4: Execute DR Event (2:00 PM - Event Start) │
└─────────────────────────────────────────────────────────────┘
POST /event/check
{"event_time": "2025-12-10 14:00:00", "iot": "AC1"}
POST /event/check
{"event_time": "2025-12-10 14:00:00", "iot": "AC2"}
Flow per request:
├─ Core.schedule_event("2025-12-10 14:00:00", "AC1")
├─ Finds IoT device: iot = [i for i in core.iots if i.name == "AC1"][0]
├─ Creates DemandResponseAtuator(core, iot)
└─ Starts thread
DemandResponseAtuator.run():
├─ Schedules end_event() at 14:59:00
└─ While loop (every 1 second for 59 minutes):
└─ core.dr_reduced_power += iot.get_power()
Result: Two threads running, accumulating power reduction
┌─────────────────────────────────────────────────────────────┐
│ STEP 5: Monitor Energy (2:30 PM - During Event) │
└─────────────────────────────────────────────────────────────┘
GET /energy/now
Flow:
├─ Core.get_total_consumption()
├─ totalPower = sum(iot.get_power() for iot in iots_consumption)
├─ totalPower = 50 kW (all devices)
├─ reduce = core.dr_reduced_power = 8 kW (accumulated from AC1+AC2)
├─ core.dr_reduced_power = 0 # Reset
└─ return 50 - 8 = 42 kW
Response:
{
"consumption": 42.0, // Reduced by DR
"generation": 15.0,
"flexibility": 18.0
}
Result: Consumption appears 8 kW lower due to DR reduction
┌─────────────────────────────────────────────────────────────┐
│ STEP 6: Automatic Event End (2:59 PM) │
└─────────────────────────────────────────────────────────────┘
Scheduled Task Triggered:
├─ DemandResponseAtuator.end_event() called
├─ self.event_on = False
├─ Thread exits while loop
└─ Thread terminates
Result: Both AC1 and AC2 threads stopped, DR event complete
┌─────────────────────────────────────────────────────────────┐
│ STEP 7: Record Financial Benefit (3:00 PM) │
└─────────────────────────────────────────────────────────────┘
POST /dr/benefit
{"iot": "AC1", "value": 5.50}
POST /dr/benefit
{"iot": "AC2", "value": 4.75}
Flow per request:
├─ EnergyService.add_benefit("dr", iot, value)
├─ FinancialRepository.insert_benefit()
└─ MongoDB.benefit: {source: "dr", product: iot, value: value, datetime: now}
Result: Total DR benefit = €10.25
┌─────────────────────────────────────────────────────────────┐
│ STEP 8: Hourly Storage (3:00 PM - End of Hour) │
└─────────────────────────────────────────────────────────────┘
StoringManager.save_hour() (automatic):
├─ BuildingService.save_last_hour()
├─ Calculates flexibility = power * random(0-20%)
├─ BuildingRepository.insert_hour()
└─ MongoDB.TOTALPOWERHOUR: {
datetime: "2025-12-10 14:00:00",
consumption: 42.0, // Average during hour (with DR reduction)
generation: 15.0,
flexibility: 7.8
}
Result: Hour data stored with DR-reduced consumption
┌─────────────────────────────────────────────────────────────┐
│ STEP 9: View Monthly Benefits (End of Month) │
└─────────────────────────────────────────────────────────────┘
GET /benefits/monthly
Response:
{
"dr": 185.50, // Total DR benefits for month
"p2p": 62.30 // Total P2P benefits for month
}
Result: Financial tracking shows €185.50 earned from DR participation
```
---
## Key Metrics & Statistics
| Metric | Value | Source |
|--------|-------|--------|
| **Update Frequency** | 1 second | DemandResponseAtuator.run() |
| **Event Duration** | 59 minutes | Scheduled termination |
| **Storage Frequency** | Every hour | StoringManager |
| **DR-Capable Devices** | 8 devices | config/f.json |
| **Threading Model** | 1 thread per device per event | Core.schedule_event() |
| **Database** | MongoDB (H01, BuildingRightSide) | Multiple collections |
| **API Framework** | Flask with CORS | api/main.py |
| **Flexibility Calculation** | 0-20% of device power | Based on instructions |
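At the stated 1-second update frequency over a 59-minute event, each actuator thread performs 59 × 60 = 3,540 accumulation steps per device per event.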
---
## Configuration Reference
### Device Configuration (config/f.json)
```json
{
"app": {
"dr_events_auto_accept": 1, // 1=enabled, 0=disabled
"monitoring": 0 // Debug logging
},
"storage": {
"local": {
"demand_response": ["H01", "demand_response_invitations"],
"config": ["H01", "config"],
"benefit": ["BuildingRightSide", "benefit"],
"instructions": ["H01", "instructions"]
}
},
"resources": {
"iots": [
{
"name": "AC1",
"type": "hvac",
"uri": "http://192.168.2.91:30000/api/realtime/H_01/AC1",
"control": {
"demandresponse": true
}
}
// ... more devices
]
}
}
```
### MongoDB Database Structure
```
Database: H01
├─ demand_response_invitations (DR events)
├─ config (auto_answer setting)
├─ instructions (hourly participation rules)
└─ TOTALPOWERHOUR (hourly aggregates)
Database: BuildingRightSide
└─ benefit (financial tracking)
```
---
## Summary
The Demand Response system is a comprehensive, multi-threaded solution that enables building participation in grid flexibility programs. It features:
- **Automatic or Manual Approval:** Configurable auto-accept or manual review workflow
- **Real-Time Power Tracking:** Per-device threads accumulate power reduction every second
- **Financial Benefit Tracking:** Source-based tracking (DR vs P2P) with monthly aggregation
- **Flexibility Forecasting:** Historical data and hourly instructions for predictive planning
- **Device-Level Control:** Per-device, per-hour participation configuration
- **MongoDB Persistence:** Scalable data storage with optimized queries
- **REST API:** Complete API for external integration and control
- **Thread Safety:** Separate threads per device prevent interference
**Critical Files:**
- **services/DemandResponseService.py:35-38** - Auto-accept logic
- **core/DemandResponseAtuator.py:run()** - Power reduction accumulation
- **core/Core.py:get_total_consumption()** - DR-reduced consumption calculation
- **api/main.py:230-329** - All DR endpoints
This architecture enables scalable, reliable demand response management with precise power tracking and financial incentive tracking.

View File

@@ -44,12 +44,6 @@ The system consists of 6 independent microservices coordinated by an API Gateway
## 🚀 Quick Start
-### Prerequisites
-- Docker 20.0+
-- Docker Compose 2.0+
-- 8GB RAM minimum
-- 10GB free disk space
### 1. Deploy the Complete System
```bash
cd microservices/

View File

@@ -10,17 +10,15 @@ import logging
logger = logging.getLogger(__name__)
class AuthMiddleware:
-    """Authentication middleware for validating tokens"""
    def __init__(self, token_service_url: str = "http://localhost:8001"):
        self.token_service_url = token_service_url
-        logger.info(f"Initialized AuthMiddleware with token service URL: {self.token_service_url}")
    async def verify_token(self, request: Request) -> Optional[Dict[str, Any]]:
        """
        Verify authentication token from request headers
        Returns token payload if valid, raises HTTPException if invalid
        """
-        # Extract token from Authorization header
        auth_header = request.headers.get("Authorization")
        if not auth_header:
            raise HTTPException(status_code=401, detail="Authorization header required")
@@ -28,17 +26,15 @@ class AuthMiddleware:
        if not auth_header.startswith("Bearer "):
            raise HTTPException(status_code=401, detail="Bearer token required")
-        token = auth_header[7:]  # Remove "Bearer " prefix
+        token = auth_header.split(" ")[1].strip()  # Remove "Bearer " prefix
        try:
-            # Validate token with token service
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{self.token_service_url}/tokens/validate",
-                    json={"token": token},
+                    json={'token': token},
                    timeout=aiohttp.ClientTimeout(total=5)
                ) as response:
                    if response.status != 200:
                        raise HTTPException(status_code=401, detail="Token validation failed")
@@ -48,7 +44,6 @@ class AuthMiddleware:
                        error_msg = token_data.get("error", "Invalid token")
                        raise HTTPException(status_code=401, detail=error_msg)
-                    # Token is valid, return decoded payload
                    return token_data.get("decoded")
        except aiohttp.ClientError as e:
@@ -67,10 +62,8 @@ class AuthMiddleware:
        if not token_payload:
            return False
-        # Get list of resources the token has access to
        token_resources = token_payload.get("list_of_resources", [])
-        # Check if token has access to all required resources
        for resource in required_resources:
            if resource not in token_resources:
                return False

View File

@@ -11,10 +11,11 @@ logger = logging.getLogger(__name__)
class LoadBalancer:
"""Simple load balancer for microservice requests"""
- def __init__(self):
def __init__(self, service_registry=None):
# In a real implementation, this would track multiple instances per service
self.service_instances: Dict[str, List[str]] = {}
self.current_index: Dict[str, int] = {}
self.service_registry = service_registry
def register_service_instance(self, service_name: str, instance_url: str):
"""Register a new service instance"""
@@ -54,9 +55,11 @@ class LoadBalancer:
if strategy == "single":
# Default behavior - get the service URL from service registry
- from service_registry import ServiceRegistry
- service_registry = ServiceRegistry()
- return await service_registry.get_service_url(service_name)
if self.service_registry:
return await self.service_registry.get_service_url(service_name)
else:
logger.error("No service registry available")
return None
elif strategy == "round_robin":
return await self._round_robin_select(service_name)
@@ -73,9 +76,11 @@ class LoadBalancer:
instances = self.service_instances.get(service_name, [])
if not instances:
# Fall back to service registry
- from service_registry import ServiceRegistry
- service_registry = ServiceRegistry()
- return await service_registry.get_service_url(service_name)
if self.service_registry:
return await self.service_registry.get_service_url(service_name)
else:
logger.error("No service registry available for fallback")
return None
# Round-robin selection
current_idx = self.current_index[service_name]
@@ -92,9 +97,11 @@ class LoadBalancer:
instances = self.service_instances.get(service_name, [])
if not instances:
# Fall back to service registry
- from service_registry import ServiceRegistry
- service_registry = ServiceRegistry()
- return await service_registry.get_service_url(service_name)
if self.service_registry:
return await self.service_registry.get_service_url(service_name)
else:
logger.error("No service registry available for fallback")
return None
selected_instance = random.choice(instances)
logger.debug(f"Random selected {selected_instance} for {service_name}")

View File

@@ -27,16 +27,12 @@ logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(app: FastAPI):
- """Application lifespan manager"""
logger.info("API Gateway starting up...")
- # Initialize service registry
await service_registry.initialize()
- # Register all services
await service_registry.register_services(SERVICES)
- # Start health check task
asyncio.create_task(health_check_task())
logger.info("API Gateway startup complete")
@@ -62,49 +58,14 @@ app.add_middleware(
allow_headers=["*"],
)
- # Service registry and load balancer
- service_registry = ServiceRegistry()
- load_balancer = LoadBalancer()
- auth_middleware = AuthMiddleware()
- # Service configuration
SERVICES = {
- # "token-service": ServiceConfig(
- # name="token-service",
- # base_url=os.getenv("TOKEN_SERVICE_URL", "http://token-service:8001"),
- # health_endpoint="/health",
- # auth_required=False
- # ),
- # "battery-service": ServiceConfig(
- # name="battery-service",
- # base_url=os.getenv("BATTERY_SERVICE_URL", "http://battery-service:8002"),
- # health_endpoint="/health",
- # auth_required=True
- # ),
- # "demand-response-service": ServiceConfig(
- # name="demand-response-service",
- # base_url=os.getenv("DEMAND_RESPONSE_SERVICE_URL", "http://demand-response-service:8003"),
- # health_endpoint="/health",
- # auth_required=True
- # ),
- # "p2p-trading-service": ServiceConfig(
- # name="p2p-trading-service",
- # base_url=os.getenv("P2P_TRADING_SERVICE_URL", "http://p2p-trading-service:8004"),
- # health_endpoint="/health",
- # auth_required=True
- # ),
- # "forecasting-service": ServiceConfig(
- # name="forecasting-service",
- # base_url=os.getenv("FORECASTING_SERVICE_URL", "http://forecasting-service:8005"),
- # health_endpoint="/health",
- # auth_required=True
- # ),
- # "iot-control-service": ServiceConfig(
- # name="iot-control-service",
- # base_url=os.getenv("IOT_CONTROL_SERVICE_URL", "http://iot-control-service:8006"),
- # health_endpoint="/health",
- # auth_required=True
- # ),
"token-service": ServiceConfig(
name="token-service",
base_url=os.getenv("TOKEN_SERVICE_URL", "http://token-service:8001"),
health_endpoint="/health",
auth_required=False
),
"sensor-service": ServiceConfig(
name="sensor-service",
base_url=os.getenv("SENSOR_SERVICE_URL", "http://sensor-service:8007"),
@@ -116,23 +77,32 @@ SERVICES = {
base_url=os.getenv("DATA_INGESTION_SERVICE_URL", "http://data-ingestion-service:8008"),
health_endpoint="/health",
auth_required=False
),
"demand-response-service": ServiceConfig(
name="demand-response-service",
base_url=os.getenv("DEMAND_RESPONSE_SERVICE_URL", "http://demand-response-service:8003"),
health_endpoint="/health",
auth_required=True
)
}
- # Request statistics
# Service registry and load balancer
service_registry = ServiceRegistry()
load_balancer = LoadBalancer(service_registry)
auth_middleware = AuthMiddleware(os.getenv("TOKEN_SERVICE_URL", "http://token-service:8001"))
request_stats = {
"total_requests": 0,
"successful_requests": 0,
"failed_requests": 0,
"service_requests": {service: 0 for service in SERVICES.keys()},
- "start_time": datetime.utcnow()
"start_time": datetime.now()
}
@app.get("/health", response_model=HealthResponse)
async def gateway_health_check():
"""Gateway health check endpoint"""
try:
- # Check all services
service_health = await service_registry.get_all_service_health()
healthy_services = sum(1 for status in service_health.values() if status.get("status") == "healthy")
@@ -187,7 +157,7 @@ async def get_gateway_stats():
@app.api_route("/api/v1/tokens/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def token_service_proxy(request: Request, path: str):
"""Proxy requests to token service"""
- return await proxy_request(request, "token-service", f"/{path}")
return await proxy_request(request, "token-service", f"/tokens/{path}")
# Battery Service Routes
@app.api_route("/api/v1/batteries/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
@@ -223,7 +193,7 @@ async def iot_control_service_proxy(request: Request, path: str):
@app.api_route("/api/v1/sensors/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def sensor_service_proxy(request: Request, path: str):
"""Proxy requests to sensor service"""
- return await proxy_request(request, "sensor-service", f"/{path}")
return await proxy_request(request, "sensor-service", f"/sensors/{path}")
# Data Ingestion Service Routes (SA4CPS FTP Monitoring)
@app.api_route("/api/v1/ingestion/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
@@ -241,6 +211,16 @@ async def data_sources_list_proxy(request: Request):
"""Proxy requests to data ingestion service for sources list"""
return await proxy_request(request, "data-ingestion-service", "/sources")
@app.get("/api/v1/rooms/names")
async def room_names_proxy(request: Request):
"""Proxy requests to sensor service for room names list"""
return await proxy_request(request, "sensor-service", "/rooms/names")
@app.get("/api/v1/rooms")
async def rooms_list_proxy(request: Request):
"""Proxy requests to sensor service for rooms list"""
return await proxy_request(request, "sensor-service", "/rooms")
@app.api_route("/api/v1/rooms/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def room_service_proxy(request: Request, path: str):
"""Proxy requests to sensor service for room management"""
@@ -302,6 +282,7 @@ async def websocket_proxy(websocket: WebSocket):
async def proxy_request(request: Request, service_name: str, path: str):
"""Generic request proxy function"""
try:
logger.info(f"Proxying request to {service_name} at {path}")
# Update request statistics
request_stats["total_requests"] += 1
request_stats["service_requests"][service_name] += 1
@@ -313,7 +294,7 @@ async def proxy_request(request: Request, service_name: str, path: str):
# Check authentication if required
if service_config.auth_required:
- await auth_middleware.verify_token(request)
decoded = await auth_middleware.verify_token(request)
# Get healthy service instance
service_url = await load_balancer.get_service_url(service_name)
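The two proxy fixes above prepend the backend's own route prefix before forwarding, so the gateway path and the service path line up. A tiny standalone illustration of the mapping (hypothetical helper, not the gateway's actual code):

def upstream_path(service_prefix: str, path: str) -> str:
    """Compose the backend path the gateway now forwards to (illustration only)."""
    return f"/{service_prefix}/{path}"

# before the fix: GET /api/v1/sensors/list was forwarded as /list (wrong root)
# after the fix:  GET /api/v1/sensors/list is forwarded as /sensors/list
assert upstream_path("sensors", "list") == "/sensors/list"
assert upstream_path("tokens", "validate") == "/tokens/validate"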

View File

@@ -1,7 +1,3 @@
"""
Service registry for managing microservice discovery and health monitoring
"""
import aiohttp import aiohttp
import asyncio import asyncio
from datetime import datetime from datetime import datetime
@@ -13,7 +9,6 @@ from models import ServiceConfig, ServiceHealth
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class ServiceRegistry: class ServiceRegistry:
"""Service registry for microservice management"""
def __init__(self): def __init__(self):
self.services: Dict[str, ServiceConfig] = {} self.services: Dict[str, ServiceConfig] = {}
@@ -21,20 +16,17 @@ class ServiceRegistry:
self.session: Optional[aiohttp.ClientSession] = None self.session: Optional[aiohttp.ClientSession] = None
async def initialize(self): async def initialize(self):
"""Initialize the service registry"""
self.session = aiohttp.ClientSession( self.session = aiohttp.ClientSession(
timeout=aiohttp.ClientTimeout(total=10) timeout=aiohttp.ClientTimeout(total=10)
) )
logger.info("Service registry initialized") logger.info("Service registry initialized")
async def close(self): async def close(self):
"""Close the service registry"""
if self.session: if self.session:
await self.session.close() await self.session.close()
logger.info("Service registry closed") logger.info("Service registry closed")
async def register_services(self, services: Dict[str, ServiceConfig]): async def register_services(self, services: Dict[str, ServiceConfig]):
"""Register multiple services"""
self.services.update(services) self.services.update(services)
# Initialize health status for all services # Initialize health status for all services
@@ -47,11 +39,9 @@ class ServiceRegistry:
logger.info(f"Registered {len(services)} services") logger.info(f"Registered {len(services)} services")
# Perform initial health check
await self.update_all_service_health() await self.update_all_service_health()
async def register_service(self, service_config: ServiceConfig): async def register_service(self, service_config: ServiceConfig):
"""Register a single service"""
self.services[service_config.name] = service_config self.services[service_config.name] = service_config
self.service_health[service_config.name] = ServiceHealth( self.service_health[service_config.name] = ServiceHealth(
service=service_config.name, service=service_config.name,
@@ -65,13 +55,11 @@ class ServiceRegistry:
await self.check_service_health(service_config.name) await self.check_service_health(service_config.name)
async def unregister_service(self, service_name: str): async def unregister_service(self, service_name: str):
"""Unregister a service"""
self.services.pop(service_name, None) self.services.pop(service_name, None)
self.service_health.pop(service_name, None) self.service_health.pop(service_name, None)
logger.info(f"Unregistered service: {service_name}") logger.info(f"Unregistered service: {service_name}")
async def check_service_health(self, service_name: str) -> ServiceHealth: async def check_service_health(self, service_name: str) -> ServiceHealth:
"""Check health of a specific service"""
service_config = self.services.get(service_name) service_config = self.services.get(service_name)
if not service_config: if not service_config:
logger.error(f"Service {service_name} not found in registry") logger.error(f"Service {service_name} not found in registry")
@@ -94,7 +82,6 @@ class ServiceRegistry:
if response.status == 200: if response.status == 200:
health_data = await response.json() health_data = await response.json()
status = "healthy" if health_data.get("status") in ["healthy", "ok"] else "unhealthy" status = "healthy" if health_data.get("status") in ["healthy", "ok"] else "unhealthy"
health = ServiceHealth( health = ServiceHealth(
service=service_name, service=service_name,
status=status, status=status,
@@ -125,36 +112,29 @@ class ServiceRegistry:
error_message=f"Health check failed: {str(e)}" error_message=f"Health check failed: {str(e)}"
) )
# Update health status
self.service_health[service_name] = health self.service_health[service_name] = health
# Log health status changes
if health.status != "healthy": if health.status != "healthy":
logger.warning(f"Service {service_name} health check failed: {health.error_message}") logger.warning(f"Service {service_name} health check failed: {health.error_message}")
return health return health
async def update_all_service_health(self): async def update_all_service_health(self):
"""Update health status for all registered services"""
health_checks = [ health_checks = [
self.check_service_health(service_name) self.check_service_health(service_name)
for service_name in self.services.keys() for service_name in self.services.keys()
] ]
if health_checks: if health_checks:
await asyncio.gather(*health_checks, return_exceptions=True) await asyncio.gather(*health_checks, return_exceptions=True)
# Log summary
healthy_count = sum(1 for h in self.service_health.values() if h.status == "healthy") healthy_count = sum(1 for h in self.service_health.values() if h.status == "healthy")
total_count = len(self.services) total_count = len(self.services)
logger.info(f"Health check complete: {healthy_count}/{total_count} services healthy") logger.info(f"Health check complete: {healthy_count}/{total_count} services healthy")
async def get_service_health(self, service_name: str) -> Optional[ServiceHealth]: async def get_service_health(self, service_name: str) -> Optional[ServiceHealth]:
"""Get health status of a specific service"""
return self.service_health.get(service_name) return self.service_health.get(service_name)
async def get_all_service_health(self) -> Dict[str, Dict]: async def get_all_service_health(self) -> Dict[str, Dict]:
"""Get health status of all services"""
health_dict = {} health_dict = {}
for service_name, health in self.service_health.items(): for service_name, health in self.service_health.items():
health_dict[service_name] = { health_dict[service_name] = {
@@ -166,12 +146,10 @@ class ServiceRegistry:
return health_dict return health_dict
async def is_service_healthy(self, service_name: str) -> bool: async def is_service_healthy(self, service_name: str) -> bool:
"""Check if a service is healthy"""
health = self.service_health.get(service_name) health = self.service_health.get(service_name)
return health is not None and health.status == "healthy" return health is not None and health.status == "healthy"
async def get_healthy_services(self) -> List[str]: async def get_healthy_services(self) -> List[str]:
"""Get list of healthy service names"""
return [ return [
service_name service_name
for service_name, health in self.service_health.items() for service_name, health in self.service_health.items()
@@ -179,15 +157,12 @@ class ServiceRegistry:
] ]
def get_service_config(self, service_name: str) -> Optional[ServiceConfig]: def get_service_config(self, service_name: str) -> Optional[ServiceConfig]:
"""Get configuration for a specific service"""
return self.services.get(service_name) return self.services.get(service_name)
def get_all_services(self) -> Dict[str, ServiceConfig]: def get_all_services(self) -> Dict[str, ServiceConfig]:
"""Get all registered services"""
return self.services.copy() return self.services.copy()
async def get_service_url(self, service_name: str) -> Optional[str]: async def get_service_url(self, service_name: str) -> Optional[str]:
"""Get base URL for a healthy service"""
if await self.is_service_healthy(service_name): if await self.is_service_healthy(service_name):
service_config = self.services.get(service_name) service_config = self.services.get(service_name)
return service_config.base_url if service_config else None return service_config.base_url if service_config else None
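Usage sketch for the slimmed-down registry above, assuming models and service_registry are importable; the ServiceConfig field names match the SERVICES config shown earlier, and registration triggers an initial health check:

import asyncio
from models import ServiceConfig  # repo modules, assumed importable
from service_registry import ServiceRegistry

async def main():
    registry = ServiceRegistry()
    await registry.initialize()
    await registry.register_services({
        "sensor-service": ServiceConfig(
            name="sensor-service",
            base_url="http://sensor-service:8007",
            health_endpoint="/health",
            auth_required=False,
        ),
    })
    print(await registry.get_all_service_health())  # initial check already ran
    await registry.close()

asyncio.run(main())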

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env python3
"""
Test script to validate that the coroutine fix works
"""
import asyncio
import sys
from unittest.mock import MagicMock, AsyncMock
# Mock the dependencies
sys.modules['aiohttp'] = MagicMock()
sys.modules['models'] = MagicMock()
sys.modules['service_registry'] = MagicMock()
sys.modules['load_balancer'] = MagicMock()
sys.modules['auth_middleware'] = MagicMock()
# Import the main module after mocking
import main
async def test_lifespan():
"""Test that the lifespan function works correctly"""
# Mock the service registry
main.service_registry.initialize = AsyncMock()
main.service_registry.register_services = AsyncMock()
main.service_registry.close = AsyncMock()
# Test the lifespan context manager
async with main.lifespan(None):
print("✅ Lifespan startup completed successfully")
# Verify that the methods were called
main.service_registry.initialize.assert_called_once()
main.service_registry.register_services.assert_called_once_with(main.SERVICES)
# Verify shutdown was called
main.service_registry.close.assert_called_once()
print("✅ Lifespan shutdown completed successfully")
print("✅ All coroutines are properly awaited - RuntimeWarning should be resolved")
if __name__ == "__main__":
asyncio.run(test_lifespan())

View File

@@ -15,6 +15,7 @@ class DatabaseManager:
self.client: Optional[MongoClient] = None
self.db = None
self.collections = {}
self.energy_collections_cache = {}  # Cache for dynamically created energy data collections
self.connection_string = MONGO_CONFIG["connection_string"]
self.database_name = MONGO_CONFIG["database_name"]
@@ -31,12 +32,11 @@
self.db = self.client[self.database_name]
self.collections = {
'files': self.db.sa4cps_files,
- 'energy_data': self.db.sa4cps_energy_data,
'metadata': self.db.sa4cps_metadata,
'scanned_directories': self.db.sa4cps_scanned_directories
}
- self._create_indexes()
self._create_base_indexes()
logger.info(f"Connected to MongoDB database: {self.database_name}")
@@ -78,13 +78,12 @@
logger.error(f"MongoDB ping failed with error: {e}")
raise ConnectionFailure(f"Ping failed: {e}")
- def _create_indexes(self):
def _create_base_indexes(self):
"""Create indexes for base collections (not energy data collections)"""
try:
self.collections['files'].create_index("filename", unique=True)
self.collections['files'].create_index("processed_at")
self.collections['files'].create_index("directory_path")
- self.collections['energy_data'].create_index([("filename", 1), ("timestamp", 1)])
- self.collections['energy_data'].create_index("timestamp")
self.collections['scanned_directories'].create_index("directory_path", unique=True)
self.collections['scanned_directories'].create_index("last_scanned")
@@ -94,13 +93,97 @@
except Exception as e:
logger.warning(f"Failed to create indexes: {e}")
- async def store_file_data(self, filename: str, records: List[Dict[str, Any]]) -> bool:
def _extract_level3_path(self, directory_path: str) -> Optional[str]:
"""Extract level 3 directory path (SLGs/Community/Building) from full path"""
# Expected structure: /SLGs/Community/Building/...
parts = directory_path.strip('/').split('/')
if len(parts) >= 3 and parts[0] == 'SLGs':
# Return SLGs/Community/Building
return '/'.join(parts[:3])
return None
def _sanitize_collection_name(self, level3_path: str) -> str:
"""Convert level 3 directory path to valid MongoDB collection name
Example: SLGs/CommunityA/Building1 -> energy_data__CommunityA_Building1
"""
parts = level3_path.strip('/').split('/')
if len(parts) >= 3 and parts[0] == 'SLGs':
# Use Community_Building as the collection suffix
collection_suffix = f"{parts[1]}_{parts[2]}"
collection_name = f"energy_data__{collection_suffix}"
return collection_name
# Fallback: sanitize the entire path
sanitized = level3_path.replace('/', '_').replace('.', '_').replace(' ', '_')
sanitized = sanitized.strip('_')
return f"energy_data__{sanitized}"
def _get_energy_collection(self, directory_path: str):
"""Get or create energy data collection for a specific level 3 directory path"""
level3_path = self._extract_level3_path(directory_path)
if not level3_path:
logger.warning(f"Could not extract level 3 path from: {directory_path}, using default collection")
# Fallback to a default collection for non-standard paths
collection_name = "energy_data__other"
else:
collection_name = self._sanitize_collection_name(level3_path)
# Check cache first
if collection_name in self.energy_collections_cache:
return self.energy_collections_cache[collection_name]
# Create/get collection
collection = self.db[collection_name]
# Create indexes for this energy collection
try:
collection.create_index([("filename", 1), ("timestamp", 1)])
collection.create_index("timestamp")
collection.create_index("meter_id")
logger.debug(f"Created indexes for collection: {collection_name}")
except Exception as e:
logger.warning(f"Failed to create indexes for {collection_name}: {e}")
# Cache the collection
self.energy_collections_cache[collection_name] = collection
logger.info(f"Initialized energy data collection: {collection_name} for path: {directory_path}")
return collection
def _list_energy_collections(self) -> List[str]:
"""List all energy data collections in the database"""
try:
all_collections = self.db.list_collection_names()
# Filter collections that start with 'energy_data__'
energy_collections = [c for c in all_collections if c.startswith('energy_data__')]
return energy_collections
except Exception as e:
logger.error(f"Error listing energy collections: {e}")
return []
async def store_file_data(self, filename: str, records: List[Dict[str, Any]], directory_path: str = None) -> bool:
try:
current_time = datetime.now()
# Determine which collection to use based on directory path
if directory_path:
energy_collection = self._get_energy_collection(directory_path)
level3_path = self._extract_level3_path(directory_path)
else:
logger.warning(f"No directory path provided for {filename}, using default collection")
energy_collection = self._get_energy_collection("/SLGs/unknown/unknown")
level3_path = None
# Store file metadata
file_metadata = {
"filename": filename,
"directory_path": directory_path,
"level3_path": level3_path,
"record_count": len(records),
"processed_at": current_time,
"file_size": sum(len(str(record)) for record in records),
@@ -118,12 +201,13 @@ class DatabaseManager:
for record in records:
record["filename"] = filename
record["processed_at"] = current_time
record["directory_path"] = directory_path
- # Insert energy data records
# Insert energy data records into the appropriate collection
if records:
- result = self.collections['energy_data'].insert_many(records)
result = energy_collection.insert_many(records)
inserted_count = len(result.inserted_ids)
- logger.debug(f"Stored {inserted_count} records from {filename}")
logger.debug(f"Stored {inserted_count} records from {filename} to {energy_collection.name}")
return True
return False
@@ -134,6 +218,7 @@ class DatabaseManager:
# Store error metadata
error_metadata = {
"filename": filename,
"directory_path": directory_path,
"processed_at": current_time,
"status": "error",
"error_message": str(e)
@@ -178,8 +263,14 @@ class DatabaseManager:
return None
# Directory scanning tracking methods
# Note: Only level 4+ directories (/SLGs/Community/Building/SubDir) are tracked
# to avoid unnecessary caching of high-level organizational directories
async def is_directory_scanned(self, directory_path: str, since_timestamp: datetime = None) -> bool:
- """Check if directory has been scanned recently"""
"""Check if directory has been scanned recently
Note: Only level 4+ directories are tracked in the database
"""
try:
query = {"directory_path": directory_path, "scan_status": "complete"}
if since_timestamp:
@@ -249,14 +340,14 @@ class DatabaseManager:
return False
async def get_stats(self) -> Dict[str, Any]:
- """Get database statistics"""
"""Get database statistics including all energy collections"""
try:
stats = {
"database": self.database_name,
"timestamp": datetime.now().isoformat()
}
- # Count documents in each collection
# Count documents in base collections
for name, collection in self.collections.items():
try:
count = collection.count_documents({})
@@ -264,12 +355,35 @@ class DatabaseManager:
except Exception as e:
stats[f"{name}_count"] = f"error: {e}"
# Get all energy collections and their counts
try:
energy_collections = self._list_energy_collections()
energy_stats = []
total_energy_records = 0
for collection_name in energy_collections:
collection = self.db[collection_name]
count = collection.count_documents({})
total_energy_records += count
energy_stats.append({
"collection": collection_name,
"record_count": count
})
stats["energy_collections"] = energy_stats
stats["total_energy_collections"] = len(energy_collections)
stats["total_energy_records"] = total_energy_records
except Exception as e:
stats["energy_collections"] = f"error: {e}"
# Get recent files
try:
recent_files = []
cursor = self.collections['files'].find(
{},
- {"filename": 1, "processed_at": 1, "record_count": 1, "status": 1, "_id": 0}
{"filename": 1, "processed_at": 1, "record_count": 1, "status": 1, "directory_path": 1, "level3_path": 1, "_id": 0}
).sort("processed_at", -1).limit(5)
for doc in cursor:
@@ -292,8 +406,17 @@ class DatabaseManager:
filename: Optional[str] = None,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
directory_path: Optional[str] = None,
limit: int = 100) -> List[Dict[str, Any]]:
- """Retrieve energy data with optional filtering"""
"""Retrieve energy data with optional filtering
Args:
filename: Filter by specific filename
start_time: Filter by start timestamp
end_time: Filter by end timestamp
directory_path: Filter by specific directory path (level 3). If None, queries all collections
limit: Maximum number of records to return
"""
try:
query = {}
@@ -308,22 +431,48 @@ class DatabaseManager:
time_query["$lte"] = end_time time_query["$lte"] = end_time
query["timestamp"] = time_query query["timestamp"] = time_query
cursor = self.collections['energy_data'].find(query).sort("timestamp", -1).limit(limit)
data = [] data = []
for doc in cursor:
# Convert ObjectId to string and datetime to ISO string
if "_id" in doc:
doc["_id"] = str(doc["_id"])
if "timestamp" in doc and hasattr(doc["timestamp"], "isoformat"):
doc["timestamp"] = doc["timestamp"].isoformat()
if "processed_at" in doc and hasattr(doc["processed_at"], "isoformat"):
doc["processed_at"] = doc["processed_at"].isoformat()
data.append(doc) # If directory_path is specified, query only that collection
if directory_path:
collection = self._get_energy_collection(directory_path)
cursor = collection.find(query).sort("timestamp", -1).limit(limit)
for doc in cursor:
data.append(self._format_energy_document(doc))
else:
# Query across all energy collections
energy_collection_names = self._list_energy_collections()
# Collect data from all collections, then sort and limit
all_data = []
per_collection_limit = max(limit, 1000) # Get more from each to ensure we have enough after sorting
for collection_name in energy_collection_names:
collection = self.db[collection_name]
cursor = collection.find(query).sort("timestamp", -1).limit(per_collection_limit)
for doc in cursor:
all_data.append(self._format_energy_document(doc))
# Sort all data by timestamp and apply final limit
all_data.sort(key=lambda x: x.get("timestamp", ""), reverse=True)
data = all_data[:limit]
return data
except Exception as e:
logger.error(f"Error retrieving energy data: {e}")
return []
def _format_energy_document(self, doc: Dict[str, Any]) -> Dict[str, Any]:
"""Format energy document for API response"""
# Convert ObjectId to string and datetime to ISO string
if "_id" in doc:
doc["_id"] = str(doc["_id"])
if "timestamp" in doc and hasattr(doc["timestamp"], "isoformat"):
doc["timestamp"] = doc["timestamp"].isoformat()
if "processed_at" in doc and hasattr(doc["processed_at"], "isoformat"):
doc["processed_at"] = doc["processed_at"].isoformat()
return doc
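Worked example of the path-to-collection mapping added above; this condenses _extract_level3_path, _sanitize_collection_name, and the "other" fallback into one illustrative helper, not the class's actual code:

def collection_for(directory_path: str) -> str:
    """Map an FTP directory path to its energy data collection name."""
    parts = directory_path.strip('/').split('/')
    if len(parts) >= 3 and parts[0] == 'SLGs':
        # SLGs/Community/Building -> energy_data__Community_Building
        return f"energy_data__{parts[1]}_{parts[2]}"
    return "energy_data__other"  # fallback for non-standard paths

assert collection_for("/SLGs/CommunityA/Building1/2025/01") == "energy_data__CommunityA_Building1"
assert collection_for("/misc/dump") == "energy_data__other"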

View File

@@ -17,6 +17,7 @@ class FTPFileInfo:
path: str
name: str
size: int
directory_path: str  # Directory containing the file
modified_time: Optional[datetime] = None
@@ -79,6 +80,7 @@ class FTPMonitor:
self.status = "stopped"
return
await asyncio.wait_for(self.check_for_new_files(), timeout=300.0)  # 5 minute timeout
self.status = "running"
# Optionally skip initial scan and wait for first scheduled interval
@@ -202,9 +204,13 @@
visited_dirs.add(normalized_path)
- # Check if directory should be skipped based on previous scans
- if await self.db_manager.should_skip_directory(normalized_path):
- logger.info(f"Skipping previously scanned directory: {normalized_path}")
# Determine directory depth (level 4 = /SLGs/Community/Building/SubDir)
path_parts = normalized_path.strip('/').split('/')
directory_level = len(path_parts)
# Check if directory should be skipped based on previous scans (only for level 4+)
if directory_level >= 4 and await self.db_manager.should_skip_directory(normalized_path):
logger.info(f"Skipping previously scanned level {directory_level} directory: {normalized_path}")
skipped_dirs += 1
continue
@@ -257,7 +263,8 @@
files.append(FTPFileInfo(
path=full_path,
name=filename,
- size=size
size=size,
directory_path=normalized_path
))
files_found_in_dir += 1
@@ -266,9 +273,12 @@
ftp.cwd(original_dir)
- # Mark directory as scanned
- await self.db_manager.mark_directory_scanned(normalized_path, files_found_in_dir)
- logger.debug(f"Completed scanning: {normalized_path} ({files_found_in_dir} files found)")
# Mark directory as scanned (only for level 4+ directories)
if directory_level >= 4:
await self.db_manager.mark_directory_scanned(normalized_path, files_found_in_dir)
logger.debug(f"Completed scanning level {directory_level} directory: {normalized_path} ({files_found_in_dir} files found)")
else:
logger.debug(f"Completed scanning level {directory_level} directory (not saved to cache): {normalized_path} ({files_found_in_dir} files found)")
except Exception as e:
logger.warning(f"Error scanning directory {normalized_path}: {e}")
@@ -277,7 +287,7 @@
logger.info(f"Iterative scan completed. Scanned: {scanned_dirs} directories, Skipped: {skipped_dirs} directories (Total visited: {len(visited_dirs)})")
async def _process_file(self, ftp: FTP, file_info: FTPFileInfo) -> bool:
- logger.debug(f"Processing file: {file_info.path} ({file_info.size} bytes)")
logger.debug(f"Processing file: {file_info.path} ({file_info.size} bytes) from directory: {file_info.directory_path}")
try:
with tempfile.NamedTemporaryFile(mode='wb', suffix='.slg_v2', delete=False) as temp_file:
@@ -289,8 +299,9 @@
records = await self.processor.process_file(temp_path, file_info.name)
if records:
- await self.db_manager.store_file_data(file_info.name, records)
- logger.debug(f"Stored {len(records)} records from {file_info.name}")
# Pass directory path to store_file_data for collection selection
await self.db_manager.store_file_data(file_info.name, records, file_info.directory_path)
logger.debug(f"Stored {len(records)} records from {file_info.name} to collection for {file_info.directory_path}")
return True
else:
logger.warning(f"No valid records found in {file_info.name}")

View File

@@ -19,19 +19,12 @@ db_manager = None
async def lifespan(app: FastAPI):
global ftp_monitor, db_manager
- logger.info("Starting SA4CPS Data Ingestion Service...")
db_manager = DatabaseManager()
await db_manager.connect()
- logger.info("Database connection established")
ftp_monitor = FTPMonitor(db_manager)
- logger.info("FTP monitor created")
monitoring_task = asyncio.create_task(ftp_monitor.start_monitoring())
- logger.info("FTP monitoring task started in background")
- logger.info("Service startup complete - HTTP server ready to accept requests")
yield
@@ -78,7 +71,8 @@ async def health_check():
global ftp_monitor, db_manager
health_status = {
- "service": "healthy",
"service": "data-ingestion-service",
"status": "healthy",
"timestamp": datetime.now().isoformat(),
"database": "unknown",
"ftp_monitor": "unknown"

View File

@@ -32,7 +32,7 @@ class MockDatabaseManager:
"""Mock get list of processed files""" """Mock get list of processed files"""
return list(self.processed_files) return list(self.processed_files)
async def store_file_data(self, filename: str, records: List) -> bool: async def store_file_data(self, filename: str, records: List, directory_path: str = None) -> bool:
"""Mock store file data""" """Mock store file data"""
self.processed_files.add(filename) self.processed_files.add(filename)
self.stored_files[filename] = records self.stored_files[filename] = records

View File

@@ -0,0 +1,26 @@
FROM python:3.9-slim
WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y \
gcc \
curl \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements and install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY . .
# Expose port
EXPOSE 8003
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
CMD curl -f http://localhost:8003/health || exit 1
# Run the application
CMD ["python", "main.py"]

View File

@@ -0,0 +1,208 @@
"""
Database configuration and connection management for Demand Response Service
"""
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase
import redis.asyncio as redis
import logging
import os
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Configuration from environment variables
MONGO_URL = os.getenv("MONGO_URL", "mongodb://localhost:27017")
DATABASE_NAME = os.getenv("DATABASE_NAME", "energy_dashboard_demand_response")
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")
# Global database clients
_mongo_client: AsyncIOMotorClient = None
_database: AsyncIOMotorDatabase = None
_redis_client: redis.Redis = None
async def connect_to_mongo():
"""Initialize MongoDB connection and create indexes"""
global _mongo_client, _database
try:
logger.info(f"Connecting to MongoDB at {MONGO_URL}")
_mongo_client = AsyncIOMotorClient(MONGO_URL)
_database = _mongo_client[DATABASE_NAME]
# Test connection
await _database.command("ping")
logger.info(f"Successfully connected to MongoDB database: {DATABASE_NAME}")
# Create indexes
await create_indexes()
except Exception as e:
logger.error(f"Failed to connect to MongoDB: {e}")
raise
async def close_mongo_connection():
"""Close MongoDB connection"""
global _mongo_client
if _mongo_client:
_mongo_client.close()
logger.info("MongoDB connection closed")
async def get_database() -> AsyncIOMotorDatabase:
"""Get database instance"""
if _database is None:
await connect_to_mongo()
return _database
async def connect_to_redis():
"""Initialize Redis connection"""
global _redis_client
try:
logger.info(f"Connecting to Redis at {REDIS_URL}")
_redis_client = redis.from_url(REDIS_URL, decode_responses=True)
# Test connection
await _redis_client.ping()
logger.info("Successfully connected to Redis")
except Exception as e:
logger.error(f"Failed to connect to Redis: {e}")
raise
async def close_redis_connection():
"""Close Redis connection"""
global _redis_client
if _redis_client:
await _redis_client.close()
logger.info("Redis connection closed")
async def get_redis() -> redis.Redis:
"""Get Redis client instance"""
if _redis_client is None:
await connect_to_redis()
return _redis_client
async def create_indexes():
"""Create MongoDB indexes for optimal query performance"""
db = await get_database()
logger.info("Creating MongoDB indexes...")
try:
# Indexes for demand_response_invitations collection
await db.demand_response_invitations.create_index("event_id", unique=True)
await db.demand_response_invitations.create_index([("event_time", 1), ("status", 1)])
await db.demand_response_invitations.create_index("status")
await db.demand_response_invitations.create_index("created_at")
await db.demand_response_invitations.create_index("response")
logger.info("Created indexes for demand_response_invitations collection")
# Indexes for demand_response_events collection
await db.demand_response_events.create_index("event_id", unique=True)
await db.demand_response_events.create_index([("start_time", 1), ("status", 1)])
await db.demand_response_events.create_index([("status", 1), ("start_time", 1)])
await db.demand_response_events.create_index("status")
await db.demand_response_events.create_index("invitation_id")
logger.info("Created indexes for demand_response_events collection")
# Indexes for demand_response_responses collection
await db.demand_response_responses.create_index([("event_id", 1), ("device_id", 1)], unique=True)
await db.demand_response_responses.create_index("event_id")
await db.demand_response_responses.create_index("device_id")
await db.demand_response_responses.create_index("responded_at")
logger.info("Created indexes for demand_response_responses collection")
# Indexes for flexibility_snapshots collection (with TTL for auto-cleanup)
await db.flexibility_snapshots.create_index([("timestamp", -1)])
await db.flexibility_snapshots.create_index(
"timestamp",
expireAfterSeconds=7776000 # 90 days TTL
)
logger.info("Created indexes for flexibility_snapshots collection")
# Indexes for auto_response_config collection (singleton document)
await db.auto_response_config.create_index("config_id", unique=True)
logger.info("Created indexes for auto_response_config collection")
# Indexes for device_instructions collection
await db.device_instructions.create_index("device_id", unique=True)
await db.device_instructions.create_index("updated_at")
logger.info("Created indexes for device_instructions collection")
logger.info("All MongoDB indexes created successfully")
except Exception as e:
logger.error(f"Error creating indexes: {e}")
# Don't raise - indexes may already exist
async def initialize_default_config():
"""Initialize default auto-response configuration if it doesn't exist"""
db = await get_database()
try:
# Check if default config exists
existing_config = await db.auto_response_config.find_one({"config_id": "default"})
if not existing_config:
default_config = {
"config_id": "default",
"enabled": False,
"max_reduction_percentage": 20.0,
"response_delay_seconds": 300,
"min_notice_minutes": 60,
"created_at": None,
"updated_at": None
}
await db.auto_response_config.insert_one(default_config)
logger.info("Created default auto-response configuration")
else:
logger.info("Auto-response configuration already exists")
except Exception as e:
logger.error(f"Error initializing default config: {e}")
# Utility functions for common database operations
async def get_collection(collection_name: str):
"""Get a collection by name"""
db = await get_database()
return db[collection_name]
async def health_check() -> dict:
"""Check database connections health"""
status = {
"mongodb": False,
"redis": False
}
try:
# Check MongoDB
db = await get_database()
await db.command("ping")
status["mongodb"] = True
except Exception as e:
logger.error(f"MongoDB health check failed: {e}")
try:
# Check Redis
redis_client = await get_redis()
await redis_client.ping()
status["redis"] = True
except Exception as e:
logger.error(f"Redis health check failed: {e}")
return status
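Lifecycle sketch for the helpers above, assuming this file is saved as database.py and MongoDB/Redis are reachable at the configured URLs; illustrative wiring only, e.g. from a FastAPI lifespan hook:

import asyncio
import database  # this module, assumed importable as database.py

async def main():
    await database.connect_to_mongo()           # connects and creates indexes
    await database.connect_to_redis()
    await database.initialize_default_config()  # seeds the singleton config document
    print(await database.health_check())        # e.g. {'mongodb': True, 'redis': True}
    await database.close_redis_connection()
    await database.close_mongo_connection()

asyncio.run(main())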

View File

@@ -0,0 +1,747 @@
"""
Demand Response Service - Core Business Logic
Handles DR invitations, event execution, auto-response, and flexibility calculation
"""
import asyncio
import json
import uuid
from datetime import datetime, timedelta
from typing import List, Dict, Optional, Any
import logging
from motor.motor_asyncio import AsyncIOMotorDatabase
import redis.asyncio as redis
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DemandResponseService:
"""Core Demand Response service business logic"""
def __init__(self, db: AsyncIOMotorDatabase, redis_client: redis.Redis):
self.db = db
self.redis = redis_client
self.active_events: Dict[str, asyncio.Task] = {} # event_id -> task
self.device_power_cache: Dict[str, float] = {} # device_id -> power_kw (updated by Redis subscriber)
# ===== INVITATION MANAGEMENT =====
async def send_invitation(
self,
event_time: datetime,
load_kwh: float,
load_percentage: float,
iots: List[str],
duration_minutes: int = 59
) -> Dict[str, Any]:
"""
Create and send DR invitation
Returns: {"event_id": str, "response": str, "message": str}
"""
logger.info(f"Creating DR invitation for {len(iots)} devices at {event_time}")
# Generate unique event ID
event_id = str(uuid.uuid4())
# Check auto-response configuration
auto_config = await self.get_auto_response_config()
response = "YES" if auto_config.get("enabled", False) else "WAITING"
# Create invitation document
invitation = {
"event_id": event_id,
"created_at": datetime.utcnow(),
"event_time": event_time,
"load_kwh": load_kwh,
"load_percentage": load_percentage,
"iots": iots,
"duration_minutes": duration_minutes,
"response": response,
"status": "pending"
}
# Store in MongoDB
await self.db.demand_response_invitations.insert_one(invitation)
# Cache in Redis for fast access (24 hour TTL)
cache_key = f"dr:invitation:{event_id}"
await self.redis.setex(
cache_key,
86400,
json.dumps(invitation, default=str)
)
# Publish event to Redis pub/sub
await self.redis.publish("dr_events", json.dumps({
"event": "invitation_created",
"event_id": event_id,
"event_time": event_time.isoformat(),
"load_kwh": load_kwh,
"response": response
}))
logger.info(f"Invitation {event_id} created with response: {response}")
return {
"event_id": event_id,
"response": response,
"message": "Invitation created successfully"
}
async def answer_invitation(
self,
event_id: str,
iot_id: str,
response: str,
committed_reduction_kw: Optional[float] = None
) -> Dict[str, Any]:
"""
Record device response to invitation
Returns: {"success": bool, "message": str}
"""
logger.info(f"Recording response for invitation {event_id}, device {iot_id}: {response}")
# Validate invitation exists
invitation = await self.get_invitation(event_id)
if not invitation:
return {"success": False, "message": f"Invitation {event_id} not found"}
if iot_id not in invitation["iots"]:
return {"success": False, "message": f"Device {iot_id} not in invitation"}
# Check if already responded
existing = await self.db.demand_response_responses.find_one({
"event_id": event_id,
"device_id": iot_id
})
if existing:
return {"success": False, "message": f"Device {iot_id} has already responded"}
# Store response
response_doc = {
"event_id": event_id,
"device_id": iot_id,
"response": response,
"committed_reduction_kw": committed_reduction_kw,
"responded_at": datetime.utcnow()
}
await self.db.demand_response_responses.insert_one(response_doc)
# Check if all devices have responded
total_devices = len(invitation["iots"])
total_responses = await self.db.demand_response_responses.count_documents({"event_id": event_id})
if total_responses == total_devices:
# All devices responded - update invitation status
yes_count = await self.db.demand_response_responses.count_documents({
"event_id": event_id,
"response": "YES"
})
all_yes = yes_count == total_devices
new_response = "YES" if all_yes else "NO"
new_status = "scheduled" if all_yes else "cancelled"
await self.db.demand_response_invitations.update_one(
{"event_id": event_id},
{"$set": {"response": new_response, "status": new_status}}
)
logger.info(f"Invitation {event_id} final response: {new_response} (status: {new_status})")
# Clear cache
await self.redis.delete(f"dr:invitation:{event_id}")
# Publish event
await self.redis.publish("dr_events", json.dumps({
"event": "invitation_answered",
"event_id": event_id,
"device_id": iot_id,
"response": response
}))
return {"success": True, "message": "Response recorded successfully"}
async def get_invitation(self, event_id: str) -> Optional[Dict[str, Any]]:
"""
Get invitation by event_id (with Redis caching)
"""
# Try cache first
cache_key = f"dr:invitation:{event_id}"
cached = await self.redis.get(cache_key)
if cached:
invitation = json.loads(cached)
return invitation
# Fallback to MongoDB
invitation = await self.db.demand_response_invitations.find_one({"event_id": event_id})
if invitation:
invitation["_id"] = str(invitation["_id"])
# Cache for 24 hours
await self.redis.setex(
cache_key,
86400,
json.dumps(invitation, default=str)
)
return invitation
return None
async def get_unanswered_invitations(self) -> List[Dict[str, Any]]:
"""Get all pending invitations awaiting response"""
cursor = self.db.demand_response_invitations.find({
"response": "WAITING",
"status": "pending"
}).sort("created_at", -1)
invitations = []
async for inv in cursor:
inv["_id"] = str(inv["_id"])
invitations.append(inv)
return invitations
async def get_answered_invitations(self, hours: int = 24, limit: int = 50) -> List[Dict[str, Any]]:
"""Get recent answered invitations"""
start_time = datetime.utcnow() - timedelta(hours=hours)
cursor = self.db.demand_response_invitations.find({
"response": {"$ne": "WAITING"},
"created_at": {"$gte": start_time}
}).sort("created_at", -1).limit(limit)
invitations = []
async for inv in cursor:
inv["_id"] = str(inv["_id"])
invitations.append(inv)
return invitations
# ===== EVENT EXECUTION =====
async def schedule_event(
self,
event_time: datetime,
iots: List[str],
load_reduction_kw: float,
duration_minutes: int = 59
) -> Dict[str, Any]:
"""
Schedule a DR event for execution
Returns: {"event_id": str, "message": str}
"""
logger.info(f"Scheduling DR event for {len(iots)} devices at {event_time}")
# Create event document
event_id = str(uuid.uuid4())
end_time = event_time + timedelta(minutes=duration_minutes)
event = {
"event_id": event_id,
"start_time": event_time,
"end_time": end_time,
"status": "scheduled",
"participating_devices": iots,
"target_reduction_kw": load_reduction_kw,
"actual_reduction_kw": 0.0,
"power_samples": []
}
await self.db.demand_response_events.insert_one(event)
# Publish scheduled event
await self.redis.publish("dr_events", json.dumps({
"event": "event_scheduled",
"event_id": event_id,
"start_time": event_time.isoformat(),
"end_time": end_time.isoformat(),
"devices": iots
}))
logger.info(f"Event {event_id} scheduled successfully")
return {
"event_id": event_id,
"message": "Event scheduled successfully"
}
async def execute_event(self, event_id: str):
"""
Execute a DR event (spawns background task)
"""
logger.info(f"Executing DR event {event_id}")
# Get event details
event = await self.db.demand_response_events.find_one({"event_id": event_id})
if not event:
logger.error(f"Event {event_id} not found")
return
# Update status to active
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{"$set": {"status": "active", "actual_start_time": datetime.utcnow()}}
)
# Publish event started
await self.redis.publish("dr_events", json.dumps({
"event": "event_started",
"event_id": event_id,
"devices": event["participating_devices"]
}))
# Create and store async task for this event
task = asyncio.create_task(self._run_event_loop(event))
self.active_events[event_id] = task
logger.info(f"DR event {event_id} started successfully")
async def _run_event_loop(self, event: Dict[str, Any]):
"""
CRITICAL: Core event execution loop - runs for duration_minutes
Samples power every 5 seconds, accumulates reduction, handles cancellation
"""
event_id = event["event_id"]
end_time = event["end_time"]
devices = event["participating_devices"]
total_reduction_kwh = 0.0
sample_count = 0
logger.info(f"Starting event loop for {event_id}, ending at {end_time}")
try:
while datetime.utcnow() < end_time:
# Get current power for all participating devices from cache
device_powers = {
device_id: self.device_power_cache.get(device_id, 0.0)
for device_id in devices
}
# Calculate reduction for this 5-second interval
# interval_hours = 5.0 / 3600.0 = 0.00139 hours
interval_reduction_kwh = sum(device_powers.values()) * (5.0 / 3600.0)
total_reduction_kwh += interval_reduction_kwh
sample_count += 1
# Store sample in MongoDB (every sample to maintain accuracy)
sample = {
"timestamp": datetime.utcnow(),
"device_powers": device_powers,
"interval_reduction_kwh": interval_reduction_kwh
}
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$push": {"power_samples": sample},
"$set": {"actual_reduction_kw": total_reduction_kwh}
}
)
# Update Redis cache for fast access to current reduction
cache_key = f"dr:event:active:{event_id}"
await self.redis.setex(
cache_key,
300, # 5 minute TTL
json.dumps({
"event_id": event_id,
"current_reduction_kwh": total_reduction_kwh,
"devices": device_powers,
"last_update": datetime.utcnow().isoformat()
}, default=str)
)
# Publish progress every 10 samples (50 seconds)
if sample_count % 10 == 0:
await self.redis.publish("dr_events", json.dumps({
"event": "event_progress",
"event_id": event_id,
"total_reduction_kwh": round(total_reduction_kwh, 3),
"device_powers": device_powers,
"timestamp": datetime.utcnow().isoformat()
}))
logger.info(f"Event {event_id} progress: {total_reduction_kwh:.3f} kWh ({sample_count} samples)")
# Sleep for 5 seconds
await asyncio.sleep(5)
# Event completed successfully
logger.info(f"Event {event_id} completed with {total_reduction_kwh:.3f} kWh reduction")
await self._complete_event(event_id, total_reduction_kwh)
except asyncio.CancelledError:
logger.info(f"Event {event_id} cancelled by user")
await self._cancel_event(event_id)
raise
except Exception as e:
logger.error(f"Error in event {event_id}: {e}", exc_info=True)
await self._cancel_event(event_id)
async def _complete_event(self, event_id: str, total_reduction_kwh: float):
"""Mark event as completed"""
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$set": {
"status": "completed",
"actual_end_time": datetime.utcnow(),
"actual_reduction_kw": total_reduction_kwh
}
}
)
# Remove from active events
self.active_events.pop(event_id, None)
# Clear cache
await self.redis.delete(f"dr:event:active:{event_id}")
# Publish completion
await self.redis.publish("dr_events", json.dumps({
"event": "event_completed",
"event_id": event_id,
"total_reduction_kwh": total_reduction_kwh
}))
logger.info(f"DR event {event_id} marked as completed")
async def _cancel_event(self, event_id: str):
"""Internal method to cancel an event"""
await self.db.demand_response_events.update_one(
{"event_id": event_id},
{
"$set": {
"status": "cancelled",
"cancelled_at": datetime.utcnow()
}
}
)
self.active_events.pop(event_id, None)
await self.redis.delete(f"dr:event:active:{event_id}")
# Publish cancellation
await self.redis.publish("dr_events", json.dumps({
"event": "event_cancelled",
"event_id": event_id,
"timestamp": datetime.utcnow().isoformat()
}))
async def cancel_event(self, event_id: str):
"""
Public method to cancel a running DR event gracefully
"""
logger.info(f"Cancelling DR event {event_id}")
# Cancel the async task
task = self.active_events.get(event_id)
if task and not task.done():
task.cancel()
try:
await task
except asyncio.CancelledError:
# Expected - task cancelled successfully
pass
except Exception as e:
logger.error(f"Error cancelling event task {event_id}: {e}")
# Update database status (if not already done by _cancel_event)
event = await self.db.demand_response_events.find_one({"event_id": event_id})
if event and event.get("status") != "cancelled":
await self._cancel_event(event_id)
logger.info(f"DR event {event_id} cancelled successfully")
async def get_active_events(self) -> List[Dict[str, Any]]:
"""Get currently running events with real-time data"""
cursor = self.db.demand_response_events.find({
"status": "active"
}).sort("start_time", -1)
events = []
async for event in cursor:
event["_id"] = str(event["_id"])
# Add real-time data from cache
cache_key = f"dr:event:active:{event['event_id']}"
cached = await self.redis.get(cache_key)
if cached:
realtime_data = json.loads(cached)
event["current_reduction_kwh"] = realtime_data.get("current_reduction_kwh")
event["current_device_powers"] = realtime_data.get("devices")
events.append(event)
return events
# ===== DEVICE POWER INTEGRATION =====
def update_device_power_cache(self, device_id: str, power_kw: float):
"""
Update device power cache (called by Redis subscriber)
This is synchronous because it's just updating a dict
"""
self.device_power_cache[device_id] = power_kw
# No logging here to avoid spam (called every few seconds per device)
async def get_device_power(self, device_id: str) -> float:
"""Get current power for a device from cache"""
return self.device_power_cache.get(device_id, 0.0)
# ===== AUTO-RESPONSE CONFIGURATION =====
async def get_auto_response_config(self) -> Dict[str, Any]:
"""Get auto-response configuration"""
        config = await self.db.auto_response_config.find_one({"config_id": "default"})
        if not config:
            # Create default config
            default_config = {
                "config_id": "default",
                "enabled": False,
                "max_reduction_percentage": 20.0,
                "response_delay_seconds": 300,
                "min_notice_minutes": 60,
                "updated_at": datetime.utcnow()
            }
            await self.db.auto_response_config.insert_one(default_config)
            # insert_one adds a non-JSON-serializable ObjectId in place; drop it before returning
            default_config.pop("_id", None)
            return default_config
        config.pop("_id", None)
        return config
async def set_auto_response_config(
self,
enabled: bool,
max_reduction_percentage: float = 20.0,
response_delay_seconds: int = 300,
min_notice_minutes: int = 60
) -> Dict[str, Any]:
"""Update auto-response configuration"""
await self.db.auto_response_config.update_one(
{"config_id": "default"},
{
"$set": {
"enabled": enabled,
"max_reduction_percentage": max_reduction_percentage,
"response_delay_seconds": response_delay_seconds,
"min_notice_minutes": min_notice_minutes,
"updated_at": datetime.utcnow()
}
},
upsert=True
)
# Clear cache
await self.redis.delete("dr:config:auto_response")
logger.info(f"Auto-response config updated: enabled={enabled}")
return await self.get_auto_response_config()
async def process_auto_responses(self):
"""
Process pending invitations with auto-response (called by background task)
"""
# Get auto-response configuration
auto_config = await self.get_auto_response_config()
if not auto_config.get("enabled"):
return
# Find unanswered invitations
invitations = await self.get_unanswered_invitations()
for invitation in invitations:
event_id = invitation["event_id"]
event_time = invitation["event_time"]
            # Parse event_time (it may be an ISO string when read from cache)
            if isinstance(event_time, str):
                event_time = datetime.fromisoformat(event_time.replace('Z', '+00:00'))
            if event_time.tzinfo is not None:
                # Normalize to naive UTC so the subtraction against datetime.utcnow() below is valid
                event_time = event_time.replace(tzinfo=None)
            # Check if event is within auto-response criteria
            time_until_event = (event_time - datetime.utcnow()).total_seconds() / 60  # minutes
min_notice = auto_config.get("min_notice_minutes", 60)
if time_until_event >= min_notice:
logger.info(f"Auto-responding to invitation {event_id}")
# Auto-accept for all devices
for device_id in invitation["iots"]:
# Check if already responded
existing = await self.db.demand_response_responses.find_one({
"event_id": event_id,
"device_id": device_id
})
if not existing:
# Get device current power
device_power = await self.get_device_power(device_id)
# Calculate committed reduction based on max_reduction_percentage
max_reduction_pct = auto_config.get("max_reduction_percentage", 20.0)
committed_reduction = device_power * (max_reduction_pct / 100) if device_power > 0 else 0.5
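                        # e.g. a device drawing 5.0 kW with a 20% cap commits 1.0 kW;
                        # devices with no cached reading fall back to a 0.5 kW commitment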
# Submit auto-response
try:
await self.answer_invitation(event_id, device_id, "YES", committed_reduction)
logger.info(f"Auto-accepted for device {device_id} with {committed_reduction:.2f} kW commitment")
except Exception as e:
logger.error(f"Error auto-responding for {device_id}: {e}")
else:
logger.debug(f"Invitation {event_id} too soon ({time_until_event:.0f}m < {min_notice}m)")
# ===== BACKGROUND TASK SUPPORT =====
async def check_scheduled_events(self):
"""
Check for events that need to be started (called by scheduler task)
"""
now = datetime.utcnow()
threshold = now + timedelta(minutes=1) # Start events within next minute
# Find scheduled events that should start
cursor = self.db.demand_response_events.find({
"status": "scheduled",
"start_time": {"$lte": threshold, "$gte": now}
})
async for event in cursor:
event_id = event["event_id"]
# Check if not already active
if event_id not in self.active_events:
logger.info(f"Starting scheduled DR event {event_id}")
await self.execute_event(event_id)
# ===== BASIC FLEXIBILITY CALCULATION =====
async def get_current_flexibility(self) -> Dict[str, Any]:
"""
Calculate current available flexibility from device power cache
"""
total_flexibility_kw = 0.0
devices = []
# Get all devices with instructions
cursor = self.db.device_instructions.find({})
current_hour = datetime.utcnow().hour
async for device_doc in cursor:
device_id = device_doc["device_id"]
instruction = device_doc["instructions"].get(str(current_hour), "off")
if instruction != "off":
# Get device current power from cache
device_power = self.device_power_cache.get(device_id, 0.0)
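                # e.g. a 4.0 kW device under "participation" offers 4.0 kW of
                # flexibility, under "shifting" 0.8 kW, and under "off" none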
if instruction == "participation":
# Full flexibility (100%)
flexibility = device_power
elif instruction == "shifting":
# Partial flexibility (20%)
flexibility = device_power * 0.20
else:
flexibility = 0.0
if flexibility > 0:
devices.append({
"device_id": device_id,
"available_kw": round(flexibility, 2),
"instruction": instruction,
"current_power": round(device_power, 2)
})
total_flexibility_kw += flexibility
snapshot = {
"timestamp": datetime.utcnow(),
"total_flexibility_kw": round(total_flexibility_kw, 2),
"devices": devices
}
# Store snapshot
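        # (insert a copy so the _id Mongo adds does not leak into the cached/returned dict)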
await self.db.flexibility_snapshots.insert_one(dict(snapshot))
# Cache for 5 minutes
await self.redis.setex(
"dr:flexibility:current",
300,
json.dumps(snapshot, default=str)
)
return snapshot
async def get_device_instructions(self, device_id: Optional[str] = None) -> Dict[str, Any]:
"""Get DR instructions for device(s)"""
        if device_id:
            doc = await self.db.device_instructions.find_one({"device_id": device_id})
            if doc:
                doc.pop("_id", None)  # Mongo's ObjectId is not JSON serializable
                return doc
            return {"device_id": device_id, "instructions": {}}
else:
cursor = self.db.device_instructions.find({})
instructions = {}
async for doc in cursor:
instructions[doc["device_id"]] = doc["instructions"]
return instructions
async def update_device_instructions(self, device_id: str, instructions: Dict[str, str]):
"""Update hourly instructions for a device"""
await self.db.device_instructions.update_one(
{"device_id": device_id},
{
"$set": {
"instructions": instructions,
"updated_at": datetime.utcnow()
}
},
upsert=True
)
logger.info(f"Updated instructions for device {device_id}")
# ===== ANALYTICS =====
async def get_performance_analytics(self, days: int = 30) -> Dict[str, Any]:
"""Get DR performance analytics"""
start_date = datetime.utcnow() - timedelta(days=days)
# Query completed events
cursor = self.db.demand_response_events.find({
"status": "completed",
"start_time": {"$gte": start_date}
})
events = await cursor.to_list(length=None)
if not events:
return {
"period_days": days,
"total_events": 0,
"total_reduction_kwh": 0.0,
"total_target_kwh": 0.0,
"average_reduction_kwh": 0.0,
"achievement_rate": 0.0,
"average_event_duration_minutes": 59
}
total_reduction = sum(e.get("actual_reduction_kw", 0) for e in events)
total_target = sum(e.get("target_reduction_kw", 0) for e in events)
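        # e.g. 9.5 kWh achieved against an 11.0 kWh target gives an achievement_rate of 86.36%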
return {
"period_days": days,
"total_events": len(events),
"total_reduction_kwh": round(total_reduction, 2),
"total_target_kwh": round(total_target, 2),
"average_reduction_kwh": round(total_reduction / len(events), 2),
"achievement_rate": round((total_reduction / total_target * 100) if total_target > 0 else 0, 2),
"average_event_duration_minutes": 59
}
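
For reference, a minimal consumer of the "dr_events" channel might look like the sketch below. It assumes the same Redis instance and redis-py's asyncio client; the URL is a placeholder, and this watcher is not part of the service itself:

import asyncio
import json
import redis.asyncio as redis

async def watch_dr_events(redis_url: str = "redis://localhost:6379"):
    """Print lifecycle messages (event_progress, event_completed, event_cancelled)."""
    client = redis.from_url(redis_url, decode_responses=True)
    pubsub = client.pubsub()
    await pubsub.subscribe("dr_events")
    while True:  # Ctrl-C to stop
        message = await pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
        if message and message.get("type") == "message":
            payload = json.loads(message["data"])
            print(payload.get("event"), payload.get("event_id"))

if __name__ == "__main__":
    asyncio.run(watch_dr_events())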

View File

@@ -23,6 +23,96 @@ from demand_response_service import DemandResponseService
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Background task functions
async def event_scheduler_task():
    """Background task for checking and executing scheduled events"""
    logger.info("Starting event scheduler task")
    # Build the service once: recreating it on every iteration would discard
    # the in-memory active_events registry between checks
    db = await get_database()
    redis = await get_redis()
    service = DemandResponseService(db, redis)
    while True:
        try:
            # Check for events that need to be executed
            await service.check_scheduled_events()
            # Sleep for 60 seconds between checks
            await asyncio.sleep(60)
        except asyncio.CancelledError:
            logger.info("Event scheduler task cancelled")
            raise
        except Exception as e:
            logger.error(f"Error in event scheduler task: {e}")
            await asyncio.sleep(120)  # Wait longer on error
async def auto_response_task():
    """Background task for automatic demand response"""
    logger.info("Starting auto-response task")
    # Reuse one service instance across iterations (see event_scheduler_task)
    db = await get_database()
    redis = await get_redis()
    service = DemandResponseService(db, redis)
    while True:
        try:
            # Check for auto-response opportunities
            await service.process_auto_responses()
            # Sleep for 30 seconds between checks
            await asyncio.sleep(30)
        except asyncio.CancelledError:
            logger.info("Auto-response task cancelled")
            raise
        except Exception as e:
            logger.error(f"Error in auto-response task: {e}")
            await asyncio.sleep(90)  # Wait longer on error
async def energy_data_subscriber_task():
"""Subscribe to energy_data Redis channel for device power updates"""
logger.info("Starting energy data subscriber task")
try:
redis = await get_redis()
db = await get_database()
service = DemandResponseService(db, redis)
pubsub = redis.pubsub()
await pubsub.subscribe("energy_data")
logger.info("Subscribed to energy_data channel")
        import json  # imported before the loop so both handlers below can always resolve it

        while True:
            try:
                message = await pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
                if message and message.get('type') == 'message':
                    data = json.loads(message['data'])
                    # Format: {"sensorId": "sensor_1", "timestamp": 123, "value": 3.5, "unit": "kWh"}
                    sensor_id = data.get("sensorId")
                    power_kw = data.get("value", 0.0)
                    # Update service cache (skip malformed messages without a sensor id)
                    if sensor_id is not None:
                        service.update_device_power_cache(sensor_id, power_kw)
            except json.JSONDecodeError as e:
                logger.warning(f"Invalid JSON in energy_data message: {e}")
            except Exception as e:
                logger.error(f"Error processing energy data message: {e}")
                await asyncio.sleep(5)
except asyncio.CancelledError:
logger.info("Energy data subscriber task cancelled")
raise
except Exception as e:
logger.error(f"Energy data subscriber task failed: {e}")
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager"""
@@ -30,15 +120,33 @@ async def lifespan(app: FastAPI):
    await connect_to_mongo()
    await connect_to_redis()
# Create global service instance for shutdown cleanup
db = await get_database()
redis = await get_redis()
app.state.dr_service = DemandResponseService(db, redis)
    # Start background tasks
    asyncio.create_task(event_scheduler_task())
    asyncio.create_task(auto_response_task())
    asyncio.create_task(energy_data_subscriber_task())

    logger.info("Demand Response Service startup complete")
    yield
    logger.info("Demand Response Service shutting down...")
# Cancel all active DR events gracefully
if hasattr(app.state, 'dr_service'):
active_event_ids = list(app.state.dr_service.active_events.keys())
if active_event_ids:
logger.info(f"Cancelling {len(active_event_ids)} active events...")
for event_id in active_event_ids:
try:
await app.state.dr_service.cancel_event(event_id)
except Exception as e:
logger.error(f"Error cancelling event {event_id}: {e}")
    await close_mongo_connection()
    logger.info("Demand Response Service shutdown complete")

View File

@@ -0,0 +1,338 @@
"""
Pydantic models for Demand Response Service
"""
from datetime import datetime
from typing import List, Dict, Optional, Literal
from pydantic import BaseModel, Field
from enum import Enum
# Enums
class InvitationStatus(str, Enum):
"""Invitation status states"""
PENDING = "pending"
SCHEDULED = "scheduled"
ACTIVE = "active"
COMPLETED = "completed"
CANCELLED = "cancelled"
class ResponseType(str, Enum):
"""Device response types"""
WAITING = "WAITING"
YES = "YES"
NO = "NO"
class EventStatus(str, Enum):
"""DR event status states"""
SCHEDULED = "scheduled"
ACTIVE = "active"
COMPLETED = "completed"
CANCELLED = "cancelled"
class InstructionType(str, Enum):
"""Device participation instruction types"""
PARTICIPATION = "participation" # Full DR participation (100%)
SHIFTING = "shifting" # Partial participation (0-20%)
OFF = "off" # No DR participation
# Invitation Models
class EventRequest(BaseModel):
"""Request model for creating a DR event (alias for DRInvitationCreate)"""
event_time: datetime = Field(..., description="When the DR event should occur")
load_kwh: float = Field(..., description="Target load reduction in kWh", gt=0)
load_percentage: float = Field(..., description="Target reduction as percentage of total load", ge=0, le=100)
iots: List[str] = Field(..., description="List of device IDs to participate", min_items=1)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59
}
}
class DRInvitationCreate(BaseModel):
"""Request model for creating a DR invitation"""
event_time: datetime = Field(..., description="When the DR event should occur")
load_kwh: float = Field(..., description="Target load reduction in kWh", gt=0)
load_percentage: float = Field(..., description="Target reduction as percentage of total load", ge=0, le=100)
iots: List[str] = Field(..., description="List of device IDs to participate", min_items=1)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59
}
}
class DRInvitationResponse(BaseModel):
"""Response model for device answering invitation"""
event_id: str = Field(..., description="Event identifier")
iot_id: str = Field(..., description="Device identifier")
response: ResponseType = Field(..., description="Device response (YES/NO)")
committed_reduction_kw: Optional[float] = Field(None, description="Committed power reduction in kW", ge=0)
class Config:
json_schema_extra = {
"example": {
"event_id": "550e8400-e29b-41d4-a716-446655440000",
"iot_id": "sensor_1",
"response": "YES",
"committed_reduction_kw": 2.5
}
}
class DRInvitation(BaseModel):
"""Full DR invitation model"""
event_id: str = Field(..., description="Unique event identifier")
created_at: datetime = Field(..., description="Invitation creation time")
event_time: datetime = Field(..., description="Scheduled event start time")
load_kwh: float = Field(..., description="Target load reduction in kWh")
load_percentage: float = Field(..., description="Target reduction percentage")
iots: List[str] = Field(..., description="Participating device IDs")
duration_minutes: int = Field(..., description="Event duration in minutes")
response: str = Field(..., description="Overall response status")
status: str = Field(..., description="Invitation status")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"event_id": "550e8400-e29b-41d4-a716-446655440000",
"created_at": "2025-12-10T13:45:00",
"event_time": "2025-12-10T14:00:00",
"load_kwh": 5.0,
"load_percentage": 15.0,
"iots": ["sensor_1", "sensor_2"],
"duration_minutes": 59,
"response": "WAITING",
"status": "pending"
}
}
# Event Models
class EventScheduleRequest(BaseModel):
"""Request model for scheduling a DR event"""
event_time: datetime = Field(..., description="Event start time")
iots: List[str] = Field(..., description="Participating device IDs", min_items=1)
load_reduction_kw: float = Field(..., description="Target reduction in kW", gt=0)
duration_minutes: int = Field(59, description="Event duration in minutes", gt=0, le=120)
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"iots": ["sensor_1", "sensor_2"],
"load_reduction_kw": 5.0,
"duration_minutes": 59
}
}
class PowerSample(BaseModel):
"""Individual power sample during event"""
timestamp: datetime = Field(..., description="Sample timestamp")
device_powers: Dict[str, float] = Field(..., description="Device power readings (device_id -> kW)")
interval_reduction_kwh: Optional[float] = Field(None, description="Reduction for this interval")
class DREvent(BaseModel):
"""DR event execution model"""
event_id: str = Field(..., description="Unique event identifier")
invitation_id: Optional[str] = Field(None, description="Source invitation ID if applicable")
start_time: datetime = Field(..., description="Event start time")
end_time: datetime = Field(..., description="Event end time")
status: EventStatus = Field(..., description="Event status")
participating_devices: List[str] = Field(..., description="Device IDs participating")
target_reduction_kw: float = Field(..., description="Target power reduction in kW")
    actual_reduction_kw: float = Field(0.0, description="Actual achieved reduction in kWh (field name keeps the legacy _kw suffix)")
power_samples: List[Dict] = Field(default_factory=list, description="Power samples during event")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class ActiveEventResponse(BaseModel):
"""Response model for active event with real-time data"""
event_id: str = Field(..., description="Event identifier")
status: EventStatus = Field(..., description="Current status")
start_time: datetime = Field(..., description="Event start time")
end_time: datetime = Field(..., description="Event end time")
participating_devices: List[str] = Field(..., description="Participating devices")
target_reduction_kw: float = Field(..., description="Target reduction")
actual_reduction_kw: float = Field(..., description="Current achieved reduction")
current_device_powers: Optional[Dict[str, float]] = Field(None, description="Current device power readings")
progress_percentage: Optional[float] = Field(None, description="Event progress (0-100%)")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class LoadReductionRequest(BaseModel):
"""Request model for executing load reduction"""
event_time: datetime = Field(..., description="Event start time")
iot: str = Field(..., description="Device ID")
class Config:
json_schema_extra = {
"example": {
"event_time": "2025-12-10T14:00:00",
"iot": "sensor_1"
}
}
# Flexibility Models
class DeviceFlexibility(BaseModel):
"""Per-device flexibility information"""
device_id: str = Field(..., description="Device identifier")
available_kw: float = Field(..., description="Available flexibility in kW", ge=0)
instruction: str = Field(..., description="Current DR instruction")
current_power: float = Field(..., description="Current power consumption in kW", ge=0)
class FlexibilityResponse(BaseModel):
"""Response model for current flexibility"""
timestamp: datetime = Field(..., description="Calculation timestamp")
total_flexibility_kw: float = Field(..., description="Total available flexibility in kW", ge=0)
devices: List[DeviceFlexibility] = Field(..., description="Per-device breakdown")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"timestamp": "2025-12-10T13:45:00",
"total_flexibility_kw": 15.5,
"devices": [
{
"device_id": "sensor_1",
"available_kw": 3.5,
"instruction": "participation",
"current_power": 3.5
},
{
"device_id": "sensor_2",
"available_kw": 0.8,
"instruction": "shifting",
"current_power": 4.0
}
]
}
}
class DeviceInstructionUpdate(BaseModel):
"""Model for updating device instructions"""
device_id: str = Field(..., description="Device identifier")
instructions: Dict[str, str] = Field(..., description="Hourly instructions (hour -> instruction type)")
class Config:
json_schema_extra = {
"example": {
"device_id": "sensor_1",
"instructions": {
"0": "participation",
"1": "shifting",
"2": "off",
"3": "participation"
}
}
}
# Configuration Models
class AutoResponseConfig(BaseModel):
"""Auto-response configuration model"""
enabled: bool = Field(..., description="Whether auto-response is enabled")
max_reduction_percentage: float = Field(20.0, description="Maximum reduction percentage for auto-accept", ge=0, le=100)
response_delay_seconds: int = Field(300, description="Delay before auto-responding (seconds)", ge=0)
min_notice_minutes: int = Field(60, description="Minimum notice required for auto-accept (minutes)", ge=0)
class Config:
json_schema_extra = {
"example": {
"enabled": True,
"max_reduction_percentage": 20.0,
"response_delay_seconds": 300,
"min_notice_minutes": 60
}
}
# Response Models
class InvitationSendResponse(BaseModel):
"""Response for sending invitation"""
event_id: str = Field(..., description="Created event identifier")
response: str = Field(..., description="Initial response status")
message: str = Field(..., description="Status message")
class InvitationAnswerResponse(BaseModel):
"""Response for answering invitation"""
success: bool = Field(..., description="Whether answer was recorded")
message: str = Field(..., description="Status message")
class EventScheduleResponse(BaseModel):
"""Response for scheduling event"""
event_id: str = Field(..., description="Scheduled event identifier")
message: str = Field(..., description="Status message")
class PerformanceAnalytics(BaseModel):
"""Performance analytics response"""
period_days: int = Field(..., description="Analysis period in days")
total_events: int = Field(..., description="Total number of events")
total_reduction_kwh: float = Field(..., description="Total energy reduced")
total_target_kwh: float = Field(..., description="Total target reduction")
average_reduction_kwh: float = Field(..., description="Average reduction per event")
achievement_rate: float = Field(..., description="Achievement rate (%)")
average_event_duration_minutes: int = Field(..., description="Average event duration")
# Health Check Model
class HealthResponse(BaseModel):
"""Health check response model"""
service: str = Field(..., description="Service name")
status: str = Field(..., description="Service status")
timestamp: datetime = Field(..., description="Check timestamp")
version: str = Field(..., description="Service version")
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
json_schema_extra = {
"example": {
"service": "demand-response-service",
"status": "healthy",
"timestamp": "2025-12-10T13:45:00",
"version": "1.0.0"
}
}
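
As a quick sanity check, the request models can be exercised directly with Pydantic v2. A minimal sketch; the flat "models" import path mirrors the service's own imports and is otherwise an assumption:

from models import DRInvitationCreate

payload = {
    "event_time": "2025-12-10T14:00:00",
    "load_kwh": 5.0,
    "load_percentage": 15.0,
    "iots": ["sensor_1", "sensor_2"],
}
invitation = DRInvitationCreate(**payload)  # duration_minutes defaults to 59
print(invitation.model_dump_json())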

View File

@@ -0,0 +1,11 @@
fastapi>=0.104.0
uvicorn[standard]>=0.24.0
pymongo>=4.5.0
motor>=3.3.0
redis>=5.0.0
python-dotenv>=1.0.0
pydantic>=2.4.0
aiohttp>=3.9.0
pytest>=7.4.0
pytest-asyncio>=0.21.0
python-multipart

View File

@@ -0,0 +1,524 @@
"""
Unit tests for Demand Response Service
Run with: pytest test_demand_response.py -v
"""
import pytest
import asyncio
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, MagicMock, patch
import json
from demand_response_service import DemandResponseService
# Test fixtures
@pytest.fixture
def mock_db():
"""Mock MongoDB database"""
db = MagicMock()
# Mock collections
db.demand_response_invitations = MagicMock()
db.demand_response_events = MagicMock()
db.demand_response_responses = MagicMock()
db.auto_response_config = MagicMock()
db.device_instructions = MagicMock()
db.flexibility_snapshots = MagicMock()
return db
@pytest.fixture
def mock_redis():
"""Mock Redis client"""
redis = AsyncMock()
redis.get = AsyncMock(return_value=None)
redis.setex = AsyncMock()
redis.delete = AsyncMock()
redis.publish = AsyncMock()
return redis
@pytest.fixture
def dr_service(mock_db, mock_redis):
"""Create DemandResponseService instance with mocks"""
return DemandResponseService(mock_db, mock_redis)
# Test: Invitation Management
@pytest.mark.asyncio
async def test_send_invitation_with_auto_accept(dr_service, mock_db, mock_redis):
"""Test sending invitation with auto-accept enabled"""
# Mock auto-response config (enabled)
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": True
})
mock_db.demand_response_invitations.insert_one = AsyncMock()
event_time = datetime.utcnow() + timedelta(hours=2)
result = await dr_service.send_invitation(
event_time=event_time,
load_kwh=5.0,
load_percentage=15.0,
iots=["sensor_1", "sensor_2"],
duration_minutes=59
)
assert "event_id" in result
assert result["response"] == "YES"
assert result["message"] == "Invitation created successfully"
# Verify MongoDB insert was called
mock_db.demand_response_invitations.insert_one.assert_called_once()
# Verify Redis caching
mock_redis.setex.assert_called()
mock_redis.publish.assert_called()
@pytest.mark.asyncio
async def test_send_invitation_manual(dr_service, mock_db, mock_redis):
"""Test sending invitation with auto-accept disabled (manual mode)"""
# Mock auto-response config (disabled)
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": False
})
mock_db.demand_response_invitations.insert_one = AsyncMock()
event_time = datetime.utcnow() + timedelta(hours=2)
result = await dr_service.send_invitation(
event_time=event_time,
load_kwh=5.0,
load_percentage=15.0,
iots=["sensor_1", "sensor_2"],
duration_minutes=59
)
assert result["response"] == "WAITING"
@pytest.mark.asyncio
async def test_answer_invitation_success(dr_service, mock_db, mock_redis):
"""Test answering an invitation successfully"""
event_id = "test-event-123"
# Mock get_invitation to return a valid invitation
dr_service.get_invitation = AsyncMock(return_value={
"event_id": event_id,
"iots": ["sensor_1", "sensor_2"]
})
# Mock that device hasn't responded yet
mock_db.demand_response_responses.find_one = AsyncMock(return_value=None)
mock_db.demand_response_responses.insert_one = AsyncMock()
mock_db.demand_response_responses.count_documents = AsyncMock(return_value=1)
result = await dr_service.answer_invitation(
event_id=event_id,
iot_id="sensor_1",
response="YES",
committed_reduction_kw=2.5
)
assert result["success"] is True
assert result["message"] == "Response recorded successfully"
# Verify response was stored
mock_db.demand_response_responses.insert_one.assert_called_once()
mock_redis.delete.assert_called()
mock_redis.publish.assert_called()
@pytest.mark.asyncio
async def test_answer_invitation_device_not_in_list(dr_service, mock_db, mock_redis):
"""Test answering invitation for device not in invitation list"""
event_id = "test-event-123"
dr_service.get_invitation = AsyncMock(return_value={
"event_id": event_id,
"iots": ["sensor_1", "sensor_2"]
})
result = await dr_service.answer_invitation(
event_id=event_id,
iot_id="sensor_3", # Not in list
response="YES"
)
assert result["success"] is False
assert "not in invitation" in result["message"]
# Test: Event Execution
@pytest.mark.asyncio
async def test_schedule_event(dr_service, mock_db, mock_redis):
"""Test scheduling a DR event"""
mock_db.demand_response_events.insert_one = AsyncMock()
event_time = datetime.utcnow() + timedelta(hours=1)
result = await dr_service.schedule_event(
event_time=event_time,
iots=["sensor_1", "sensor_2"],
load_reduction_kw=5.0,
duration_minutes=59
)
assert "event_id" in result
assert result["message"] == "Event scheduled successfully"
mock_db.demand_response_events.insert_one.assert_called_once()
mock_redis.publish.assert_called()
@pytest.mark.asyncio
async def test_execute_event(dr_service, mock_db, mock_redis):
"""Test executing a DR event (spawns background task)"""
event_id = "test-event-456"
# Mock event document
event = {
"event_id": event_id,
"start_time": datetime.utcnow(),
"end_time": datetime.utcnow() + timedelta(minutes=59),
"participating_devices": ["sensor_1"],
"target_reduction_kw": 5.0
}
mock_db.demand_response_events.find_one = AsyncMock(return_value=event)
mock_db.demand_response_events.update_one = AsyncMock()
# Execute event (starts background task)
await dr_service.execute_event(event_id)
# Verify event status updated to active
mock_db.demand_response_events.update_one.assert_called()
mock_redis.publish.assert_called()
# Verify task was created and stored
assert event_id in dr_service.active_events
# Cancel the task to prevent it from running
task = dr_service.active_events[event_id]
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
@pytest.mark.asyncio
async def test_cancel_event(dr_service, mock_db, mock_redis):
"""Test cancelling a running DR event"""
event_id = "test-event-789"
# Create a mock task
mock_task = AsyncMock()
mock_task.done = MagicMock(return_value=False)
mock_task.cancel = MagicMock()
dr_service.active_events[event_id] = mock_task
# Mock database operations
mock_db.demand_response_events.find_one = AsyncMock(return_value={
"event_id": event_id,
"status": "active"
})
mock_db.demand_response_events.update_one = AsyncMock()
await dr_service.cancel_event(event_id)
# Verify task was cancelled
mock_task.cancel.assert_called_once()
# Verify database updated
mock_db.demand_response_events.update_one.assert_called()
mock_redis.delete.assert_called()
mock_redis.publish.assert_called()
# Test: Device Power Integration
@pytest.mark.asyncio
async def test_update_device_power_cache(dr_service):
"""Test updating device power cache"""
dr_service.update_device_power_cache("sensor_1", 2.5)
assert dr_service.device_power_cache["sensor_1"] == 2.5
dr_service.update_device_power_cache("sensor_1", 3.0)
assert dr_service.device_power_cache["sensor_1"] == 3.0
@pytest.mark.asyncio
async def test_get_device_power(dr_service):
"""Test getting device power from cache"""
dr_service.device_power_cache["sensor_1"] = 2.5
power = await dr_service.get_device_power("sensor_1")
assert power == 2.5
# Test non-existent device returns 0
power = await dr_service.get_device_power("sensor_999")
assert power == 0.0
# Test: Auto-Response Configuration
@pytest.mark.asyncio
async def test_get_auto_response_config_exists(dr_service, mock_db):
"""Test getting existing auto-response config"""
mock_config = {
"config_id": "default",
"enabled": True,
"max_reduction_percentage": 20.0
}
mock_db.auto_response_config.find_one = AsyncMock(return_value=mock_config)
config = await dr_service.get_auto_response_config()
assert config["enabled"] is True
assert config["max_reduction_percentage"] == 20.0
@pytest.mark.asyncio
async def test_get_auto_response_config_creates_default(dr_service, mock_db):
"""Test creating default config when none exists"""
mock_db.auto_response_config.find_one = AsyncMock(return_value=None)
mock_db.auto_response_config.insert_one = AsyncMock()
config = await dr_service.get_auto_response_config()
assert config["enabled"] is False
mock_db.auto_response_config.insert_one.assert_called_once()
@pytest.mark.asyncio
async def test_set_auto_response_config(dr_service, mock_db, mock_redis):
"""Test updating auto-response configuration"""
mock_db.auto_response_config.update_one = AsyncMock()
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": True,
"max_reduction_percentage": 25.0
})
config = await dr_service.set_auto_response_config(
enabled=True,
max_reduction_percentage=25.0
)
assert config["enabled"] is True
assert config["max_reduction_percentage"] == 25.0
mock_db.auto_response_config.update_one.assert_called_once()
mock_redis.delete.assert_called()
# Test: Auto-Response Processing
@pytest.mark.asyncio
async def test_process_auto_responses_disabled(dr_service, mock_db):
"""Test auto-response processing when disabled"""
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": False
})
# Should return early without processing
await dr_service.process_auto_responses()
# No invitations should be queried
mock_db.demand_response_invitations.find.assert_not_called()
@pytest.mark.asyncio
async def test_process_auto_responses_enabled(dr_service, mock_db, mock_redis):
"""Test auto-response processing when enabled"""
# Mock enabled config
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": True,
"max_reduction_percentage": 20.0,
"min_notice_minutes": 60
})
# Mock pending invitation
future_time = datetime.utcnow() + timedelta(hours=2)
mock_invitation = {
"event_id": "test-event-auto",
"event_time": future_time,
"iots": ["sensor_1"]
}
dr_service.get_unanswered_invitations = AsyncMock(return_value=[mock_invitation])
dr_service.get_device_power = AsyncMock(return_value=5.0)
dr_service.answer_invitation = AsyncMock(return_value={"success": True})
mock_db.demand_response_responses.find_one = AsyncMock(return_value=None)
await dr_service.process_auto_responses()
# Should have auto-responded
dr_service.answer_invitation.assert_called_once()
# Test: Flexibility Calculation
@pytest.mark.asyncio
async def test_get_current_flexibility(dr_service, mock_db, mock_redis):
"""Test calculating current flexibility"""
# Mock device with instructions
mock_device = {
"device_id": "sensor_1",
"instructions": {
str(datetime.utcnow().hour): "participation"
}
}
async def mock_cursor():
yield mock_device
mock_db.device_instructions.find = MagicMock(return_value=mock_cursor())
mock_db.flexibility_snapshots.insert_one = AsyncMock()
# Set device power in cache
dr_service.device_power_cache["sensor_1"] = 5.0
result = await dr_service.get_current_flexibility()
assert result["total_flexibility_kw"] == 5.0
assert len(result["devices"]) == 1
assert result["devices"][0]["device_id"] == "sensor_1"
mock_db.flexibility_snapshots.insert_one.assert_called_once()
mock_redis.setex.assert_called()
# Test: Device Instructions
@pytest.mark.asyncio
async def test_update_device_instructions(dr_service, mock_db):
"""Test updating device DR instructions"""
mock_db.device_instructions.update_one = AsyncMock()
instructions = {
"0": "participation",
"1": "shifting",
"2": "off"
}
await dr_service.update_device_instructions("sensor_1", instructions)
mock_db.device_instructions.update_one.assert_called_once()
@pytest.mark.asyncio
async def test_get_device_instructions_single(dr_service, mock_db):
"""Test getting instructions for single device"""
mock_instructions = {
"device_id": "sensor_1",
"instructions": {"0": "participation"}
}
mock_db.device_instructions.find_one = AsyncMock(return_value=mock_instructions)
result = await dr_service.get_device_instructions("sensor_1")
assert result["device_id"] == "sensor_1"
assert "instructions" in result
# Test: Analytics
@pytest.mark.asyncio
async def test_get_performance_analytics(dr_service, mock_db):
"""Test getting performance analytics"""
# Mock completed events
mock_events = [
{"actual_reduction_kw": 5.0, "target_reduction_kw": 6.0},
{"actual_reduction_kw": 4.5, "target_reduction_kw": 5.0}
]
mock_cursor = AsyncMock()
mock_cursor.to_list = AsyncMock(return_value=mock_events)
mock_db.demand_response_events.find = MagicMock(return_value=mock_cursor)
analytics = await dr_service.get_performance_analytics(days=30)
assert analytics["total_events"] == 2
assert analytics["total_reduction_kwh"] == 9.5
assert analytics["total_target_kwh"] == 11.0
assert analytics["achievement_rate"] > 0
@pytest.mark.asyncio
async def test_get_performance_analytics_no_events(dr_service, mock_db):
"""Test analytics with no completed events"""
mock_cursor = AsyncMock()
mock_cursor.to_list = AsyncMock(return_value=[])
mock_db.demand_response_events.find = MagicMock(return_value=mock_cursor)
analytics = await dr_service.get_performance_analytics(days=30)
assert analytics["total_events"] == 0
assert analytics["total_reduction_kwh"] == 0.0
assert analytics["achievement_rate"] == 0.0
# Integration-style tests
@pytest.mark.asyncio
async def test_full_invitation_workflow(dr_service, mock_db, mock_redis):
"""Test complete invitation workflow from creation to response"""
# Step 1: Create invitation
mock_db.auto_response_config.find_one = AsyncMock(return_value={
"config_id": "default",
"enabled": False
})
mock_db.demand_response_invitations.insert_one = AsyncMock()
event_time = datetime.utcnow() + timedelta(hours=2)
invite_result = await dr_service.send_invitation(
event_time=event_time,
load_kwh=5.0,
load_percentage=15.0,
iots=["sensor_1", "sensor_2"],
duration_minutes=59
)
event_id = invite_result["event_id"]
assert invite_result["response"] == "WAITING"
# Step 2: Answer invitation for device 1
dr_service.get_invitation = AsyncMock(return_value={
"event_id": event_id,
"iots": ["sensor_1", "sensor_2"]
})
mock_db.demand_response_responses.find_one = AsyncMock(return_value=None)
mock_db.demand_response_responses.insert_one = AsyncMock()
mock_db.demand_response_responses.count_documents = AsyncMock(side_effect=[1, 1, 2, 2])
mock_db.demand_response_invitations.update_one = AsyncMock()
answer1 = await dr_service.answer_invitation(event_id, "sensor_1", "YES", 2.5)
assert answer1["success"] is True
# Step 3: Answer invitation for device 2
answer2 = await dr_service.answer_invitation(event_id, "sensor_2", "YES", 2.5)
assert answer2["success"] is True
# Verify final invitation update was called (all devices responded)
assert mock_db.demand_response_invitations.update_one.call_count >= 1
if __name__ == "__main__":
pytest.main([__file__, "-v", "--tb=short"])

View File

@@ -1,309 +0,0 @@
#!/bin/bash
# Energy Management Microservices Deployment Script
# This script handles deployment, startup, and management of all microservices
set -e # Exit on any error
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
COMPOSE_FILE="docker-compose.yml"
PROJECT_NAME="energy-dashboard"
# Function to print colored output
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Function to check if Docker and Docker Compose are installed
check_dependencies() {
print_status "Checking dependencies..."
if ! command -v docker &> /dev/null; then
print_error "Docker is not installed. Please install Docker first."
exit 1
fi
    if ! docker compose version &> /dev/null; then
print_error "Docker Compose is not installed. Please install Docker Compose first."
exit 1
fi
print_success "Dependencies check passed"
}
# Function to create necessary directories and files
setup_environment() {
print_status "Setting up environment..."
# Create nginx configuration directory
mkdir -p nginx/ssl
# Create init-mongo directory for database initialization
mkdir -p init-mongo
# Create a simple nginx configuration if it doesn't exist
if [ ! -f "nginx/nginx.conf" ]; then
cat > nginx/nginx.conf << 'EOF'
events {
worker_connections 1024;
}
http {
upstream api_gateway {
server api-gateway:8000;
}
server {
listen 80;
location / {
proxy_pass http://api_gateway;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location /ws {
proxy_pass http://api_gateway;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
}
}
}
EOF
print_success "Created nginx configuration"
fi
# Create MongoDB initialization script if it doesn't exist
if [ ! -f "init-mongo/init.js" ]; then
cat > init-mongo/init.js << 'EOF'
// MongoDB initialization script
db = db.getSiblingDB('energy_dashboard');
db.createUser({
user: 'dashboard_user',
pwd: 'dashboard_pass',
roles: [
{ role: 'readWrite', db: 'energy_dashboard' },
{ role: 'readWrite', db: 'energy_dashboard_tokens' },
{ role: 'readWrite', db: 'energy_dashboard_batteries' },
{ role: 'readWrite', db: 'energy_dashboard_demand_response' },
{ role: 'readWrite', db: 'energy_dashboard_p2p' },
{ role: 'readWrite', db: 'energy_dashboard_forecasting' },
{ role: 'readWrite', db: 'energy_dashboard_iot' }
]
});
// Create initial collections and indexes
db.sensors.createIndex({ "sensor_id": 1 }, { unique: true });
db.sensor_readings.createIndex({ "sensor_id": 1, "timestamp": -1 });
db.room_metrics.createIndex({ "room": 1, "timestamp": -1 });
print("MongoDB initialization completed");
EOF
print_success "Created MongoDB initialization script"
fi
print_success "Environment setup completed"
}
# Function to build all services
build_services() {
print_status "Building all microservices..."
docker compose -f $COMPOSE_FILE build
if [ $? -eq 0 ]; then
print_success "All services built successfully"
else
print_error "Failed to build services"
exit 1
fi
}
# Function to start all services
start_services() {
print_status "Starting all services..."
docker compose -f $COMPOSE_FILE up -d
if [ $? -eq 0 ]; then
print_success "All services started successfully"
else
print_error "Failed to start services"
exit 1
fi
}
# Function to stop all services
stop_services() {
print_status "Stopping all services..."
docker compose -f $COMPOSE_FILE down
print_success "All services stopped"
}
# Function to restart all services
restart_services() {
stop_services
start_services
}
# Function to show service status
show_status() {
print_status "Service status:"
docker compose -f $COMPOSE_FILE ps
print_status "Service health checks:"
# Wait a moment for services to start
sleep 5
# services=("api-gateway:8000" "token-service:8001" "battery-service:8002" "demand-response-service:8003")
services=("api-gateway:8000" "token-service:8001")
for service in "${services[@]}"; do
name="${service%:*}"
port="${service#*:}"
if curl -f -s "http://localhost:$port/health" > /dev/null; then
print_success "$name is healthy"
else
print_warning "$name is not responding to health checks"
fi
done
}
# Function to view logs
view_logs() {
if [ -z "$2" ]; then
print_status "Showing logs for all services..."
docker compose -f $COMPOSE_FILE logs -f
else
print_status "Showing logs for $2..."
docker compose -f $COMPOSE_FILE logs -f $2
fi
}
# Function to clean up everything
cleanup() {
print_warning "This will remove all containers, images, and volumes. Are you sure? (y/N)"
read -r response
if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]]; then
print_status "Cleaning up everything..."
docker compose -f $COMPOSE_FILE down -v --rmi all
docker system prune -f
print_success "Cleanup completed"
else
print_status "Cleanup cancelled"
fi
}
# Function to run database migrations or setup
setup_database() {
print_status "Setting up databases..."
# Wait for MongoDB to be ready
print_status "Waiting for MongoDB to be ready..."
sleep 10
# Run any additional setup scripts here
print_success "Database setup completed"
}
# Function to show help
show_help() {
echo "Energy Management Microservices Deployment Script"
echo ""
echo "Usage: $0 [COMMAND]"
echo ""
echo "Commands:"
echo " setup Setup environment and dependencies"
echo " build Build all microservices"
echo " start Start all services"
echo " stop Stop all services"
echo " restart Restart all services"
echo " status Show service status and health"
echo " logs Show logs for all services"
echo " logs <svc> Show logs for specific service"
echo " deploy Full deployment (setup + build + start)"
echo " db-setup Setup databases"
echo " cleanup Remove all containers, images, and volumes"
echo " help Show this help message"
echo ""
echo "Examples:"
echo " $0 deploy # Full deployment"
echo " $0 logs battery-service # Show battery service logs"
echo " $0 status # Check service health"
}
# Main script logic
case "${1:-help}" in
setup)
check_dependencies
setup_environment
;;
build)
check_dependencies
build_services
;;
start)
check_dependencies
start_services
;;
stop)
stop_services
;;
restart)
restart_services
;;
status)
show_status
;;
logs)
        view_logs "$@"
;;
deploy)
check_dependencies
setup_environment
build_services
start_services
setup_database
show_status
;;
db-setup)
setup_database
;;
cleanup)
cleanup
;;
help|--help|-h)
show_help
;;
*)
print_error "Unknown command: $1"
show_help
exit 1
;;
esac

View File

@@ -51,30 +51,30 @@ services:
   depends_on:
     - mongodb
     - redis
-    # - token-service
+    - token-service
     - sensor-service
     - data-ingestion-service
+    - demand-response-service
     # - battery-service
-    # - demand-response-service
   networks:
     - energy-network

   # Token Management Service
-  # token-service:
-  #   build:
-  #     context: ./token-service
-  #     dockerfile: Dockerfile
-  #   container_name: token-service
-  #   restart: unless-stopped
-  #   ports:
-  #     - "8001:8001"
-  #   environment:
-  #     - MONGO_URL=mongodb://admin:password123@localhost:27017/energy_dashboard_tokens?authSource=admin
-  #     - JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production
-  #   depends_on:
-  #     - mongodb
-  #   networks:
-  #     - energy-network
+  token-service:
+    build:
+      context: ./token-service
+      dockerfile: Dockerfile
+    container_name: token-service
+    restart: unless-stopped
+    ports:
+      - "8001:8001"
+    environment:
+      - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_tokens?authSource=admin
+      - JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production
+    depends_on:
+      - mongodb
+    networks:
+      - energy-network

   # Battery Management Service
   # battery-service:
@@ -95,23 +95,24 @@ services:
   #     - energy-network

   # Demand Response Service
-  # demand-response-service:
-  #   build:
-  #     context: ./demand-response-service
-  #     dockerfile: Dockerfile
-  #   container_name: demand-response-service
-  #   restart: unless-stopped
-  #   ports:
-  #     - "8003:8003"
-  #   environment:
-  #     - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_demand_response?authSource=admin
-  #     - REDIS_URL=redis://redis:6379
-  #     - IOT_CONTROL_SERVICE_URL=http://iot-control-service:8006
-  #   depends_on:
-  #     - mongodb
-  #     - redis
-  #   networks:
-  #     - energy-network
+  demand-response-service:
+    build:
+      context: ./demand-response-service
+      dockerfile: Dockerfile
+    container_name: demand-response-service
+    restart: unless-stopped
+    ports:
+      - "8003:8003"
+    environment:
+      - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_demand_response?authSource=admin
+      - REDIS_URL=redis://redis:6379
+      - SENSOR_SERVICE_URL=http://sensor-service:8007
+    depends_on:
+      - mongodb
+      - redis
+      - sensor-service
+    networks:
+      - energy-network

   # P2P Trading Service
   # p2p-trading-service:
@@ -169,7 +170,6 @@ services:
   #   networks:
   #     - energy-network

-  # Data Ingestion Service (FTP Monitoring & SA4CPS Integration)
   data-ingestion-service:
     build:
       context: ./data-ingestion-service
@@ -185,6 +185,7 @@ services:
       - FTP_SA4CPS_USERNAME=curvascarga@sa4cps.pt
       - FTP_SA4CPS_REMOTE_PATH=/SLGs/
       - FTP_CHECK_INTERVAL=21600
+      - FTP_SKIP_INITIAL_SCAN=true
     depends_on:
       - mongodb
     networks:
@@ -202,7 +203,7 @@ services:
     environment:
       - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_sensors?authSource=admin
       - REDIS_URL=redis://redis:6379
-      - TOKEN_SERVICE_URL=http://token-service:8001
+      # - TOKEN_SERVICE_URL=http://token-service:8001
     depends_on:
      - mongodb
      - redis

microservices/openapi.yaml (new file, 1748 lines)

File diff suppressed because it is too large

View File

@@ -0,0 +1,377 @@
"""
Analytics service for processing sensor data and generating insights
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
import time
logger = logging.getLogger(__name__)
class AnalyticsService:
"""Service for analytics and data processing"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def query_data(self, query_params) -> Dict[str, Any]:
"""Execute advanced data query"""
        try:
            start_ts = time.perf_counter()
            # Build query
            query = {}
if hasattr(query_params, 'sensor_ids') and query_params.sensor_ids:
query["sensor_id"] = {"$in": query_params.sensor_ids}
if hasattr(query_params, 'start_time') and query_params.start_time:
query.setdefault("timestamp", {})["$gte"] = query_params.start_time
if hasattr(query_params, 'end_time') and query_params.end_time:
query.setdefault("timestamp", {})["$lte"] = query_params.end_time
# Execute query
cursor = self.db.sensor_readings.find(query)
if hasattr(query_params, 'limit') and query_params.limit:
cursor = cursor.limit(query_params.limit)
if hasattr(query_params, 'offset') and query_params.offset:
cursor = cursor.skip(query_params.offset)
cursor = cursor.sort("timestamp", -1)
# Get results
results = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
results.append(reading)
# Get total count
total_count = await self.db.sensor_readings.count_documents(query)
            return {
                "data": results,
                "total_count": total_count,
                "query": query_params.__dict__ if hasattr(query_params, '__dict__') else {},
                "execution_time_ms": round((time.perf_counter() - start_ts) * 1000, 2)
            }
except Exception as e:
logger.error(f"Error executing data query: {e}")
raise
async def get_analytics_summary(self, hours: int = 24) -> Dict[str, Any]:
"""Get comprehensive analytics summary"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Get basic statistics
pipeline = [
{
"$match": {
"created_at": {"$gte": start_time}
}
},
{
"$group": {
"_id": None,
"total_readings": {"$sum": 1},
"average_value": {"$avg": "$value"},
"min_value": {"$min": "$value"},
"max_value": {"$max": "$value"},
"unique_sensors": {"$addToSet": "$sensor_id"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
stats = await cursor.to_list(length=1)
base_stats = stats[0] if stats else {
"total_readings": 0,
"average_value": 0,
"min_value": 0,
"max_value": 0,
"unique_sensors": []
}
# Get room-level statistics
room_stats = await self._get_room_analytics(hours)
# Get energy trends
energy_trends = await self._get_energy_trends(hours)
return {
"period_hours": hours,
"timestamp": datetime.utcnow().isoformat(),
"total_readings": base_stats["total_readings"],
"unique_sensors": len(base_stats["unique_sensors"]),
"value_statistics": {
"average": round(base_stats["average_value"], 2) if base_stats["average_value"] else 0,
"minimum": base_stats["min_value"],
"maximum": base_stats["max_value"]
},
"room_statistics": room_stats,
"energy_trends": energy_trends
}
except Exception as e:
logger.error(f"Error getting analytics summary: {e}")
raise
async def get_energy_analytics(self, hours: int = 24, room: Optional[str] = None) -> Dict[str, Any]:
"""Get energy-specific analytics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Build query
query = {"created_at": {"$gte": start_time}}
if room:
query["room"] = room
# Energy consumption over time
pipeline = [
{"$match": query},
{
"$group": {
"_id": {
"hour": {"$hour": "$created_at"},
"date": {"$dateToString": {"format": "%Y-%m-%d", "date": "$created_at"}}
},
"total_energy": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{"$sort": {"_id.date": 1, "_id.hour": 1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
hourly_data = []
async for data in cursor:
hourly_data.append({
"hour": data["_id"]["hour"],
"date": data["_id"]["date"],
"total_energy": data["total_energy"],
"reading_count": data["reading_count"]
})
# Peak consumption analysis
peak_analysis = await self._get_peak_consumption_analysis(query)
# Energy efficiency metrics
efficiency_metrics = await self._get_efficiency_metrics(query)
return {
"period_hours": hours,
"room": room,
"timestamp": datetime.utcnow().isoformat(),
"hourly_consumption": hourly_data,
"peak_analysis": peak_analysis,
"efficiency_metrics": efficiency_metrics,
"total_consumption": sum(item["total_energy"] for item in hourly_data)
}
except Exception as e:
logger.error(f"Error getting energy analytics: {e}")
raise
async def _get_room_analytics(self, hours: int) -> Dict[str, Any]:
"""Get room-level analytics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
pipeline = [
{
"$match": {
"created_at": {"$gte": start_time},
"room": {"$ne": None}
}
},
{
"$group": {
"_id": "$room",
"total_readings": {"$sum": 1},
"total_energy": {"$sum": "$value"},
"average_energy": {"$avg": "$value"},
"unique_sensors": {"$addToSet": "$sensor_id"}
}
},
{"$sort": {"total_energy": -1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
room_data = []
async for room in cursor:
room_data.append({
"room": room["_id"],
"total_readings": room["total_readings"],
"total_energy": room["total_energy"],
"average_energy": round(room["average_energy"], 2),
"sensor_count": len(room["unique_sensors"])
})
return {
"by_room": room_data,
"total_rooms": len(room_data)
}
except Exception as e:
logger.error(f"Error getting room analytics: {e}")
return {"by_room": [], "total_rooms": 0}
async def _get_energy_trends(self, hours: int) -> Dict[str, Any]:
"""Get energy consumption trends"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
# Get current period data
current_query = {"created_at": {"$gte": start_time}}
current_cursor = self.db.sensor_readings.aggregate([
{"$match": current_query},
{"$group": {"_id": None, "total": {"$sum": "$value"}, "count": {"$sum": 1}}}
])
current_data = await current_cursor.to_list(length=1)
current_total = current_data[0]["total"] if current_data else 0
current_count = current_data[0]["count"] if current_data else 0
# Get previous period for comparison
previous_start = start_time - timedelta(hours=hours)
previous_query = {
"created_at": {"$gte": previous_start, "$lt": start_time}
}
previous_cursor = self.db.sensor_readings.aggregate([
{"$match": previous_query},
{"$group": {"_id": None, "total": {"$sum": "$value"}, "count": {"$sum": 1}}}
])
previous_data = await previous_cursor.to_list(length=1)
previous_total = previous_data[0]["total"] if previous_data else 0
# Calculate trend
if previous_total > 0:
trend_percentage = ((current_total - previous_total) / previous_total) * 100
else:
trend_percentage = 0
return {
"current_period": {
"total_energy": current_total,
"reading_count": current_count,
"average_per_reading": current_total / current_count if current_count > 0 else 0
},
"previous_period": {
"total_energy": previous_total
},
"trend": {
"percentage_change": round(trend_percentage, 2),
"direction": "up" if trend_percentage > 0 else "down" if trend_percentage < 0 else "stable"
}
}
except Exception as e:
logger.error(f"Error getting energy trends: {e}")
return {}
async def _get_peak_consumption_analysis(self, base_query: Dict[str, Any]) -> Dict[str, Any]:
"""Analyze peak consumption patterns"""
try:
pipeline = [
{"$match": base_query},
{
"$group": {
"_id": {"$hour": "$created_at"},
"total_consumption": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{"$sort": {"total_consumption": -1}}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
hourly_consumption = await cursor.to_list(length=None)
if not hourly_consumption:
return {"peak_hour": None, "peak_consumption": 0, "hourly_pattern": []}
peak_data = hourly_consumption[0]
return {
"peak_hour": peak_data["_id"],
"peak_consumption": peak_data["total_consumption"],
"hourly_pattern": [
{
"hour": item["_id"],
"consumption": item["total_consumption"],
"reading_count": item["reading_count"]
}
for item in hourly_consumption
]
}
except Exception as e:
logger.error(f"Error analyzing peak consumption: {e}")
return {"peak_hour": None, "peak_consumption": 0, "hourly_pattern": []}
async def _get_efficiency_metrics(self, base_query: Dict[str, Any]) -> Dict[str, Any]:
"""Calculate energy efficiency metrics"""
try:
# Average consumption per sensor
pipeline = [
{"$match": base_query},
{
"$group": {
"_id": "$sensor_id",
"total_consumption": {"$sum": "$value"},
"reading_count": {"$sum": 1}
}
},
{
"$group": {
"_id": None,
"average_per_sensor": {"$avg": "$total_consumption"},
"sensor_count": {"$sum": 1},
"min_consumption": {"$min": "$total_consumption"},
"max_consumption": {"$max": "$total_consumption"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
efficiency_data = await cursor.to_list(length=1)
if not efficiency_data:
return {
"average_per_sensor": 0,
"sensor_count": 0,
"efficiency_score": 0,
"variation_coefficient": 0
}
data = efficiency_data[0]
# Calculate efficiency score (lower variation = higher efficiency)
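            # This is a spread-based proxy, not the statistical coefficient of variation:
            # e.g. min 2, max 10 and mean 6 kWh per sensor gives (10 - 2) / 6 ≈ 1.33,
            # scoring 100 - 13.3 ≈ 86.7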
if data["average_per_sensor"] > 0:
variation_coefficient = (data["max_consumption"] - data["min_consumption"]) / data["average_per_sensor"]
efficiency_score = max(0, 100 - (variation_coefficient * 10)) # Scale to 0-100
else:
variation_coefficient = 0
efficiency_score = 100
return {
"average_per_sensor": round(data["average_per_sensor"], 2),
"sensor_count": data["sensor_count"],
"efficiency_score": round(efficiency_score, 1),
"variation_coefficient": round(variation_coefficient, 2)
}
except Exception as e:
logger.error(f"Error calculating efficiency metrics: {e}")
return {
"average_per_sensor": 0,
"sensor_count": 0,
"efficiency_score": 0,
"variation_coefficient": 0
}
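As a quick sanity check of the scoring arithmetic above, a minimal standalone sketch with hypothetical per-sensor totals (not real data):

# Hypothetical per-sensor consumption totals; mirrors the efficiency formula above.
per_sensor = [120.0, 100.0, 80.0]
average = sum(per_sensor) / len(per_sensor)                 # 100.0
variation = (max(per_sensor) - min(per_sensor)) / average   # 0.4
score = max(0, 100 - variation * 10)                        # 96.0 -> fairly uniform usage
print(round(score, 1), round(variation, 2))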

View File

@@ -0,0 +1,60 @@
"""
Database connection and management for sensor service
"""
import asyncio
import logging
from motor.motor_asyncio import AsyncIOMotorClient
import redis.asyncio as redis
from typing import Optional
import os
logger = logging.getLogger(__name__)
# Global database connections
mongo_client: Optional[AsyncIOMotorClient] = None
redis_client: Optional[redis.Redis] = None
database = None
async def connect_to_mongo():
global mongo_client, database
try:
mongo_url = os.getenv("MONGO_URL", "mongodb://admin:password123@mongodb:27017/energy_dashboard_sensors?authSource=admin")
mongo_client = AsyncIOMotorClient(mongo_url)
database = mongo_client.energy_dashboard_sensors
await mongo_client.admin.command('ping')
logger.info("Connected to MongoDB successfully")
except Exception as e:
logger.error(f"Failed to connect to MongoDB: {e}")
raise
async def close_mongo_connection():
global mongo_client
if mongo_client:
mongo_client.close()
logger.info("Closed MongoDB connection")
async def connect_to_redis():
global redis_client
try:
redis_url = os.getenv("REDIS_URL", "redis://redis:6379")
redis_client = redis.from_url(redis_url, decode_responses=True)
# Test connection
await redis_client.ping()
logger.info("Connected to Redis successfully")
except Exception as e:
logger.error(f"Failed to connect to Redis: {e}")
raise
async def get_database():
return database
async def get_redis():
return redis_client
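A minimal sketch of using this module outside docker-compose; the localhost URLs are assumptions for a local setup and override the in-cluster defaults:

import asyncio
import os

# Assumed local endpoints; the module's defaults point at the compose hostnames.
os.environ["MONGO_URL"] = "mongodb://admin:password123@localhost:27017/energy_dashboard_sensors?authSource=admin"
os.environ["REDIS_URL"] = "redis://localhost:6379"

from database import connect_to_mongo, connect_to_redis, get_database

async def main():
    await connect_to_mongo()
    await connect_to_redis()
    db = await get_database()
    print(await db.sensors.count_documents({}))

asyncio.run(main())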

View File

@@ -16,7 +16,8 @@ import json
 from models import (
     SensorReading, SensorMetadata, RoomMetrics, SystemEvent, DataQuery, DataResponse,
-    SensorType, SensorStatus, CO2Status, OccupancyLevel, HealthResponse
+    SensorType, SensorStatus, CO2Status, OccupancyLevel, HealthResponse,
+    Room, RoomCreate, RoomUpdate, RoomInfo
 )
 from database import connect_to_mongo, close_mongo_connection, get_database, connect_to_redis, get_redis
 from sensor_service import SensorService
@@ -24,20 +25,22 @@ from room_service import RoomService
 from analytics_service import AnalyticsService
 from websocket_manager import WebSocketManager
-# Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
-# WebSocket manager for real-time updates
 websocket_manager = WebSocketManager()
 @asynccontextmanager
 async def lifespan(app: FastAPI):
-    """Application lifespan manager"""
     logger.info("Sensor Service starting up...")
     await connect_to_mongo()
     await connect_to_redis()
+    db = await get_database()
+    redis_client = await get_redis()
+    room_service = RoomService(db, redis_client)
+    await room_service.initialize_default_rooms()
     # Start background tasks
     asyncio.create_task(redis_subscriber_task())
     asyncio.create_task(room_metrics_aggregation_task())
@@ -117,7 +120,7 @@ async def websocket_endpoint(websocket: WebSocket):
 # Original Dashboard API Endpoints
 # Sensor Management
-@app.get("/sensors")
+@app.get("/sensors/get")
 async def get_sensors(
     room: Optional[str] = Query(None, description="Filter by room"),
     sensor_type: Optional[SensorType] = Query(None, description="Filter by sensor type"),
@@ -250,6 +253,19 @@ async def delete_sensor(
     raise HTTPException(status_code=500, detail="Internal server error")
 # Room Management
+@app.get("/rooms/names")
+async def get_room_names(service: RoomService = Depends(get_room_service)):
+    """Get simple list of room names for dropdowns"""
+    try:
+        room_names = await service.get_all_room_names()
+        return {
+            "rooms": room_names,
+            "count": len(room_names)
+        }
+    except Exception as e:
+        logger.error(f"Error getting room names: {e}")
+        raise HTTPException(status_code=500, detail="Internal server error")
 @app.get("/rooms")
 async def get_rooms(service: RoomService = Depends(get_room_service)):
     """Get all rooms with sensor counts and metrics"""
@@ -265,16 +281,16 @@ async def get_rooms(service: RoomService = Depends(get_room_service)):
 @app.post("/rooms")
 async def create_room(
-    room_data: dict,
+    room_data: RoomCreate,
     service: RoomService = Depends(get_room_service)
 ):
     """Create a new room"""
     try:
-        result = await service.create_room(room_data)
+        result = await service.create_room(room_data.dict())
         return {
             "message": "Room created successfully",
-            "room": room_data.get("name"),
-            "created_at": result.get("created_at")
+            "room": result["name"],
+            "created_at": result["created_at"]
         }
     except ValueError as e:
         raise HTTPException(status_code=400, detail=str(e))
@@ -282,6 +298,40 @@ async def create_room(
     logger.error(f"Error creating room: {e}")
     raise HTTPException(status_code=500, detail="Internal server error")
+@app.put("/rooms/{room_name}")
+async def update_room(
+    room_name: str,
+    room_data: RoomUpdate,
+    service: RoomService = Depends(get_room_service)
+):
+    """Update an existing room"""
+    try:
+        result = await service.update_room(room_name, room_data.dict(exclude_unset=True))
+        return {
+            "message": "Room updated successfully",
+            "room": result["name"],
+            "updated_at": result["updated_at"],
+            "modified": result["modified"]
+        }
+    except ValueError as e:
+        raise HTTPException(status_code=404, detail=str(e))
+    except Exception as e:
+        logger.error(f"Error updating room {room_name}: {e}")
+        raise HTTPException(status_code=500, detail="Internal server error")
+@app.delete("/rooms/{room_name}")
+async def delete_room(room_name: str, service: RoomService = Depends(get_room_service)):
+    """Delete a room"""
+    try:
+        result = await service.delete_room(room_name)
+        return {
+            "message": "Room deleted successfully",
+            **result
+        }
+    except Exception as e:
+        logger.error(f"Error deleting room {room_name}: {e}")
+        raise HTTPException(status_code=500, detail="Internal server error")
 @app.get("/rooms/{room_name}")
 async def get_room(room_name: str, service: RoomService = Depends(get_room_service)):
     """Get detailed room information"""

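A hedged sketch of exercising the new CRUD routes with aiohttp (the base URL reuses the sensor-service port from the bootstrap script; adjust to your deployment):

import asyncio
import aiohttp

BASE = "http://localhost:8007"  # assumed sensor-service address

async def main():
    async with aiohttp.ClientSession() as http:
        # Create, update, then delete a room via the new endpoints
        async with http.post(f"{BASE}/rooms", json={"name": "Lab_42", "room_type": "office"}) as r:
            print(r.status, await r.json())
        async with http.put(f"{BASE}/rooms/Lab_42", json={"capacity": 6}) as r:
            print(r.status, await r.json())
        async with http.delete(f"{BASE}/rooms/Lab_42") as r:
            print(r.status, await r.json())

asyncio.run(main())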
View File

@@ -296,6 +296,69 @@ class AnalyticsSummary(BaseModel):
         datetime: lambda v: v.isoformat()
     }
+# Room Management Models
+class Room(BaseModel):
+    """Room model for database storage and API responses"""
+    name: str = Field(..., description="Unique room name")
+    description: Optional[str] = Field(None, description="Room description")
+    floor: Optional[str] = Field(None, description="Floor designation")
+    building: Optional[str] = Field(None, description="Building name")
+    area: Optional[float] = Field(None, description="Room area in square meters")
+    capacity: Optional[int] = Field(None, description="Maximum occupancy")
+    room_type: Optional[str] = Field(None, description="Room type (office, meeting, storage, etc.)")
+    # Metadata
+    created_at: datetime = Field(default_factory=datetime.utcnow, description="Room creation timestamp")
+    updated_at: datetime = Field(default_factory=datetime.utcnow, description="Room update timestamp")
+    class Config:
+        json_encoders = {
+            datetime: lambda v: v.isoformat() if v else None
+        }
+class RoomCreate(BaseModel):
+    """Model for creating new rooms"""
+    name: str = Field(..., description="Unique room name", min_length=1, max_length=100)
+    description: Optional[str] = Field(None, description="Room description", max_length=500)
+    floor: Optional[str] = Field(None, description="Floor designation", max_length=50)
+    building: Optional[str] = Field(None, description="Building name", max_length=100)
+    area: Optional[float] = Field(None, description="Room area in square meters", gt=0)
+    capacity: Optional[int] = Field(None, description="Maximum occupancy", gt=0)
+    room_type: Optional[str] = Field(None, description="Room type", max_length=50)
+class RoomUpdate(BaseModel):
+    """Model for updating existing rooms"""
+    description: Optional[str] = Field(None, description="Room description", max_length=500)
+    floor: Optional[str] = Field(None, description="Floor designation", max_length=50)
+    building: Optional[str] = Field(None, description="Building name", max_length=100)
+    area: Optional[float] = Field(None, description="Room area in square meters", gt=0)
+    capacity: Optional[int] = Field(None, description="Maximum occupancy", gt=0)
+    room_type: Optional[str] = Field(None, description="Room type", max_length=50)
+class RoomInfo(BaseModel):
+    """Comprehensive room information for API responses"""
+    name: str = Field(..., description="Room name")
+    description: Optional[str] = Field(None, description="Room description")
+    floor: Optional[str] = Field(None, description="Floor designation")
+    building: Optional[str] = Field(None, description="Building name")
+    area: Optional[float] = Field(None, description="Room area in square meters")
+    capacity: Optional[int] = Field(None, description="Maximum occupancy")
+    room_type: Optional[str] = Field(None, description="Room type")
+    # Runtime information
+    sensor_count: int = Field(0, description="Number of sensors in room")
+    active_sensors: int = Field(0, description="Number of active sensors")
+    last_updated: Optional[datetime] = Field(None, description="Last metrics update")
+    # Timestamps
+    created_at: datetime = Field(..., description="Room creation timestamp")
+    updated_at: datetime = Field(..., description="Room update timestamp")
+    class Config:
+        json_encoders = {
+            datetime: lambda v: v.isoformat() if v else None
+        }
 class HealthResponse(BaseModel):
     """Health check response"""
     service: str

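A minimal sketch of how the field constraints behave, assuming the pydantic v1 API implied by the .dict() calls above:

from pydantic import ValidationError
from models import RoomCreate, RoomUpdate

room = RoomCreate(name="Quiet Room", area=12.5, capacity=2)
print(room.dict(exclude_none=True))

try:
    RoomCreate(name="", capacity=0)  # violates min_length=1 and gt=0
except ValidationError as err:
    print([e["loc"] for e in err.errors()])

# Partial update: only fields actually set are sent to the service layer.
print(RoomUpdate(capacity=6).dict(exclude_unset=True))  # {'capacity': 6}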
View File

@@ -0,0 +1,467 @@
"""
Room service for managing rooms and room-level metrics
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
logger = logging.getLogger(__name__)
class RoomService:
"""Service for managing rooms and room-level analytics"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def get_all_room_names(self) -> List[str]:
"""Get a simple list of all room names for dropdowns/selections"""
try:
# Get rooms from the rooms collection
room_cursor = self.db.rooms.find({}, {"name": 1})
room_names = set()
async for room in room_cursor:
room_names.add(room["name"])
# Also get rooms that exist only in sensor data (legacy support)
sensor_cursor = self.db.sensors.find(
{"room": {"$ne": None, "$exists": True}},
{"room": 1}
)
async for sensor in sensor_cursor:
if sensor.get("room"):
room_names.add(sensor["room"])
# Convert to sorted list
return sorted(list(room_names))
except Exception as e:
logger.error(f"Error getting room names: {e}")
raise
async def initialize_default_rooms(self) -> None:
"""Initialize default rooms if none exist"""
try:
# Check if any rooms exist
room_count = await self.db.rooms.count_documents({})
if room_count == 0:
# Create default rooms
default_rooms = [
{"name": "Conference Room A", "description": "Main conference room", "room_type": "meeting"},
{"name": "Conference Room B", "description": "Secondary conference room", "room_type": "meeting"},
{"name": "Office Floor 1", "description": "First floor office space", "room_type": "office"},
{"name": "Office Floor 2", "description": "Second floor office space", "room_type": "office"},
{"name": "Kitchen", "description": "Employee kitchen and break room", "room_type": "common"},
{"name": "Lobby", "description": "Main entrance and reception", "room_type": "common"},
{"name": "Server Room", "description": "IT equipment room", "room_type": "technical"},
{"name": "Storage Room", "description": "General storage", "room_type": "storage"},
{"name": "Meeting Room 1", "description": "Small meeting room", "room_type": "meeting"},
{"name": "Meeting Room 2", "description": "Small meeting room", "room_type": "meeting"}
]
for room_data in default_rooms:
room_doc = {
**room_data,
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
await self.db.rooms.insert_one(room_doc)
logger.info(f"Initialized {len(default_rooms)} default rooms")
except Exception as e:
logger.error(f"Error initializing default rooms: {e}")
raise
async def get_rooms(self) -> List[Dict[str, Any]]:
"""Get all rooms with sensor counts and metrics"""
try:
# Get unique rooms from sensors
pipeline = [
{"$group": {"_id": "$room", "sensor_count": {"$sum": 1}}},
{"$match": {"_id": {"$ne": None}}}
]
cursor = self.db.sensors.aggregate(pipeline)
rooms = []
async for room_data in cursor:
room_name = room_data["_id"]
# Get latest room metrics
latest_metrics = await self._get_latest_room_metrics(room_name)
room_info = {
"name": room_name,
"sensor_count": room_data["sensor_count"],
"latest_metrics": latest_metrics,
"last_updated": latest_metrics.get("timestamp") if latest_metrics else None
}
rooms.append(room_info)
return rooms
except Exception as e:
logger.error(f"Error getting rooms: {e}")
raise
async def create_room(self, room_data: Dict[str, Any]) -> Dict[str, Any]:
"""Create a new room"""
try:
room_doc = {
"name": room_data.get("name"),
"description": room_data.get("description", ""),
"floor": room_data.get("floor"),
"building": room_data.get("building"),
"area": room_data.get("area"),
"capacity": room_data.get("capacity"),
"room_type": room_data.get("room_type"),
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
# Validate required fields
if not room_doc["name"] or not room_doc["name"].strip():
raise ValueError("Room name is required")
# Check if room already exists
existing = await self.db.rooms.find_one({"name": room_doc["name"]})
if existing:
raise ValueError(f"Room {room_doc['name']} already exists")
result = await self.db.rooms.insert_one(room_doc)
return {
"id": str(result.inserted_id),
"name": room_doc["name"],
"created_at": room_doc["created_at"]
}
except Exception as e:
logger.error(f"Error creating room: {e}")
raise
async def update_room(self, room_name: str, room_data: Dict[str, Any]) -> Dict[str, Any]:
"""Update an existing room"""
try:
# Check if room exists
existing = await self.db.rooms.find_one({"name": room_name})
if not existing:
raise ValueError(f"Room {room_name} not found")
# Prepare update document
update_doc = {
"updated_at": datetime.utcnow()
}
# Update only provided fields
for field in ["description", "floor", "building", "area", "capacity", "room_type"]:
if field in room_data and room_data[field] is not None:
update_doc[field] = room_data[field]
# Perform update
result = await self.db.rooms.update_one(
{"name": room_name},
{"$set": update_doc}
)
if result.modified_count == 0:
logger.warning(f"No changes made to room {room_name}")
return {
"name": room_name,
"updated_at": update_doc["updated_at"],
"modified": result.modified_count > 0
}
except Exception as e:
logger.error(f"Error updating room: {e}")
raise
async def delete_room(self, room_name: str) -> Dict[str, Any]:
"""Delete a room and optionally reassign sensors"""
try:
# Check if room exists
existing = await self.db.rooms.find_one({"name": room_name})
# Check for sensors in this room
sensors_in_room = await self.db.sensors.find({"room": room_name}).to_list(None)
if sensors_in_room:
# Update sensors to have null room (don't delete sensors)
await self.db.sensors.update_many(
{"room": room_name},
{"$unset": {"room": ""}}
)
# Delete room from rooms collection if it exists
room_deleted = False
if existing:
result = await self.db.rooms.delete_one({"name": room_name})
room_deleted = result.deleted_count > 0
# Delete room metrics
metrics_result = await self.db.room_metrics.delete_many({"room": room_name})
return {
"room": room_name,
"room_deleted": room_deleted,
"sensors_updated": len(sensors_in_room),
"metrics_deleted": metrics_result.deleted_count
}
except Exception as e:
logger.error(f"Error deleting room: {e}")
raise
async def get_room_details(self, room_name: str) -> Optional[Dict[str, Any]]:
"""Get detailed room information"""
try:
# Get room info
room = await self.db.rooms.find_one({"name": room_name})
if not room:
# Create basic room info from sensor data
sensors = await self.db.sensors.find({"room": room_name}).to_list(None)
if not sensors:
return None
room = {
"name": room_name,
"description": f"Room with {len(sensors)} sensors",
"sensor_count": len(sensors)
}
else:
room["_id"] = str(room["_id"])
# Get sensor count
sensor_count = await self.db.sensors.count_documents({"room": room_name})
room["sensor_count"] = sensor_count
# Get sensors in this room
cursor = self.db.sensors.find({"room": room_name})
sensors = []
async for sensor in cursor:
sensor["_id"] = str(sensor["_id"])
sensors.append(sensor)
room["sensors"] = sensors
# Get recent room metrics
room["recent_metrics"] = await self._get_recent_room_metrics(room_name, hours=24)
return room
except Exception as e:
logger.error(f"Error getting room details: {e}")
raise
async def get_room_data(self, room_name: str, start_time: Optional[int] = None,
end_time: Optional[int] = None, limit: int = 100) -> Dict[str, Any]:
"""Get historical data for a room"""
try:
# Get room metrics
room_query = {"room": room_name}
if start_time or end_time:
room_query["timestamp"] = {}
if start_time:
room_query["timestamp"]["$gte"] = start_time
if end_time:
room_query["timestamp"]["$lte"] = end_time
room_metrics_cursor = self.db.room_metrics.find(room_query).sort("timestamp", -1).limit(limit)
room_metrics = []
async for metric in room_metrics_cursor:
metric["_id"] = str(metric["_id"])
room_metrics.append(metric)
# Get sensor readings for this room
sensor_query = {"room": room_name}
if start_time or end_time:
sensor_query["timestamp"] = {}
if start_time:
sensor_query["timestamp"]["$gte"] = start_time
if end_time:
sensor_query["timestamp"]["$lte"] = end_time
sensor_readings_cursor = self.db.sensor_readings.find(sensor_query).sort("timestamp", -1).limit(limit)
sensor_readings = []
async for reading in sensor_readings_cursor:
reading["_id"] = str(reading["_id"])
sensor_readings.append(reading)
return {
"room_metrics": room_metrics,
"sensor_readings": sensor_readings
}
except Exception as e:
logger.error(f"Error getting room data: {e}")
raise
async def update_room_metrics(self, sensor_data):
"""Update room-level metrics when sensor data is received"""
try:
if not sensor_data.room:
return
# Calculate room-level aggregates
room_metrics = await self._calculate_room_metrics(sensor_data.room)
if room_metrics:
# Store room metrics
metrics_doc = {
"room": sensor_data.room,
"timestamp": sensor_data.timestamp,
"total_energy": room_metrics.get("total_energy", 0),
"average_temperature": room_metrics.get("avg_temperature"),
"co2_level": room_metrics.get("co2_level"),
"occupancy_estimate": room_metrics.get("occupancy_estimate"),
"sensor_count": room_metrics.get("sensor_count", 0),
"created_at": datetime.utcnow()
}
await self.db.room_metrics.insert_one(metrics_doc)
# Cache latest metrics
if self.redis:
cache_key = f"room:{sensor_data.room}:latest_metrics"
await self.redis.setex(cache_key, 3600, json.dumps(metrics_doc, default=str))
except Exception as e:
logger.error(f"Error updating room metrics: {e}")
async def aggregate_all_room_metrics(self):
"""Aggregate metrics for all rooms"""
try:
# Get all unique rooms
pipeline = [{"$group": {"_id": "$room"}}]
cursor = self.db.sensors.aggregate(pipeline)
async for room_data in cursor:
room_name = room_data["_id"]
if room_name:
await self._calculate_room_metrics(room_name)
except Exception as e:
logger.error(f"Error aggregating room metrics: {e}")
async def _get_latest_room_metrics(self, room_name: str) -> Optional[Dict[str, Any]]:
"""Get latest room metrics"""
try:
# Try Redis cache first
if self.redis:
cache_key = f"room:{room_name}:latest_metrics"
cached = await self.redis.get(cache_key)
if cached:
return json.loads(cached)
# Fall back to database
latest = await self.db.room_metrics.find_one(
{"room": room_name},
sort=[("timestamp", -1)]
)
if latest:
latest["_id"] = str(latest["_id"])
return latest
return None
except Exception as e:
logger.error(f"Error getting latest room metrics: {e}")
return None
async def _get_recent_room_metrics(self, room_name: str, hours: int = 24) -> List[Dict[str, Any]]:
"""Get recent room metrics"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
cursor = self.db.room_metrics.find({
"room": room_name,
"created_at": {"$gte": start_time}
}).sort("timestamp", -1)
metrics = []
async for metric in cursor:
metric["_id"] = str(metric["_id"])
metrics.append(metric)
return metrics
except Exception as e:
logger.error(f"Error getting recent room metrics: {e}")
return []
async def _calculate_room_metrics(self, room_name: str) -> Dict[str, Any]:
"""Calculate aggregated metrics for a room"""
try:
# Get recent sensor readings (last 5 minutes)
five_minutes_ago = datetime.utcnow() - timedelta(minutes=5)
pipeline = [
{
"$match": {
"room": room_name,
"created_at": {"$gte": five_minutes_ago}
}
},
{
"$group": {
"_id": "$sensor_id",
"latest_value": {"$last": "$value"},
"sensor_type": {"$last": "$sensor_type"} if "sensor_type" in ["$first", "$last"] else {"$first": "energy"},
"unit": {"$last": "$unit"}
}
}
]
cursor = self.db.sensor_readings.aggregate(pipeline)
total_energy = 0
temperatures = []
co2_levels = []
sensor_count = 0
async for sensor_data in cursor:
sensor_count += 1
value = sensor_data.get("latest_value", 0)
sensor_type = sensor_data.get("sensor_type", "energy")
if sensor_type == "energy" or "energy" in str(sensor_data.get("unit", "")).lower():
total_energy += value
elif sensor_type == "temperature":
temperatures.append(value)
elif sensor_type == "co2":
co2_levels.append(value)
metrics = {
"total_energy": total_energy,
"sensor_count": sensor_count,
"avg_temperature": sum(temperatures) / len(temperatures) if temperatures else None,
"co2_level": sum(co2_levels) / len(co2_levels) if co2_levels else None,
"occupancy_estimate": self._estimate_occupancy(sensor_count, total_energy)
}
return metrics
except Exception as e:
logger.error(f"Error calculating room metrics: {e}")
return {}
def _estimate_occupancy(self, sensor_count: int, total_energy: float) -> Optional[str]:
"""Estimate occupancy level based on energy consumption"""
if total_energy == 0:
return "vacant"
elif total_energy < sensor_count * 50: # Low threshold
return "low"
elif total_energy < sensor_count * 150: # Medium threshold
return "medium"
else:
return "high"

View File

@@ -0,0 +1,251 @@
"""
Sensor service business logic
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
import json
logger = logging.getLogger(__name__)
class SensorService:
"""Service for managing sensors and sensor data"""
def __init__(self, db, redis_client):
self.db = db
self.redis = redis_client
async def get_sensors(self, room: Optional[str] = None, sensor_type: Optional[str] = None, status: Optional[str] = None) -> List[Dict[str, Any]]:
"""Get sensors with optional filtering"""
try:
query = {}
if room:
query["room"] = room
if sensor_type:
query["sensor_type"] = sensor_type
if status:
query["status"] = status
cursor = self.db.sensors.find(query)
sensors = []
async for sensor in cursor:
sensor["_id"] = str(sensor["_id"])
sensors.append(sensor)
return sensors
except Exception as e:
logger.error(f"Error getting sensors: {e}")
raise
async def get_sensor_details(self, sensor_id: str) -> Optional[Dict[str, Any]]:
"""Get detailed sensor information"""
try:
sensor = await self.db.sensors.find_one({"sensor_id": sensor_id})
if sensor:
sensor["_id"] = str(sensor["_id"])
# Get recent readings
recent_readings = await self.get_sensor_data(sensor_id, limit=10)
sensor["recent_readings"] = recent_readings.get("readings", [])
return sensor
return None
except Exception as e:
logger.error(f"Error getting sensor details: {e}")
raise
async def get_sensor_data(self, sensor_id: str, start_time: Optional[int] = None,
end_time: Optional[int] = None, limit: int = 100, offset: int = 0) -> Dict[str, Any]:
"""Get historical sensor data"""
try:
query = {"sensor_id": sensor_id}
if start_time or end_time:
query["timestamp"] = {}
if start_time:
query["timestamp"]["$gte"] = start_time
if end_time:
query["timestamp"]["$lte"] = end_time
# Get total count
total_count = await self.db.sensor_readings.count_documents(query)
# Get readings
cursor = self.db.sensor_readings.find(query).sort("timestamp", -1).skip(offset).limit(limit)
readings = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
readings.append(reading)
return {
"readings": readings,
"total_count": total_count,
"execution_time_ms": 0 # Placeholder
}
except Exception as e:
logger.error(f"Error getting sensor data: {e}")
raise
async def create_sensor(self, sensor_data) -> Dict[str, Any]:
"""Create a new sensor"""
try:
# Check if sensor already exists
existing = await self.db.sensors.find_one({"sensor_id": sensor_data.sensor_id})
if existing:
raise ValueError(f"Sensor {sensor_data.sensor_id} already exists")
# Create sensor document
sensor_doc = {
"sensor_id": sensor_data.sensor_id,
"name": sensor_data.name,
"sensor_type": sensor_data.sensor_type.value if hasattr(sensor_data.sensor_type, 'value') else str(sensor_data.sensor_type),
"room": sensor_data.room,
"location": sensor_data.location if hasattr(sensor_data, 'location') else None,
"status": "active",
"created_at": datetime.utcnow(),
"updated_at": datetime.utcnow()
}
result = await self.db.sensors.insert_one(sensor_doc)
return {"created_at": datetime.utcnow()}
except Exception as e:
logger.error(f"Error creating sensor: {e}")
raise
async def update_sensor(self, sensor_id: str, update_data: Dict[str, Any]) -> bool:
"""Update sensor metadata"""
try:
update_data["updated_at"] = datetime.utcnow()
result = await self.db.sensors.update_one(
{"sensor_id": sensor_id},
{"$set": update_data}
)
return result.modified_count > 0
except Exception as e:
logger.error(f"Error updating sensor: {e}")
raise
async def delete_sensor(self, sensor_id: str) -> Dict[str, Any]:
"""Delete a sensor and its data"""
try:
# Delete readings
readings_result = await self.db.sensor_readings.delete_many({"sensor_id": sensor_id})
# Delete sensor
await self.db.sensors.delete_one({"sensor_id": sensor_id})
return {"readings_deleted": readings_result.deleted_count}
except Exception as e:
logger.error(f"Error deleting sensor: {e}")
raise
async def ingest_sensor_data(self, sensor_data) -> Dict[str, Any]:
"""Ingest real-time sensor data"""
try:
# Create reading document
reading_doc = {
"sensor_id": sensor_data.sensor_id,
"timestamp": sensor_data.timestamp,
"value": sensor_data.value,
"unit": sensor_data.unit if hasattr(sensor_data, 'unit') else None,
"room": sensor_data.room if hasattr(sensor_data, 'room') else None,
"created_at": datetime.utcnow()
}
# Store in database
await self.db.sensor_readings.insert_one(reading_doc)
# Cache recent value in Redis
if self.redis:
cache_key = f"sensor:{sensor_data.sensor_id}:latest"
await self.redis.setex(cache_key, 3600, json.dumps(reading_doc, default=str))
return {"success": True}
except Exception as e:
logger.error(f"Error ingesting sensor data: {e}")
raise
async def export_data(self, start_time: int, end_time: int, sensor_ids: Optional[str] = None,
format: str = "json") -> Dict[str, Any]:
"""Export sensor data"""
try:
query = {
"timestamp": {"$gte": start_time, "$lte": end_time}
}
if sensor_ids:
sensor_list = [s.strip() for s in sensor_ids.split(",")]
query["sensor_id"] = {"$in": sensor_list}
cursor = self.db.sensor_readings.find(query).sort("timestamp", 1)
readings = []
async for reading in cursor:
reading["_id"] = str(reading["_id"])
readings.append(reading)
return {
"format": format,
"data": readings,
"total_records": len(readings),
"period": {"start": start_time, "end": end_time}
}
except Exception as e:
logger.error(f"Error exporting data: {e}")
raise
async def get_events(self, severity: Optional[str] = None, event_type: Optional[str] = None,
hours: int = 24, limit: int = 50) -> List[Dict[str, Any]]:
"""Get system events"""
try:
start_time = datetime.utcnow() - timedelta(hours=hours)
query = {"timestamp": {"$gte": start_time}}
if severity:
query["severity"] = severity
if event_type:
query["event_type"] = event_type
cursor = self.db.system_events.find(query).sort("timestamp", -1).limit(limit)
events = []
async for event in cursor:
event["_id"] = str(event["_id"])
events.append(event)
return events
except Exception as e:
logger.error(f"Error getting events: {e}")
return []
async def cleanup_old_data(self, cutoff_date: datetime):
"""Clean up old sensor data"""
try:
result = await self.db.sensor_readings.delete_many({
"created_at": {"$lt": cutoff_date}
})
logger.info(f"Cleaned up {result.deleted_count} old sensor readings")
except Exception as e:
logger.error(f"Error cleaning up old data: {e}")
raise
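ingest_sensor_data accepts any object exposing sensor_id, timestamp, and value attributes (unit and room are optional); a stand-in reading for local testing might look like this (names illustrative):

import time
from types import SimpleNamespace

reading = SimpleNamespace(
    sensor_id="sensor_1",
    timestamp=int(time.time()),
    value=42.7,
    unit="kWh",
    room="Conference Room A",
)
# Inside an async context with a constructed service:
#     await SensorService(db, redis_client).ingest_sensor_data(reading)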

View File

@@ -15,7 +15,7 @@ from typing import List, Optional
 from models import (
     TokenGenerateRequest, TokenResponse, TokenValidationResponse,
-    TokenListResponse, HealthResponse
+    TokenListResponse, HealthResponse, Token
 )
 from database import connect_to_mongo, close_mongo_connection, get_database
 from token_service import TokenService
@@ -95,6 +95,16 @@ async def generate_token(request: TokenGenerateRequest, db=Depends(get_db)):
     """Generate a new JWT token"""
     try:
         token_service = TokenService(db)
+        if not request.data_aggregation:
+            request.data_aggregation = False
+        if not request.time_aggregation:
+            request.time_aggregation = False
+        if not request.embargo:
+            request.embargo = False
+        if not request.exp_hours:
+            request.exp_hours = 24
         token = token_service.generate_token(
             name=request.name,
             list_of_resources=request.list_of_resources,
@@ -110,16 +120,16 @@ async def generate_token(request: TokenGenerateRequest, db=Depends(get_db)):
     raise HTTPException(status_code=500, detail="Internal server error")
 @app.post("/tokens/validate", response_model=TokenValidationResponse)
-async def validate_token(token: str, db=Depends(get_db)):
+async def validate_token(token: Token, db=Depends(get_db)):
     """Validate and decode a JWT token"""
     try:
         token_service = TokenService(db)
-        is_valid = await token_service.is_token_valid(token)
-        decoded = token_service.decode_token(token) if is_valid else None
+        is_valid = await token_service.is_token_valid(token.token)
+        decoded = token_service.decode_token(token.token) if is_valid else None
         return TokenValidationResponse(
             valid=is_valid,
-            token=token,
+            token=token.token,
             decoded=decoded if is_valid and "error" not in (decoded or {}) else None,
             error=decoded.get("error") if decoded and "error" in decoded else None
         )
@@ -128,11 +138,11 @@ async def validate_token(token: str, db=Depends(get_db)):
     raise HTTPException(status_code=500, detail="Internal server error")
 @app.post("/tokens/save")
-async def save_token(token: str, db=Depends(get_db)):
+async def save_token(token: Token, db=Depends(get_db)):
     """Save a token to database"""
     try:
         token_service = TokenService(db)
-        result = await token_service.insert_token(token)
+        result = await token_service.insert_token(token.token)
         return result
     except ValueError as e:
         raise HTTPException(status_code=400, detail=str(e))

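With the Token model, /tokens/validate and /tokens/save now take a JSON body instead of a bare string parameter; a hedged example (the URL is a placeholder for wherever TOKEN_SERVICE_URL points, and the JWT is fake):

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as http:
        payload = {"token": "eyJhbGciOiJIUzI1NiJ9.placeholder.signature"}  # placeholder JWT
        async with http.post("http://localhost:8010/tokens/validate", json=payload) as r:
            print(r.status, await r.json())

asyncio.run(main())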
View File

@@ -6,6 +6,9 @@ from pydantic import BaseModel, Field
 from typing import List, Optional, Dict, Any
 from datetime import datetime
+class Token(BaseModel):
+    token: str = Field(..., description="JWT token")
 class TokenGenerateRequest(BaseModel):
     """Request model for token generation"""
     name: str = Field(..., description="Token owner name")

View File

@@ -126,7 +126,6 @@ class TokenService:
     async def is_token_valid(self, token: str) -> bool:
         """Check if token is valid and active"""
-        # Check if token exists and is active in database
         token_record = await self.tokens_collection.find_one({
             "token": token,
             "active": True
@@ -135,7 +134,6 @@ class TokenService:
         if not token_record:
             return False
-        # Verify JWT signature and expiration
         decoded = self.decode_token(token)
         return decoded is not None and "error" not in decoded
@@ -147,7 +145,7 @@ class TokenService:
     async def cleanup_expired_tokens(self) -> int:
         """Remove expired tokens from database"""
-        now = datetime.utcnow()
+        now = datetime.now()
         # Delete tokens that have expired
         result = await self.tokens_collection.delete_many({