From a7a18e6295c5225b0dcf836eaa46ee2963685a5e Mon Sep 17 00:00:00 2001 From: rafaeldpsilva Date: Tue, 9 Sep 2025 13:46:42 +0100 Subject: [PATCH] first commit --- ARCHITECTURE.md | 139 +++++ __pycache__/api.cpython-39.pyc | Bin 0 -> 11883 bytes __pycache__/database.cpython-312.pyc | Bin 0 -> 12476 bytes __pycache__/database.cpython-39.pyc | Bin 0 -> 6816 bytes __pycache__/main.cpython-312.pyc | Bin 0 -> 9238 bytes __pycache__/main.cpython-39.pyc | Bin 0 -> 5408 bytes __pycache__/main_layered.cpython-312.pyc | Bin 0 -> 11581 bytes __pycache__/main_layered.cpython-39.pyc | Bin 0 -> 6855 bytes __pycache__/models.cpython-39.pyc | Bin 0 -> 11314 bytes __pycache__/persistence.cpython-39.pyc | Bin 0 -> 11307 bytes api.py | 582 ++++++++++++++++++ data_simulator.py | 54 ++ database.py | 220 +++++++ layers/__init__.py | 1 + layers/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 171 bytes layers/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 165 bytes layers/business/__init__.py | 1 + .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 174 bytes .../analytics_service.cpython-39.pyc | Bin 0 -> 8561 bytes .../cleanup_service.cpython-39.pyc | Bin 0 -> 6456 bytes .../__pycache__/room_service.cpython-39.pyc | Bin 0 -> 6572 bytes .../__pycache__/sensor_service.cpython-39.pyc | Bin 0 -> 8935 bytes layers/business/analytics_service.py | 300 +++++++++ layers/business/cleanup_service.py | 234 +++++++ layers/business/room_service.py | 262 ++++++++ layers/business/sensor_service.py | 328 ++++++++++ layers/infrastructure/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 186 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 180 bytes .../database_connection.cpython-312.pyc | Bin 0 -> 5584 bytes .../database_connection.cpython-39.pyc | Bin 0 -> 3133 bytes .../redis_connection.cpython-39.pyc | Bin 0 -> 3383 bytes .../__pycache__/repositories.cpython-39.pyc | Bin 0 -> 14132 bytes layers/infrastructure/database_connection.py | 95 +++ layers/infrastructure/redis_connection.py | 80 +++ layers/infrastructure/repositories.py | 362 +++++++++++ layers/presentation/__init__.py | 1 + .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 178 bytes .../__pycache__/api_routes.cpython-39.pyc | Bin 0 -> 10672 bytes .../redis_subscriber.cpython-39.pyc | Bin 0 -> 4387 bytes .../websocket_handler.cpython-39.pyc | Bin 0 -> 3740 bytes layers/presentation/api_routes.py | 404 ++++++++++++ layers/presentation/redis_subscriber.py | 128 ++++ layers/presentation/websocket_handler.py | 97 +++ main.py | 202 ++++++ main_layered.py | 273 ++++++++ microservices/DEPLOYMENT_GUIDE.md | 422 +++++++++++++ microservices/README.md | 97 +++ microservices/api-gateway/Dockerfile | 26 + microservices/api-gateway/auth_middleware.py | 89 +++ microservices/api-gateway/load_balancer.py | 124 ++++ microservices/api-gateway/main.py | 352 +++++++++++ microservices/api-gateway/models.py | 77 +++ microservices/api-gateway/requirements.txt | 5 + microservices/api-gateway/service_registry.py | 194 ++++++ microservices/battery-service/Dockerfile | 26 + .../battery-service/battery_service.py | 414 +++++++++++++ microservices/battery-service/database.py | 104 ++++ microservices/battery-service/main.py | 262 ++++++++ microservices/battery-service/models.py | 157 +++++ .../battery-service/requirements.txt | 7 + microservices/demand-response-service/main.py | 383 ++++++++++++ microservices/deploy.sh | 309 ++++++++++ microservices/docker-compose.yml | 193 ++++++ microservices/token-service/Dockerfile | 25 + microservices/token-service/database.py | 65 
++ microservices/token-service/main.py | 190 ++++++ microservices/token-service/models.py | 55 ++ microservices/token-service/requirements.txt | 7 + microservices/token-service/token_service.py | 157 +++++ microservices_example.md | 84 +++ models.py | 236 +++++++ persistence.py | 448 ++++++++++++++ requirements.txt | 10 + services/__init__.py | 1 + services/token_service.py | 174 ++++++ test_structure.py | 221 +++++++ 77 files changed, 8678 insertions(+) create mode 100644 ARCHITECTURE.md create mode 100644 __pycache__/api.cpython-39.pyc create mode 100644 __pycache__/database.cpython-312.pyc create mode 100644 __pycache__/database.cpython-39.pyc create mode 100644 __pycache__/main.cpython-312.pyc create mode 100644 __pycache__/main.cpython-39.pyc create mode 100644 __pycache__/main_layered.cpython-312.pyc create mode 100644 __pycache__/main_layered.cpython-39.pyc create mode 100644 __pycache__/models.cpython-39.pyc create mode 100644 __pycache__/persistence.cpython-39.pyc create mode 100644 api.py create mode 100644 data_simulator.py create mode 100644 database.py create mode 100644 layers/__init__.py create mode 100644 layers/__pycache__/__init__.cpython-312.pyc create mode 100644 layers/__pycache__/__init__.cpython-39.pyc create mode 100644 layers/business/__init__.py create mode 100644 layers/business/__pycache__/__init__.cpython-39.pyc create mode 100644 layers/business/__pycache__/analytics_service.cpython-39.pyc create mode 100644 layers/business/__pycache__/cleanup_service.cpython-39.pyc create mode 100644 layers/business/__pycache__/room_service.cpython-39.pyc create mode 100644 layers/business/__pycache__/sensor_service.cpython-39.pyc create mode 100644 layers/business/analytics_service.py create mode 100644 layers/business/cleanup_service.py create mode 100644 layers/business/room_service.py create mode 100644 layers/business/sensor_service.py create mode 100644 layers/infrastructure/__init__.py create mode 100644 layers/infrastructure/__pycache__/__init__.cpython-312.pyc create mode 100644 layers/infrastructure/__pycache__/__init__.cpython-39.pyc create mode 100644 layers/infrastructure/__pycache__/database_connection.cpython-312.pyc create mode 100644 layers/infrastructure/__pycache__/database_connection.cpython-39.pyc create mode 100644 layers/infrastructure/__pycache__/redis_connection.cpython-39.pyc create mode 100644 layers/infrastructure/__pycache__/repositories.cpython-39.pyc create mode 100644 layers/infrastructure/database_connection.py create mode 100644 layers/infrastructure/redis_connection.py create mode 100644 layers/infrastructure/repositories.py create mode 100644 layers/presentation/__init__.py create mode 100644 layers/presentation/__pycache__/__init__.cpython-39.pyc create mode 100644 layers/presentation/__pycache__/api_routes.cpython-39.pyc create mode 100644 layers/presentation/__pycache__/redis_subscriber.cpython-39.pyc create mode 100644 layers/presentation/__pycache__/websocket_handler.cpython-39.pyc create mode 100644 layers/presentation/api_routes.py create mode 100644 layers/presentation/redis_subscriber.py create mode 100644 layers/presentation/websocket_handler.py create mode 100644 main.py create mode 100644 main_layered.py create mode 100644 microservices/DEPLOYMENT_GUIDE.md create mode 100644 microservices/README.md create mode 100644 microservices/api-gateway/Dockerfile create mode 100644 microservices/api-gateway/auth_middleware.py create mode 100644 microservices/api-gateway/load_balancer.py create mode 100644 microservices/api-gateway/main.py 
 create mode 100644 microservices/api-gateway/models.py
 create mode 100644 microservices/api-gateway/requirements.txt
 create mode 100644 microservices/api-gateway/service_registry.py
 create mode 100644 microservices/battery-service/Dockerfile
 create mode 100644 microservices/battery-service/battery_service.py
 create mode 100644 microservices/battery-service/database.py
 create mode 100644 microservices/battery-service/main.py
 create mode 100644 microservices/battery-service/models.py
 create mode 100644 microservices/battery-service/requirements.txt
 create mode 100644 microservices/demand-response-service/main.py
 create mode 100755 microservices/deploy.sh
 create mode 100644 microservices/docker-compose.yml
 create mode 100644 microservices/token-service/Dockerfile
 create mode 100644 microservices/token-service/database.py
 create mode 100644 microservices/token-service/main.py
 create mode 100644 microservices/token-service/models.py
 create mode 100644 microservices/token-service/requirements.txt
 create mode 100644 microservices/token-service/token_service.py
 create mode 100644 microservices_example.md
 create mode 100644 models.py
 create mode 100644 persistence.py
 create mode 100644 requirements.txt
 create mode 100644 services/__init__.py
 create mode 100644 services/token_service.py
 create mode 100644 test_structure.py

diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md
new file mode 100644
index 0000000..4dddb67
--- /dev/null
+++ b/ARCHITECTURE.md
@@ -0,0 +1,139 @@
+# Backend Architecture Restructuring
+
+## Overview
+
+The backend has been restructured from a monolithic approach to a clean **3-layer architecture** with proper separation of concerns.
+
+## Architecture Layers
+
+### 1. Infrastructure Layer (`layers/infrastructure/`)
+**Responsibility**: Data access, external services, and low-level operations
+
+- **`database_connection.py`** - MongoDB connection management and indexing
+- **`redis_connection.py`** - Redis connection and basic operations
+- **`repositories.py`** - Data access layer with repository pattern
+
+**Key Principles**:
+- No business logic
+- Only handles data persistence and retrieval
+- Provides abstractions for external services
+
+### 2. Business Layer (`layers/business/`)
+**Responsibility**: Business logic, data processing, and core application rules
+
+- **`sensor_service.py`** - Sensor data processing and validation
+- **`room_service.py`** - Room metrics calculation and aggregation
+- **`analytics_service.py`** - Analytics calculations and reporting
+- **`cleanup_service.py`** - Data retention and maintenance
+
+**Key Principles**:
+- Contains all business rules and validation
+- Independent of presentation concerns
+- Uses infrastructure layer for data access (see the sketch below)
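+
+For example, a business-layer service depends only on a repository abstraction provided by the infrastructure layer. The names below are illustrative rather than the exact classes in `layers/business/room_service.py` and `layers/infrastructure/repositories.py`; a minimal sketch of the pattern:
+
+```python
+from dataclasses import dataclass
+from typing import Any, Dict, List, Protocol
+
+
+class RoomRepository(Protocol):
+    """Infrastructure-layer contract: persistence only, no business rules."""
+
+    async def get_readings(self, room: str, limit: int) -> List[Dict[str, Any]]: ...
+
+
+@dataclass
+class RoomService:
+    """Business-layer service: aggregation rules live here, not in the repository."""
+
+    repository: RoomRepository
+
+    async def average_co2(self, room: str, limit: int = 100) -> float:
+        readings = await self.repository.get_readings(room, limit)
+        values = [r["co2"]["value"] for r in readings if "co2" in r]
+        return sum(values) / len(values) if values else 0.0
+```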
+### 3. Presentation Layer (`layers/presentation/`)
+**Responsibility**: HTTP endpoints, WebSocket handling, and user interface
+
+- **`api_routes.py`** - REST API endpoints and request/response handling
+- **`websocket_handler.py`** - WebSocket connection management
+- **`redis_subscriber.py`** - Real-time data broadcasting
+
+**Key Principles**:
+- Handles HTTP requests and responses
+- Manages real-time communications
+- Delegates business logic to business layer
+
+## File Comparison
+
+### Before (Monolithic)
+```
+main.py (203 lines)          # Mixed concerns
+api.py (506 lines)           # API + some business logic
+database.py (220 lines)      # DB + Redis + cleanup
+persistence.py (448 lines)   # Business + data access
+models.py (236 lines)        # Data models
+```
+
+### After (Layered)
+```
+Infrastructure Layer:
+├── database_connection.py (114 lines)  # Pure DB connection
+├── redis_connection.py (89 lines)      # Pure Redis connection
+└── repositories.py (376 lines)         # Clean data access
+
+Business Layer:
+├── sensor_service.py (380 lines)       # Sensor business logic
+├── room_service.py (242 lines)         # Room business logic
+├── analytics_service.py (333 lines)    # Analytics business logic
+└── cleanup_service.py (278 lines)      # Cleanup business logic
+
+Presentation Layer:
+├── api_routes.py (430 lines)           # Pure API endpoints
+├── websocket_handler.py (103 lines)    # WebSocket management
+└── redis_subscriber.py (148 lines)     # Real-time broadcasting
+
+Core:
+├── main_layered.py (272 lines)         # Clean application entry
+└── models.py (236 lines)               # Unchanged data models
+```
+
+## Key Improvements
+
+### 1. **Separation of Concerns**
+- Each layer has a single, well-defined responsibility
+- Infrastructure concerns isolated from business logic
+- Business logic separated from presentation
+
+### 2. **Testability**
+- Each layer can be tested independently
+- Business logic testable without database dependencies
+- Infrastructure layer testable without business complexity
+
+### 3. **Maintainability**
+- Changes in one layer don't affect others
+- Clear boundaries make code easier to understand
+- Reduced coupling between components
+
+### 4. **Scalability**
+- Layers can be scaled independently
+- Easy to replace implementations within layers
+- Clear extension points for new features
+
+### 5. **Dependency Management**
+- Clear dependency flow: Presentation → Business → Infrastructure
+- No circular dependencies
+- Infrastructure layer has no knowledge of business rules
+
+## Usage
+
+### Running the Layered Application
+```bash
+# Use the new layered main file
+conda activate dashboard
+uvicorn main_layered:app --reload
+```
+
+### Testing the Structure
+```bash
+# Validate the architecture
+python test_structure.py
+```
+
+## Benefits Achieved
+
+✅ **Clear separation of concerns**
+✅ **Infrastructure isolated from business logic**
+✅ **Business logic separated from presentation**
+✅ **Easy to test individual layers**
+✅ **Maintainable and scalable structure**
+✅ **No layering violations detected**
+✅ **2,290+ lines properly organized across 10+ files**
+
+## Migration Path
+
+The original files are preserved, so you can:
+1. Test the new layered architecture with `main_layered.py`
+2. Gradually migrate consumers to use the new structure
+3. Remove old files once confident in the new architecture
+
+Both architectures can coexist during the transition period.
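+
+The `python test_structure.py` check referenced under Usage is not reproduced here. As a rough illustration (not the actual script), a layering check that enforces the dependency flow above can be as small as:
+
+```python
+import ast
+import pathlib
+
+# Illustrative only - not the contents of test_structure.py. It flags imports that
+# would violate the dependency flow Presentation -> Business -> Infrastructure.
+FORBIDDEN = {
+    "layers/business": ("layers.presentation",),
+    "layers/infrastructure": ("layers.presentation", "layers.business"),
+}
+
+
+def imported_modules(path: pathlib.Path) -> set:
+    """Collect every module name imported by a source file."""
+    tree = ast.parse(path.read_text())
+    modules = set()
+    for node in ast.walk(tree):
+        if isinstance(node, ast.Import):
+            modules.update(alias.name for alias in node.names)
+        elif isinstance(node, ast.ImportFrom) and node.module:
+            modules.add(node.module)
+    return modules
+
+
+violations = []
+for layer_dir, banned_prefixes in FORBIDDEN.items():
+    for source in pathlib.Path(layer_dir).rglob("*.py"):
+        for module in imported_modules(source):
+            if module.startswith(banned_prefixes):
+                violations.append(f"{source}: imports {module}")
+
+print("\n".join(violations) or "No layering violations detected")
+```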
\ No newline at end of file
[GIT binary patch data for the committed __pycache__/*.pyc files omitted]
diff --git a/api.py b/api.py
new file mode 100644
index 0000000..193dd5a
--- /dev/null
+++ b/api.py
@@ -0,0 +1,582 @@
+from fastapi import APIRouter, HTTPException, Query, Depends
+from typing import List, Optional, Dict, Any
+from datetime import datetime, timedelta
+import time
+import logging
+from pymongo import ASCENDING, DESCENDING
+
+from database import get_database, redis_manager
+from models import (
+    DataQuery, DataResponse, SensorReading, SensorMetadata,
+    RoomMetrics, SystemEvent, SensorType, SensorStatus
+)
+from persistence import persistence_service
+from services.token_service import TokenService
+
+logger = logging.getLogger(__name__)
+router = APIRouter()
+
+# Dependency to get database
+async def get_db():
+    return await get_database()
+
+@router.get("/sensors", summary="Get all sensors")
+async def get_sensors(
+    room: Optional[str] = Query(None, description="Filter by room"),
+    sensor_type: Optional[SensorType] = Query(None, description="Filter by sensor type"),
+    status: Optional[SensorStatus] = Query(None, description="Filter by status"),
+    db=Depends(get_db)
+):
+    """Get list of all registered sensors with optional filtering"""
+    try:
+        # Build query
+        query = {}
+        if room:
+            query["room"] = room
+        if sensor_type:
+            query["sensor_type"] = sensor_type.value
+        if status:
+            query["status"] = status.value
+
+        # Execute query
+        cursor = db.sensor_metadata.find(query).sort("created_at", DESCENDING)
+        sensors = await cursor.to_list(length=None)
+
+        # Convert ObjectId to string
+        for sensor in sensors:
+            sensor["_id"] = str(sensor["_id"])
+
+        return {
+            "sensors": sensors,
+            "count": len(sensors),
+            "query": query
+        }
+
+    except Exception as e:
+        logger.error(f"Error getting sensors: {e}")
+        raise HTTPException(status_code=500, detail="Internal server error")
+
+@router.get("/sensors/{sensor_id}", summary="Get sensor details")
+async def get_sensor(sensor_id: str, db=Depends(get_db)):
+    """Get detailed information about a specific sensor"""
+    try:
+        # Get sensor metadata
+        sensor = await db.sensor_metadata.find_one({"sensor_id": sensor_id})
+        if not sensor:
+            raise HTTPException(status_code=404, detail="Sensor not found")
+
+        sensor["_id"] = str(sensor["_id"])
+
+        # Get recent readings (last 24 hours)
+        recent_readings = await persistence_service.get_recent_readings(
+            sensor_id=sensor_id,
+            limit=100,
+            minutes=1440  # 24 hours
+        )
+
+        # Get latest reading from Redis
+        latest_reading = await redis_manager.get_sensor_data(sensor_id)
+
+        return {
+            "sensor": sensor,
+            "latest_reading": latest_reading,
+            "recent_readings_count": len(recent_readings),
+            "recent_readings": recent_readings[:10]  # Return only 10 most recent
+        }
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error getting sensor {sensor_id}: {e}")
+        raise HTTPException(status_code=500, detail="Internal server error")
+
+@router.get("/sensors/{sensor_id}/data", summary="Get sensor historical data")
+async def get_sensor_data(
+    sensor_id: str,
+    start_time: Optional[int] = Query(None,
description="Start timestamp (Unix)"), + end_time: Optional[int] = Query(None, description="End timestamp (Unix)"), + limit: int = Query(100, description="Maximum records to return"), + offset: int = Query(0, description="Records to skip"), + db=Depends(get_db) +): + """Get historical data for a specific sensor""" + try: + start_query_time = time.time() + + # Build time range query + query = {"sensor_id": sensor_id} + + if start_time or end_time: + time_query = {} + if start_time: + time_query["$gte"] = datetime.fromtimestamp(start_time) + if end_time: + time_query["$lte"] = datetime.fromtimestamp(end_time) + query["created_at"] = time_query + + # Get total count + total_count = await db.sensor_readings.count_documents(query) + + # Execute query with pagination + cursor = db.sensor_readings.find(query).sort("timestamp", DESCENDING).skip(offset).limit(limit) + readings = await cursor.to_list(length=limit) + + # Convert ObjectId to string + for reading in readings: + reading["_id"] = str(reading["_id"]) + + execution_time = (time.time() - start_query_time) * 1000 # Convert to milliseconds + + return DataResponse( + data=readings, + total_count=total_count, + query=DataQuery( + sensor_ids=[sensor_id], + start_time=start_time, + end_time=end_time, + limit=limit, + offset=offset + ), + execution_time_ms=execution_time + ) + + except Exception as e: + logger.error(f"Error getting sensor data for {sensor_id}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.get("/rooms", summary="Get all rooms") +async def get_rooms(db=Depends(get_db)): + """Get list of all rooms with sensor counts""" + try: + # Get distinct rooms from sensor readings + rooms = await db.sensor_readings.distinct("room", {"room": {"$ne": None}}) + + room_data = [] + for room in rooms: + # Get sensor count for each room + sensor_count = len(await db.sensor_readings.distinct("sensor_id", {"room": room})) + + # Get latest room metrics from Redis + room_metrics = await redis_manager.get_room_metrics(room) + + room_data.append({ + "room": room, + "sensor_count": sensor_count, + "latest_metrics": room_metrics + }) + + return { + "rooms": room_data, + "count": len(room_data) + } + + except Exception as e: + logger.error(f"Error getting rooms: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.get("/rooms/{room_name}/data", summary="Get room historical data") +async def get_room_data( + room_name: str, + start_time: Optional[int] = Query(None, description="Start timestamp (Unix)"), + end_time: Optional[int] = Query(None, description="End timestamp (Unix)"), + limit: int = Query(100, description="Maximum records to return"), + db=Depends(get_db) +): + """Get historical data for a specific room""" + try: + start_query_time = time.time() + + # Build query for room metrics + query = {"room": room_name} + + if start_time or end_time: + time_query = {} + if start_time: + time_query["$gte"] = datetime.fromtimestamp(start_time) + if end_time: + time_query["$lte"] = datetime.fromtimestamp(end_time) + query["created_at"] = time_query + + # Get room metrics + cursor = db.room_metrics.find(query).sort("timestamp", DESCENDING).limit(limit) + room_metrics = await cursor.to_list(length=limit) + + # Also get sensor readings for the room + sensor_query = {"room": room_name} + if "created_at" in query: + sensor_query["created_at"] = query["created_at"] + + sensor_cursor = db.sensor_readings.find(sensor_query).sort("timestamp", DESCENDING).limit(limit) + sensor_readings = await 
sensor_cursor.to_list(length=limit) + + # Convert ObjectId to string + for item in room_metrics + sensor_readings: + item["_id"] = str(item["_id"]) + + execution_time = (time.time() - start_query_time) * 1000 + + return { + "room": room_name, + "room_metrics": room_metrics, + "sensor_readings": sensor_readings, + "execution_time_ms": execution_time + } + + except Exception as e: + logger.error(f"Error getting room data for {room_name}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.post("/data/query", summary="Advanced data query", response_model=DataResponse) +async def query_data(query_params: DataQuery, db=Depends(get_db)): + """Advanced data querying with multiple filters and aggregations""" + try: + start_query_time = time.time() + + # Build MongoDB query + mongo_query = {} + + # Sensor filters + if query_params.sensor_ids: + mongo_query["sensor_id"] = {"$in": query_params.sensor_ids} + + if query_params.rooms: + mongo_query["room"] = {"$in": query_params.rooms} + + if query_params.sensor_types: + mongo_query["sensor_type"] = {"$in": [st.value for st in query_params.sensor_types]} + + # Time range + if query_params.start_time or query_params.end_time: + time_query = {} + if query_params.start_time: + time_query["$gte"] = datetime.fromtimestamp(query_params.start_time) + if query_params.end_time: + time_query["$lte"] = datetime.fromtimestamp(query_params.end_time) + mongo_query["created_at"] = time_query + + # Get total count + total_count = await db.sensor_readings.count_documents(mongo_query) + + # Execute query with pagination and sorting + sort_direction = DESCENDING if query_params.sort_order == "desc" else ASCENDING + + cursor = db.sensor_readings.find(mongo_query).sort( + query_params.sort_by, sort_direction + ).skip(query_params.offset).limit(query_params.limit) + + readings = await cursor.to_list(length=query_params.limit) + + # Convert ObjectId to string + for reading in readings: + reading["_id"] = str(reading["_id"]) + + execution_time = (time.time() - start_query_time) * 1000 + + return DataResponse( + data=readings, + total_count=total_count, + query=query_params, + execution_time_ms=execution_time + ) + + except Exception as e: + logger.error(f"Error executing data query: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.get("/analytics/summary", summary="Get analytics summary") +async def get_analytics_summary( + hours: int = Query(24, description="Hours of data to analyze"), + db=Depends(get_db) +): + """Get analytics summary for the specified time period""" + try: + start_time = datetime.utcnow() - timedelta(hours=hours) + + # Aggregation pipeline for analytics + pipeline = [ + {"$match": {"created_at": {"$gte": start_time}}}, + {"$group": { + "_id": { + "sensor_id": "$sensor_id", + "room": "$room", + "sensor_type": "$sensor_type" + }, + "reading_count": {"$sum": 1}, + "avg_energy": {"$avg": "$energy.value"}, + "total_energy": {"$sum": "$energy.value"}, + "avg_co2": {"$avg": "$co2.value"}, + "max_co2": {"$max": "$co2.value"}, + "avg_temperature": {"$avg": "$temperature.value"}, + "latest_timestamp": {"$max": "$timestamp"} + }}, + {"$sort": {"total_energy": -1}} + ] + + cursor = db.sensor_readings.aggregate(pipeline) + analytics = await cursor.to_list(length=None) + + # Room-level summary + room_pipeline = [ + {"$match": {"created_at": {"$gte": start_time}, "room": {"$ne": None}}}, + {"$group": { + "_id": "$room", + "sensor_count": {"$addToSet": "$sensor_id"}, + "total_energy": {"$sum": 
"$energy.value"}, + "avg_co2": {"$avg": "$co2.value"}, + "max_co2": {"$max": "$co2.value"}, + "reading_count": {"$sum": 1} + }}, + {"$project": { + "room": "$_id", + "sensor_count": {"$size": "$sensor_count"}, + "total_energy": 1, + "avg_co2": 1, + "max_co2": 1, + "reading_count": 1 + }}, + {"$sort": {"total_energy": -1}} + ] + + room_cursor = db.sensor_readings.aggregate(room_pipeline) + room_analytics = await room_cursor.to_list(length=None) + + return { + "period_hours": hours, + "start_time": start_time.isoformat(), + "sensor_analytics": analytics, + "room_analytics": room_analytics, + "summary": { + "total_sensors_analyzed": len(analytics), + "total_rooms_analyzed": len(room_analytics), + "total_readings": sum(item["reading_count"] for item in analytics) + } + } + + except Exception as e: + logger.error(f"Error getting analytics summary: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.get("/events", summary="Get system events") +async def get_events( + severity: Optional[str] = Query(None, description="Filter by severity"), + event_type: Optional[str] = Query(None, description="Filter by event type"), + hours: int = Query(24, description="Hours of events to retrieve"), + limit: int = Query(50, description="Maximum events to return"), + db=Depends(get_db) +): + """Get recent system events and alerts""" + try: + start_time = datetime.utcnow() - timedelta(hours=hours) + + # Build query + query = {"created_at": {"$gte": start_time}} + + if severity: + query["severity"] = severity + + if event_type: + query["event_type"] = event_type + + # Execute query + cursor = db.system_events.find(query).sort("timestamp", DESCENDING).limit(limit) + events = await cursor.to_list(length=limit) + + # Convert ObjectId to string + for event in events: + event["_id"] = str(event["_id"]) + + return { + "events": events, + "count": len(events), + "period_hours": hours + } + + except Exception as e: + logger.error(f"Error getting events: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.put("/sensors/{sensor_id}/metadata", summary="Update sensor metadata") +async def update_sensor_metadata( + sensor_id: str, + metadata: dict, + db=Depends(get_db) +): + """Update sensor metadata""" + try: + # Update timestamp + metadata["updated_at"] = datetime.utcnow() + + result = await db.sensor_metadata.update_one( + {"sensor_id": sensor_id}, + {"$set": metadata} + ) + + if result.matched_count == 0: + raise HTTPException(status_code=404, detail="Sensor not found") + + return {"message": "Sensor metadata updated successfully", "modified": result.modified_count > 0} + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error updating sensor metadata: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.delete("/sensors/{sensor_id}", summary="Delete sensor and all its data") +async def delete_sensor(sensor_id: str, db=Depends(get_db)): + """Delete a sensor and all its associated data""" + try: + # Delete sensor readings + readings_result = await db.sensor_readings.delete_many({"sensor_id": sensor_id}) + + # Delete sensor metadata + metadata_result = await db.sensor_metadata.delete_one({"sensor_id": sensor_id}) + + # Delete from Redis cache + await redis_manager.redis_client.delete(f"sensor:latest:{sensor_id}") + await redis_manager.redis_client.delete(f"sensor:status:{sensor_id}") + + if metadata_result.deleted_count == 0: + raise HTTPException(status_code=404, detail="Sensor not found") + + 
return { + "message": "Sensor deleted successfully", + "sensor_id": sensor_id, + "readings_deleted": readings_result.deleted_count, + "metadata_deleted": metadata_result.deleted_count + } + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error deleting sensor {sensor_id}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.get("/export", summary="Export data") +async def export_data( + start_time: int = Query(..., description="Start timestamp (Unix)"), + end_time: int = Query(..., description="End timestamp (Unix)"), + sensor_ids: Optional[str] = Query(None, description="Comma-separated sensor IDs"), + format: str = Query("json", description="Export format (json, csv)"), + db=Depends(get_db) +): + """Export sensor data for the specified time range""" + try: + # Build query + query = { + "created_at": { + "$gte": datetime.fromtimestamp(start_time), + "$lte": datetime.fromtimestamp(end_time) + } + } + + if sensor_ids: + sensor_list = [sid.strip() for sid in sensor_ids.split(",")] + query["sensor_id"] = {"$in": sensor_list} + + # Get data + cursor = db.sensor_readings.find(query).sort("timestamp", ASCENDING) + readings = await cursor.to_list(length=None) + + # Convert ObjectId to string + for reading in readings: + reading["_id"] = str(reading["_id"]) + # Convert datetime to ISO string for JSON serialization + if "created_at" in reading: + reading["created_at"] = reading["created_at"].isoformat() + + if format.lower() == "csv": + # TODO: Implement CSV export + raise HTTPException(status_code=501, detail="CSV export not yet implemented") + + return { + "data": readings, + "count": len(readings), + "export_params": { + "start_time": start_time, + "end_time": end_time, + "sensor_ids": sensor_ids.split(",") if sensor_ids else None, + "format": format + } + } + + except HTTPException: + raise + except Exception as e: + logger.error(f"Error exporting data: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +# Token Management Endpoints +@router.get("/tokens", summary="Get all tokens") +async def get_tokens(db=Depends(get_db)): + """Get list of all tokens""" + try: + token_service = TokenService(db) + tokens = await token_service.get_tokens() + return {"tokens": tokens} + except Exception as e: + logger.error(f"Error getting tokens: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.post("/tokens/generate", summary="Generate new token") +async def generate_token( + name: str, + list_of_resources: List[str], + data_aggregation: bool = False, + time_aggregation: bool = False, + embargo: int = 0, + exp_hours: int = 24, + db=Depends(get_db) +): + """Generate a new JWT token with specified permissions""" + try: + token_service = TokenService(db) + token = token_service.generate_token( + name=name, + list_of_resources=list_of_resources, + data_aggregation=data_aggregation, + time_aggregation=time_aggregation, + embargo=embargo, + exp_hours=exp_hours + ) + return {"token": token} + except Exception as e: + logger.error(f"Error generating token: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.post("/tokens/check", summary="Validate token") +async def check_token(token: str, db=Depends(get_db)): + """Check token validity and decode payload""" + try: + token_service = TokenService(db) + decoded = token_service.decode_token(token) + return decoded + except Exception as e: + logger.error(f"Error checking token: {e}") + raise 
HTTPException(status_code=500, detail="Internal server error") + +@router.post("/tokens/save", summary="Save token to database") +async def save_token(token: str, db=Depends(get_db)): + """Save a valid token to the database""" + try: + token_service = TokenService(db) + result = await token_service.insert_token(token) + return result + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"Error saving token: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@router.post("/tokens/revoke", summary="Revoke token") +async def revoke_token(token: str, db=Depends(get_db)): + """Revoke a token by marking it as inactive""" + try: + token_service = TokenService(db) + result = await token_service.revoke_token(token) + return result + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) + except Exception as e: + logger.error(f"Error revoking token: {e}") + raise HTTPException(status_code=500, detail="Internal server error") \ No newline at end of file diff --git a/data_simulator.py b/data_simulator.py new file mode 100644 index 0000000..922d980 --- /dev/null +++ b/data_simulator.py @@ -0,0 +1,54 @@ + +import redis +import time +import random +import json + +# Connect to Redis +# This assumes Redis is running on localhost:6379 +try: + r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True) + r.ping() + print("Successfully connected to Redis.") +except redis.exceptions.ConnectionError as e: + print(f"Could not connect to Redis: {e}") + exit(1) + +# The channel to publish data to +REDIS_CHANNEL = "energy_data" + +def generate_mock_data(): + """Generates a single mock data point for a random sensor.""" + sensor_id = f"sensor_{random.randint(1, 10)}" + # Simulate energy consumption in kWh + energy_value = round(random.uniform(0.5, 5.0) + (random.random() * 5 * (1 if random.random() > 0.9 else 0)), 4) + + return { + "sensorId": sensor_id, + "timestamp": int(time.time()), + "value": energy_value, + "unit": "kWh" + } + +def main(): + """Main loop to generate and publish data.""" + print(f"Starting data simulation. 
Publishing to Redis channel: '{REDIS_CHANNEL}'") + while True: + try: + data = generate_mock_data() + payload = json.dumps(data) + + r.publish(REDIS_CHANNEL, payload) + print(f"Published: {payload}") + + # Wait for a random interval before sending the next data point + time.sleep(random.uniform(1, 3)) + except KeyboardInterrupt: + print("Stopping data simulation.") + break + except Exception as e: + print(f"An error occurred: {e}") + time.sleep(5) + +if __name__ == "__main__": + main() diff --git a/database.py b/database.py new file mode 100644 index 0000000..0fb25e3 --- /dev/null +++ b/database.py @@ -0,0 +1,220 @@ +import os +from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase +from pymongo import IndexModel, ASCENDING, DESCENDING +from typing import Optional +import asyncio +from datetime import datetime, timedelta +import logging + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +class MongoDB: + client: Optional[AsyncIOMotorClient] = None + database: Optional[AsyncIOMotorDatabase] = None + +# Global MongoDB instance +mongodb = MongoDB() + +async def connect_to_mongo(): + """Create database connection""" + try: + # MongoDB connection string - default to localhost for development + mongodb_url = os.getenv("MONGODB_URL", "mongodb://localhost:27017") + database_name = os.getenv("DATABASE_NAME", "energy_monitoring") + + logger.info(f"Connecting to MongoDB at: {mongodb_url}") + + # Create async MongoDB client + mongodb.client = AsyncIOMotorClient(mongodb_url) + + # Test the connection + await mongodb.client.admin.command('ping') + logger.info("Successfully connected to MongoDB") + + # Get database + mongodb.database = mongodb.client[database_name] + + # Create indexes for better performance + await create_indexes() + + except Exception as e: + logger.error(f"Error connecting to MongoDB: {e}") + raise + +async def close_mongo_connection(): + """Close database connection""" + if mongodb.client: + mongodb.client.close() + logger.info("Disconnected from MongoDB") + +async def create_indexes(): + """Create database indexes for optimal performance""" + try: + # Sensor readings collection indexes + sensor_readings_indexes = [ + IndexModel([("sensor_id", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("timestamp", DESCENDING)]), + IndexModel([("room", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("sensor_type", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("created_at", DESCENDING)]), + ] + await mongodb.database.sensor_readings.create_indexes(sensor_readings_indexes) + + # Room metrics collection indexes + room_metrics_indexes = [ + IndexModel([("room", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("timestamp", DESCENDING)]), + IndexModel([("created_at", DESCENDING)]), + ] + await mongodb.database.room_metrics.create_indexes(room_metrics_indexes) + + # Sensor metadata collection indexes + sensor_metadata_indexes = [ + IndexModel([("sensor_id", ASCENDING)], unique=True), + IndexModel([("room", ASCENDING)]), + IndexModel([("sensor_type", ASCENDING)]), + IndexModel([("status", ASCENDING)]), + ] + await mongodb.database.sensor_metadata.create_indexes(sensor_metadata_indexes) + + # System events collection indexes + system_events_indexes = [ + IndexModel([("timestamp", DESCENDING)]), + IndexModel([("event_type", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("severity", ASCENDING), ("timestamp", DESCENDING)]), + ] + await 
mongodb.database.system_events.create_indexes(system_events_indexes) + + logger.info("Database indexes created successfully") + + except Exception as e: + logger.error(f"Error creating indexes: {e}") + +async def get_database() -> AsyncIOMotorDatabase: + """Get database instance""" + if not mongodb.database: + await connect_to_mongo() + return mongodb.database + +class RedisManager: + """Redis connection and operations manager""" + + def __init__(self): + self.redis_client = None + self.redis_host = os.getenv("REDIS_HOST", "localhost") + self.redis_port = int(os.getenv("REDIS_PORT", "6379")) + self.redis_db = int(os.getenv("REDIS_DB", "0")) + + async def connect(self): + """Connect to Redis""" + try: + import redis.asyncio as redis + self.redis_client = redis.Redis( + host=self.redis_host, + port=self.redis_port, + db=self.redis_db, + decode_responses=True + ) + await self.redis_client.ping() + logger.info("Successfully connected to Redis") + except Exception as e: + logger.error(f"Error connecting to Redis: {e}") + raise + + async def disconnect(self): + """Disconnect from Redis""" + if self.redis_client: + await self.redis_client.close() + logger.info("Disconnected from Redis") + + async def set_sensor_data(self, sensor_id: str, data: dict, expire_time: int = 3600): + """Store latest sensor data in Redis with expiration""" + if not self.redis_client: + await self.connect() + + key = f"sensor:latest:{sensor_id}" + import json + # Store as JSON (not str()) so get_sensor_data's json.loads can parse it; default=str handles datetime fields + await self.redis_client.setex(key, expire_time, json.dumps(data, default=str)) + + async def get_sensor_data(self, sensor_id: str) -> Optional[dict]: + """Get latest sensor data from Redis""" + if not self.redis_client: + await self.connect() + + key = f"sensor:latest:{sensor_id}" + data = await self.redis_client.get(key) + if data: + import json + return json.loads(data) + return None + + async def set_room_metrics(self, room: str, metrics: dict, expire_time: int = 1800): + """Store room aggregated metrics in Redis""" + if not self.redis_client: + await self.connect() + + key = f"room:metrics:{room}" + import json + # Store as JSON (not str()) so get_room_metrics' json.loads can parse it; default=str handles datetime fields + await self.redis_client.setex(key, expire_time, json.dumps(metrics, default=str)) + + async def get_room_metrics(self, room: str) -> Optional[dict]: + """Get room aggregated metrics from Redis""" + if not self.redis_client: + await self.connect() + + key = f"room:metrics:{room}" + data = await self.redis_client.get(key) + if data: + import json + return json.loads(data) + return None + + async def get_all_active_sensors(self) -> list: + """Get list of all sensors with recent data in Redis""" + if not self.redis_client: + await self.connect() + + keys = await self.redis_client.keys("sensor:latest:*") + return [key.replace("sensor:latest:", "") for key in keys] + +# Global Redis manager instance +redis_manager = RedisManager() + +async def cleanup_old_data(): + """Cleanup old data from MongoDB (retention policy)""" + try: + db = await get_database() + + # Delete sensor readings older than 90 days + retention_date = datetime.utcnow() - timedelta(days=90) + result = await db.sensor_readings.delete_many({ + "created_at": {"$lt": retention_date} + }) + + if result.deleted_count > 0: + logger.info(f"Deleted {result.deleted_count} old sensor readings") + + # Delete room metrics older than 30 days + retention_date = datetime.utcnow() - timedelta(days=30) + result = await db.room_metrics.delete_many({ + "created_at": {"$lt": retention_date} + }) + + if result.deleted_count > 0: + logger.info(f"Deleted {result.deleted_count} old room metrics") + + except Exception as e: + logger.error(f"Error cleaning up old data: {e}") + +# 
Scheduled cleanup task +async def schedule_cleanup(): + """Schedule periodic cleanup of old data""" + while True: + try: + await cleanup_old_data() + # Wait 24 hours before next cleanup + await asyncio.sleep(24 * 60 * 60) + except Exception as e: + logger.error(f"Error in scheduled cleanup: {e}") + # Wait 1 hour before retrying + await asyncio.sleep(60 * 60) \ No newline at end of file diff --git a/layers/__init__.py b/layers/__init__.py new file mode 100644 index 0000000..e8a337c --- /dev/null +++ b/layers/__init__.py @@ -0,0 +1 @@ +# Empty file to make this a Python package \ No newline at end of file diff --git a/layers/__pycache__/__init__.cpython-312.pyc b/layers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..779e97ddc75c48fced4168463981109d000f080b GIT binary patch literal 171 zcmX@j%ge<81SvH;Gqiy8V-N=&d}aZPOlPQM&}8&m$xy@urWO_J s$H!;pWtPOp>lIY~;;_lhPbtkwwJTx;8qEmA#UREg`kf|Qz_8CpR4F^Gc<7=auIATDMB5-AM944RC7D;bJF!U*D*mwrZmZmND! zNMv>NwI!$qDgW=v3@~OepYHSke`xRoRO5DSd^lll$e~InwO%VlUSKr iRIDE#pP83g5+AQuPg`kg8Z7D8CpR4F^Gc<7=auIATDMB5-AM944RC7D;bJF!U*D5uzp5Mv>NwI!$qDgW=v3@~OepYHSke`xRoRO5DSd^lll$e~InwO%VlUSKr rRIHy=TAZ1eT3oCjAD@|*SrQ+wS5SG2!zMRBr8Fni4rJM9AZ7pnc=Ih} literal 0 HcmV?d00001 diff --git a/layers/business/__pycache__/analytics_service.cpython-39.pyc b/layers/business/__pycache__/analytics_service.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2420b29ac1d9f69f428ab3febf010f2af77fc4c8 GIT binary patch literal 8561 zcmbtZ-)|h(b)MflyId}pq(qUFWUr*P>UC^Vb<(DA6xDW|$Zb_RvC_1iCY_A-UM|Pl z-C5t6mBhs|FI0e_fPwU_Kp8-Mi~fYZwFnRtedv1!=t~O})k{&J1?tLf)$iOpvp*?9 zZgw$e?%cUQ&b{~C^L_VBu((*#@Vj#GS9|}st!e*E50gI=4{zg&zYoDQPh&bW+ImO# z^p4>fIz5|hvtxNy$M)=w<2jvzSCIKu+x1*#GpAkbl)RFzare;zFCXjP;!TYenEOy; z?xEpTm~~rg6bE~yce+7)KM9+0?M^=qyF89-?Wh$tYcUu1!X_`hn`hq&_PMCNQQHf; ztj*(EFsj)M+Rc7DNW!Q~*{sHbB&Y?gmf$UwTY7@AXbmHE@roy5ho?o7jJK1ZVWrl4 zVKYh1ce?wjbt{aM)cv4G%?0h9CkEBj({s0ZH;%-IJYZqB^&#&?ahOD6zez1m{%kzF zjVu0Z2(EcL=FnieXEKAC4>iwX=eW%sUf}Mov7s^h$Y2gDJhZ(cFL7r>%jCF(dt|aA zD?PNl1-3p{x6Bq%w>(#?f_9f!6}1-G#$2sswt`xfxmv5Jb%vcqttGaGHQG$8qb0hn zmSnJ%C2G5om{#=zIy{Osm@NN=s$H7;cy?DDP zqK|kJ&n$@d?nHrL+joNIUEXEeZ8D{JJGZ#)ktz9^)ou0mQ`h&yZkYIfl~g62HGQx# zI~iMfsVD0w01LDCqP~bzNARRCx=mS+>f%QOTfp@TT=5Sv6F=1lD~a|up+FW#1@c)6^ATIS!NZam!vGSs!T6Kt{l0LtF)HXO3OSj zhuTNxBWq|L>%XJ1wfD6H3q8qx*g3X_((6a)6a5v~26K>_??Lsyc|>o|vkUAZdkLw9 z?EkXNANOe;*jbCp=L(XdC9BQ!OFk+8&v5WeM?y(%ef+ zP1t0@nI9xit$Hirc+(P5zxT9M&x|%?Pwl#hqRyb4XC?bR-Z0bRs6=W}fpj6up!aAF z8a<&cf@E;7^>ne$yIi#Px9$b)K1T=5=*=u`L7fLrSLzAx$eq>~T)i2T>ZE;~1f5>P zO-(;!>0-W(no+--q$RZNtCrJp5+y-9e=5)$bdna(x=bul&O|3(6`mHY`=d@URyf8) z{2KIr@(jP7r)Is&9~RKWPO%;^_TA_BPT6q1D6`fy5dzT-XQS`)p6_D z7RF>?5Y$aL;Fy!inrMzX5Z6eM1~aWR?6j=b(N`O@gozPK`?Q+1G}`}Zg%-shKNm6+ z8C?3N5b$;_o=ERDDpAV{yj5$I1a0=z=_k!@bYBpT3fk^zF^3|auKPJyaX+i+L!l&2 zi(wqWpTfq{;x`{Oxx}B;0T{NpNNrBtH;RH>rnE=#n@D*}*$8@JkGBDz;;WRqC`U2M z`z@I_-JQ;*4^^Uq0_jpo(I@`@=~PPpZmq6Z zXXx$t%~Qu!&$5hggX}RQkop|p?+SYysp7) zs59Z_hz?DGFxe^1 zR7}?z#8CS|Zhny!5e!h(Jbxe<4ykvuk1W9wRKh42Nlrc=D$f;V~46F2@8 zjr|8MZE$^V37=>BTi4qWc13(+Yt+n~wnVC=wl5(8ZP!TKN1&%nKQ?-2n4W2EKx)kmpnWO%h}+g6c^3BsP)SX_($w<0lm6ZNd)`aI zJO`wXc~3N!7lMfY6g zC;tjF%gjf-hvKOTaG70*-^GhQE=jT+{a5KQ{K;z8p}2(>PwMHxxF@Xd@^~TN^xOO% zZ+mCe8pi(pFxm6>!qy%Ep|>%W-4r3V8h{h88n(Bl%p@-&;bFj;cShxtKaQ1iMQ&Qj zauv6Y(gWly!G!UpHb_R7rDmIV8?FKZ^7hL6?NCbM-v>y6q@V#w z8e;KndPRg;33B%0I>evgiU~zDa|N#5HH~L*-j?xfMK?Zm7Ar`t!`-{O@ys!${M4zC z%U4c&(0F0ndh&C0GDU?8$e?5BFL1?GY`eiPPbR`dgPFx_NJr9BNrmxLVnsPOZDdBxH9zyW!EO_JAFhRy_eJu9iJEHA0+l>_HT 
z4ydcVOX_+B+x_a`&6}XE$}+%UM{cH;2%KT!+N+H32K{#OR;?YxN$o!8cN+rMuZc4x z$gK|wb^ZWOA^t90IgznJS+%E_qBuucc72}*Vz5vj^V$KpZq!ZokjaAm4|acFKG=0) zP;^?R3aKrNq$WzF7QGP{soxq2#1%9_2O{Y~8kX{@(mW+mBYr1-odkK@K?U6;{98b+ zZ-bk~gCg<0EvZ&XCY3g>OryW70CfuTEw=?|q1sexfXbbJnR@sNM8j63Qy%&yGgffVWw(|8?97|5C3^zR=v}iX)@b}G}G zHtQIvNI|p6T)6?i9Tu-nG__?$hCO!XHXLc5V#gBS%Ru(xPmJ z&Z^#-$x8PE5eD5RrxP-cx)5Pke22y)_uz*}rG>p94w6I&QVAHGh#YsxeAC5jZz-7d zmeh%P)IBo7?xdogJW5X};VI=Lqu!&gr#G-5a{4#8Vj@~}{<4e;fY{ZmW>p7V1BhLH zT`zw&cEIcZ(v<^#1^E)X|X}6izI3!UM6vg#AOm!NX)>l zI{~{_>HVuDWPs&0N{!J^Lh+lFN3KB-M1VVlyF2}MTM$o_TN89(95@=d@&P;(sVrDD zsLH&FXp#6bRhAH3RDqg0Wk@D)x=Ip|Wx3Z;BL`&}pBXc04?MTiQ!^=naPaapEYHmL z>A*Z0oS}+e441_Zp{KVbE6!+jB0R4WF3yzx2Q(3X4?;#;B*Z!d(Aen{u@>F=)G0|4 zu7kK|e}FS!yYw&C#Z$n!KIbP+Py#{hqST8BkP55Jf;2_4B}o4r5;;iEg;ywua+RQ1 z21$ra>*5ALTgJ2j&V^4Xq+%fsjR49B-YJYSexfjnm=2<#4h2U5_iO=y8dLn)&}3!R z$IudgfdEeS1Y%$xl24-cYznoYR)JLj{~BWEBT9iX)uRIT>hj?A=RrNq$mG8L6@n^c z)p<}wo{H7ZnDPD;o>;?nd;yS^tCcg835?C7GICe%ych)#chIjwqUIJgqVUA$HN!lul%24lYScYDi4SL@3n5jHv*Fsa%?LTEB-1;(ZbylbD%J zB2(v4E~XCX2wQ!2dT`{q+UG%*=c`T*S*pG{AI_mcWc<$%(hE!FPu2TUKHrKUQM`Mu z^HEUd6uLbT4CCiaT*JqJcB{=_;+-Y~ZGnhj65~jlBywR~&UY0a$s&CTJ%3R+NSF zV!+RLptksk1evEGf|#?x9%ay`_NuwUBTbj42rHuag32vWk6C<%LQpM#N|#yV?V=6^ zNn!#|NdytGsp{({4EdSkRFHD@3jU5>CVKear^jZdOnV1`#VT6G?GMyVK2_=aXE*3@ z-%m@v--%eiP3f}lf7}n+*_(pzv#9C&VhL4K6Gvr1hA8NRfuNYJc$Wn2IT>;i0i|Tn zHKG&^Y+|ruH&en3dWy-MT@YW_+@|2Bp`hE{a zq_mI<$i53Gl@{n*3SOq?!|tv?%tZ8?_(sdO=-aaVm>=?3eSIzh7H`QfFM{k{oyMuh ala$4oNKMZTERp)CZCO|y-8ITa$@o7gy;i~i literal 0 HcmV?d00001 diff --git a/layers/business/__pycache__/cleanup_service.cpython-39.pyc b/layers/business/__pycache__/cleanup_service.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..566988e2f30562984bed9b65186cb66cb2240e7b GIT binary patch literal 6456 zcmbVQ&5s;M74NV4>iO8&^?KKitz;6Ckb!vD5I~VJR)U>`kFZ2Bij@|q)!S9O+v}e0 zadnTAY4wJKeFPM7LW(1Hk2!MW!iE0;Ar!SZ!O3TC0Rq2Q{joE%>m= z#p*NdrZO~>z`Wta&cOA#6HLYfCt!n-;{`DfoWSLSh>N?P%gygkA}`=kGE2? 
zUcwXo4h>fzR>jhoYUxa4`hCSR*cz{q2p?#y!J7ALRvo?81C^Pqec!Me)z=Q|qTl2# zZt~iWb{^scRgWzJ+m_f)h3&ItV7m+06?O*L9$%6pSJ_$g`mBb7SkHRb^CP&KOFVg@ z00uju$ff+KGYZFCI3$+HRk6qNM=;6oI-ckpnpnA~q>2r_LaQ-?b*5l)ZR^#cA(|k` z&O}nIw#em*_c+W?;{o#^&QYGy>--}y)pOBnr$wYe9L_@-cE7H!@L z-N^`Q5p71!x7=~GITqnQ&M(?zPPDfjI)ZI(JML{BuuY$|D%#vGv~07Sq)@+&@ic4N zwikG@ZLiRN>Sz?Tsn*qGeO_=ICEY{XGpSmS)LGnhe0wjPh$yQ=T_+-NNjT+?;2NGk zIX2t5A~9ISWlj~ z89O2#MD8AE6Q8rn%peLE)|5O|ti80efd|X+N%+%2A@QxBC6^={3_r=J5YXVca*tOA zNn;@QIUF`c8#~GBez?2KMb>!75%LThPBaZ%FU;C5ri!^d%dBgcQuo7fEIKsQ@PeIi zsEHmr;tVx(&Uw=(g7t`YXjcm>d934OL@X4ou9~WWc12B|T{x;o3?8at6(dh*Gij(i zpdSH{T1>~JimTKfYDbEyBz=N^h~xSJdbgBd)xd%68`CQ)`l|A_5@?r|xOP})#|LF3 zkKc^L@hRdh&gs>yO3SK~iZEi#*{~snoQtdOlGF4B?H&d#J2>`Ngb0N=3#53Anl3d9 zf?OkhC*}AgNF$Oo$&n#fh3LTSXB-*$octPhN<>}@}Wj5Poo&1;}LIT?1A>4cBgn> zRxX1JQ{i!pWceqEGL=1$#%+0hs9ADRN^5|Rl~R>tR|bW&vwo?nq0ejz5V^=}kk)rvn)3$7-CZeBl7ZdC22=JQ3+SNn2BU+BwL&uO$~3WL;{xP3xXh zQ##s8#2>>2zpp00@stBKo)PhZM#4IRI4K7GT0N&6=&80(N@=iKs(Y#j&yoxBz(|e# z29BeCRUwEqZX04hZXUK+llt&Et=#8SJk5e<4Lr?)XKjvWJ*`WgZSVvndH$*3i9UFC z7I=0Go(=G97d#ttJez4#^6Y^pAmaHx?6}|x47e^Wa4lEblDqG)KK8$ywj^Jz*uRNy zUCg+Wnw|>j;43@xpi;DhcG{Nnu3}z5#k{{N<^=}xp3Ub?)msR6P~+bF>hv#ApRN-^UY-B~qteBuso-}Xdy+N`%fZD?mQ38Z% zWCj#Hd>lm9stCi8J>s$O+-iKXEC=h<8SEo&8P z^H^d$y&H41h$RROUk9VC1iNrU!J_0s zZvAW`kZjBe_R2s!g{g+iqCqouc6lr;~aGg4|rTv>?l@oeR?*Il(mR z7Lr9yISJxp*PL}`<(GBmubCDnQjQ7U43M)L*Mi!Q5qD= z>Yw%uQv+73YkdT@G`C9WmQ~FxGD_^^wTz-uskH!Vh*~O(a#&rjl=tlYF$lKZB0v>GOhQ6QQ5Th-t zMLlHBaU?dOEm z(lTx1GQvZ(n*}q>T32^@z~3En%Yr2 z|2CUN7+Ad5oId}YM$H9-i!kuyuk)cGPmK`{wE2`9J)=l$;q1|?m8mm=M?F<#N?vD; zg_OL(D0noEloSP{2YOm(EppHMO{A$rZOkXeC=D2rx>-=mfUw^xsOMAj7E;`rq(*`- zdCMfdaHb}l>00GX+lL+2nQ^Awxcdd1X)n3*8k{NJ?8QjdjRwwkIEg>Alt&3C<#Jo% zX`H!uhMFNYU!&%$)I5h~s9U|+JYyPJZFdrfJ3Dy-vrNIf$n}cci+uC32TdNIx2%ep zTyJSry<@AAsh`*&Cgf*xSHC1({jvRLEiVf5?H*6{xz?s5nXJ;Xd>2-pffb!H)ylis z%+5);R-ZOon5tZm)sx$&3_b5lio#^`=yXcbm93Kr$V9r521;Q&;UBQy=p{6YhPx_K zs~-Jfi!vQEG$dKtf6c~c^y-DpE!JfoG2xuiQ5>b{MouNVh=z&@;bT;OVd_N9Au7sr zWQj4kbbXJzw+G%%d7G~)DX4k56F~voKdyS=woYbUl%ZG{@KehsD=1BtiDFSO`v)zE zN=13;$^0~Vav?OH9D|Azl@*7}RAvmE5x4EkwCzzyWjnwf+rBe#{NhW)Ce^ZSv5Ya9 zj?$aB07%dcOx6(u6`iH~mKn34%A)u&HRLigV>=9e@h0_;^J@uFrAM^TP%ok-HBmcj z)>h13yWd&+?%K7rZ^-Ys2E9eJRB6ghfV4lW$I~$aQa(S;nl-5S2bA>`R7T*ICMGVb zpL~N9YN4zyt;)KvKu9HESzAY0KQ9uupdYWxnxeQ#^UH`ut3JN)6m`kd_LnuU=5oD8 aDil$r&oG*}0rb@-?gJgo)Ox>Ee*S+yB(&@R literal 0 HcmV?d00001 diff --git a/layers/business/__pycache__/room_service.cpython-39.pyc b/layers/business/__pycache__/room_service.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d170cc407f8c36e9594644e24688fb6bce836a3 GIT binary patch literal 6572 zcmb_gNpBp-74Cg{n!`mDMTrt^k8ODw#U=&YSs*KdZBdeCNTfsBmOOwnX-*Z%re;%B zk1S4_K14d?U;{q*5Wq}8A}0YsVC3eU0{fCfenFiAh9r?qJkVUgR1U& z^}6cStFPWt;qY)^;P){3dhONc4C5cv82s62yn!NbqhdzYU?#H~X49;iO{;2|)V3RT z)2TX5x9T>%s@E)33!3gU{Hl+Bw^3{c)u1_49cq@UCG>f$&=_uxR7aYl)zRiyb<8xl zzkP_0@0iuYmkj2!;$4Fk?^x9d<}4YNpw|f&+wEq#$zu`KWqG+PqZXI4+-R>x^|IvR zR#fM~TRHhkxWPsFqCVht|K0w2FDcq5ECD~GGAg0F^g)NaW#7%s*Jy!OctqtW|iDaz&g$@lEHs&j~8X{q@rn0Ky`Gr@O z;xO(?HL_5zcROLLzHx=$;*I+j&0(vfOS~o9Vv&a|YOOBvPFqHCTWqK!1LF*kM;AGZ zzD z_3u=NKr4Y(MDOs5HD#~>UktGlz8E>MZ<=Kv2pYr;w3)L4r7i9 zb_DIiXiw<&Bs+@skpW4LZJBI}9S3!il_B3zehiYP^|1lIC$?PnBzx+vTb*JjfTX9> zy;({D(|aj26giD5HtraSQ8QypKkeAjPnQ{()|{_+ zq698=Sn9p5iFC8^?6k!&^&BFQ)-r^r^bw*5f_7UIDyDKIZ>-!i#00t?T$^2MH~Fk! 
z9VTbz+VyUew_-Uf!`JE^IolEKbzVoCg>r4V9SSzP9M*5}7MpF*`H-{uB4_o~*(sXo zY$(50i&{}!s~yHP(nDpKKK{K4LS&}2nVbp7#Gw(CCwSZytvV?t|Ix=}W4=X^e?gU) z5>{c!=-o<;bz|L3U{RR;nRVG<&fCVOm00rBHQ1FUsB_y&%*5Ks`)qt+Z##*-V`kh= z;)ouo?zRU`d%Lh>=5EG!o|;Hp4OEVB^oGvFVyd?<9DJ7{2|| zV1>j5r$e0Aj1M8rru&IYa-?skjnjC;X8OJ7&xehAm#m4l9}wYo8v^S!C}J7iUu{v>DC5bTvN2mvT|i2=6YQH}@JnR2Bh#%RRxRUQKu zb!>WVd84Mc;ApL$%ccoApu{2i!m9&!F&E?17?bq*0O*v%$f|iTs;gWmm+|H9sw!T% zUFX`kDVLLUfHJ`%UDaqde}-p?WoCBI(`sSLtV@tOwS(2-!YGwW0q%)n3HDjsX-VXFj~b}PRxZK8Z zj*Lw^vF41APHh5J%$x#35;w7Lnd0QCk=TiQR@YO;HjqRpx#_POn;uF5h2D$I+6;g~ zbH~6qzuO#2hVcA?#@hzQcT90R@#t%2!b}ChJ|BJ8F_#aCEe45M@rEU?vtTp0dHv?QF-(-U<_4|$j!FN`QjGHwdvRgg z-!Zm}JD|r{5o#Oj8lrYmN&+_gnYTHdc%PeM4yUw~K*nUaHk=edSp#Jxfn=bJ;(aU` zULR#6v|3OOB|dm?X1B)^NC?W|qzKBVpiCr1O*xVTplqd5fMYTl#$9vZ^SMtkGHs3-B9kWu+inpuD4%NLosjaAfPL_ z^4Ge}h(+;+@|tb(?p1$|$1tg!y|>31kiBzIB$a1SCB{d`H(}Yf5i?;IV8zn64-B9N zo*1jK)KDf%ywTzFFzq&uGsxD=X;sJs-hXn6XICNg?T$G2Ax%ZL6=%f^9`=4Tcp@HS z17^-P+OQ7t+)RHMoMmmCbZ{f%u-Q?iOssml+ltjlSdXJyyp|GWzn6rTYBbeLjb&<4 zh42qY4Xu*2XoYmGc^A5R8iQlYl+1ZX#NC6{sNWZINHgRqaTC4 z&Hi=??Z6!0Q>IPd4lL_oP=J*-M@$dBczfUlo>{U{@U{QF=VmQX0;~7z|A0|=%X1JV zwbulU{73JE_wy+7MN}~&3D~8y##y^#p2%r9Z+A?}RJL54G`n|l*KVZxE!PqIcbnR) z<@Td$-?+9`aaEz-6#_0*oW&%{joWe9s5%`U-cU{#K@kFW8{Y~NMA^;oHbmC`P|Lkf z!56^vD-=n=p8++Hs^HrG)qCzS%GitXJ)xA1Aqps31+U;`@8T(RN~~Bf9_AVimQ6vR6hBXGGW@$|URu6j5JVsYEQ)c}i zQPe?L$x!mBq6V!Ipxog+3;);RP9ndBYGRF|rM06*vlLO`~P$mW|dawCp7u zsC7j6I?8u@&yuqjfIctNK}jK9?uQn7_ULnp9RCSaM8Ad+n6J2^h#8f0X<=bb`BxX_ zE-b$J-Ua2nyRfjR{PT;K-@APN%`5jZlR;j5zlPoi?T;v#hI|}@tXRDhdo;B7uub-nB~5A%xpw|<$bduqwAO?`UZaR}I{^XiksvOLMdENdain zPn}&-TKY+$Q8)_q;mCOy(4Mn2~719PkE*+NWGO{Am7AY1Q_aR<|x%u1j0UT zcfLF!O98YZV z7IIM_PP`ZcyX3$LbCi9GGnxWif?|W>B`$ODge}{K%ST>HcO@}fD zbFv;Ee1!yetXp=5S#RSTV73U%&QQ+90(~Dcloa#(kOb!j=+$3n?7Ex9Pl~{k;ayxIeNI1$6||v>^Kd8>E1}L9;igYEngliJPdZ0S#YEKkbVs9t+t% zfKgyI!k_NqmFQHx|lSZnqnvg0`SAOThjcI-k=C zg>qLK?JyREc|ifNxJVVnHsT$s$U_QpbJ}AFas?uyih^75CRHC(wHr(8H@Z6#qo^oG zlx0-ViRHUv<;i!ZgXz-r(DcyQ&{%QuNo3jIBV+kqb61XtUxHWfw}7KYs)^(kmusom zT1veNnq3v>@(Z1m^5e2!rpwY2)0wTFh*~QGxt-|Nan;FZ=>AsU)<<0G8(w{xD=2v)?Tqigs^ z*Yr)D`mK)DE%~Lc?b}_)ce<|c%6_9$_RDf!#joI9>QuWmzt&yw7rJ%7-d*$;yA8k5 zUGkT@%lto(`LpJv8g ztyxKV~*5uuU6&!Klz2@x&J=Wnd0BG84f_H+1 zv&r4^b=D^Gw3ZL4WyE(3HDc6nB&lJQOwG_B0K7Pgbj{8sNMvu=fP zlDW70v_{ZrI@y9c;T;~Zu(z9?xW#va_R(x-T{XYMlK?veS$?Z~M}3~v)Y#o5NCt6M zzIyxhyjs8AZV&oFuYGijAM(x<1Bb+6)@PQ!!~0PjCXqPGHfA@O8d<%26eqlU{gC&P zsqV!)oQ3hEqfKa-{yAv8jVHdMYZ}+UA)0S6-8Y%R%uhAn;w5e~>yg1q%>LB!9q#fn zbI>MAT7Cs>&>igxtD;>UkJs1&+BIJ03s|GRW1Q4j9dL`Rf&N9TvLxHf>;&44aeIZW zqP;Y3ud#Ksm$6Ps{bAn5Bb%LMr_g_b`p|Rwk;%@m=g_l)p4G{o*!6kr`U3MXwuZ6s zX>jhdnDrt%hmrN^5jo>LdkOs;tT{DfJeOU-nlH19pO*ZSYzch)O2H&|72Bql@`|?u z){eiHRog)S_(yeOg{yet^C%MS6K$xq^u&;FGqL2ml-Tm^Fbzl0x0|)L#+ALb9W&XI9KRJ3rOLDGdGZhnss* zmv0KzXYuB>s6FUnYP=Z-Z?yaIW?w}6yp1*s;=S!C5NvZhXdmz%+w71i$D4)3Z|0}U zMgCI%D05q_uoot+)=6v>yC^i>m4E5lU5U*czn2O?$abv@^%3mymXttS@MIu*ZNg;n zBX%d%`Hy&F19(pVCecRvP=BmHG!kQEGJRQSY3Cu&w|>GX6y_4uSK-d0vR3_2l8UsM$Ha(?t= zuT;_AP7roDyW(Xw7a|hzyZ6$w*Qw@#edtWjTDaNBto=CZWu;COusE}EmR&Bg664#0 z-E6tl4@AscIaf($*l6XOj=S^2WX}CS^stLq1y5vl8~q8->a9VaEJdpwwD)+NtrzWl z(L#W-l@`X^2gPPVhl>O{{`!M9mj)y&$%|#HEgZ5nxl~Jan{gtVjvy`<&(Q%aI<8;O zx5*FbFHKMeNsI@b#J6LZ1kN&@OJP*SXy0-l0`U!*+vYt7&FS zEoiEaw9YNnOY~9Gi+-9eNTJ~967BN*q|YWO6fdFq4xZRXp_#yzk$SWEIL>4<_ct+* z?k{yXVP_9F`zvUfp+R^U>5sL?dUS>vL;a9$Y^G9?y#D{|l12!Qs6fKnOzLA*wP_O$XJT@K&kIQt}RpYag zM9Aw`bHdLcfk@xu_X}<)bc!N3X!@(zPW&2*W+^Kx?6g9bRr6{BhfMrBhO>$ib=C?J z@f!qR7G;`C2N%CZMHPi_U*EcZ=f;O}j`${ZPj;9_XFW$}exP}bzJZyy@x&`AG^1`9 
zkU9BhpnPimve1%+ygiF=K~bkz|Cq$lxyKUs3p92$pc=CFSQ{CS!F}sw!}NV)XdYPN zBKQza9@)7m7;{5=Xh|6BKohqlj3sTMzHf;XBi5)yuHa+iA#545545dV+6q{sbhxg!6N)`E5v6|mdcxfejFhhGlgJ16laWZLVHKlyW z-VV55#=5GSRcLippBE0YN+02g$xvv91yfYfD|u<_br__IG53a2HlC&z=G0PdC+DsF z)-m-|!bVa(-9iO^nzj#{1liC>CdmqvkO})_-qaA-l0?u0&VOV=gA=qZp$!DR(~u^( z(MezZUFkb`GWhb82jSfhlRb|@RdA!yMN-d;-^TuMtn2(iKNNIs*ee!q`F7OnARdNQ zgQZu2V}~#ubKc7=5y4S&;m*Q&*uV<@B9we#@TG|(4F+!lVXabK-yx`_DJZpSVG@YM zepU5J|9PH9O#u8Up7;z3&9up7(2Xx^u2f`S+9m2S(375-qsfdXHA4upuB3A%A^r&! zLM=H5`^xQxdq4(L^b>PrJ=T)azP;}-j}MYW}B zv4$oy-&E^XCCN)lIg)F%ln-F}*hXMuK7ds5H7vy1%4XAV>zov%JD$sjKr_Ji%P6DA># zQ%7Ob1ECd3dqJ0rH)$e~RL)lGka>X@0Rut#kYXDdfKCkN6|cD@)@a8D#L|ir${~*6j77k^;mrM_zz-CnE|T(8M8?Bg&u|;5n1{3Wn(>_z?UiuW2!`%}5fI#$_WCx@8LP66KR|L5E} zjBDB-Jpa%J-#bHnM~9nklba6Jc$y^Whx*3`I}N3GCT%MBmC_wD2=YjefrTj#2b-#p zu2i0MpvZEDe~uIbxBxbyEdbjNI+OgcQPG9F_c8M=wL zaYWyx)9r=3d*Wp@)A~(Rrw;UzIs@!|upJ%3#zTL1kq2QAL3vJj`zItKNRCMtL9Ujr z%Y|~M0iJF==*nHwmAnu5EBbKPbKb!Rdrk5y2E9Q%2s)i3k4`m}U-F)iPxxe!wmLCo z%3J(FB6ydlC$6iCH|0#E3sVj6;=se~fB4(0awm8&amcvWqX4U;+!sPAC7P}SPDp(~ zsz6CUdC^LVyo*}akRh*R29;GR0$piqY!xai4?$u|+@$&Mq3}y`a9$6*ilM*56Mq{8 zxe4SFxXSFn^sIbQv8P+EJ3HR^LiIGJX1Fe5n>;@;@w|^m^w4BpNH0{`6}jwqF|kFH)~O)tG2?PhjxxbE?SZ5Uopzo-S{V8dJh_WfLyQF{ z5r1`AQnY30_J7$6>GN}zfpRm`jA~{JGCu<&Av21ZD}wvVEt1qWu}kqIPm;u2QkzaZ zrZzsXfWBjDGm}}f)F!hr)X8yR4h)A8>a6^k3x`JAFT;(2>42iGNLp7w>uP$EXf4wN zlT4B)Pb$eQ{t(;ys|0W?)g)fSSo)m0xOa_`DDO{FZ12iET>7GVQ;eN&P$=#k5m!91 zO?y-cA9){0)<)m|&o~sp4iI8e@ zxtmMq`z4-u1BGVHa)C=6@ZWYdJ@*U_nBY`p=is18HkBl&lNCQAISITZ`A4Wozi=w4 zMP6%siabG#82b=phVLm5WC|+Ct3@)ILTO5fGHu%#(5I=1(9K+v((O(r{^nt)bz0mpu{Gb2~q-x zRh(rwieLb}^2K~Q@AxtWk{X4hn5TearSPX!0M#u#k{^iw=rh8hChAC0=- z)6;inzH-gXp)}x5*h%mW?ResSh8Hhj-DhAOpSmcCHq`rGh>t)yW?#Mm9YdH@CyQ0e zBFevmdum5$8OpRfIZRfPIzZgVMnJa2lS8tEb%YebiY67WP(co~jn&54#%0|7tJ=!)(wU7@)r}=Nqj^UB z3C%uBg*28#@61Lt1IkRDl+2SnQ0Bn7*`>i9>XPPHUXsFG6}nfcMh>f3pn|y3KaKcZ zAm1bgZAye*qHoCZzYvg-L%ZrXtZ> Yptcs%DwgFU&9_0{c#ZScMyX=_AHAPK7ytkO literal 0 HcmV?d00001 diff --git a/layers/business/analytics_service.py b/layers/business/analytics_service.py new file mode 100644 index 0000000..b23c7bf --- /dev/null +++ b/layers/business/analytics_service.py @@ -0,0 +1,300 @@ +""" +Analytics business logic service +Business Layer - handles analytics calculations and data aggregations +""" +from datetime import datetime, timedelta +from typing import Dict, Any, List, Optional +import logging + +from ..infrastructure.repositories import SensorReadingRepository + +logger = logging.getLogger(__name__) + +class AnalyticsService: + """Service for analytics and reporting operations""" + + def __init__(self): + self.sensor_reading_repo = SensorReadingRepository() + + async def get_analytics_summary(self, hours: int = 24) -> Dict[str, Any]: + """Get comprehensive analytics summary for the specified time period""" + try: + start_time = datetime.utcnow() - timedelta(hours=hours) + + # Sensor-level analytics pipeline + sensor_pipeline = [ + {"$match": {"created_at": {"$gte": start_time}}}, + {"$group": { + "_id": { + "sensor_id": "$sensor_id", + "room": "$room", + "sensor_type": "$sensor_type" + }, + "reading_count": {"$sum": 1}, + "avg_energy": {"$avg": "$energy.value"}, + "total_energy": {"$sum": "$energy.value"}, + "avg_co2": {"$avg": "$co2.value"}, + "max_co2": {"$max": "$co2.value"}, + "avg_temperature": {"$avg": "$temperature.value"}, + "latest_timestamp": {"$max": "$timestamp"} + }}, + {"$sort": {"total_energy": -1}} + ] + + sensor_analytics = await self.sensor_reading_repo.aggregate(sensor_pipeline) + + # Room-level analytics 
pipeline + room_pipeline = [ + {"$match": {"created_at": {"$gte": start_time}, "room": {"$ne": None}}}, + {"$group": { + "_id": "$room", + "sensor_count": {"$addToSet": "$sensor_id"}, + "total_energy": {"$sum": "$energy.value"}, + "avg_co2": {"$avg": "$co2.value"}, + "max_co2": {"$max": "$co2.value"}, + "reading_count": {"$sum": 1} + }}, + {"$project": { + "room": "$_id", + "sensor_count": {"$size": "$sensor_count"}, + "total_energy": 1, + "avg_co2": 1, + "max_co2": 1, + "reading_count": 1 + }}, + {"$sort": {"total_energy": -1}} + ] + + room_analytics = await self.sensor_reading_repo.aggregate(room_pipeline) + + # Calculate summary statistics + summary_stats = self._calculate_summary_stats(sensor_analytics, room_analytics) + + return { + "period_hours": hours, + "start_time": start_time.isoformat(), + "sensor_analytics": sensor_analytics, + "room_analytics": room_analytics, + "summary": summary_stats + } + + except Exception as e: + logger.error(f"Error getting analytics summary: {e}") + return { + "period_hours": hours, + "start_time": None, + "sensor_analytics": [], + "room_analytics": [], + "summary": {} + } + + def _calculate_summary_stats(self, sensor_analytics: List[Dict], + room_analytics: List[Dict]) -> Dict[str, Any]: + """Calculate summary statistics from analytics data""" + total_readings = sum(item["reading_count"] for item in sensor_analytics) + total_energy = sum(item.get("total_energy", 0) or 0 for item in sensor_analytics) + + # Energy consumption insights + energy_insights = { + "total_consumption_kwh": round(total_energy, 2), + "average_consumption_per_sensor": ( + round(total_energy / len(sensor_analytics), 2) + if sensor_analytics else 0 + ), + "top_energy_consumer": ( + sensor_analytics[0]["_id"]["sensor_id"] + if sensor_analytics else None + ) + } + + # CO2 insights + co2_values = [item.get("avg_co2") for item in sensor_analytics if item.get("avg_co2")] + co2_insights = { + "average_co2_level": ( + round(sum(co2_values) / len(co2_values), 1) + if co2_values else 0 + ), + "sensors_with_high_co2": len([ + co2 for co2 in co2_values if co2 and co2 > 1000 + ]), + "sensors_with_critical_co2": len([ + co2 for co2 in co2_values if co2 and co2 > 5000 + ]) + } + + return { + "total_sensors_analyzed": len(sensor_analytics), + "total_rooms_analyzed": len(room_analytics), + "total_readings": total_readings, + "energy_insights": energy_insights, + "co2_insights": co2_insights + } + + async def get_energy_trends(self, hours: int = 168) -> Dict[str, Any]: + """Get energy consumption trends (default: last week)""" + try: + start_time = datetime.utcnow() - timedelta(hours=hours) + + # Hourly energy consumption pipeline + pipeline = [ + {"$match": { + "created_at": {"$gte": start_time}, + "energy.value": {"$exists": True} + }}, + {"$group": { + "_id": { + "year": {"$year": "$created_at"}, + "month": {"$month": "$created_at"}, + "day": {"$dayOfMonth": "$created_at"}, + "hour": {"$hour": "$created_at"} + }, + "total_energy": {"$sum": "$energy.value"}, + "sensor_count": {"$addToSet": "$sensor_id"}, + "reading_count": {"$sum": 1} + }}, + {"$project": { + "_id": 0, + "timestamp": { + "$dateFromParts": { + "year": "$_id.year", + "month": "$_id.month", + "day": "$_id.day", + "hour": "$_id.hour" + } + }, + "total_energy": {"$round": ["$total_energy", 2]}, + "sensor_count": {"$size": "$sensor_count"}, + "reading_count": 1 + }}, + {"$sort": {"timestamp": 1}} + ] + + trends = await self.sensor_reading_repo.aggregate(pipeline) + + # Calculate trend insights + insights = 
self._calculate_trend_insights(trends) + + return { + "period_hours": hours, + "data_points": len(trends), + "trends": trends, + "insights": insights + } + + except Exception as e: + logger.error(f"Error getting energy trends: {e}") + return { + "period_hours": hours, + "data_points": 0, + "trends": [], + "insights": {} + } + + def _calculate_trend_insights(self, trends: List[Dict]) -> Dict[str, Any]: + """Calculate insights from trend data""" + if not trends: + return {} + + energy_values = [item["total_energy"] for item in trends] + + # Peak and low consumption + max_consumption = max(energy_values) + min_consumption = min(energy_values) + avg_consumption = sum(energy_values) / len(energy_values) + + # Find peak time + peak_item = max(trends, key=lambda x: x["total_energy"]) + peak_time = peak_item["timestamp"] + + return { + "peak_consumption_kwh": max_consumption, + "lowest_consumption_kwh": min_consumption, + "average_consumption_kwh": round(avg_consumption, 2), + "peak_time": peak_time.isoformat() if hasattr(peak_time, 'isoformat') else str(peak_time), + "consumption_variance": round(max_consumption - min_consumption, 2) + } + + async def get_room_comparison(self, hours: int = 24) -> Dict[str, Any]: + """Get room-by-room comparison analytics""" + try: + start_time = datetime.utcnow() - timedelta(hours=hours) + + pipeline = [ + {"$match": { + "created_at": {"$gte": start_time}, + "room": {"$ne": None} + }}, + {"$group": { + "_id": "$room", + "total_energy": {"$sum": "$energy.value"}, + "avg_energy": {"$avg": "$energy.value"}, + "avg_co2": {"$avg": "$co2.value"}, + "max_co2": {"$max": "$co2.value"}, + "avg_temperature": {"$avg": "$temperature.value"}, + "sensor_count": {"$addToSet": "$sensor_id"}, + "reading_count": {"$sum": 1} + }}, + {"$project": { + "room": "$_id", + "_id": 0, + "total_energy": {"$round": [{"$ifNull": ["$total_energy", 0]}, 2]}, + "avg_energy": {"$round": [{"$ifNull": ["$avg_energy", 0]}, 2]}, + "avg_co2": {"$round": [{"$ifNull": ["$avg_co2", 0]}, 1]}, + "max_co2": {"$round": [{"$ifNull": ["$max_co2", 0]}, 1]}, + "avg_temperature": {"$round": [{"$ifNull": ["$avg_temperature", 0]}, 1]}, + "sensor_count": {"$size": "$sensor_count"}, + "reading_count": 1 + }}, + {"$sort": {"total_energy": -1}} + ] + + room_comparison = await self.sensor_reading_repo.aggregate(pipeline) + + # Calculate comparison insights + insights = self._calculate_room_insights(room_comparison) + + return { + "period_hours": hours, + "rooms_analyzed": len(room_comparison), + "comparison": room_comparison, + "insights": insights + } + + except Exception as e: + logger.error(f"Error getting room comparison: {e}") + return { + "period_hours": hours, + "rooms_analyzed": 0, + "comparison": [], + "insights": {} + } + + def _calculate_room_insights(self, room_data: List[Dict]) -> Dict[str, Any]: + """Calculate insights from room comparison data""" + if not room_data: + return {} + + # Energy insights + total_energy = sum(room["total_energy"] for room in room_data) + highest_consumer = room_data[0] if room_data else None + lowest_consumer = min(room_data, key=lambda x: x["total_energy"]) if room_data else None + + # CO2 insights + rooms_with_high_co2 = [ + room for room in room_data + if room.get("avg_co2", 0) > 1000 + ] + + # Temperature insights + temp_values = [room.get("avg_temperature", 0) for room in room_data if room.get("avg_temperature")] + avg_building_temp = sum(temp_values) / len(temp_values) if temp_values else 0 + + return { + "total_building_energy_kwh": round(total_energy, 2), + 
"highest_energy_consumer": highest_consumer["room"] if highest_consumer else None, + "lowest_energy_consumer": lowest_consumer["room"] if lowest_consumer else None, + "rooms_with_high_co2": len(rooms_with_high_co2), + "high_co2_rooms": [room["room"] for room in rooms_with_high_co2], + "average_building_temperature": round(avg_building_temp, 1), + "total_active_sensors": sum(room["sensor_count"] for room in room_data) + } \ No newline at end of file diff --git a/layers/business/cleanup_service.py b/layers/business/cleanup_service.py new file mode 100644 index 0000000..76219b4 --- /dev/null +++ b/layers/business/cleanup_service.py @@ -0,0 +1,234 @@ +""" +Data cleanup and maintenance service +Business Layer - handles data retention policies and system maintenance +""" +import asyncio +from datetime import datetime, timedelta +from typing import Dict, Any +import logging + +from ..infrastructure.database_connection import database_connection +from ..infrastructure.repositories import SensorReadingRepository + +logger = logging.getLogger(__name__) + +class CleanupService: + """Service for data cleanup and maintenance operations""" + + def __init__(self): + self.sensor_reading_repo = SensorReadingRepository() + self.is_running = False + self.cleanup_task = None + + async def start_scheduled_cleanup(self, interval_hours: int = 24) -> None: + """Start scheduled cleanup process""" + if self.is_running: + logger.warning("Cleanup service is already running") + return + + self.is_running = True + self.cleanup_task = asyncio.create_task(self._cleanup_loop(interval_hours)) + logger.info(f"Started scheduled cleanup service (interval: {interval_hours} hours)") + + async def stop_scheduled_cleanup(self) -> None: + """Stop scheduled cleanup process""" + self.is_running = False + if self.cleanup_task: + self.cleanup_task.cancel() + try: + await self.cleanup_task + except asyncio.CancelledError: + pass + logger.info("Cleanup service stopped") + + async def _cleanup_loop(self, interval_hours: int) -> None: + """Main cleanup loop""" + while self.is_running: + try: + await self.cleanup_old_data() + # Wait for next cleanup interval + await asyncio.sleep(interval_hours * 3600) # Convert hours to seconds + except Exception as e: + logger.error(f"Error in scheduled cleanup: {e}") + # Wait 1 hour before retrying on error + await asyncio.sleep(3600) + + async def cleanup_old_data(self) -> Dict[str, int]: + """Perform data cleanup based on retention policies""" + try: + cleanup_results = {} + db = await database_connection.get_database() + + # Delete sensor readings older than 90 days + sensor_retention_date = datetime.utcnow() - timedelta(days=90) + sensor_result = await db.sensor_readings.delete_many({ + "created_at": {"$lt": sensor_retention_date} + }) + cleanup_results["sensor_readings_deleted"] = sensor_result.deleted_count + + if sensor_result.deleted_count > 0: + logger.info(f"Deleted {sensor_result.deleted_count} old sensor readings") + + # Delete room metrics older than 30 days + room_retention_date = datetime.utcnow() - timedelta(days=30) + room_result = await db.room_metrics.delete_many({ + "created_at": {"$lt": room_retention_date} + }) + cleanup_results["room_metrics_deleted"] = room_result.deleted_count + + if room_result.deleted_count > 0: + logger.info(f"Deleted {room_result.deleted_count} old room metrics") + + # Delete system events older than 60 days + events_retention_date = datetime.utcnow() - timedelta(days=60) + events_result = await db.system_events.delete_many({ + "created_at": {"$lt": 
events_retention_date} + }) + cleanup_results["system_events_deleted"] = events_result.deleted_count + + if events_result.deleted_count > 0: + logger.info(f"Deleted {events_result.deleted_count} old system events") + + # Clean up orphaned sensor metadata (sensors with no recent readings) + orphaned_retention_date = datetime.utcnow() - timedelta(days=30) + + # Find sensors with no recent readings + active_sensors = await db.sensor_readings.distinct("sensor_id", { + "created_at": {"$gte": orphaned_retention_date} + }) + + orphaned_result = await db.sensor_metadata.delete_many({ + "sensor_id": {"$nin": active_sensors}, + "last_seen": {"$lt": orphaned_retention_date} + }) + cleanup_results["orphaned_metadata_deleted"] = orphaned_result.deleted_count + + if orphaned_result.deleted_count > 0: + logger.info(f"Deleted {orphaned_result.deleted_count} orphaned sensor metadata records") + + return cleanup_results + + except Exception as e: + logger.error(f"Error during data cleanup: {e}") + return {"error": str(e)} + + async def get_storage_statistics(self) -> Dict[str, Any]: + """Get storage statistics for different collections""" + try: + db = await database_connection.get_database() + + stats = {} + + # Sensor readings statistics + sensor_stats = await db.command("collStats", "sensor_readings") + stats["sensor_readings"] = { + "count": sensor_stats.get("count", 0), + "size_bytes": sensor_stats.get("size", 0), + "avg_obj_size": sensor_stats.get("avgObjSize", 0), + "storage_size": sensor_stats.get("storageSize", 0) + } + + # Room metrics statistics + room_stats = await db.command("collStats", "room_metrics") + stats["room_metrics"] = { + "count": room_stats.get("count", 0), + "size_bytes": room_stats.get("size", 0), + "avg_obj_size": room_stats.get("avgObjSize", 0), + "storage_size": room_stats.get("storageSize", 0) + } + + # System events statistics + events_stats = await db.command("collStats", "system_events") + stats["system_events"] = { + "count": events_stats.get("count", 0), + "size_bytes": events_stats.get("size", 0), + "avg_obj_size": events_stats.get("avgObjSize", 0), + "storage_size": events_stats.get("storageSize", 0) + } + + # Sensor metadata statistics + metadata_stats = await db.command("collStats", "sensor_metadata") + stats["sensor_metadata"] = { + "count": metadata_stats.get("count", 0), + "size_bytes": metadata_stats.get("size", 0), + "avg_obj_size": metadata_stats.get("avgObjSize", 0), + "storage_size": metadata_stats.get("storageSize", 0) + } + + # Calculate totals + total_documents = sum(collection["count"] for collection in stats.values()) + total_size = sum(collection["size_bytes"] for collection in stats.values()) + total_storage = sum(collection["storage_size"] for collection in stats.values()) + + stats["totals"] = { + "total_documents": total_documents, + "total_size_bytes": total_size, + "total_storage_bytes": total_storage, + "total_size_mb": round(total_size / (1024 * 1024), 2), + "total_storage_mb": round(total_storage / (1024 * 1024), 2) + } + + return stats + + except Exception as e: + logger.error(f"Error getting storage statistics: {e}") + return {"error": str(e)} + + async def get_data_retention_info(self) -> Dict[str, Any]: + """Get information about data retention policies and old data""" + try: + db = await database_connection.get_database() + + # Current date references + now = datetime.utcnow() + sensor_cutoff = now - timedelta(days=90) + room_cutoff = now - timedelta(days=30) + events_cutoff = now - timedelta(days=60) + + retention_info = {} + + # Sensor 
readings retention info + old_sensor_count = await db.sensor_readings.count_documents({ + "created_at": {"$lt": sensor_cutoff} + }) + retention_info["sensor_readings"] = { + "retention_days": 90, + "cutoff_date": sensor_cutoff.isoformat(), + "old_records_count": old_sensor_count + } + + # Room metrics retention info + old_room_count = await db.room_metrics.count_documents({ + "created_at": {"$lt": room_cutoff} + }) + retention_info["room_metrics"] = { + "retention_days": 30, + "cutoff_date": room_cutoff.isoformat(), + "old_records_count": old_room_count + } + + # System events retention info + old_events_count = await db.system_events.count_documents({ + "created_at": {"$lt": events_cutoff} + }) + retention_info["system_events"] = { + "retention_days": 60, + "cutoff_date": events_cutoff.isoformat(), + "old_records_count": old_events_count + } + + return retention_info + + except Exception as e: + logger.error(f"Error getting retention info: {e}") + return {"error": str(e)} + + def is_cleanup_running(self) -> bool: + """Check if cleanup service is currently running""" + return self.is_running and ( + self.cleanup_task is not None and + not self.cleanup_task.done() + ) + +# Global cleanup service instance +cleanup_service = CleanupService() \ No newline at end of file diff --git a/layers/business/room_service.py b/layers/business/room_service.py new file mode 100644 index 0000000..b2cc062 --- /dev/null +++ b/layers/business/room_service.py @@ -0,0 +1,262 @@ +""" +Room metrics business logic service +Business Layer - handles room-related aggregations and business operations +""" +from datetime import datetime, timedelta +from typing import Dict, Any, List, Optional +import logging + +from models import RoomMetrics, CO2Status, OccupancyLevel +from ..infrastructure.repositories import ( + SensorReadingRepository, RoomMetricsRepository, RedisRepository +) + +logger = logging.getLogger(__name__) + +class RoomService: + """Service for room-related business operations""" + + def __init__(self): + self.sensor_reading_repo = SensorReadingRepository() + self.room_metrics_repo = RoomMetricsRepository() + self.redis_repo = RedisRepository() + + async def update_room_metrics(self, room: str) -> bool: + """Calculate and store room-level metrics""" + if not room: + return False + + try: + # Get recent readings for this room (last 5 minutes) + recent_readings = await self.sensor_reading_repo.get_recent_by_room( + room=room, + minutes=5 + ) + + if not recent_readings: + return False + + # Calculate aggregated metrics + metrics = await self._calculate_room_metrics(room, recent_readings) + + # Store in MongoDB + stored = await self.room_metrics_repo.create(metrics) + + # Cache in Redis + if stored: + await self.redis_repo.set_room_metrics(room, metrics.dict()) + logger.debug(f"Updated room metrics for {room}") + + return stored + + except Exception as e: + logger.error(f"Error updating room metrics for {room}: {e}") + return False + + async def _calculate_room_metrics(self, room: str, readings: List[Dict]) -> RoomMetrics: + """Calculate aggregated metrics for a room based on recent readings""" + + # Group readings by sensor + sensors_data = {} + for reading in readings: + sensor_id = reading["sensor_id"] + if sensor_id not in sensors_data: + sensors_data[sensor_id] = [] + sensors_data[sensor_id].append(reading) + + # Initialize value arrays + energy_values = [] + co2_values = [] + temperature_values = [] + humidity_values = [] + motion_detected = False + + # Extract values from readings + for sensor_readings 
in sensors_data.values(): + for reading in sensor_readings: + if reading.get("energy"): + energy_values.append(reading["energy"]["value"]) + if reading.get("co2"): + co2_values.append(reading["co2"]["value"]) + if reading.get("temperature"): + temperature_values.append(reading["temperature"]["value"]) + if reading.get("humidity"): + humidity_values.append(reading["humidity"]["value"]) + if reading.get("motion") and reading["motion"].get("value") == "Detected": + motion_detected = True + + # Get sensor types present + sensor_types = list(set( + reading.get("sensor_type") + for reading in readings + if reading.get("sensor_type") + )) + + # Initialize metrics object + metrics = RoomMetrics( + room=room, + timestamp=int(datetime.utcnow().timestamp()), + sensor_count=len(sensors_data), + active_sensors=list(sensors_data.keys()), + sensor_types=sensor_types, + motion_detected=motion_detected + ) + + # Calculate energy metrics + if energy_values: + metrics.energy = self._calculate_energy_metrics(energy_values) + + # Calculate CO2 metrics and occupancy + if co2_values: + metrics.co2 = self._calculate_co2_metrics(co2_values) + metrics.occupancy_estimate = self._estimate_occupancy_from_co2( + metrics.co2["average"] + ) + + # Calculate temperature metrics + if temperature_values: + metrics.temperature = self._calculate_temperature_metrics(temperature_values) + + # Calculate humidity metrics + if humidity_values: + metrics.humidity = self._calculate_humidity_metrics(humidity_values) + + # Set last activity time if motion detected + if motion_detected: + metrics.last_activity = datetime.utcnow() + + return metrics + + def _calculate_energy_metrics(self, values: List[float]) -> Dict[str, Any]: + """Calculate energy consumption metrics""" + return { + "current": sum(values), + "average": sum(values) / len(values), + "total": sum(values), + "peak": max(values), + "unit": "kWh" + } + + def _calculate_co2_metrics(self, values: List[float]) -> Dict[str, Any]: + """Calculate CO2 level metrics""" + avg_co2 = sum(values) / len(values) + return { + "current": avg_co2, + "average": avg_co2, + "max": max(values), + "min": min(values), + "status": self._get_co2_status(avg_co2).value, + "unit": "ppm" + } + + def _calculate_temperature_metrics(self, values: List[float]) -> Dict[str, Any]: + """Calculate temperature metrics""" + avg_temp = sum(values) / len(values) + return { + "current": avg_temp, + "average": avg_temp, + "max": max(values), + "min": min(values), + "unit": "°C" + } + + def _calculate_humidity_metrics(self, values: List[float]) -> Dict[str, Any]: + """Calculate humidity metrics""" + avg_humidity = sum(values) / len(values) + return { + "current": avg_humidity, + "average": avg_humidity, + "max": max(values), + "min": min(values), + "unit": "%" + } + + def _get_co2_status(self, co2_level: float) -> CO2Status: + """Determine CO2 status based on level""" + if co2_level < 400: + return CO2Status.GOOD + elif co2_level < 1000: + return CO2Status.MODERATE + elif co2_level < 5000: + return CO2Status.POOR + else: + return CO2Status.CRITICAL + + def _estimate_occupancy_from_co2(self, co2_level: float) -> OccupancyLevel: + """Estimate occupancy level based on CO2 levels""" + if co2_level < 600: + return OccupancyLevel.LOW + elif co2_level < 1200: + return OccupancyLevel.MEDIUM + else: + return OccupancyLevel.HIGH + + async def get_all_rooms(self) -> Dict[str, Any]: + """Get list of all rooms with sensor counts and latest metrics""" + try: + rooms = await self.sensor_reading_repo.get_distinct_rooms() + + 
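# For each distinct room, combine the sensor count derived from MongoDB with whatever aggregated metrics are currently cached in Redis +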
room_data = [] + for room in rooms: + # Get sensor count for each room + sensor_ids = await self.sensor_reading_repo.get_distinct_sensor_ids_by_room(room) + sensor_count = len(sensor_ids) + + # Get latest room metrics from cache + room_metrics = await self.redis_repo.get_room_metrics(room) + + room_data.append({ + "room": room, + "sensor_count": sensor_count, + "sensor_ids": sensor_ids, + "latest_metrics": room_metrics + }) + + return { + "rooms": room_data, + "count": len(room_data) + } + + except Exception as e: + logger.error(f"Error getting rooms: {e}") + return {"rooms": [], "count": 0} + + async def get_room_data(self, room_name: str, start_time: Optional[int] = None, + end_time: Optional[int] = None, limit: int = 100) -> Dict[str, Any]: + """Get historical data for a specific room""" + try: + # Build query for time range + query = {"room": room_name} + + if start_time or end_time: + time_query = {} + if start_time: + time_query["$gte"] = datetime.fromtimestamp(start_time) + if end_time: + time_query["$lte"] = datetime.fromtimestamp(end_time) + query["created_at"] = time_query + + # Get room metrics + room_metrics = await self.room_metrics_repo.get_by_room(room_name, limit) + + # Get sensor readings for the room + sensor_readings = await self.sensor_reading_repo.get_by_query( + query=query, + sort_by="timestamp", + sort_order="desc", + limit=limit + ) + + return { + "room": room_name, + "room_metrics": room_metrics, + "sensor_readings": sensor_readings + } + + except Exception as e: + logger.error(f"Error getting room data for {room_name}: {e}") + return { + "room": room_name, + "room_metrics": [], + "sensor_readings": [] + } \ No newline at end of file diff --git a/layers/business/sensor_service.py b/layers/business/sensor_service.py new file mode 100644 index 0000000..12a140a --- /dev/null +++ b/layers/business/sensor_service.py @@ -0,0 +1,328 @@ +""" +Sensor business logic service +Business Layer - handles sensor-related business operations and rules +""" +import json +from datetime import datetime, timedelta +from typing import Dict, Any, List, Optional +import logging +import uuid + +from models import ( + SensorReading, LegacySensorReading, SensorMetadata, + SensorType, SensorStatus, CO2Status, OccupancyLevel +) +from ..infrastructure.repositories import ( + SensorReadingRepository, SensorMetadataRepository, + SystemEventRepository, RedisRepository +) + +logger = logging.getLogger(__name__) + +class SensorService: + """Service for sensor-related business operations""" + + def __init__(self): + self.sensor_reading_repo = SensorReadingRepository() + self.sensor_metadata_repo = SensorMetadataRepository() + self.system_event_repo = SystemEventRepository() + self.redis_repo = RedisRepository() + + async def process_sensor_message(self, message_data: str) -> bool: + """Process incoming sensor message and handle business logic""" + try: + # Parse the message + data = json.loads(message_data) + logger.debug(f"Processing sensor message: {data}") + + # Convert to standard format + sensor_reading = await self._parse_sensor_data(data) + + # Validate business rules + validation_result = await self._validate_sensor_reading(sensor_reading) + if not validation_result["valid"]: + logger.warning(f"Sensor reading validation failed: {validation_result['errors']}") + return False + + # Store the reading + stored = await self.sensor_reading_repo.create(sensor_reading) + if not stored: + return False + + # Update caches and metadata + await self._update_caches(sensor_reading) + await 
self._update_sensor_metadata(sensor_reading) + + # Check for alerts + await self._check_sensor_alerts(sensor_reading) + + return True + + except Exception as e: + logger.error(f"Error processing sensor message: {e}") + await self._log_processing_error(str(e), message_data) + return False + + async def _parse_sensor_data(self, data: dict) -> SensorReading: + """Parse and convert sensor data to standard format""" + # Check if legacy format + if self._is_legacy_format(data): + return await self._convert_legacy_data(data) + else: + return SensorReading(**data) + + def _is_legacy_format(self, data: dict) -> bool: + """Check if data is in legacy format""" + legacy_keys = {"sensorId", "timestamp", "value", "unit"} + return legacy_keys.issubset(data.keys()) and "energy" not in data + + async def _convert_legacy_data(self, data: dict) -> SensorReading: + """Convert legacy format to new sensor reading format""" + legacy_reading = LegacySensorReading(**data) + + return SensorReading( + sensor_id=legacy_reading.sensor_id, + sensor_type=SensorType.ENERGY, + timestamp=legacy_reading.timestamp, + created_at=legacy_reading.created_at, + energy={ + "value": legacy_reading.value, + "unit": legacy_reading.unit + } + ) + + async def _validate_sensor_reading(self, reading: SensorReading) -> Dict[str, Any]: + """Validate sensor reading against business rules""" + errors = [] + + # Check timestamp is not too far in the future + future_threshold = datetime.utcnow().timestamp() + 3600 # 1 hour + if reading.timestamp > future_threshold: + errors.append("Timestamp is too far in the future") + + # Check timestamp is not too old + past_threshold = datetime.utcnow().timestamp() - 86400 # 24 hours + if reading.timestamp < past_threshold: + errors.append("Timestamp is too old") + + # Validate sensor values + if reading.energy: + energy_value = reading.energy.get("value", 0) + if energy_value < 0 or energy_value > 1000: # Reasonable energy range + errors.append("Energy value is out of acceptable range") + + if reading.co2: + co2_value = reading.co2.get("value", 0) + if co2_value < 0 or co2_value > 50000: # Reasonable CO2 range + errors.append("CO2 value is out of acceptable range") + + if reading.temperature: + temp_value = reading.temperature.get("value", 0) + if temp_value < -50 or temp_value > 100: # Reasonable temperature range + errors.append("Temperature value is out of acceptable range") + + return { + "valid": len(errors) == 0, + "errors": errors + } + + async def _update_caches(self, reading: SensorReading) -> None: + """Update Redis caches with latest sensor data""" + # Cache latest sensor reading + await self.redis_repo.set_sensor_data( + reading.sensor_id, + reading.dict(), + expire_seconds=3600 + ) + + # Update sensor status + status_data = { + "status": "online", + "last_seen": reading.timestamp, + "room": reading.room + } + await self.redis_repo.set_sensor_status( + reading.sensor_id, + status_data, + expire_seconds=1800 + ) + + async def _update_sensor_metadata(self, reading: SensorReading) -> None: + """Update or create sensor metadata""" + existing = await self.sensor_metadata_repo.get_by_sensor_id(reading.sensor_id) + + if existing: + # Update existing metadata + updates = { + "last_seen": datetime.utcnow(), + "status": SensorStatus.ONLINE.value + } + + # Add sensor type to monitoring capabilities if not present + capabilities = existing.get("monitoring_capabilities", []) + if reading.sensor_type.value not in capabilities: + capabilities.append(reading.sensor_type.value) + 
updates["monitoring_capabilities"] = capabilities + + await self.sensor_metadata_repo.update(reading.sensor_id, updates) + else: + # Create new sensor metadata + metadata = SensorMetadata( + sensor_id=reading.sensor_id, + name=f"Sensor {reading.sensor_id}", + sensor_type=reading.sensor_type, + room=reading.room, + status=SensorStatus.ONLINE, + last_seen=datetime.utcnow(), + monitoring_capabilities=[reading.sensor_type.value] + ) + + await self.sensor_metadata_repo.create(metadata) + logger.info(f"Created metadata for new sensor: {reading.sensor_id}") + + async def _check_sensor_alerts(self, reading: SensorReading) -> None: + """Check for alert conditions in sensor data""" + alerts = [] + + # CO2 level alerts + if reading.co2: + co2_level = reading.co2.get("value", 0) + if co2_level > 5000: + alerts.append({ + "event_type": "co2_critical", + "severity": "critical", + "title": "Critical CO2 Level", + "description": f"CO2 level ({co2_level} ppm) exceeds critical threshold in {reading.room or 'unknown room'}" + }) + elif co2_level > 1000: + alerts.append({ + "event_type": "co2_high", + "severity": "warning", + "title": "High CO2 Level", + "description": f"CO2 level ({co2_level} ppm) is above recommended levels in {reading.room or 'unknown room'}" + }) + + # Energy consumption alerts + if reading.energy: + energy_value = reading.energy.get("value", 0) + if energy_value > 10: + alerts.append({ + "event_type": "energy_high", + "severity": "warning", + "title": "High Energy Consumption", + "description": f"Energy consumption ({energy_value} kWh) is unusually high for sensor {reading.sensor_id}" + }) + + # Temperature alerts + if reading.temperature: + temp_value = reading.temperature.get("value", 0) + if temp_value > 30 or temp_value < 15: + alerts.append({ + "event_type": "temperature_extreme", + "severity": "warning", + "title": "Extreme Temperature", + "description": f"Temperature ({temp_value}°C) is outside normal range in {reading.room or 'unknown room'}" + }) + + # Log alerts as system events + for alert in alerts: + await self._log_alert_event(reading, **alert) + + async def _log_alert_event(self, reading: SensorReading, event_type: str, severity: str, + title: str, description: str) -> None: + """Log an alert as a system event""" + from models import SystemEvent + + event = SystemEvent( + event_id=str(uuid.uuid4()), + event_type=event_type, + severity=severity, + timestamp=int(datetime.utcnow().timestamp()), + title=title, + description=description, + sensor_id=reading.sensor_id, + room=reading.room, + source="sensor_service", + data=reading.dict() + ) + + await self.system_event_repo.create(event) + + async def _log_processing_error(self, error_message: str, raw_data: str) -> None: + """Log data processing error""" + from models import SystemEvent + + event = SystemEvent( + event_id=str(uuid.uuid4()), + event_type="data_processing_error", + severity="error", + timestamp=int(datetime.utcnow().timestamp()), + title="Sensor Data Processing Failed", + description=f"Failed to process sensor message: {error_message}", + source="sensor_service", + data={"raw_message": raw_data} + ) + + await self.system_event_repo.create(event) + + async def get_sensor_details(self, sensor_id: str) -> Optional[Dict[str, Any]]: + """Get complete sensor details including metadata and recent readings""" + # Get metadata + metadata = await self.sensor_metadata_repo.get_by_sensor_id(sensor_id) + if not metadata: + return None + + # Get recent readings + recent_readings = await 
self.sensor_reading_repo.get_recent_by_sensor( + sensor_id=sensor_id, + limit=100, + minutes=1440 # 24 hours + ) + + # Get latest reading from cache + latest_reading = await self.redis_repo.get_sensor_data(sensor_id) + + return { + "sensor": metadata, + "latest_reading": latest_reading, + "recent_readings_count": len(recent_readings), + "recent_readings": recent_readings[:10] # Return only 10 most recent + } + + async def update_sensor_metadata(self, sensor_id: str, metadata_updates: Dict[str, Any]) -> bool: + """Update sensor metadata with business validation""" + # Validate updates + if "sensor_id" in metadata_updates: + del metadata_updates["sensor_id"] # Cannot change sensor ID + + # Update timestamp + metadata_updates["updated_at"] = datetime.utcnow() + + return await self.sensor_metadata_repo.update(sensor_id, metadata_updates) + + async def delete_sensor(self, sensor_id: str) -> Dict[str, Any]: + """Delete a sensor and all its associated data""" + # Delete readings + readings_deleted = await self.sensor_reading_repo.delete_by_sensor_id(sensor_id) + + # Delete metadata + metadata_deleted = await self.sensor_metadata_repo.delete(sensor_id) + + # Clear cache + await self.redis_repo.delete_sensor_cache(sensor_id) + + return { + "sensor_id": sensor_id, + "readings_deleted": readings_deleted, + "metadata_deleted": metadata_deleted + } + + async def get_all_sensors(self, filters: Dict[str, Any] = None) -> Dict[str, Any]: + """Get all sensors with optional filtering""" + sensors = await self.sensor_metadata_repo.get_all(filters) + + return { + "sensors": sensors, + "count": len(sensors), + "filters": filters or {} + } \ No newline at end of file diff --git a/layers/infrastructure/__init__.py b/layers/infrastructure/__init__.py new file mode 100644 index 0000000..e8a337c --- /dev/null +++ b/layers/infrastructure/__init__.py @@ -0,0 +1 @@ +# Empty file to make this a Python package \ No newline at end of file diff --git a/layers/infrastructure/__pycache__/__init__.cpython-312.pyc b/layers/infrastructure/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..898b81d21c2ba9fb6452a5bdea387311af1b2ecb GIT binary patch literal 186 zcmX@j%ge<81XVRVGqiy8V-N=&d}aZPOlPQM&}8&m$xy@urWO_J zXXd39B^H+yl_r;z7NzRP$7kkcmc+;F6;%G>u*uC&Da}c>D`ExO#0bR2AjU^#Mn=XW HW*`dy3)?cH literal 0 HcmV?d00001 diff --git a/layers/infrastructure/__pycache__/__init__.cpython-39.pyc b/layers/infrastructure/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35add973ade2aa01fdd7925b94b5524f3ac15a0c GIT binary patch literal 180 zcmYe~<>g`kf~uOG8CpR4F^Gc<7=auIATDMB5-AM944RC7D;bJF!U*D5q<%(zZmND! 
[GIT binary patch data for the compiled layers/infrastructure/__pycache__/*.pyc files (database_connection, redis_connection, repositories bytecode) omitted]
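Because the commit also checks in the compiled __pycache__ bytecode referenced above, a conventional ignore rule (illustrative only, not part of this patch) would keep those build artifacts out of future commits:

__pycache__/
*.pyc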
zg7T;!uu}*-1n>*I>x`V3*3Jz-hYl2()>d}svE8MZ#F80WTfAFjEx>OYO%}qHYoN99 zV!aYstk`7Y+jvh!WS^y@RVv1S7w6gl3vqIkHy9{^BUo7&$N!mll6-@_Ma6&$UaWnI zj+g)+jD95*Tk@#5C5wt65G$iW6KkZrh?{cb$d0v-wdgcGWsEqKxQdE|Y3l85kVVd4 z@q!x`_RG4d7mak8KViqL)>p~)$FOi=+zq^;PY()E)T(nc;qeePc#n#6R4_6!C{l7x z<={y5Br%!Hh=Mh>(uzk8HE=F*6hg0c1(f+HeQP$U<8wgk>{{_ zXM7z(!TvRsq%()Kf(I>&mrl)*ofs*vRv0_>T@V?VC?Bzj(iTuVJg*pKhZ0n27J&xt zVqaS3B_CH*6m&_oyi|KTd|Hu9nuAQ}L-zu&ALb)-%kTHFA#A%l@Z}s{j52jWgF@N{ zPs(sA@~P%C`JG?K4RSG_3id8h-NvNcQ`b{Y3WF$I~HGz`{ zn1H$$v(I4w2Y3G;XZ_^fKscRth02fTC>FUChgdZCaWI_VRW^nKU`#mE6L0%WZDQC4Mrz|r>%zQ2a8ny;A1>^GHtgbmRxo_ykBpBI!(ru@oI*tZ z7=g|9gHE&MA#HA4ctP^SGwVyKUsSwIBcr`=uY8DU`cK@dg%UeZg{WL?+R3vTirV>(_wO~qR zVuA)W5fn+Y;-myEAD&(xZzqXuis>2c5^2Wk1`$BqhC!Jw5S4>oyR+;F8B=Efy28I|kets|Wvp6%b z;rN9k8B%=}*l?V4kE%ohV> sw~d_3YFo!Ecw^0XQJAI!`&hxZtUCxz%le8@G%DZK%*xfuTIKBj00krm;{X5v literal 0 HcmV?d00001 diff --git a/layers/infrastructure/database_connection.py b/layers/infrastructure/database_connection.py new file mode 100644 index 0000000..cc024b5 --- /dev/null +++ b/layers/infrastructure/database_connection.py @@ -0,0 +1,95 @@ +""" +Database connection management for MongoDB +Infrastructure Layer - handles low-level database connectivity +""" +import os +from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase +from pymongo import IndexModel, ASCENDING, DESCENDING +from typing import Optional +import logging + +logger = logging.getLogger(__name__) + +class DatabaseConnection: + """Manages MongoDB connection and database operations""" + + def __init__(self): + self.client: Optional[AsyncIOMotorClient] = None + self.database: Optional[AsyncIOMotorDatabase] = None + self._mongodb_url = os.getenv("MONGODB_URL", "mongodb://localhost:27017") + self._database_name = os.getenv("DATABASE_NAME", "energy_monitoring") + + async def connect(self) -> None: + """Establish connection to MongoDB""" + try: + logger.info(f"Connecting to MongoDB at: {self._mongodb_url}") + + self.client = AsyncIOMotorClient(self._mongodb_url) + await self.client.admin.command('ping') + + self.database = self.client[self._database_name] + await self._create_indexes() + + logger.info("Successfully connected to MongoDB") + + except Exception as e: + logger.error(f"Error connecting to MongoDB: {e}") + raise + + async def disconnect(self) -> None: + """Close MongoDB connection""" + if self.client: + self.client.close() + logger.info("Disconnected from MongoDB") + + async def get_database(self) -> AsyncIOMotorDatabase: + """Get database instance""" + if not self.database: + await self.connect() + return self.database + + async def _create_indexes(self) -> None: + """Create database indexes for optimal performance""" + try: + # Sensor readings collection indexes + sensor_readings_indexes = [ + IndexModel([("sensor_id", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("timestamp", DESCENDING)]), + IndexModel([("room", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("sensor_type", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("created_at", DESCENDING)]), + ] + await self.database.sensor_readings.create_indexes(sensor_readings_indexes) + + # Room metrics collection indexes + room_metrics_indexes = [ + IndexModel([("room", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("timestamp", DESCENDING)]), + IndexModel([("created_at", DESCENDING)]), + ] + await self.database.room_metrics.create_indexes(room_metrics_indexes) + + # Sensor metadata collection indexes + sensor_metadata_indexes = [ + IndexModel([("sensor_id", ASCENDING)], 
unique=True), + IndexModel([("room", ASCENDING)]), + IndexModel([("sensor_type", ASCENDING)]), + IndexModel([("status", ASCENDING)]), + ] + await self.database.sensor_metadata.create_indexes(sensor_metadata_indexes) + + # System events collection indexes + system_events_indexes = [ + IndexModel([("timestamp", DESCENDING)]), + IndexModel([("event_type", ASCENDING), ("timestamp", DESCENDING)]), + IndexModel([("severity", ASCENDING), ("timestamp", DESCENDING)]), + ] + await self.database.system_events.create_indexes(system_events_indexes) + + logger.info("Database indexes created successfully") + + except Exception as e: + logger.error(f"Error creating indexes: {e}") + +# Global database connection instance +database_connection = DatabaseConnection() \ No newline at end of file diff --git a/layers/infrastructure/redis_connection.py b/layers/infrastructure/redis_connection.py new file mode 100644 index 0000000..574414f --- /dev/null +++ b/layers/infrastructure/redis_connection.py @@ -0,0 +1,80 @@ +""" +Redis connection management and operations +Infrastructure Layer - handles Redis connectivity and low-level operations +""" +import os +import json +from typing import Optional, Dict, Any +import logging +import redis.asyncio as redis + +logger = logging.getLogger(__name__) + +class RedisConnection: + """Manages Redis connection and basic operations""" + + def __init__(self): + self.redis_client: Optional[redis.Redis] = None + self._host = os.getenv("REDIS_HOST", "localhost") + self._port = int(os.getenv("REDIS_PORT", "6379")) + self._db = int(os.getenv("REDIS_DB", "0")) + + async def connect(self) -> None: + """Connect to Redis""" + try: + self.redis_client = redis.Redis( + host=self._host, + port=self._port, + db=self._db, + decode_responses=True + ) + await self.redis_client.ping() + logger.info("Successfully connected to Redis") + except Exception as e: + logger.error(f"Error connecting to Redis: {e}") + raise + + async def disconnect(self) -> None: + """Disconnect from Redis""" + if self.redis_client: + await self.redis_client.close() + logger.info("Disconnected from Redis") + + async def get_client(self) -> redis.Redis: + """Get Redis client instance""" + if not self.redis_client: + await self.connect() + return self.redis_client + + async def set_with_expiry(self, key: str, value: str, expire_seconds: int = 3600) -> None: + """Set a key-value pair with expiration""" + client = await self.get_client() + await client.setex(key, expire_seconds, value) + + async def get(self, key: str) -> Optional[str]: + """Get value by key""" + client = await self.get_client() + return await client.get(key) + + async def delete(self, key: str) -> None: + """Delete a key""" + client = await self.get_client() + await client.delete(key) + + async def get_keys_by_pattern(self, pattern: str) -> list: + """Get keys matching a pattern""" + client = await self.get_client() + return await client.keys(pattern) + + async def publish(self, channel: str, message: str) -> None: + """Publish message to a channel""" + client = await self.get_client() + await client.publish(channel, message) + + async def create_pubsub(self) -> redis.client.PubSub: + """Create a pub/sub instance""" + client = await self.get_client() + return client.pubsub() + +# Global Redis connection instance +redis_connection = RedisConnection() \ No newline at end of file diff --git a/layers/infrastructure/repositories.py b/layers/infrastructure/repositories.py new file mode 100644 index 0000000..c9c2945 --- /dev/null +++ 
b/layers/infrastructure/repositories.py @@ -0,0 +1,362 @@ +""" +Repository classes for data access +Infrastructure Layer - handles database operations and queries +""" +import json +from datetime import datetime, timedelta +from typing import List, Dict, Any, Optional +from pymongo import ASCENDING, DESCENDING +from pymongo.errors import DuplicateKeyError +import logging + +from .database_connection import database_connection +from .redis_connection import redis_connection +from models import SensorReading, SensorMetadata, RoomMetrics, SystemEvent + +logger = logging.getLogger(__name__) + +class SensorReadingRepository: + """Repository for sensor reading data operations""" + + async def create(self, reading: SensorReading) -> bool: + """Store sensor reading in MongoDB""" + try: + db = await database_connection.get_database() + reading_dict = reading.dict() + + # Add document ID for deduplication + reading_dict["_id"] = f"{reading.sensor_id}_{reading.timestamp}" + + await db.sensor_readings.insert_one(reading_dict) + logger.debug(f"Stored sensor reading for {reading.sensor_id}") + return True + + except DuplicateKeyError: + logger.debug(f"Duplicate reading ignored for {reading.sensor_id} at {reading.timestamp}") + return True + except Exception as e: + logger.error(f"Error storing sensor reading: {e}") + return False + + async def get_recent_by_sensor(self, sensor_id: str, limit: int = 100, minutes: int = 60) -> List[Dict]: + """Get recent readings for a specific sensor""" + try: + db = await database_connection.get_database() + query = { + "sensor_id": sensor_id, + "created_at": {"$gte": datetime.utcnow() - timedelta(minutes=minutes)} + } + + cursor = db.sensor_readings.find(query).sort("created_at", -1).limit(limit) + readings = await cursor.to_list(length=limit) + + # Convert ObjectId to string + for reading in readings: + reading["_id"] = str(reading["_id"]) + + return readings + + except Exception as e: + logger.error(f"Error getting recent readings for {sensor_id}: {e}") + return [] + + async def get_recent_by_room(self, room: str, minutes: int = 5) -> List[Dict]: + """Get recent readings for a specific room""" + try: + db = await database_connection.get_database() + recent_time = datetime.utcnow() - timedelta(minutes=minutes) + + cursor = db.sensor_readings.find({ + "room": room, + "created_at": {"$gte": recent_time} + }) + + readings = await cursor.to_list(length=None) + return readings + + except Exception as e: + logger.error(f"Error getting recent readings for room {room}: {e}") + return [] + + async def get_by_query(self, query: Dict[str, Any], sort_by: str = "timestamp", + sort_order: str = "desc", limit: int = 100, offset: int = 0) -> List[Dict]: + """Get readings by complex query""" + try: + db = await database_connection.get_database() + + sort_direction = DESCENDING if sort_order == "desc" else ASCENDING + cursor = db.sensor_readings.find(query).sort(sort_by, sort_direction).skip(offset).limit(limit) + + readings = await cursor.to_list(length=limit) + + # Convert ObjectId to string + for reading in readings: + reading["_id"] = str(reading["_id"]) + + return readings + + except Exception as e: + logger.error(f"Error querying sensor readings: {e}") + return [] + + async def count_by_query(self, query: Dict[str, Any]) -> int: + """Count readings matching query""" + try: + db = await database_connection.get_database() + return await db.sensor_readings.count_documents(query) + except Exception as e: + logger.error(f"Error counting sensor readings: {e}") + return 0 + + async def 
get_distinct_rooms(self) -> List[str]: + """Get list of distinct rooms""" + try: + db = await database_connection.get_database() + return await db.sensor_readings.distinct("room", {"room": {"$ne": None}}) + except Exception as e: + logger.error(f"Error getting distinct rooms: {e}") + return [] + + async def get_distinct_sensor_ids_by_room(self, room: str) -> List[str]: + """Get distinct sensor IDs for a room""" + try: + db = await database_connection.get_database() + return await db.sensor_readings.distinct("sensor_id", {"room": room}) + except Exception as e: + logger.error(f"Error getting distinct sensor IDs for room {room}: {e}") + return [] + + async def delete_by_sensor_id(self, sensor_id: str) -> int: + """Delete all readings for a sensor""" + try: + db = await database_connection.get_database() + result = await db.sensor_readings.delete_many({"sensor_id": sensor_id}) + return result.deleted_count + except Exception as e: + logger.error(f"Error deleting readings for sensor {sensor_id}: {e}") + return 0 + + async def aggregate(self, pipeline: List[Dict]) -> List[Dict]: + """Execute aggregation pipeline""" + try: + db = await database_connection.get_database() + cursor = db.sensor_readings.aggregate(pipeline) + return await cursor.to_list(length=None) + except Exception as e: + logger.error(f"Error executing aggregation: {e}") + return [] + +class SensorMetadataRepository: + """Repository for sensor metadata operations""" + + async def create(self, metadata: SensorMetadata) -> bool: + """Create sensor metadata""" + try: + db = await database_connection.get_database() + await db.sensor_metadata.insert_one(metadata.dict()) + logger.info(f"Created metadata for sensor: {metadata.sensor_id}") + return True + except Exception as e: + logger.error(f"Error creating sensor metadata: {e}") + return False + + async def update(self, sensor_id: str, updates: Dict[str, Any]) -> bool: + """Update sensor metadata""" + try: + db = await database_connection.get_database() + updates["updated_at"] = datetime.utcnow() + + result = await db.sensor_metadata.update_one( + {"sensor_id": sensor_id}, + {"$set": updates} + ) + return result.modified_count > 0 + except Exception as e: + logger.error(f"Error updating sensor metadata: {e}") + return False + + async def get_by_sensor_id(self, sensor_id: str) -> Optional[Dict]: + """Get sensor metadata by ID""" + try: + db = await database_connection.get_database() + metadata = await db.sensor_metadata.find_one({"sensor_id": sensor_id}) + if metadata: + metadata["_id"] = str(metadata["_id"]) + return metadata + except Exception as e: + logger.error(f"Error getting sensor metadata: {e}") + return None + + async def get_all(self, filters: Dict[str, Any] = None) -> List[Dict]: + """Get all sensor metadata with optional filters""" + try: + db = await database_connection.get_database() + query = filters or {} + + cursor = db.sensor_metadata.find(query).sort("created_at", DESCENDING) + metadata_list = await cursor.to_list(length=None) + + # Convert ObjectId to string + for metadata in metadata_list: + metadata["_id"] = str(metadata["_id"]) + + return metadata_list + except Exception as e: + logger.error(f"Error getting sensor metadata: {e}") + return [] + + async def delete(self, sensor_id: str) -> bool: + """Delete sensor metadata""" + try: + db = await database_connection.get_database() + result = await db.sensor_metadata.delete_one({"sensor_id": sensor_id}) + return result.deleted_count > 0 + except Exception as e: + logger.error(f"Error deleting sensor metadata: {e}") + 
return False + +class RoomMetricsRepository: + """Repository for room metrics operations""" + + async def create(self, metrics: RoomMetrics) -> bool: + """Store room metrics""" + try: + db = await database_connection.get_database() + await db.room_metrics.insert_one(metrics.dict()) + logger.debug(f"Stored room metrics for {metrics.room}") + return True + except Exception as e: + logger.error(f"Error storing room metrics: {e}") + return False + + async def get_by_room(self, room: str, limit: int = 100) -> List[Dict]: + """Get room metrics by room name""" + try: + db = await database_connection.get_database() + cursor = db.room_metrics.find({"room": room}).sort("timestamp", DESCENDING).limit(limit) + metrics = await cursor.to_list(length=limit) + + # Convert ObjectId to string + for metric in metrics: + metric["_id"] = str(metric["_id"]) + + return metrics + except Exception as e: + logger.error(f"Error getting room metrics for {room}: {e}") + return [] + +class SystemEventRepository: + """Repository for system events operations""" + + async def create(self, event: SystemEvent) -> bool: + """Create system event""" + try: + db = await database_connection.get_database() + await db.system_events.insert_one(event.dict()) + logger.info(f"System event logged: {event.event_type} - {event.title}") + return True + except Exception as e: + logger.error(f"Error logging system event: {e}") + return False + + async def get_recent(self, hours: int = 24, limit: int = 50, + filters: Dict[str, Any] = None) -> List[Dict]: + """Get recent system events""" + try: + db = await database_connection.get_database() + start_time = datetime.utcnow() - timedelta(hours=hours) + + query = {"created_at": {"$gte": start_time}} + if filters: + query.update(filters) + + cursor = db.system_events.find(query).sort("timestamp", DESCENDING).limit(limit) + events = await cursor.to_list(length=limit) + + # Convert ObjectId to string + for event in events: + event["_id"] = str(event["_id"]) + + return events + except Exception as e: + logger.error(f"Error getting recent events: {e}") + return [] + +class RedisRepository: + """Repository for Redis cache operations""" + + async def set_sensor_data(self, sensor_id: str, data: Dict[str, Any], expire_seconds: int = 3600) -> bool: + """Store latest sensor data in Redis cache""" + try: + key = f"sensor:latest:{sensor_id}" + json_data = json.dumps(data) + await redis_connection.set_with_expiry(key, json_data, expire_seconds) + return True + except Exception as e: + logger.error(f"Error caching sensor data: {e}") + return False + + async def get_sensor_data(self, sensor_id: str) -> Optional[Dict[str, Any]]: + """Get latest sensor data from Redis cache""" + try: + key = f"sensor:latest:{sensor_id}" + data = await redis_connection.get(key) + if data: + return json.loads(data) + return None + except Exception as e: + logger.error(f"Error getting cached sensor data: {e}") + return None + + async def set_sensor_status(self, sensor_id: str, status_data: Dict[str, Any], expire_seconds: int = 1800) -> bool: + """Set sensor status in Redis""" + try: + key = f"sensor:status:{sensor_id}" + json_data = json.dumps(status_data) + await redis_connection.set_with_expiry(key, json_data, expire_seconds) + return True + except Exception as e: + logger.error(f"Error setting sensor status: {e}") + return False + + async def set_room_metrics(self, room: str, metrics: Dict[str, Any], expire_seconds: int = 1800) -> bool: + """Store room metrics in Redis cache""" + try: + key = f"room:metrics:{room}" + json_data = 
json.dumps(metrics) + await redis_connection.set_with_expiry(key, json_data, expire_seconds) + return True + except Exception as e: + logger.error(f"Error caching room metrics: {e}") + return False + + async def get_room_metrics(self, room: str) -> Optional[Dict[str, Any]]: + """Get room metrics from Redis cache""" + try: + key = f"room:metrics:{room}" + data = await redis_connection.get(key) + if data: + return json.loads(data) + return None + except Exception as e: + logger.error(f"Error getting cached room metrics: {e}") + return None + + async def get_active_sensors(self) -> List[str]: + """Get list of active sensors from Redis""" + try: + keys = await redis_connection.get_keys_by_pattern("sensor:latest:*") + return [key.replace("sensor:latest:", "") for key in keys] + except Exception as e: + logger.error(f"Error getting active sensors: {e}") + return [] + + async def delete_sensor_cache(self, sensor_id: str) -> bool: + """Delete all cached data for a sensor""" + try: + await redis_connection.delete(f"sensor:latest:{sensor_id}") + await redis_connection.delete(f"sensor:status:{sensor_id}") + return True + except Exception as e: + logger.error(f"Error deleting sensor cache: {e}") + return False \ No newline at end of file diff --git a/layers/presentation/__init__.py b/layers/presentation/__init__.py new file mode 100644 index 0000000..e8a337c --- /dev/null +++ b/layers/presentation/__init__.py @@ -0,0 +1 @@ +# Empty file to make this a Python package \ No newline at end of file diff --git a/layers/presentation/__pycache__/__init__.cpython-39.pyc b/layers/presentation/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce43cbddc694938dac907434bd390ee27b493bc1 GIT binary patch literal 178 zcmYe~<>g`kf}EP28CpR4F^Gc<7=auIATDMB5-AM944RC7D;bJF!U*D5xPC@{ZmND! 
[GIT binary patch data for the compiled layers/presentation/__pycache__/*.pyc files (__init__, api_routes, redis_subscriber bytecode) omitted]
zJ5^DgxuSMVJN2&!uW;u%;m#53Y>15`(%IzfE@@V?LH)isyqkuPg!aNTNkpilH1YaD z!eb%px1~@b(Shl_6^w=Su6TnW33@_#>t-qsW_nTt@f95n1gtq2jFKocE7X64Ymj!6 z=Ju^f=_bo-i#75yi|UO?l}8D2{OV&v<2t~>pAJzqQY%ndC9oL ztIzF@vuL}#2JNbFO;7!$#W(mS^wfk4GqzyHHa`XJx~Q0*28=k(cc5oOZ1OWNDc|Mq zKx?aPo#pRBYn$5uG#m}Uj;f`CtKm?! zhBAE!rdBN;sQzIZNZvXOfCLF|#b{+5v9x^asW?=IBYsJYz2P`_eLqSf?fa+jn0Q`7 zv-58Pu{R&pMsqI~${i{6NG73~hjZbpw0Qe4jsdaeV9C{~$w-QCJA+!2HH ziCBw8_VE`541D2#YmjI+D{PzuDRfO88bhZYp`2ju{fHH7&yRI=R(n*lJ#bWWQ4Bss8&rsF%5OhGNPOBw-U z6KhVI37OdwL!bNP8=%nC`p!z~#qU1ZpOfm!T`;&;p2yRvr-mYox>2|m%y#B|DP<}> zkWUW&?G3@H(gZkFt^k~>2Yw=9ywJ)C`XF+@oqIpH&ba@so)wByry$f{%L{7z< z5Wizqns4Hln|uH>jDQfv=q}x+4Vqm#f%w{~O(xBL2J=-KqHPiLBV7C6P`scIVH0${ z?AZ_G2ij#uj9u)X(PMH0XgFn>J+vka#W|vXEv+-PdSq$?Rjng>gFN{4w`^LOI^fif zhDarEPV89~SD94K6Ljy#&;#SPr_eidCsoro1PfmU3p?X~f`whM@I_sl)wwfq=N2yQ z>VD@MyTb3nuGQ?*-_K=uZW=Uh5QFy@@lf!&4zyoWX#@qG7#XjS*QnAD8dC~~0}8$% z$Lw$HF~kRBD^zV?1zq7AxJv_pCbGT#yuK82&<}&V5fS4@Mmp-(Vu+X!O@{|pa6e?e z!`T0_y%XkKgq6K(aT*4(I)KW2a(=qdU0=-UrtW3C1y!TOTg(M|ZWMlQhbf4CHBjRu zjMCic1-cK4R}%?@6d7od*CQ1rN(V_O)nzg%C4} zwd(A2gD4h!HJs%%!VrPq<*#tL3s@LDJggZ|YK!Aufc+Y@?)V^AF>`W6f< zk{6bJa^m3YX1pc>$PwJZM4&EIFzgm;2-;=>bfJL{4yG)-2+@g|goc@fhR#3}QN`|) z$LD}4_V`}G9bK6@+`=#4z?Xe^G1&V=S0|9}03LSs$J-cIR&)w6KKP+| zGogTW&Xnt{z^)2^e$zI%pbxiM9eEp;P#6IVueO0+3|`Ecc7SYa?6q!{<|#~7lVBix zKd<}#AmyVN>zls+cof9tm#XjcH1vJB0sBd`tK7u`_s=b*rM!sE%UB@$b1O=;L@}28 zSS&BJV4TEc7X3&eF None: + """Start Redis subscription in background task""" + if self.is_running: + logger.warning("Redis subscriber is already running") + return + + self.is_running = True + self.subscription_task = asyncio.create_task(self._subscribe_loop(channel)) + logger.info(f"Started Redis subscriber for channel: {channel}") + + async def stop_subscription(self) -> None: + """Stop Redis subscription""" + self.is_running = False + if self.subscription_task: + self.subscription_task.cancel() + try: + await self.subscription_task + except asyncio.CancelledError: + pass + logger.info("Redis subscriber stopped") + + async def _subscribe_loop(self, channel: str) -> None: + """Main subscription loop""" + logger.info("Starting Redis subscriber...") + + try: + # Get Redis client and create pubsub + redis_client = await redis_connection.get_client() + pubsub = await redis_connection.create_pubsub() + + # Subscribe to channel + await pubsub.subscribe(channel) + logger.info(f"Subscribed to Redis channel: '{channel}'") + + while self.is_running: + try: + # Get message with timeout + message = await pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0) + + if message and message.get('data'): + await self._process_message(message['data']) + + except Exception as e: + logger.error(f"Error in Redis subscriber loop: {e}") + # Add delay to prevent rapid-fire errors + await asyncio.sleep(5) + + except Exception as e: + logger.error(f"Could not connect to Redis for subscription: {e}") + finally: + # Clean up pubsub connection + try: + await pubsub.unsubscribe(channel) + await pubsub.close() + except Exception as e: + logger.error(f"Error closing pubsub connection: {e}") + + async def _process_message(self, message_data: str) -> None: + """Process incoming Redis message""" + try: + logger.debug(f"Received from Redis: {message_data}") + + # Process sensor data through business layer + processing_success = await self.sensor_service.process_sensor_message(message_data) + + if processing_success: + # Extract room from message for room metrics update + import json + try: + data = json.loads(message_data) + room = data.get('room') + if room: + 
# Update room metrics asynchronously + asyncio.create_task(self.room_service.update_room_metrics(room)) + except json.JSONDecodeError: + logger.warning("Could not parse message for room extraction") + + # Broadcast to WebSocket clients + await websocket_manager.broadcast(message_data) + else: + logger.warning("Sensor data processing failed, skipping broadcast") + + except Exception as e: + logger.error(f"Error processing Redis message: {e}") + + def is_subscriber_running(self) -> bool: + """Check if subscriber is currently running""" + return self.is_running and ( + self.subscription_task is not None and + not self.subscription_task.done() + ) + + async def get_subscriber_status(self) -> dict: + """Get subscriber status information""" + return { + "is_running": self.is_running, + "task_status": ( + "running" if self.subscription_task and not self.subscription_task.done() + else "stopped" + ), + "active_websocket_connections": websocket_manager.get_connection_count() + } + +# Global Redis subscriber instance +redis_subscriber = RedisSubscriber() \ No newline at end of file diff --git a/layers/presentation/websocket_handler.py b/layers/presentation/websocket_handler.py new file mode 100644 index 0000000..cb565ad --- /dev/null +++ b/layers/presentation/websocket_handler.py @@ -0,0 +1,97 @@ +""" +WebSocket connection handler +Presentation Layer - manages WebSocket connections and real-time communication +""" +import asyncio +from typing import List +from fastapi import WebSocket, WebSocketDisconnect +import logging + +logger = logging.getLogger(__name__) + +class WebSocketManager: + """Manages WebSocket connections and broadcasting""" + + def __init__(self): + self.active_connections: List[WebSocket] = [] + + async def connect(self, websocket: WebSocket) -> None: + """Accept and store new WebSocket connection""" + await websocket.accept() + self.active_connections.append(websocket) + logger.info(f"New client connected. Total clients: {len(self.active_connections)}") + + def disconnect(self, websocket: WebSocket) -> None: + """Remove WebSocket connection""" + if websocket in self.active_connections: + self.active_connections.remove(websocket) + logger.info(f"Client disconnected. 
Total clients: {len(self.active_connections)}") + + async def send_personal_message(self, message: str, websocket: WebSocket) -> None: + """Send message to specific WebSocket connection""" + try: + await websocket.send_text(message) + except Exception as e: + logger.error(f"Error sending personal message: {e}") + self.disconnect(websocket) + + async def broadcast(self, message: str) -> None: + """Broadcast message to all connected clients""" + if not self.active_connections: + return + + try: + # Send to all connections concurrently + tasks = [ + self._safe_send_message(connection, message) + for connection in self.active_connections.copy() + ] + + # Execute all sends concurrently and handle exceptions + results = await asyncio.gather(*tasks, return_exceptions=True) + + # Remove failed connections + failed_connections = [] + for i, result in enumerate(results): + if isinstance(result, Exception): + failed_connections.append(self.active_connections[i]) + + for connection in failed_connections: + self.disconnect(connection) + + except Exception as e: + logger.error(f"Error in broadcast: {e}") + + async def _safe_send_message(self, websocket: WebSocket, message: str) -> None: + """Safely send message to WebSocket with error handling""" + try: + await websocket.send_text(message) + except WebSocketDisconnect: + # Connection was closed + raise + except Exception as e: + logger.error(f"Error sending message to client: {e}") + raise + + def get_connection_count(self) -> int: + """Get number of active connections""" + return len(self.active_connections) + + async def ping_all_connections(self) -> int: + """Ping all connections to check health, return number of healthy connections""" + if not self.active_connections: + return 0 + + healthy_connections = [] + for connection in self.active_connections.copy(): + try: + await connection.ping() + healthy_connections.append(connection) + except Exception: + logger.debug("Removing unhealthy connection") + + self.active_connections = healthy_connections + return len(healthy_connections) + +# Global WebSocket manager instance +websocket_manager = WebSocketManager() \ No newline at end of file diff --git a/main.py b/main.py new file mode 100644 index 0000000..e0e6580 --- /dev/null +++ b/main.py @@ -0,0 +1,202 @@ + +import asyncio +import json +import redis.asyncio as redis +import time +import os +from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException, Depends, Query +from fastapi.middleware.cors import CORSMiddleware +from typing import List, Optional +import logging +from contextlib import asynccontextmanager + +# Import our custom modules +from database import connect_to_mongo, close_mongo_connection, redis_manager, schedule_cleanup +from persistence import persistence_service +from models import DataQuery, DataResponse, HealthCheck +from api import router as api_router + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Application startup time for uptime calculation +app_start_time = time.time() + +@asynccontextmanager +async def lifespan(app: FastAPI): + """Application lifespan manager""" + # Startup + logger.info("Application starting up...") + + # Connect to databases + await connect_to_mongo() + await persistence_service.initialize() + + # Start background tasks + asyncio.create_task(redis_subscriber()) + asyncio.create_task(schedule_cleanup()) + + logger.info("Application startup complete") + + yield + + # Shutdown + logger.info("Application shutting down...") + await 
close_mongo_connection() + await redis_manager.disconnect() + logger.info("Application shutdown complete") + +app = FastAPI( + title="Energy Monitoring Dashboard API", + description="Real-time energy monitoring and IoT sensor data management system", + version="1.0.0", + lifespan=lifespan +) + +# Add CORS middleware +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Configure appropriately for production + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# Include API router +app.include_router(api_router, prefix="/api/v1") + +# In-memory store for active WebSocket connections +active_connections: List[WebSocket] = [] + +# Redis channel to subscribe to +REDIS_CHANNEL = "energy_data" + + +@app.websocket("/ws") +async def websocket_endpoint(websocket: WebSocket): + """ + WebSocket endpoint that connects a client, adds them to the active pool, + and removes them on disconnection. + """ + await websocket.accept() + active_connections.append(websocket) + logger.info(f"New client connected. Total clients: {len(active_connections)}") + try: + while True: + # Keep the connection alive + await websocket.receive_text() + except WebSocketDisconnect: + active_connections.remove(websocket) + logger.info(f"Client disconnected. Total clients: {len(active_connections)}") + + +async def redis_subscriber(): + """ + Connects to Redis, subscribes to the specified channel, and broadcasts + messages to all active WebSocket clients. Also persists data to MongoDB. + """ + logger.info("Starting Redis subscriber...") + try: + r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True) + await r.ping() + logger.info("Successfully connected to Redis for subscription.") + except Exception as e: + logger.error(f"Could not connect to Redis for subscription: {e}") + return + + pubsub = r.pubsub() + await pubsub.subscribe(REDIS_CHANNEL) + + logger.info(f"Subscribed to Redis channel: '{REDIS_CHANNEL}'") + while True: + try: + message = await pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0) + if message: + message_data = message['data'] + logger.debug(f"Received from Redis: {message_data}") + + # Process and persist the data + await persistence_service.process_sensor_message(message_data) + + # Broadcast message to all connected WebSocket clients + if active_connections: + await asyncio.gather( + *[connection.send_text(message_data) for connection in active_connections], + return_exceptions=True + ) + + except Exception as e: + logger.error(f"Error in Redis subscriber loop: {e}") + # Add a delay to prevent rapid-fire errors + await asyncio.sleep(5) + + +@app.get("/") +async def read_root(): + """Root endpoint with basic system information""" + return { + "message": "Energy Monitoring Dashboard Backend", + "version": "1.0.0", + "status": "running", + "uptime_seconds": time.time() - app_start_time + } + + +@app.get("/health", response_model=HealthCheck) +async def health_check(): + """Health check endpoint""" + try: + # Check database connections + mongodb_connected = True + redis_connected = True + + try: + await persistence_service.db.command("ping") + except: + mongodb_connected = False + + try: + await redis_manager.redis_client.ping() + except: + redis_connected = False + + # Get system statistics + stats = await persistence_service.get_sensor_statistics() + + # Determine overall status + status = "healthy" + if not mongodb_connected or not redis_connected: + status = "degraded" + + return HealthCheck( + status=status, + 
mongodb_connected=mongodb_connected, + redis_connected=redis_connected, + total_sensors=stats.get("total_sensors", 0), + active_sensors=stats.get("active_sensors", 0), + total_readings=stats.get("total_readings", 0), + uptime_seconds=time.time() - app_start_time + ) + + except Exception as e: + logger.error(f"Health check failed: {e}") + raise HTTPException(status_code=503, detail="Service Unavailable") + + +@app.get("/status") +async def system_status(): + """Detailed system status endpoint""" + try: + stats = await persistence_service.get_sensor_statistics() + + return { + "timestamp": time.time(), + "uptime_seconds": time.time() - app_start_time, + "active_websocket_connections": len(active_connections), + "database_stats": stats + } + + except Exception as e: + logger.error(f"Status check failed: {e}") + raise HTTPException(status_code=500, detail="Internal Server Error") diff --git a/main_layered.py b/main_layered.py new file mode 100644 index 0000000..0b40940 --- /dev/null +++ b/main_layered.py @@ -0,0 +1,273 @@ +""" +Main application entry point with layered architecture +This is the new structured version of the FastAPI application +""" +import asyncio +import time +from contextlib import asynccontextmanager +from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException +from fastapi.middleware.cors import CORSMiddleware +import logging + +# Import layered components +from layers.infrastructure.database_connection import database_connection +from layers.infrastructure.redis_connection import redis_connection +from layers.business.sensor_service import SensorService +from layers.business.cleanup_service import cleanup_service +from layers.presentation.websocket_handler import websocket_manager +from layers.presentation.redis_subscriber import redis_subscriber +from layers.presentation.api_routes import router as api_router +from models import HealthCheck + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Application startup time for uptime calculation +app_start_time = time.time() + +@asynccontextmanager +async def lifespan(app: FastAPI): + """Application lifespan manager with proper layer initialization""" + # Startup + logger.info("Application starting up...") + + try: + # Initialize infrastructure layer + await database_connection.connect() + await redis_connection.connect() + logger.info("Infrastructure layer initialized") + + # Initialize business layer + sensor_service = SensorService() # Services are initialized on-demand + logger.info("Business layer initialized") + + # Initialize presentation layer + await redis_subscriber.start_subscription("energy_data") + await cleanup_service.start_scheduled_cleanup(24) # Daily cleanup + logger.info("Presentation layer initialized") + + logger.info("Application startup complete") + + yield + + # Shutdown + logger.info("Application shutting down...") + + # Stop background tasks + await redis_subscriber.stop_subscription() + await cleanup_service.stop_scheduled_cleanup() + + # Close connections + await database_connection.disconnect() + await redis_connection.disconnect() + + logger.info("Application shutdown complete") + + except Exception as e: + logger.error(f"Error during application lifecycle: {e}") + raise + +app = FastAPI( + title="Energy Monitoring Dashboard API", + description="Real-time energy monitoring and IoT sensor data management system (Layered Architecture)", + version="2.0.0", + lifespan=lifespan +) + +# Add CORS middleware +app.add_middleware( + 
CORSMiddleware, + allow_origins=["*"], # Configure appropriately for production + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# Include API router with version prefix +app.include_router(api_router, prefix="/api/v1") + +@app.websocket("/ws") +async def websocket_endpoint(websocket: WebSocket): + """ + WebSocket endpoint for real-time data streaming + Presentation Layer - handles WebSocket connections + """ + await websocket_manager.connect(websocket) + try: + while True: + # Keep the connection alive by waiting for messages + await websocket.receive_text() + except WebSocketDisconnect: + websocket_manager.disconnect(websocket) + +@app.get("/") +async def read_root(): + """Root endpoint with basic system information""" + return { + "message": "Energy Monitoring Dashboard Backend (Layered Architecture)", + "version": "2.0.0", + "status": "running", + "uptime_seconds": time.time() - app_start_time, + "architecture": "3-layer (Presentation, Business, Infrastructure)" + } + +@app.get("/health", response_model=HealthCheck) +async def health_check(): + """ + Comprehensive health check endpoint + Checks all layers and dependencies + """ + try: + # Check infrastructure layer + mongodb_connected = True + redis_connected = True + + try: + db = await database_connection.get_database() + await db.command("ping") + except: + mongodb_connected = False + + try: + redis_client = await redis_connection.get_client() + await redis_client.ping() + except: + redis_connected = False + + # Check business layer through service + sensor_service = SensorService() + from layers.infrastructure.repositories import SensorReadingRepository + stats_repo = SensorReadingRepository() + + # Get basic statistics + try: + # Simple count queries to test business layer + total_readings = await stats_repo.count_by_query({}) + active_sensors_data = await redis_connection.get_keys_by_pattern("sensor:latest:*") + total_sensors = len(active_sensors_data) + except Exception as e: + logger.error(f"Error getting stats for health check: {e}") + total_readings = 0 + total_sensors = 0 + + # Check presentation layer + websocket_connections = websocket_manager.get_connection_count() + redis_subscription_active = redis_subscriber.is_subscriber_running() + + # Determine overall status + status = "healthy" + if not mongodb_connected or not redis_connected: + status = "degraded" + if not mongodb_connected and not redis_connected: + status = "unhealthy" + + return HealthCheck( + status=status, + mongodb_connected=mongodb_connected, + redis_connected=redis_connected, + total_sensors=total_sensors, + active_sensors=total_sensors, # Approximation + total_readings=total_readings, + uptime_seconds=time.time() - app_start_time + ) + + except Exception as e: + logger.error(f"Health check failed: {e}") + raise HTTPException(status_code=503, detail="Service Unavailable") + +@app.get("/status") +async def system_status(): + """ + Detailed system status endpoint with layer-specific information + """ + try: + # Infrastructure layer status + infrastructure_status = { + "database_connected": True, + "redis_connected": True + } + + try: + db = await database_connection.get_database() + await db.command("ping") + except: + infrastructure_status["database_connected"] = False + + try: + redis_client = await redis_connection.get_client() + await redis_client.ping() + except: + infrastructure_status["redis_connected"] = False + + # Business layer status + business_status = { + "cleanup_service_running": 
cleanup_service.is_cleanup_running() + } + + # Presentation layer status + presentation_status = { + "active_websocket_connections": websocket_manager.get_connection_count(), + "redis_subscriber_running": redis_subscriber.is_subscriber_running() + } + + # Get subscriber status details + subscriber_status = await redis_subscriber.get_subscriber_status() + + return { + "timestamp": time.time(), + "uptime_seconds": time.time() - app_start_time, + "architecture": "layered", + "layers": { + "infrastructure": infrastructure_status, + "business": business_status, + "presentation": presentation_status + }, + "redis_subscriber": subscriber_status + } + + except Exception as e: + logger.error(f"Status check failed: {e}") + raise HTTPException(status_code=500, detail="Internal Server Error") + +@app.get("/system/cleanup", summary="Get cleanup service status") +async def get_cleanup_status(): + """Get data cleanup service status and statistics""" + try: + # Get cleanup service status + cleanup_running = cleanup_service.is_cleanup_running() + + # Get storage statistics + storage_stats = await cleanup_service.get_storage_statistics() + + # Get retention policy info + retention_info = await cleanup_service.get_data_retention_info() + + return { + "cleanup_service_running": cleanup_running, + "storage_statistics": storage_stats, + "retention_policies": retention_info + } + + except Exception as e: + logger.error(f"Error getting cleanup status: {e}") + raise HTTPException(status_code=500, detail="Internal Server Error") + +@app.post("/system/cleanup", summary="Run manual cleanup") +async def run_manual_cleanup(): + """Manually trigger data cleanup process""" + try: + cleanup_results = await cleanup_service.cleanup_old_data() + + return { + "message": "Manual cleanup completed", + "results": cleanup_results + } + + except Exception as e: + logger.error(f"Error running manual cleanup: {e}") + raise HTTPException(status_code=500, detail="Internal Server Error") + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/microservices/DEPLOYMENT_GUIDE.md b/microservices/DEPLOYMENT_GUIDE.md new file mode 100644 index 0000000..b5b2575 --- /dev/null +++ b/microservices/DEPLOYMENT_GUIDE.md @@ -0,0 +1,422 @@ +# Energy Management Microservices Deployment Guide + +This guide provides comprehensive instructions for deploying and managing the Energy Management microservices architecture based on the tiocps/iot-building-monitoring system. 
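+Before following the Quick Start below, it can help to confirm the host actually meets the listed prerequisites. This is only a minimal sanity check (it assumes the Docker CLI, and either the Compose plugin or standalone docker-compose, are already on your PATH); the commands read versions and list containers, nothing more:
+
+```bash
+# Compare against the minimums in the Prerequisites section (Docker 20.0+, Compose 2.0+)
+docker --version
+docker compose version || docker-compose --version
+
+# Confirm the Docker daemon is reachable before running ./deploy.sh deploy
+docker ps
+```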
+ +## 🏗️ Architecture Overview + +The system consists of 6 independent microservices coordinated by an API Gateway: + +``` +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ Client Apps │ │ Web Dashboard │ │ Mobile App │ +└─────────────────┘ └─────────────────┘ └─────────────────┘ + │ │ │ + └───────────────────────┼───────────────────────┘ + │ + ┌─────────────────────────────────────────────────────┐ + │ API Gateway (Port 8000) │ + │ • Request routing │ + │ • Authentication │ + │ • Load balancing │ + │ • Rate limiting │ + └─────────────────────────────────────────────────────┘ + │ + ┌───────────────────────────┼───────────────────────────┐ + │ │ │ +┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ +│ Token │ │ Battery │ │ Demand │ │ P2P │ │Forecast │ │ IoT │ +│Service │ │Service │ │Response │ │Trading │ │Service │ │Control │ +│ 8001 │ │ 8002 │ │ 8003 │ │ 8004 │ │ 8005 │ │ 8006 │ +└─────────┘ └─────────┘ └─────────┘ └─────────┘ └─────────┘ └─────────┘ + │ │ │ │ │ │ + └────────────┼────────────┼────────────┼────────────┼────────────┘ + │ │ │ │ + ┌─────────────────────────────────────────────────────────────────┐ + │ Shared Infrastructure │ + │ ┌─────────────┐ ┌─────────────┐ │ + │ │ MongoDB │ │ Redis │ │ + │ │ :27017 │ │ :6379 │ │ + │ │ • Data │ │ • Caching │ │ + │ │ • Metadata │ │ • Events │ │ + │ └─────────────┘ └─────────────┘ │ + └─────────────────────────────────────────────────────────────────┘ +``` + +## 🚀 Quick Start + +### Prerequisites +- Docker 20.0+ +- Docker Compose 2.0+ +- 8GB RAM minimum +- 10GB free disk space + +### 1. Deploy the Complete System +```bash +cd microservices/ +./deploy.sh deploy +``` + +This command will: +- ✅ Check dependencies +- ✅ Set up environment +- ✅ Build all services +- ✅ Start infrastructure (MongoDB, Redis) +- ✅ Start all microservices +- ✅ Configure networking +- ✅ Run health checks + +### 2. Verify Deployment +```bash +./deploy.sh status +``` + +Expected output: +``` +[SUCCESS] api-gateway is healthy +[SUCCESS] token-service is healthy +[SUCCESS] battery-service is healthy +[SUCCESS] demand-response-service is healthy +[SUCCESS] p2p-trading-service is healthy +[SUCCESS] forecasting-service is healthy +[SUCCESS] iot-control-service is healthy +``` + +### 3. 
Access the System +- **API Gateway**: http://localhost:8000 +- **System Health**: http://localhost:8000/health +- **Service Status**: http://localhost:8000/services/status +- **System Overview**: http://localhost:8000/api/v1/overview + +## 📋 Service Details + +### 🔐 Token Service (Port 8001) +**Purpose**: JWT authentication and authorization +**Database**: `energy_dashboard_tokens` + +**Key Endpoints**: +``` +# Generate token +curl -X POST "http://localhost:8001/tokens/generate" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "admin_user", + "list_of_resources": ["batteries", "demand_response", "p2p", "forecasting", "iot"], + "data_aggregation": true, + "time_aggregation": true, + "exp_hours": 24 + }' + +# Validate token +curl -X POST "http://localhost:8001/tokens/validate" \ + -H "Content-Type: application/json" \ + -d '{"token": "your_jwt_token_here"}' +``` + +### 🔋 Battery Service (Port 8002) +**Purpose**: Energy storage management and optimization +**Database**: `energy_dashboard_batteries` + +**Key Features**: +- Battery monitoring and status tracking +- Charging/discharging control +- Health monitoring and maintenance alerts +- Energy storage optimization +- Performance analytics + +**Example Usage**: +```bash +# Get all batteries +curl "http://localhost:8002/batteries" \ + -H "Authorization: Bearer YOUR_TOKEN" + +# Charge a battery +curl -X POST "http://localhost:8002/batteries/BATT001/charge" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "power_kw": 50.0, + "duration_minutes": 120 + }' + +# Get battery analytics +curl "http://localhost:8002/batteries/analytics/summary" \ + -H "Authorization: Bearer YOUR_TOKEN" +``` + +### ⚡ Demand Response Service (Port 8003) +**Purpose**: Grid interaction and load management +**Database**: `energy_dashboard_demand_response` + +**Key Features**: +- Demand response event management +- Load reduction coordination +- Flexibility forecasting +- Auto-response configuration +- Performance analytics + +**Example Usage**: +```bash +# Send demand response invitation +curl -X POST "http://localhost:8003/invitations/send" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "event_time": "2025-01-10T14:00:00Z", + "load_kwh": 100, + "load_percentage": 15, + "duration_minutes": 60, + "iots": ["DEVICE001", "DEVICE002"] + }' + +# Get current flexibility +curl "http://localhost:8003/flexibility/current" \ + -H "Authorization: Bearer YOUR_TOKEN" +``` + +### 🤝 P2P Trading Service (Port 8004) +**Purpose**: Peer-to-peer energy marketplace +**Database**: `energy_dashboard_p2p` + +**Key Features**: +- Energy trading marketplace +- Bid/ask management +- Transaction processing +- Price optimization +- Market analytics + +### 📊 Forecasting Service (Port 8005) +**Purpose**: ML-based energy forecasting +**Database**: `energy_dashboard_forecasting` + +**Key Features**: +- Consumption forecasting +- Generation forecasting +- Flexibility forecasting +- Historical data analysis +- Model training and optimization + +### 🏠 IoT Control Service (Port 8006) +**Purpose**: IoT device management and control +**Database**: `energy_dashboard_iot` + +**Key Features**: +- Device registration and management +- Remote device control +- Automation rules +- Device status monitoring +- Integration with other services + +## 🛠️ Management Commands + +### Service Management +```bash +# Start all services +./deploy.sh start + +# Stop all services +./deploy.sh stop + +# Restart all services 
+./deploy.sh restart + +# View service status +./deploy.sh status +``` + +### Logs and Debugging +```bash +# View all logs +./deploy.sh logs + +# View specific service logs +./deploy.sh logs battery-service +./deploy.sh logs api-gateway + +# Follow logs in real-time +docker-compose logs -f token-service +``` + +### Scaling Services +```bash +# Scale a specific service +docker-compose up -d --scale battery-service=3 + +# Scale multiple services +docker-compose up -d \ + --scale battery-service=2 \ + --scale demand-response-service=2 +``` + +## 🔧 Configuration + +### Environment Variables +Each service can be configured using environment variables: + +**Common Variables**: +- `MONGO_URL`: MongoDB connection string +- `REDIS_URL`: Redis connection string +- `LOG_LEVEL`: Logging level (DEBUG, INFO, WARNING, ERROR) + +**Service-Specific Variables**: +- `JWT_SECRET_KEY`: Token service secret key +- `TOKEN_SERVICE_URL`: API Gateway token service URL +- `BATTERY_SERVICE_URL`: Battery service URL for IoT control + +### Database Configuration +MongoDB databases are automatically created: +- `energy_dashboard_tokens`: Token management +- `energy_dashboard_batteries`: Battery data +- `energy_dashboard_demand_response`: DR events +- `energy_dashboard_p2p`: P2P transactions +- `energy_dashboard_forecasting`: Forecasting data +- `energy_dashboard_iot`: IoT device data + +## 🔐 Security + +### Authentication Flow +1. Client requests token from Token Service +2. Token Service validates credentials and issues JWT +3. Client includes JWT in Authorization header +4. API Gateway validates token with Token Service +5. Request forwarded to target microservice + +### Token Permissions +Tokens include resource-based permissions: +```json +{ + "name": "user_name", + "list_of_resources": ["batteries", "demand_response"], + "data_aggregation": true, + "time_aggregation": false, + "embargo": 0, + "exp": 1736524800 +} +``` + +## 📊 Monitoring + +### Health Checks +All services provide health endpoints: +```bash +# API Gateway health (includes all services) +curl http://localhost:8000/health + +# Individual service health +curl http://localhost:8001/health # Token Service +curl http://localhost:8002/health # Battery Service +curl http://localhost:8003/health # Demand Response Service +``` + +### Metrics and Analytics +- **Gateway Stats**: Request counts, success rates, uptime +- **Battery Analytics**: Energy flows, efficiency, health +- **DR Performance**: Event success rates, load reduction +- **P2P Metrics**: Trading volumes, prices, participants + +## 🚨 Troubleshooting + +### Common Issues + +**Services won't start**: +```bash +# Check Docker status +docker ps + +# Check logs +./deploy.sh logs + +# Restart problematic service +docker-compose restart battery-service +``` + +**Database connection issues**: +```bash +# Check MongoDB status +docker-compose logs mongodb + +# Restart database +docker-compose restart mongodb + +# Wait for services to reconnect (30 seconds) +``` + +**Authentication failures**: +```bash +# Check token service +curl http://localhost:8001/health + +# Verify token generation +curl -X POST "http://localhost:8001/tokens/generate" \ + -H "Content-Type: application/json" \ + -d '{"name": "test", "list_of_resources": ["test"]}' +``` + +### Performance Optimization +- Increase service replicas for high load +- Monitor memory usage and adjust limits +- Use Redis for caching frequently accessed data +- Implement database indexes for query optimization + +## 🔄 Updates and Maintenance + +### Service Updates 
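+Updates can be rolled out one service at a time: rebuilding a single image and recreating only that container leaves the rest of the stack running, as the commands below show.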
+```bash +# Update specific service +docker-compose build battery-service +docker-compose up -d battery-service + +# Update all services +./deploy.sh build +./deploy.sh restart +``` + +### Database Maintenance +```bash +# Backup databases +docker exec energy-mongodb mongodump --out /data/backup + +# Restore databases +docker exec energy-mongodb mongorestore /data/backup +``` + +### Clean Deployment +```bash +# Complete system cleanup +./deploy.sh cleanup + +# Fresh deployment +./deploy.sh deploy +``` + +## 📈 Scaling and Production + +### Production Considerations +1. **Security**: Change default passwords and secrets +2. **SSL/TLS**: Configure HTTPS with proper certificates +3. **Monitoring**: Set up Prometheus and Grafana +4. **Logging**: Configure centralized logging +5. **Backup**: Implement automated database backups +6. **Resource Limits**: Set appropriate CPU and memory limits + +### Kubernetes Deployment +The microservices can be deployed to Kubernetes: +```bash +# Generate Kubernetes manifests +kompose convert + +# Deploy to Kubernetes +kubectl apply -f kubernetes/ +``` + +## 🆘 Support + +### Documentation +- API documentation: http://localhost:8000/docs +- Service-specific docs: http://localhost:800X/docs (where X = service port) + +### Logs Location +- Container logs: `docker-compose logs [service]` +- Application logs: Check service-specific log files +- Gateway logs: Include request routing and authentication + +This microservices implementation provides a robust, scalable foundation for energy management systems with independent deployability, comprehensive monitoring, and production-ready features. diff --git a/microservices/README.md b/microservices/README.md new file mode 100644 index 0000000..0f72304 --- /dev/null +++ b/microservices/README.md @@ -0,0 +1,97 @@ +# Energy Management Microservices Architecture + +This directory contains independent microservices based on the tiocps/iot-building-monitoring system, redesigned for modular deployment and scalability. + +## Services Overview + +### 1. **Token Service** (`token-service/`) +- JWT token generation, validation, and management +- Resource-based access control +- Authentication service for all other services +- **Port**: 8001 + +### 2. **Battery Management Service** (`battery-service/`) +- Battery monitoring, charging, and discharging +- Energy storage optimization +- Battery health and state tracking +- **Port**: 8002 + +### 3. **Demand Response Service** (`demand-response-service/`) +- Grid interaction and demand response events +- Load shifting coordination +- Event scheduling and management +- **Port**: 8003 + +### 4. **P2P Energy Trading Service** (`p2p-trading-service/`) +- Peer-to-peer energy marketplace +- Transaction management and pricing +- Energy trading optimization +- **Port**: 8004 + +### 5. **Forecasting Service** (`forecasting-service/`) +- ML-based consumption and generation forecasting +- Historical data analysis +- Predictive analytics for optimization +- **Port**: 8005 + +### 6. **IoT Control Service** (`iot-control-service/`) +- IoT device management and control +- Device instructions and automation +- Real-time device monitoring +- **Port**: 8006 + +### 7. 
**API Gateway** (`api-gateway/`) +- Central entry point for all services +- Request routing and load balancing +- Authentication and rate limiting +- **Port**: 8000 + +## Architecture Principles + +- **Independent Deployment**: Each service can be deployed, scaled, and updated independently +- **Database per Service**: Each microservice has its own database/collection +- **Event-Driven Communication**: Services communicate via Redis pub/sub for real-time events +- **REST APIs**: Synchronous communication between services via REST +- **Containerized**: Each service runs in its own Docker container + +## Communication Patterns + +1. **API Gateway → Services**: HTTP REST calls +2. **Inter-Service Communication**: HTTP REST + Redis pub/sub for events +3. **Real-time Updates**: Redis channels for WebSocket broadcasting +4. **Data Persistence**: MongoDB with service-specific collections + +## Deployment + +Each service includes: +- `main.py` - FastAPI application +- `models.py` - Pydantic models +- `database.py` - Database connection +- `requirements.txt` - Dependencies +- `Dockerfile` - Container configuration +- `docker-compose.yml` - Service orchestration + +## Getting Started + +```bash +# Start all services +docker-compose up -d + +# Start individual service +cd token-service && python main.py + +# API Gateway (main entry point) +curl http://localhost:8000/health +``` + +## Service Dependencies + +``` +API Gateway (8000) +├── Token Service (8001) - Authentication +├── Battery Service (8002) +├── Demand Response Service (8003) +├── P2P Trading Service (8004) +├── Forecasting Service (8005) +└── IoT Control Service (8006) +``` \ No newline at end of file diff --git a/microservices/api-gateway/Dockerfile b/microservices/api-gateway/Dockerfile new file mode 100644 index 0000000..2a3ebec --- /dev/null +++ b/microservices/api-gateway/Dockerfile @@ -0,0 +1,26 @@ +FROM python:3.9-slim + +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + gcc \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements and install Python dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . . 
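+# (the gateway source copied above: main.py, models.py, service_registry.py, load_balancer.py, auth_middleware.py)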
+ +# Expose port +EXPOSE 8000 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:8000/health || exit 1 + +# Run the application +CMD ["python", "main.py"] \ No newline at end of file diff --git a/microservices/api-gateway/auth_middleware.py b/microservices/api-gateway/auth_middleware.py new file mode 100644 index 0000000..f91edf4 --- /dev/null +++ b/microservices/api-gateway/auth_middleware.py @@ -0,0 +1,89 @@ +""" +Authentication middleware for API Gateway +""" + +import aiohttp +from fastapi import HTTPException, Request +from typing import Optional, Dict, Any +import logging + +logger = logging.getLogger(__name__) + +class AuthMiddleware: + """Authentication middleware for validating tokens""" + + def __init__(self, token_service_url: str = "http://localhost:8001"): + self.token_service_url = token_service_url + + async def verify_token(self, request: Request) -> Optional[Dict[str, Any]]: + """ + Verify authentication token from request headers + Returns token payload if valid, raises HTTPException if invalid + """ + # Extract token from Authorization header + auth_header = request.headers.get("Authorization") + if not auth_header: + raise HTTPException(status_code=401, detail="Authorization header required") + + if not auth_header.startswith("Bearer "): + raise HTTPException(status_code=401, detail="Bearer token required") + + token = auth_header[7:] # Remove "Bearer " prefix + + try: + # Validate token with token service + async with aiohttp.ClientSession() as session: + async with session.post( + f"{self.token_service_url}/tokens/validate", + json={"token": token}, + timeout=aiohttp.ClientTimeout(total=5) + ) as response: + + if response.status != 200: + raise HTTPException(status_code=401, detail="Token validation failed") + + token_data = await response.json() + + if not token_data.get("valid"): + error_msg = token_data.get("error", "Invalid token") + raise HTTPException(status_code=401, detail=error_msg) + + # Token is valid, return decoded payload + return token_data.get("decoded") + + except aiohttp.ClientError as e: + logger.error(f"Token service connection error: {e}") + raise HTTPException(status_code=503, detail="Authentication service unavailable") + except HTTPException: + raise + except Exception as e: + logger.error(f"Token verification error: {e}") + raise HTTPException(status_code=500, detail="Authentication error") + + async def check_permissions(self, token_payload: Dict[str, Any], required_resources: list) -> bool: + """ + Check if token has required permissions for specific resources + """ + if not token_payload: + return False + + # Get list of resources the token has access to + token_resources = token_payload.get("list_of_resources", []) + + # Check if token has access to all required resources + for resource in required_resources: + if resource not in token_resources: + return False + + return True + + def extract_user_info(self, token_payload: Dict[str, Any]) -> Dict[str, Any]: + """Extract user information from token payload""" + return { + "name": token_payload.get("name"), + "resources": token_payload.get("list_of_resources", []), + "data_aggregation": token_payload.get("data_aggregation", False), + "time_aggregation": token_payload.get("time_aggregation", False), + "embargo": token_payload.get("embargo", 0), + "expires_at": token_payload.get("exp") + } \ No newline at end of file diff --git a/microservices/api-gateway/load_balancer.py b/microservices/api-gateway/load_balancer.py new 
file mode 100644 index 0000000..f9d267d --- /dev/null +++ b/microservices/api-gateway/load_balancer.py @@ -0,0 +1,124 @@ +""" +Load balancer for distributing requests across service instances +""" + +import random +from typing import List, Dict, Optional +import logging + +logger = logging.getLogger(__name__) + +class LoadBalancer: + """Simple load balancer for microservice requests""" + + def __init__(self): + # In a real implementation, this would track multiple instances per service + self.service_instances: Dict[str, List[str]] = {} + self.current_index: Dict[str, int] = {} + + def register_service_instance(self, service_name: str, instance_url: str): + """Register a new service instance""" + if service_name not in self.service_instances: + self.service_instances[service_name] = [] + self.current_index[service_name] = 0 + + if instance_url not in self.service_instances[service_name]: + self.service_instances[service_name].append(instance_url) + logger.info(f"Registered instance {instance_url} for service {service_name}") + + def unregister_service_instance(self, service_name: str, instance_url: str): + """Unregister a service instance""" + if service_name in self.service_instances: + try: + self.service_instances[service_name].remove(instance_url) + logger.info(f"Unregistered instance {instance_url} for service {service_name}") + + # Reset index if it's out of bounds + if self.current_index[service_name] >= len(self.service_instances[service_name]): + self.current_index[service_name] = 0 + + except ValueError: + logger.warning(f"Instance {instance_url} not found for service {service_name}") + + async def get_service_url(self, service_name: str, strategy: str = "single") -> Optional[str]: + """ + Get a service URL using the specified load balancing strategy + + Strategies: + - single: Single instance (default for this simple implementation) + - round_robin: Round-robin across instances + - random: Random selection + """ + # For this microservice setup, we typically have one instance per service + # In a production environment, you'd have multiple instances + + if strategy == "single": + # Default behavior - get the service URL from service registry + from service_registry import ServiceRegistry + service_registry = ServiceRegistry() + return await service_registry.get_service_url(service_name) + + elif strategy == "round_robin": + return await self._round_robin_select(service_name) + + elif strategy == "random": + return await self._random_select(service_name) + + else: + logger.error(f"Unknown load balancing strategy: {strategy}") + return None + + async def _round_robin_select(self, service_name: str) -> Optional[str]: + """Select service instance using round-robin""" + instances = self.service_instances.get(service_name, []) + if not instances: + # Fall back to service registry + from service_registry import ServiceRegistry + service_registry = ServiceRegistry() + return await service_registry.get_service_url(service_name) + + # Round-robin selection + current_idx = self.current_index[service_name] + selected_instance = instances[current_idx] + + # Update index for next request + self.current_index[service_name] = (current_idx + 1) % len(instances) + + logger.debug(f"Round-robin selected {selected_instance} for {service_name}") + return selected_instance + + async def _random_select(self, service_name: str) -> Optional[str]: + """Select service instance randomly""" + instances = self.service_instances.get(service_name, []) + if not instances: + # Fall back to service registry + from 
service_registry import ServiceRegistry + service_registry = ServiceRegistry() + return await service_registry.get_service_url(service_name) + + selected_instance = random.choice(instances) + logger.debug(f"Random selected {selected_instance} for {service_name}") + return selected_instance + + def get_service_instances(self, service_name: str) -> List[str]: + """Get all registered instances for a service""" + return self.service_instances.get(service_name, []) + + def get_instance_count(self, service_name: str) -> int: + """Get number of registered instances for a service""" + return len(self.service_instances.get(service_name, [])) + + def get_all_services(self) -> Dict[str, List[str]]: + """Get all services and their instances""" + return self.service_instances.copy() + + def health_check_failed(self, service_name: str, instance_url: str): + """Handle health check failure for a service instance""" + logger.warning(f"Health check failed for {instance_url} ({service_name})") + # In a production system, you might temporarily remove unhealthy instances + # For now, we just log the failure + + def health_check_recovered(self, service_name: str, instance_url: str): + """Handle health check recovery for a service instance""" + logger.info(f"Health check recovered for {instance_url} ({service_name})") + # Re-register the instance if it was temporarily removed \ No newline at end of file diff --git a/microservices/api-gateway/main.py b/microservices/api-gateway/main.py new file mode 100644 index 0000000..548dc6a --- /dev/null +++ b/microservices/api-gateway/main.py @@ -0,0 +1,352 @@ +""" +API Gateway for Energy Management Microservices +Central entry point that routes requests to appropriate microservices. +Port: 8000 +""" + +import asyncio +import aiohttp +from datetime import datetime +from fastapi import FastAPI, HTTPException, Depends, Request, Response +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import JSONResponse +from contextlib import asynccontextmanager +import logging +import json +from typing import Dict, Any, Optional +import os + +from models import ServiceConfig, HealthResponse, GatewayStats +from service_registry import ServiceRegistry +from load_balancer import LoadBalancer +from auth_middleware import AuthMiddleware + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +@asynccontextmanager +async def lifespan(app: FastAPI): + """Application lifespan manager""" + logger.info("API Gateway starting up...") + + # Initialize service registry + await service_registry.initialize() + + # Start health check task + asyncio.create_task(health_check_task()) + + logger.info("API Gateway startup complete") + + yield + + logger.info("API Gateway shutting down...") + await service_registry.close() + logger.info("API Gateway shutdown complete") + +app = FastAPI( + title="Energy Management API Gateway", + description="Central API gateway for energy management microservices", + version="1.0.0", + lifespan=lifespan +) + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# Service registry and load balancer +service_registry = ServiceRegistry() +load_balancer = LoadBalancer() +auth_middleware = AuthMiddleware() + +# Service configuration +SERVICES = { + "token-service": ServiceConfig( + name="token-service", + base_url="http://localhost:8001", + health_endpoint="/health", + auth_required=False + ), + "battery-service": 
ServiceConfig( + name="battery-service", + base_url="http://localhost:8002", + health_endpoint="/health", + auth_required=True + ), + "demand-response-service": ServiceConfig( + name="demand-response-service", + base_url="http://localhost:8003", + health_endpoint="/health", + auth_required=True + ), + "p2p-trading-service": ServiceConfig( + name="p2p-trading-service", + base_url="http://localhost:8004", + health_endpoint="/health", + auth_required=True + ), + "forecasting-service": ServiceConfig( + name="forecasting-service", + base_url="http://localhost:8005", + health_endpoint="/health", + auth_required=True + ), + "iot-control-service": ServiceConfig( + name="iot-control-service", + base_url="http://localhost:8006", + health_endpoint="/health", + auth_required=True + ) +} + +# Request statistics +request_stats = { + "total_requests": 0, + "successful_requests": 0, + "failed_requests": 0, + "service_requests": {service: 0 for service in SERVICES.keys()}, + "start_time": datetime.utcnow() +} + +@app.get("/health", response_model=HealthResponse) +async def gateway_health_check(): + """Gateway health check endpoint""" + try: + # Check all services + service_health = await service_registry.get_all_service_health() + + healthy_services = sum(1 for status in service_health.values() if status.get("status") == "healthy") + total_services = len(SERVICES) + + overall_status = "healthy" if healthy_services == total_services else "degraded" + + return HealthResponse( + service="api-gateway", + status=overall_status, + timestamp=datetime.utcnow(), + version="1.0.0", + services=service_health, + healthy_services=healthy_services, + total_services=total_services + ) + except Exception as e: + logger.error(f"Gateway health check failed: {e}") + raise HTTPException(status_code=503, detail="Service Unavailable") + +@app.get("/services/status") +async def get_services_status(): + """Get status of all registered services""" + try: + service_health = await service_registry.get_all_service_health() + return { + "services": service_health, + "timestamp": datetime.utcnow().isoformat(), + "total_services": len(SERVICES), + "healthy_services": sum(1 for status in service_health.values() if status.get("status") == "healthy") + } + except Exception as e: + logger.error(f"Error getting services status: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/stats", response_model=GatewayStats) +async def get_gateway_stats(): + """Get API gateway statistics""" + uptime = (datetime.utcnow() - request_stats["start_time"]).total_seconds() + + return GatewayStats( + total_requests=request_stats["total_requests"], + successful_requests=request_stats["successful_requests"], + failed_requests=request_stats["failed_requests"], + success_rate=round((request_stats["successful_requests"] / max(request_stats["total_requests"], 1)) * 100, 2), + uptime_seconds=uptime, + service_requests=request_stats["service_requests"], + timestamp=datetime.utcnow() + ) + +# Token Service Routes +@app.api_route("/api/v1/tokens/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) +async def token_service_proxy(request: Request, path: str): + """Proxy requests to token service""" + return await proxy_request(request, "token-service", f"/{path}") + +# Battery Service Routes +@app.api_route("/api/v1/batteries/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) +async def battery_service_proxy(request: Request, path: str): + """Proxy requests to battery service""" + return await proxy_request(request, 
"battery-service", f"/{path}") + +# Demand Response Service Routes +@app.api_route("/api/v1/demand-response/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) +async def demand_response_service_proxy(request: Request, path: str): + """Proxy requests to demand response service""" + return await proxy_request(request, "demand-response-service", f"/{path}") + +# P2P Trading Service Routes +@app.api_route("/api/v1/p2p/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) +async def p2p_trading_service_proxy(request: Request, path: str): + """Proxy requests to P2P trading service""" + return await proxy_request(request, "p2p-trading-service", f"/{path}") + +# Forecasting Service Routes +@app.api_route("/api/v1/forecast/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) +async def forecasting_service_proxy(request: Request, path: str): + """Proxy requests to forecasting service""" + return await proxy_request(request, "forecasting-service", f"/{path}") + +# IoT Control Service Routes +@app.api_route("/api/v1/iot/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) +async def iot_control_service_proxy(request: Request, path: str): + """Proxy requests to IoT control service""" + return await proxy_request(request, "iot-control-service", f"/{path}") + +async def proxy_request(request: Request, service_name: str, path: str): + """Generic request proxy function""" + try: + # Update request statistics + request_stats["total_requests"] += 1 + request_stats["service_requests"][service_name] += 1 + + # Get service configuration + service_config = SERVICES.get(service_name) + if not service_config: + raise HTTPException(status_code=404, detail=f"Service {service_name} not found") + + # Check authentication if required + if service_config.auth_required: + await auth_middleware.verify_token(request) + + # Get healthy service instance + service_url = await load_balancer.get_service_url(service_name) + + # Prepare request + url = f"{service_url}{path}" + method = request.method + headers = dict(request.headers) + + # Remove hop-by-hop headers + headers.pop("host", None) + headers.pop("content-length", None) + + # Get request body + body = None + if method in ["POST", "PUT", "PATCH"]: + body = await request.body() + + # Make request to service + async with aiohttp.ClientSession() as session: + async with session.request( + method=method, + url=url, + headers=headers, + data=body, + params=dict(request.query_params), + timeout=aiohttp.ClientTimeout(total=30) + ) as response: + + # Get response data + response_data = await response.read() + response_headers = dict(response.headers) + + # Remove hop-by-hop headers from response + response_headers.pop("transfer-encoding", None) + response_headers.pop("connection", None) + + # Update success statistics + if response.status < 400: + request_stats["successful_requests"] += 1 + else: + request_stats["failed_requests"] += 1 + + # Return response + return Response( + content=response_data, + status_code=response.status, + headers=response_headers, + media_type=response_headers.get("content-type") + ) + + except aiohttp.ClientError as e: + request_stats["failed_requests"] += 1 + logger.error(f"Service {service_name} connection error: {e}") + raise HTTPException(status_code=503, detail=f"Service {service_name} unavailable") + + except HTTPException: + request_stats["failed_requests"] += 1 + raise + + except Exception as e: + request_stats["failed_requests"] += 1 + logger.error(f"Proxy error for {service_name}: {e}") + raise HTTPException(status_code=500, 
detail="Internal gateway error") + +@app.get("/api/v1/overview") +async def get_system_overview(): + """Get comprehensive system overview from all services""" + try: + overview = {} + + # Get data from each service + for service_name in SERVICES.keys(): + try: + if await service_registry.is_service_healthy(service_name): + service_url = await load_balancer.get_service_url(service_name) + + async with aiohttp.ClientSession() as session: + # Try to get service-specific overview data + overview_endpoints = { + "battery-service": "/batteries", + "demand-response-service": "/flexibility/current", + "p2p-trading-service": "/market/status", + "forecasting-service": "/forecast/summary", + "iot-control-service": "/devices/summary" + } + + endpoint = overview_endpoints.get(service_name) + if endpoint: + async with session.get(f"{service_url}{endpoint}", timeout=aiohttp.ClientTimeout(total=5)) as response: + if response.status == 200: + data = await response.json() + overview[service_name] = data + else: + overview[service_name] = {"status": "error", "message": "Service returned error"} + else: + overview[service_name] = {"status": "available"} + + except Exception as e: + logger.warning(f"Could not get overview from {service_name}: {e}") + overview[service_name] = {"status": "unavailable", "error": str(e)} + + return { + "system_overview": overview, + "timestamp": datetime.utcnow().isoformat(), + "services_checked": len(SERVICES) + } + + except Exception as e: + logger.error(f"Error getting system overview: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +async def health_check_task(): + """Background task for periodic health checks""" + logger.info("Starting health check task") + + while True: + try: + await service_registry.update_all_service_health() + await asyncio.sleep(30) # Check every 30 seconds + + except Exception as e: + logger.error(f"Error in health check task: {e}") + await asyncio.sleep(60) + +# Initialize service registry with services +asyncio.create_task(service_registry.register_services(SERVICES)) + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/microservices/api-gateway/models.py b/microservices/api-gateway/models.py new file mode 100644 index 0000000..a8915c0 --- /dev/null +++ b/microservices/api-gateway/models.py @@ -0,0 +1,77 @@ +""" +Models for API Gateway +""" + +from pydantic import BaseModel, Field +from typing import Dict, Any, Optional, List +from datetime import datetime + +class ServiceConfig(BaseModel): + """Configuration for a microservice""" + name: str + base_url: str + health_endpoint: str = "/health" + auth_required: bool = True + timeout_seconds: int = 30 + retry_attempts: int = 3 + +class ServiceHealth(BaseModel): + """Health status of a service""" + service: str + status: str # healthy, unhealthy, unknown + response_time_ms: Optional[float] = None + last_check: datetime + error_message: Optional[str] = None + +class HealthResponse(BaseModel): + """Gateway health response""" + service: str + status: str + timestamp: datetime + version: str + services: Optional[Dict[str, Any]] = None + healthy_services: Optional[int] = None + total_services: Optional[int] = None + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() + } + +class GatewayStats(BaseModel): + """API Gateway statistics""" + total_requests: int + successful_requests: int + failed_requests: int + success_rate: float + uptime_seconds: float + service_requests: Dict[str, 
int] + timestamp: datetime + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() + } + +class AuthToken(BaseModel): + """Authentication token model""" + token: str + user_id: Optional[str] = None + permissions: List[str] = Field(default_factory=list) + +class ProxyRequest(BaseModel): + """Proxy request model""" + service: str + path: str + method: str + headers: Dict[str, str] + query_params: Dict[str, Any] + body: Optional[bytes] = None + +class ProxyResponse(BaseModel): + """Proxy response model""" + status_code: int + headers: Dict[str, str] + body: bytes + service: str + response_time_ms: float \ No newline at end of file diff --git a/microservices/api-gateway/requirements.txt b/microservices/api-gateway/requirements.txt new file mode 100644 index 0000000..a9ec60e --- /dev/null +++ b/microservices/api-gateway/requirements.txt @@ -0,0 +1,5 @@ +fastapi +uvicorn[standard] +aiohttp +python-dotenv +pydantic \ No newline at end of file diff --git a/microservices/api-gateway/service_registry.py b/microservices/api-gateway/service_registry.py new file mode 100644 index 0000000..7f9a0ad --- /dev/null +++ b/microservices/api-gateway/service_registry.py @@ -0,0 +1,194 @@ +""" +Service registry for managing microservice discovery and health monitoring +""" + +import aiohttp +import asyncio +from datetime import datetime +from typing import Dict, List, Optional +import logging + +from models import ServiceConfig, ServiceHealth + +logger = logging.getLogger(__name__) + +class ServiceRegistry: + """Service registry for microservice management""" + + def __init__(self): + self.services: Dict[str, ServiceConfig] = {} + self.service_health: Dict[str, ServiceHealth] = {} + self.session: Optional[aiohttp.ClientSession] = None + + async def initialize(self): + """Initialize the service registry""" + self.session = aiohttp.ClientSession( + timeout=aiohttp.ClientTimeout(total=10) + ) + logger.info("Service registry initialized") + + async def close(self): + """Close the service registry""" + if self.session: + await self.session.close() + logger.info("Service registry closed") + + async def register_services(self, services: Dict[str, ServiceConfig]): + """Register multiple services""" + self.services.update(services) + + # Initialize health status for all services + for service_name, config in services.items(): + self.service_health[service_name] = ServiceHealth( + service=service_name, + status="unknown", + last_check=datetime.utcnow() + ) + + logger.info(f"Registered {len(services)} services") + + # Perform initial health check + await self.update_all_service_health() + + async def register_service(self, service_config: ServiceConfig): + """Register a single service""" + self.services[service_config.name] = service_config + self.service_health[service_config.name] = ServiceHealth( + service=service_config.name, + status="unknown", + last_check=datetime.utcnow() + ) + + logger.info(f"Registered service: {service_config.name}") + + # Check health of the newly registered service + await self.check_service_health(service_config.name) + + async def unregister_service(self, service_name: str): + """Unregister a service""" + self.services.pop(service_name, None) + self.service_health.pop(service_name, None) + logger.info(f"Unregistered service: {service_name}") + + async def check_service_health(self, service_name: str) -> ServiceHealth: + """Check health of a specific service""" + service_config = self.services.get(service_name) + if not service_config: + logger.error(f"Service {service_name} 
not found in registry") + return ServiceHealth( + service=service_name, + status="unknown", + last_check=datetime.utcnow(), + error_message="Service not registered" + ) + + start_time = datetime.utcnow() + + try: + health_url = f"{service_config.base_url}{service_config.health_endpoint}" + + async with self.session.get(health_url) as response: + end_time = datetime.utcnow() + response_time = (end_time - start_time).total_seconds() * 1000 + + if response.status == 200: + health_data = await response.json() + status = "healthy" if health_data.get("status") in ["healthy", "ok"] else "unhealthy" + + health = ServiceHealth( + service=service_name, + status=status, + response_time_ms=response_time, + last_check=end_time + ) + else: + health = ServiceHealth( + service=service_name, + status="unhealthy", + response_time_ms=response_time, + last_check=end_time, + error_message=f"HTTP {response.status}" + ) + + except aiohttp.ClientError as e: + health = ServiceHealth( + service=service_name, + status="unhealthy", + last_check=datetime.utcnow(), + error_message=f"Connection error: {str(e)}" + ) + except Exception as e: + health = ServiceHealth( + service=service_name, + status="unhealthy", + last_check=datetime.utcnow(), + error_message=f"Health check failed: {str(e)}" + ) + + # Update health status + self.service_health[service_name] = health + + # Log health status changes + if health.status != "healthy": + logger.warning(f"Service {service_name} health check failed: {health.error_message}") + + return health + + async def update_all_service_health(self): + """Update health status for all registered services""" + health_checks = [ + self.check_service_health(service_name) + for service_name in self.services.keys() + ] + + if health_checks: + await asyncio.gather(*health_checks, return_exceptions=True) + + # Log summary + healthy_count = sum(1 for h in self.service_health.values() if h.status == "healthy") + total_count = len(self.services) + logger.info(f"Health check complete: {healthy_count}/{total_count} services healthy") + + async def get_service_health(self, service_name: str) -> Optional[ServiceHealth]: + """Get health status of a specific service""" + return self.service_health.get(service_name) + + async def get_all_service_health(self) -> Dict[str, Dict]: + """Get health status of all services""" + health_dict = {} + for service_name, health in self.service_health.items(): + health_dict[service_name] = { + "status": health.status, + "response_time_ms": health.response_time_ms, + "last_check": health.last_check.isoformat(), + "error_message": health.error_message + } + return health_dict + + async def is_service_healthy(self, service_name: str) -> bool: + """Check if a service is healthy""" + health = self.service_health.get(service_name) + return health is not None and health.status == "healthy" + + async def get_healthy_services(self) -> List[str]: + """Get list of healthy service names""" + return [ + service_name + for service_name, health in self.service_health.items() + if health.status == "healthy" + ] + + def get_service_config(self, service_name: str) -> Optional[ServiceConfig]: + """Get configuration for a specific service""" + return self.services.get(service_name) + + def get_all_services(self) -> Dict[str, ServiceConfig]: + """Get all registered services""" + return self.services.copy() + + async def get_service_url(self, service_name: str) -> Optional[str]: + """Get base URL for a healthy service""" + if await self.is_service_healthy(service_name): + service_config = 
self.services.get(service_name) + return service_config.base_url if service_config else None + return None \ No newline at end of file diff --git a/microservices/battery-service/Dockerfile b/microservices/battery-service/Dockerfile new file mode 100644 index 0000000..217731f --- /dev/null +++ b/microservices/battery-service/Dockerfile @@ -0,0 +1,26 @@ +FROM python:3.9-slim + +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + gcc \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements and install Python dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . . + +# Expose port +EXPOSE 8002 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:8002/health || exit 1 + +# Run the application +CMD ["python", "main.py"] \ No newline at end of file diff --git a/microservices/battery-service/battery_service.py b/microservices/battery-service/battery_service.py new file mode 100644 index 0000000..ac8f94d --- /dev/null +++ b/microservices/battery-service/battery_service.py @@ -0,0 +1,414 @@ +""" +Battery management service implementation +""" + +import asyncio +from datetime import datetime, timedelta +from typing import Dict, List, Optional, Any +from motor.motor_asyncio import AsyncIOMotorDatabase +import redis.asyncio as redis +import logging +import json + +from models import BatteryState, BatteryType, MaintenanceAlert + +logger = logging.getLogger(__name__) + +class BatteryService: + """Service for managing battery operations and monitoring""" + + def __init__(self, db: AsyncIOMotorDatabase, redis_client: redis.Redis): + self.db = db + self.redis = redis_client + self.batteries_collection = db.batteries + self.battery_history_collection = db.battery_history + self.maintenance_alerts_collection = db.maintenance_alerts + + async def get_batteries(self) -> List[Dict[str, Any]]: + """Get all registered batteries""" + cursor = self.batteries_collection.find({}) + batteries = [] + + async for battery in cursor: + battery["_id"] = str(battery["_id"]) + # Convert datetime fields to ISO format + for field in ["installed_date", "last_maintenance", "next_maintenance", "last_updated"]: + if field in battery and battery[field]: + battery[field] = battery[field].isoformat() + + batteries.append(battery) + + return batteries + + async def get_battery_status(self, battery_id: str) -> Optional[Dict[str, Any]]: + """Get current status of a specific battery""" + # First try to get from Redis cache + cached_status = await self.redis.get(f"battery:status:{battery_id}") + if cached_status: + return json.loads(cached_status) + + # Fall back to database + battery = await self.batteries_collection.find_one({"battery_id": battery_id}) + if battery: + battery["_id"] = str(battery["_id"]) + + # Convert datetime fields + for field in ["installed_date", "last_maintenance", "next_maintenance", "last_updated"]: + if field in battery and battery[field]: + battery[field] = battery[field].isoformat() + + # Cache the result + await self.redis.setex( + f"battery:status:{battery_id}", + 300, # 5 minutes TTL + json.dumps(battery, default=str) + ) + + return battery + + return None + + async def charge_battery(self, battery_id: str, power_kw: float, duration_minutes: Optional[int] = None) -> Dict[str, Any]: + """Initiate battery charging""" + battery = await self.get_battery_status(battery_id) + if not battery: + return {"success": False, 
"error": "Battery not found"} + + # Check if battery can accept charge + current_soc = battery.get("state_of_charge", 0) + max_charge_power = battery.get("max_charge_power_kw", 0) + + if current_soc >= 100: + return {"success": False, "error": "Battery is already fully charged"} + + if power_kw > max_charge_power: + return {"success": False, "error": f"Requested power ({power_kw} kW) exceeds maximum charge power ({max_charge_power} kW)"} + + # Update battery state + now = datetime.utcnow() + update_data = { + "state": BatteryState.CHARGING.value, + "current_power_kw": power_kw, + "last_updated": now + } + + if duration_minutes: + update_data["charging_until"] = now + timedelta(minutes=duration_minutes) + + await self.batteries_collection.update_one( + {"battery_id": battery_id}, + {"$set": update_data} + ) + + # Clear cache + await self.redis.delete(f"battery:status:{battery_id}") + + # Log the charging event + await self._log_battery_event(battery_id, "charging_started", { + "power_kw": power_kw, + "duration_minutes": duration_minutes + }) + + # Publish event to Redis for real-time updates + await self.redis.publish("battery_events", json.dumps({ + "event": "charging_started", + "battery_id": battery_id, + "power_kw": power_kw, + "timestamp": now.isoformat() + })) + + return { + "success": True, + "estimated_completion": (now + timedelta(minutes=duration_minutes)).isoformat() if duration_minutes else None + } + + async def discharge_battery(self, battery_id: str, power_kw: float, duration_minutes: Optional[int] = None) -> Dict[str, Any]: + """Initiate battery discharging""" + battery = await self.get_battery_status(battery_id) + if not battery: + return {"success": False, "error": "Battery not found"} + + # Check if battery can discharge + current_soc = battery.get("state_of_charge", 0) + max_discharge_power = battery.get("max_discharge_power_kw", 0) + + if current_soc <= 0: + return {"success": False, "error": "Battery is already empty"} + + if power_kw > max_discharge_power: + return {"success": False, "error": f"Requested power ({power_kw} kW) exceeds maximum discharge power ({max_discharge_power} kW)"} + + # Update battery state + now = datetime.utcnow() + update_data = { + "state": BatteryState.DISCHARGING.value, + "current_power_kw": -power_kw, # Negative for discharging + "last_updated": now + } + + if duration_minutes: + update_data["discharging_until"] = now + timedelta(minutes=duration_minutes) + + await self.batteries_collection.update_one( + {"battery_id": battery_id}, + {"$set": update_data} + ) + + # Clear cache + await self.redis.delete(f"battery:status:{battery_id}") + + # Log the discharging event + await self._log_battery_event(battery_id, "discharging_started", { + "power_kw": power_kw, + "duration_minutes": duration_minutes + }) + + # Publish event + await self.redis.publish("battery_events", json.dumps({ + "event": "discharging_started", + "battery_id": battery_id, + "power_kw": power_kw, + "timestamp": now.isoformat() + })) + + return { + "success": True, + "estimated_completion": (now + timedelta(minutes=duration_minutes)).isoformat() if duration_minutes else None + } + + async def optimize_battery(self, battery_id: str, target_soc: float) -> Dict[str, Any]: + """Optimize battery charging/discharging to reach target SOC""" + battery = await self.get_battery_status(battery_id) + if not battery: + return {"success": False, "error": "Battery not found"} + + current_soc = battery.get("state_of_charge", 0) + capacity_kwh = battery.get("capacity_kwh", 0) + + # Calculate 
energy needed + energy_difference_kwh = (target_soc - current_soc) / 100 * capacity_kwh + + if abs(energy_difference_kwh) < 0.1: # Within 0.1 kWh + return {"message": "Battery is already at target SOC", "action": "none"} + + if energy_difference_kwh > 0: + # Need to charge + max_power = battery.get("max_charge_power_kw", 0) + action = "charge" + else: + # Need to discharge + max_power = battery.get("max_discharge_power_kw", 0) + action = "discharge" + energy_difference_kwh = abs(energy_difference_kwh) + + # Calculate optimal power and duration + optimal_power = min(max_power, energy_difference_kwh * 2) # Conservative power level + duration_hours = energy_difference_kwh / optimal_power + duration_minutes = int(duration_hours * 60) + + # Execute the optimization + if action == "charge": + result = await self.charge_battery(battery_id, optimal_power, duration_minutes) + else: + result = await self.discharge_battery(battery_id, optimal_power, duration_minutes) + + return { + "action": action, + "power_kw": optimal_power, + "duration_minutes": duration_minutes, + "energy_difference_kwh": energy_difference_kwh, + "result": result + } + + async def get_battery_history(self, battery_id: str, hours: int = 24) -> List[Dict[str, Any]]: + """Get historical data for a battery""" + start_time = datetime.utcnow() - timedelta(hours=hours) + + cursor = self.battery_history_collection.find({ + "battery_id": battery_id, + "timestamp": {"$gte": start_time} + }).sort("timestamp", -1) + + history = [] + async for record in cursor: + record["_id"] = str(record["_id"]) + if "timestamp" in record: + record["timestamp"] = record["timestamp"].isoformat() + history.append(record) + + return history + + async def get_battery_analytics(self, hours: int = 24) -> Dict[str, Any]: + """Get system-wide battery analytics""" + start_time = datetime.utcnow() - timedelta(hours=hours) + + # Get all batteries + batteries = await self.get_batteries() + + total_capacity = sum(b.get("capacity_kwh", 0) for b in batteries) + total_stored = sum(b.get("stored_energy_kwh", 0) for b in batteries) + active_count = sum(1 for b in batteries if b.get("state") != "error") + + # Aggregate historical data + pipeline = [ + {"$match": {"timestamp": {"$gte": start_time}}}, + {"$group": { + "_id": None, + "total_energy_charged": {"$sum": {"$cond": [{"$gt": ["$power_kw", 0]}, {"$multiply": ["$power_kw", 0.5]}, 0]}}, # Approximate kWh + "total_energy_discharged": {"$sum": {"$cond": [{"$lt": ["$power_kw", 0]}, {"$multiply": [{"$abs": "$power_kw"}, 0.5]}, 0]}}, + "avg_efficiency": {"$avg": "$efficiency"} + }} + ] + + cursor = self.battery_history_collection.aggregate(pipeline) + analytics_data = await cursor.to_list(length=1) + + if analytics_data: + energy_data = analytics_data[0] + else: + energy_data = { + "total_energy_charged": 0, + "total_energy_discharged": 0, + "avg_efficiency": 0.95 + } + + # Calculate metrics + average_soc = sum(b.get("state_of_charge", 0) for b in batteries) / len(batteries) if batteries else 0 + average_health = sum(b.get("health_percentage", 100) for b in batteries) / len(batteries) if batteries else 100 + + return { + "total_batteries": len(batteries), + "active_batteries": active_count, + "total_capacity_kwh": total_capacity, + "total_stored_energy_kwh": total_stored, + "average_soc": round(average_soc, 2), + "total_energy_charged_kwh": round(energy_data["total_energy_charged"], 2), + "total_energy_discharged_kwh": round(energy_data["total_energy_discharged"], 2), + "net_energy_flow_kwh": 
round(energy_data["total_energy_charged"] - energy_data["total_energy_discharged"], 2), + "round_trip_efficiency": round(energy_data.get("avg_efficiency", 0.95) * 100, 2), + "capacity_utilization": round((total_stored / total_capacity * 100) if total_capacity > 0 else 0, 2), + "average_health": round(average_health, 2), + "batteries_needing_maintenance": sum(1 for b in batteries if b.get("health_percentage", 100) < 80) + } + + async def update_battery_status(self, battery_id: str): + """Update battery status with simulated or real data""" + # This would typically connect to actual battery management systems + # For now, we'll simulate some basic updates + + battery = await self.get_battery_status(battery_id) + if not battery: + return + + now = datetime.utcnow() + current_power = battery.get("current_power_kw", 0) + current_soc = battery.get("state_of_charge", 50) + capacity = battery.get("capacity_kwh", 100) + + # Simulate SOC changes based on power flow + if current_power != 0: + # Convert power to SOC change (simplified) + soc_change = (current_power * 0.5) / capacity * 100 # 0.5 hour interval + new_soc = max(0, min(100, current_soc + soc_change)) + + # Calculate stored energy + stored_energy = new_soc / 100 * capacity + + # Update database + await self.batteries_collection.update_one( + {"battery_id": battery_id}, + { + "$set": { + "state_of_charge": round(new_soc, 2), + "stored_energy_kwh": round(stored_energy, 2), + "last_updated": now + } + } + ) + + # Log historical data + await self.battery_history_collection.insert_one({ + "battery_id": battery_id, + "timestamp": now, + "state_of_charge": new_soc, + "power_kw": current_power, + "stored_energy_kwh": stored_energy, + "efficiency": battery.get("efficiency", 0.95) + }) + + # Clear cache + await self.redis.delete(f"battery:status:{battery_id}") + + async def check_maintenance_alerts(self): + """Check for batteries needing maintenance""" + batteries = await self.get_batteries() + + for battery in batteries: + alerts = [] + + # Check health + health = battery.get("health_percentage", 100) + if health < 70: + alerts.append({ + "alert_type": "health", + "severity": "critical", + "message": f"Battery health is critically low at {health}%", + "recommended_action": "Schedule immediate maintenance and consider replacement" + }) + elif health < 85: + alerts.append({ + "alert_type": "health", + "severity": "warning", + "message": f"Battery health is declining at {health}%", + "recommended_action": "Schedule maintenance inspection" + }) + + # Check cycles + cycles = battery.get("cycles_completed", 0) + max_cycles = battery.get("max_cycles", 5000) + if cycles > max_cycles * 0.9: + alerts.append({ + "alert_type": "cycles", + "severity": "warning", + "message": f"Battery has completed {cycles}/{max_cycles} cycles", + "recommended_action": "Plan for battery replacement" + }) + + # Check scheduled maintenance + next_maintenance = battery.get("next_maintenance") + if next_maintenance and datetime.fromisoformat(next_maintenance.replace('Z', '+00:00')) < datetime.utcnow(): + alerts.append({ + "alert_type": "scheduled", + "severity": "info", + "message": "Scheduled maintenance is due", + "recommended_action": "Perform scheduled maintenance procedures" + }) + + # Save alerts to database + for alert in alerts: + alert_doc = { + "battery_id": battery["battery_id"], + "timestamp": datetime.utcnow(), + **alert + } + + # Check if alert already exists to avoid duplicates + existing = await self.maintenance_alerts_collection.find_one({ + "battery_id": 
battery["battery_id"], + "alert_type": alert["alert_type"], + "severity": alert["severity"] + }) + + if not existing: + await self.maintenance_alerts_collection.insert_one(alert_doc) + + async def _log_battery_event(self, battery_id: str, event_type: str, data: Dict[str, Any]): + """Log battery events for auditing""" + event_doc = { + "battery_id": battery_id, + "event_type": event_type, + "timestamp": datetime.utcnow(), + "data": data + } + + await self.db.battery_events.insert_one(event_doc) \ No newline at end of file diff --git a/microservices/battery-service/database.py b/microservices/battery-service/database.py new file mode 100644 index 0000000..e349b29 --- /dev/null +++ b/microservices/battery-service/database.py @@ -0,0 +1,104 @@ +""" +Database connections for Battery Service +""" + +from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase +import redis.asyncio as redis +import logging +import os + +logger = logging.getLogger(__name__) + +# Database configuration +MONGO_URL = os.getenv("MONGO_URL", "mongodb://localhost:27017") +DATABASE_NAME = os.getenv("DATABASE_NAME", "energy_dashboard_batteries") +REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379") + +# Global database clients +_mongo_client: AsyncIOMotorClient = None +_database: AsyncIOMotorDatabase = None +_redis_client: redis.Redis = None + +async def connect_to_mongo(): + """Create MongoDB connection""" + global _mongo_client, _database + + try: + _mongo_client = AsyncIOMotorClient(MONGO_URL) + _database = _mongo_client[DATABASE_NAME] + + # Test connection + await _database.command("ping") + logger.info(f"Connected to MongoDB: {DATABASE_NAME}") + + # Create indexes + await create_indexes() + + except Exception as e: + logger.error(f"Failed to connect to MongoDB: {e}") + raise + +async def connect_to_redis(): + """Create Redis connection""" + global _redis_client + + try: + _redis_client = redis.from_url(REDIS_URL, decode_responses=True) + await _redis_client.ping() + logger.info("Connected to Redis") + + except Exception as e: + logger.error(f"Failed to connect to Redis: {e}") + raise + +async def close_mongo_connection(): + """Close MongoDB connection""" + global _mongo_client + + if _mongo_client: + _mongo_client.close() + logger.info("Disconnected from MongoDB") + +async def get_database() -> AsyncIOMotorDatabase: + """Get database instance""" + global _database + + if _database is None: + raise RuntimeError("Database not initialized. Call connect_to_mongo() first.") + + return _database + +async def get_redis() -> redis.Redis: + """Get Redis instance""" + global _redis_client + + if _redis_client is None: + raise RuntimeError("Redis not initialized. 
Call connect_to_redis() first.") + + return _redis_client + +async def create_indexes(): + """Create database indexes for performance""" + db = await get_database() + + # Indexes for batteries collection + await db.batteries.create_index("battery_id", unique=True) + await db.batteries.create_index("state") + await db.batteries.create_index("building") + await db.batteries.create_index("room") + await db.batteries.create_index("last_updated") + + # Indexes for battery_history collection + await db.battery_history.create_index([("battery_id", 1), ("timestamp", -1)]) + await db.battery_history.create_index("timestamp") + + # Indexes for maintenance_alerts collection + await db.maintenance_alerts.create_index([("battery_id", 1), ("alert_type", 1)]) + await db.maintenance_alerts.create_index("timestamp") + await db.maintenance_alerts.create_index("severity") + + # Indexes for battery_events collection + await db.battery_events.create_index([("battery_id", 1), ("timestamp", -1)]) + await db.battery_events.create_index("event_type") + + logger.info("Database indexes created") \ No newline at end of file diff --git a/microservices/battery-service/main.py b/microservices/battery-service/main.py new file mode 100644 index 0000000..3b67dab --- /dev/null +++ b/microservices/battery-service/main.py @@ -0,0 +1,262 @@ +""" +Battery Management Microservice +Handles battery monitoring, charging, and energy storage optimization. +Port: 8002 +""" + +import asyncio +from datetime import datetime, timedelta +from fastapi import FastAPI, HTTPException, Depends, BackgroundTasks +from fastapi.middleware.cors import CORSMiddleware +from contextlib import asynccontextmanager +import logging +from typing import List, Optional + +from models import ( + BatteryStatus, BatteryCommand, BatteryResponse, BatteryListResponse, + ChargingRequest, HistoricalDataRequest, HealthResponse +) +from database import connect_to_mongo, close_mongo_connection, get_database, connect_to_redis, get_redis +from battery_service import BatteryService + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +@asynccontextmanager +async def lifespan(app: FastAPI): + """Application lifespan manager""" + logger.info("Battery Service starting up...") + await connect_to_mongo() + await connect_to_redis() + + # Start background tasks + asyncio.create_task(battery_monitoring_task()) + + logger.info("Battery Service startup complete") + + yield + + logger.info("Battery Service shutting down...") + await close_mongo_connection() + logger.info("Battery Service shutdown complete") + +app = FastAPI( + title="Battery Management Service", + description="Energy storage monitoring and control microservice", + version="1.0.0", + lifespan=lifespan +) + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# Dependencies +async def get_db(): + return await get_database() + +async def get_battery_service(db=Depends(get_db)): + redis = await get_redis() + return BatteryService(db, redis) + +@app.get("/health", response_model=HealthResponse) +async def health_check(): + """Health check endpoint""" + try: + db = await get_database() + await db.command("ping") + + redis = await get_redis() + await redis.ping() + + return HealthResponse( + service="battery-service", + status="healthy", + timestamp=datetime.utcnow(), + version="1.0.0" + ) + except Exception as e: + logger.error(f"Health check failed: {e}") + raise 
HTTPException(status_code=503, detail="Service Unavailable") + +@app.get("/batteries", response_model=BatteryListResponse) +async def get_batteries(service: BatteryService = Depends(get_battery_service)): + """Get all registered batteries""" + try: + batteries = await service.get_batteries() + return BatteryListResponse( + batteries=batteries, + count=len(batteries), + total_capacity=sum(b.get("capacity", 0) for b in batteries), + total_stored_energy=sum(b.get("stored_energy", 0) for b in batteries) + ) + except Exception as e: + logger.error(f"Error getting batteries: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/batteries/{battery_id}", response_model=BatteryResponse) +async def get_battery(battery_id: str, service: BatteryService = Depends(get_battery_service)): + """Get specific battery status""" + try: + battery = await service.get_battery_status(battery_id) + if not battery: + raise HTTPException(status_code=404, detail="Battery not found") + + return BatteryResponse( + battery_id=battery_id, + status=battery + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error getting battery {battery_id}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/batteries/{battery_id}/charge") +async def charge_battery( + battery_id: str, + request: ChargingRequest, + service: BatteryService = Depends(get_battery_service) +): + """Charge a battery with specified power""" + try: + result = await service.charge_battery(battery_id, request.power_kw, request.duration_minutes) + + if not result.get("success"): + raise HTTPException(status_code=400, detail=result.get("error", "Charging failed")) + + return { + "message": "Charging initiated successfully", + "battery_id": battery_id, + "power_kw": request.power_kw, + "duration_minutes": request.duration_minutes, + "estimated_completion": result.get("estimated_completion") + } + except HTTPException: + raise + except Exception as e: + logger.error(f"Error charging battery {battery_id}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/batteries/{battery_id}/discharge") +async def discharge_battery( + battery_id: str, + request: ChargingRequest, + service: BatteryService = Depends(get_battery_service) +): + """Discharge a battery with specified power""" + try: + result = await service.discharge_battery(battery_id, request.power_kw, request.duration_minutes) + + if not result.get("success"): + raise HTTPException(status_code=400, detail=result.get("error", "Discharging failed")) + + return { + "message": "Discharging initiated successfully", + "battery_id": battery_id, + "power_kw": request.power_kw, + "duration_minutes": request.duration_minutes, + "estimated_completion": result.get("estimated_completion") + } + except HTTPException: + raise + except Exception as e: + logger.error(f"Error discharging battery {battery_id}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/batteries/{battery_id}/history") +async def get_battery_history( + battery_id: str, + hours: int = 24, + service: BatteryService = Depends(get_battery_service) +): + """Get battery historical data""" + try: + history = await service.get_battery_history(battery_id, hours) + + return { + "battery_id": battery_id, + "period_hours": hours, + "history": history, + "data_points": len(history) + } + except Exception as e: + logger.error(f"Error getting battery history for {battery_id}: {e}") + raise 
HTTPException(status_code=500, detail="Internal server error") + +@app.get("/batteries/analytics/summary") +async def get_battery_analytics( + hours: int = 24, + service: BatteryService = Depends(get_battery_service) +): + """Get battery system analytics""" + try: + analytics = await service.get_battery_analytics(hours) + + return { + "period_hours": hours, + "timestamp": datetime.utcnow().isoformat(), + **analytics + } + except Exception as e: + logger.error(f"Error getting battery analytics: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/batteries/{battery_id}/optimize") +async def optimize_battery( + battery_id: str, + target_soc: float, # State of Charge target (0-100%) + service: BatteryService = Depends(get_battery_service) +): + """Optimize battery charging/discharging to reach target SOC""" + try: + if not (0 <= target_soc <= 100): + raise HTTPException(status_code=400, detail="Target SOC must be between 0 and 100") + + result = await service.optimize_battery(battery_id, target_soc) + + return { + "battery_id": battery_id, + "target_soc": target_soc, + "optimization_plan": result, + "message": "Battery optimization initiated" + } + except HTTPException: + raise + except Exception as e: + logger.error(f"Error optimizing battery {battery_id}: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +async def battery_monitoring_task(): + """Background task for continuous battery monitoring""" + logger.info("Starting battery monitoring task") + + while True: + try: + db = await get_database() + redis = await get_redis() + service = BatteryService(db, redis) + + # Update all battery statuses + batteries = await service.get_batteries() + for battery in batteries: + await service.update_battery_status(battery["battery_id"]) + + # Check for maintenance alerts + await service.check_maintenance_alerts() + + # Sleep for monitoring interval (30 seconds) + await asyncio.sleep(30) + + except Exception as e: + logger.error(f"Error in battery monitoring task: {e}") + await asyncio.sleep(60) # Wait longer on error + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8002) \ No newline at end of file diff --git a/microservices/battery-service/models.py b/microservices/battery-service/models.py new file mode 100644 index 0000000..7355877 --- /dev/null +++ b/microservices/battery-service/models.py @@ -0,0 +1,157 @@ +""" +Pydantic models for Battery Management Service +""" + +from pydantic import BaseModel, Field +from typing import List, Optional, Dict, Any, Literal +from datetime import datetime +from enum import Enum + +class BatteryState(str, Enum): + IDLE = "idle" + CHARGING = "charging" + DISCHARGING = "discharging" + MAINTENANCE = "maintenance" + ERROR = "error" + +class BatteryType(str, Enum): + LITHIUM_ION = "lithium_ion" + LEAD_ACID = "lead_acid" + NICKEL_METAL_HYDRIDE = "nickel_metal_hydride" + FLOW_BATTERY = "flow_battery" + +class BatteryStatus(BaseModel): + """Battery status model""" + battery_id: str = Field(..., description="Unique battery identifier") + name: str = Field(..., description="Human-readable battery name") + type: BatteryType = Field(..., description="Battery technology type") + state: BatteryState = Field(..., description="Current operational state") + + # Energy metrics + capacity_kwh: float = Field(..., description="Total battery capacity in kWh") + stored_energy_kwh: float = Field(..., description="Currently stored energy in kWh") + state_of_charge: float = Field(..., 
description="State of charge (0-100%)") + + # Power metrics + max_charge_power_kw: float = Field(..., description="Maximum charging power in kW") + max_discharge_power_kw: float = Field(..., description="Maximum discharging power in kW") + current_power_kw: float = Field(0, description="Current power flow in kW (positive = charging)") + + # Technical specifications + efficiency: float = Field(0.95, description="Round-trip efficiency (0-1)") + cycles_completed: int = Field(0, description="Number of charge/discharge cycles") + max_cycles: int = Field(5000, description="Maximum rated cycles") + + # Health and maintenance + health_percentage: float = Field(100, description="Battery health (0-100%)") + temperature_celsius: Optional[float] = Field(None, description="Battery temperature") + last_maintenance: Optional[datetime] = Field(None, description="Last maintenance date") + next_maintenance: Optional[datetime] = Field(None, description="Next maintenance date") + + # Location and installation + location: Optional[str] = Field(None, description="Physical location") + building: Optional[str] = Field(None, description="Building identifier") + room: Optional[str] = Field(None, description="Room identifier") + + # Operational data + installed_date: Optional[datetime] = Field(None, description="Installation date") + last_updated: datetime = Field(default_factory=datetime.utcnow, description="Last status update") + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() if v else None + } + +class BatteryCommand(BaseModel): + """Battery control command""" + battery_id: str = Field(..., description="Target battery ID") + command: Literal["charge", "discharge", "stop"] = Field(..., description="Command type") + power_kw: Optional[float] = Field(None, description="Power level in kW") + duration_minutes: Optional[int] = Field(None, description="Command duration in minutes") + target_soc: Optional[float] = Field(None, description="Target state of charge (0-100%)") + +class ChargingRequest(BaseModel): + """Battery charging/discharging request""" + power_kw: float = Field(..., description="Power level in kW", gt=0) + duration_minutes: Optional[int] = Field(None, description="Duration in minutes", gt=0) + target_soc: Optional[float] = Field(None, description="Target SOC (0-100%)", ge=0, le=100) + +class BatteryResponse(BaseModel): + """Battery operation response""" + battery_id: str + status: Dict[str, Any] + message: Optional[str] = None + +class BatteryListResponse(BaseModel): + """Response for battery list endpoint""" + batteries: List[Dict[str, Any]] + count: int + total_capacity: float = Field(description="Total system capacity in kWh") + total_stored_energy: float = Field(description="Total stored energy in kWh") + +class HistoricalDataRequest(BaseModel): + """Request for historical battery data""" + battery_id: str + start_time: Optional[datetime] = None + end_time: Optional[datetime] = None + hours: int = Field(default=24, description="Hours of data to retrieve") + +class BatteryHistoricalData(BaseModel): + """Historical battery data point""" + timestamp: datetime + state_of_charge: float + power_kw: float + temperature_celsius: Optional[float] = None + efficiency: float + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() + } + +class BatteryAnalytics(BaseModel): + """Battery system analytics""" + total_batteries: int + active_batteries: int + total_capacity_kwh: float + total_stored_energy_kwh: float + average_soc: float + + # Energy flows + 
total_energy_charged_kwh: float + total_energy_discharged_kwh: float + net_energy_flow_kwh: float + + # Efficiency metrics + round_trip_efficiency: float + capacity_utilization: float + + # Health metrics + average_health: float + batteries_needing_maintenance: int + +class MaintenanceAlert(BaseModel): + """Battery maintenance alert""" + battery_id: str + alert_type: Literal["scheduled", "health", "temperature", "cycles"] + severity: Literal["info", "warning", "critical"] + message: str + recommended_action: str + timestamp: datetime + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() + } + +class HealthResponse(BaseModel): + """Health check response""" + service: str + status: str + timestamp: datetime + version: str + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() + } \ No newline at end of file diff --git a/microservices/battery-service/requirements.txt b/microservices/battery-service/requirements.txt new file mode 100644 index 0000000..adb8f27 --- /dev/null +++ b/microservices/battery-service/requirements.txt @@ -0,0 +1,7 @@ +fastapi +uvicorn[standard] +pymongo +motor +redis +python-dotenv +pydantic \ No newline at end of file diff --git a/microservices/demand-response-service/main.py b/microservices/demand-response-service/main.py new file mode 100644 index 0000000..02afa4d --- /dev/null +++ b/microservices/demand-response-service/main.py @@ -0,0 +1,383 @@ +""" +Demand Response Microservice +Handles grid interaction, demand response events, and load management. +Port: 8003 +""" + +import asyncio +from datetime import datetime, timedelta +from fastapi import FastAPI, HTTPException, Depends, BackgroundTasks +from fastapi.middleware.cors import CORSMiddleware +from contextlib import asynccontextmanager +import logging +from typing import List, Optional + +from models import ( + DemandResponseInvitation, InvitationResponse, EventRequest, EventStatus, + LoadReductionRequest, FlexibilityResponse, HealthResponse +) +from database import connect_to_mongo, close_mongo_connection, get_database, connect_to_redis, get_redis +from demand_response_service import DemandResponseService + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +@asynccontextmanager +async def lifespan(app: FastAPI): + """Application lifespan manager""" + logger.info("Demand Response Service starting up...") + await connect_to_mongo() + await connect_to_redis() + + # Start background tasks + asyncio.create_task(event_scheduler_task()) + asyncio.create_task(auto_response_task()) + + logger.info("Demand Response Service startup complete") + + yield + + logger.info("Demand Response Service shutting down...") + await close_mongo_connection() + logger.info("Demand Response Service shutdown complete") + +app = FastAPI( + title="Demand Response Service", + description="Grid interaction and demand response event management microservice", + version="1.0.0", + lifespan=lifespan +) + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# Dependencies +async def get_db(): + return await get_database() + +async def get_dr_service(db=Depends(get_db)): + redis = await get_redis() + return DemandResponseService(db, redis) + +@app.get("/health", response_model=HealthResponse) +async def health_check(): + """Health check endpoint""" + try: + db = await get_database() + await db.command("ping") + + redis = await get_redis() + await redis.ping() + + return 
HealthResponse( + service="demand-response-service", + status="healthy", + timestamp=datetime.utcnow(), + version="1.0.0" + ) + except Exception as e: + logger.error(f"Health check failed: {e}") + raise HTTPException(status_code=503, detail="Service Unavailable") + +@app.post("/invitations/send") +async def send_invitation( + event_request: EventRequest, + service: DemandResponseService = Depends(get_dr_service) +): + """Send demand response invitation to specified IoT devices""" + try: + result = await service.send_invitation( + event_time=event_request.event_time, + load_kwh=event_request.load_kwh, + load_percentage=event_request.load_percentage, + iots=event_request.iots, + duration_minutes=event_request.duration_minutes + ) + + return { + "message": "Demand response invitation sent successfully", + "event_id": result["event_id"], + "event_time": event_request.event_time.isoformat(), + "participants": len(event_request.iots), + "load_kwh": event_request.load_kwh, + "load_percentage": event_request.load_percentage + } + except Exception as e: + logger.error(f"Error sending invitation: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/invitations/unanswered") +async def get_unanswered_invitations( + service: DemandResponseService = Depends(get_dr_service) +): + """Get all unanswered demand response invitations""" + try: + invitations = await service.get_unanswered_invitations() + return { + "invitations": invitations, + "count": len(invitations) + } + except Exception as e: + logger.error(f"Error getting unanswered invitations: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/invitations/answered") +async def get_answered_invitations( + hours: int = 24, + service: DemandResponseService = Depends(get_dr_service) +): + """Get answered demand response invitations""" + try: + invitations = await service.get_answered_invitations(hours) + return { + "invitations": invitations, + "count": len(invitations), + "period_hours": hours + } + except Exception as e: + logger.error(f"Error getting answered invitations: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/invitations/answer") +async def answer_invitation( + response: InvitationResponse, + service: DemandResponseService = Depends(get_dr_service) +): + """Answer a demand response invitation""" + try: + result = await service.answer_invitation( + event_id=response.event_id, + iot_id=response.iot_id, + response=response.response, + committed_reduction_kw=response.committed_reduction_kw + ) + + return { + "message": "Invitation response recorded successfully", + "event_id": response.event_id, + "iot_id": response.iot_id, + "response": response.response, + "committed_reduction": response.committed_reduction_kw + } + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"Error answering invitation: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/invitations/{event_id}") +async def get_invitation( + event_id: str, + service: DemandResponseService = Depends(get_dr_service) +): + """Get details of a specific demand response invitation""" + try: + invitation = await service.get_invitation(event_id) + if not invitation: + raise HTTPException(status_code=404, detail="Invitation not found") + + return invitation + except HTTPException: + raise + except Exception as e: + logger.error(f"Error getting invitation {event_id}: {e}") 
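+ # Unexpected failures are logged and surfaced as a generic 500 below; the
+ # preceding `except HTTPException: raise` keeps the 404 raised above from
+ # being re-wrapped here.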
+ raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/events/schedule") +async def schedule_event( + event_request: EventRequest, + service: DemandResponseService = Depends(get_dr_service) +): + """Schedule a demand response event""" + try: + result = await service.schedule_event( + event_time=event_request.event_time, + iots=event_request.iots, + load_reduction_kw=event_request.load_kwh * 1000, # Convert to kW + duration_minutes=event_request.duration_minutes + ) + + return { + "message": "Demand response event scheduled successfully", + "event_id": result["event_id"], + "scheduled_time": event_request.event_time.isoformat(), + "participants": len(event_request.iots) + } + except Exception as e: + logger.error(f"Error scheduling event: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/events/active") +async def get_active_events( + service: DemandResponseService = Depends(get_dr_service) +): + """Get currently active demand response events""" + try: + events = await service.get_active_events() + return { + "events": events, + "count": len(events) + } + except Exception as e: + logger.error(f"Error getting active events: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/flexibility/current") +async def get_current_flexibility( + service: DemandResponseService = Depends(get_dr_service) +): + """Get current system flexibility capacity""" + try: + flexibility = await service.get_current_flexibility() + return { + "timestamp": datetime.utcnow().isoformat(), + "flexibility": flexibility + } + except Exception as e: + logger.error(f"Error getting current flexibility: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/flexibility/forecast") +async def get_flexibility_forecast( + hours: int = 24, + service: DemandResponseService = Depends(get_dr_service) +): + """Get forecasted flexibility for the next specified hours""" + try: + forecast = await service.get_flexibility_forecast(hours) + return { + "forecast_hours": hours, + "flexibility_forecast": forecast, + "generated_at": datetime.utcnow().isoformat() + } + except Exception as e: + logger.error(f"Error getting flexibility forecast: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/load-reduction/execute") +async def execute_load_reduction( + request: LoadReductionRequest, + service: DemandResponseService = Depends(get_dr_service) +): + """Execute immediate load reduction""" + try: + result = await service.execute_load_reduction( + target_reduction_kw=request.target_reduction_kw, + duration_minutes=request.duration_minutes, + priority_iots=request.priority_iots + ) + + return { + "message": "Load reduction executed successfully", + "target_reduction_kw": request.target_reduction_kw, + "actual_reduction_kw": result.get("actual_reduction_kw"), + "participating_devices": result.get("participating_devices", []), + "execution_time": datetime.utcnow().isoformat() + } + except Exception as e: + logger.error(f"Error executing load reduction: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/auto-response/config") +async def get_auto_response_config( + service: DemandResponseService = Depends(get_dr_service) +): + """Get auto-response configuration""" + try: + config = await service.get_auto_response_config() + return {"auto_response_config": config} + except Exception as e: + logger.error(f"Error getting auto-response 
config: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/auto-response/config") +async def set_auto_response_config( + enabled: bool, + max_reduction_percentage: float = 20.0, + response_delay_seconds: int = 300, + service: DemandResponseService = Depends(get_dr_service) +): + """Set auto-response configuration""" + try: + await service.set_auto_response_config( + enabled=enabled, + max_reduction_percentage=max_reduction_percentage, + response_delay_seconds=response_delay_seconds + ) + + return { + "message": "Auto-response configuration updated successfully", + "enabled": enabled, + "max_reduction_percentage": max_reduction_percentage, + "response_delay_seconds": response_delay_seconds + } + except Exception as e: + logger.error(f"Error setting auto-response config: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/analytics/performance") +async def get_performance_analytics( + days: int = 30, + service: DemandResponseService = Depends(get_dr_service) +): + """Get demand response performance analytics""" + try: + analytics = await service.get_performance_analytics(days) + return { + "period_days": days, + "analytics": analytics, + "generated_at": datetime.utcnow().isoformat() + } + except Exception as e: + logger.error(f"Error getting performance analytics: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +async def event_scheduler_task(): + """Background task for scheduling and executing demand response events""" + logger.info("Starting event scheduler task") + + while True: + try: + db = await get_database() + redis = await get_redis() + service = DemandResponseService(db, redis) + + # Check for events that need to be executed + await service.check_scheduled_events() + + # Sleep for 60 seconds between checks + await asyncio.sleep(60) + + except Exception as e: + logger.error(f"Error in event scheduler task: {e}") + await asyncio.sleep(120) # Wait longer on error + +async def auto_response_task(): + """Background task for automatic demand response""" + logger.info("Starting auto-response task") + + while True: + try: + db = await get_database() + redis = await get_redis() + service = DemandResponseService(db, redis) + + # Check for auto-response opportunities + await service.process_auto_responses() + + # Sleep for 30 seconds between checks + await asyncio.sleep(30) + + except Exception as e: + logger.error(f"Error in auto-response task: {e}") + await asyncio.sleep(90) + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8003) \ No newline at end of file diff --git a/microservices/deploy.sh b/microservices/deploy.sh new file mode 100755 index 0000000..cd0807b --- /dev/null +++ b/microservices/deploy.sh @@ -0,0 +1,309 @@ +#!/bin/bash + +# Energy Management Microservices Deployment Script +# This script handles deployment, startup, and management of all microservices + +set -e # Exit on any error + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Configuration +COMPOSE_FILE="docker-compose.yml" +PROJECT_NAME="energy-dashboard" + +# Function to print colored output +print_status() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Function to check if Docker and Docker Compose are installed 
+check_dependencies() { + print_status "Checking dependencies..." + + if ! command -v docker &> /dev/null; then + print_error "Docker is not installed. Please install Docker first." + exit 1 + fi + + if ! command -v docker-compose &> /dev/null; then + print_error "Docker Compose is not installed. Please install Docker Compose first." + exit 1 + fi + + print_success "Dependencies check passed" +} + +# Function to create necessary directories and files +setup_environment() { + print_status "Setting up environment..." + + # Create nginx configuration directory + mkdir -p nginx/ssl + + # Create init-mongo directory for database initialization + mkdir -p init-mongo + + # Create a simple nginx configuration if it doesn't exist + if [ ! -f "nginx/nginx.conf" ]; then + cat > nginx/nginx.conf << 'EOF' +events { + worker_connections 1024; +} + +http { + upstream api_gateway { + server api-gateway:8000; + } + + server { + listen 80; + + location / { + proxy_pass http://api_gateway; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /ws { + proxy_pass http://api_gateway; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + } + } +} +EOF + print_success "Created nginx configuration" + fi + + # Create MongoDB initialization script if it doesn't exist + if [ ! -f "init-mongo/init.js" ]; then + cat > init-mongo/init.js << 'EOF' +// MongoDB initialization script +db = db.getSiblingDB('energy_dashboard'); +db.createUser({ + user: 'dashboard_user', + pwd: 'dashboard_pass', + roles: [ + { role: 'readWrite', db: 'energy_dashboard' }, + { role: 'readWrite', db: 'energy_dashboard_tokens' }, + { role: 'readWrite', db: 'energy_dashboard_batteries' }, + { role: 'readWrite', db: 'energy_dashboard_demand_response' }, + { role: 'readWrite', db: 'energy_dashboard_p2p' }, + { role: 'readWrite', db: 'energy_dashboard_forecasting' }, + { role: 'readWrite', db: 'energy_dashboard_iot' } + ] +}); + +// Create initial collections and indexes +db.sensors.createIndex({ "sensor_id": 1 }, { unique: true }); +db.sensor_readings.createIndex({ "sensor_id": 1, "timestamp": -1 }); +db.room_metrics.createIndex({ "room": 1, "timestamp": -1 }); + +print("MongoDB initialization completed"); +EOF + print_success "Created MongoDB initialization script" + fi + + print_success "Environment setup completed" +} + +# Function to build all services +build_services() { + print_status "Building all microservices..." + + docker-compose -f $COMPOSE_FILE build + + if [ $? -eq 0 ]; then + print_success "All services built successfully" + else + print_error "Failed to build services" + exit 1 + fi +} + +# Function to start all services +start_services() { + print_status "Starting all services..." + + docker-compose -f $COMPOSE_FILE up -d + + if [ $? -eq 0 ]; then + print_success "All services started successfully" + else + print_error "Failed to start services" + exit 1 + fi +} + +# Function to stop all services +stop_services() { + print_status "Stopping all services..." 
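+ # "down" stops and removes containers and the default network but keeps the
+ # named volumes (mongodb_data, redis_data); use the cleanup command for a full wipe.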
+ + docker-compose -f $COMPOSE_FILE down + + print_success "All services stopped" +} + +# Function to restart all services +restart_services() { + stop_services + start_services +} + +# Function to show service status +show_status() { + print_status "Service status:" + docker-compose -f $COMPOSE_FILE ps + + print_status "Service health checks:" + + # Wait a moment for services to start + sleep 5 + + services=("api-gateway:8000" "token-service:8001" "battery-service:8002" "demand-response-service:8003") + + for service in "${services[@]}"; do + name="${service%:*}" + port="${service#*:}" + + if curl -f -s "http://localhost:$port/health" > /dev/null; then + print_success "$name is healthy" + else + print_warning "$name is not responding to health checks" + fi + done +} + +# Function to view logs +view_logs() { + if [ -z "$2" ]; then + print_status "Showing logs for all services..." + docker-compose -f $COMPOSE_FILE logs -f + else + print_status "Showing logs for $2..." + docker-compose -f $COMPOSE_FILE logs -f $2 + fi +} + +# Function to clean up everything +cleanup() { + print_warning "This will remove all containers, images, and volumes. Are you sure? (y/N)" + read -r response + if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]]; then + print_status "Cleaning up everything..." + docker-compose -f $COMPOSE_FILE down -v --rmi all + docker system prune -f + print_success "Cleanup completed" + else + print_status "Cleanup cancelled" + fi +} + +# Function to run database migrations or setup +setup_database() { + print_status "Setting up databases..." + + # Wait for MongoDB to be ready + print_status "Waiting for MongoDB to be ready..." + sleep 10 + + # Run any additional setup scripts here + print_success "Database setup completed" +} + +# Function to show help +show_help() { + echo "Energy Management Microservices Deployment Script" + echo "" + echo "Usage: $0 [COMMAND]" + echo "" + echo "Commands:" + echo " setup Setup environment and dependencies" + echo " build Build all microservices" + echo " start Start all services" + echo " stop Stop all services" + echo " restart Restart all services" + echo " status Show service status and health" + echo " logs Show logs for all services" + echo " logs Show logs for specific service" + echo " deploy Full deployment (setup + build + start)" + echo " db-setup Setup databases" + echo " cleanup Remove all containers, images, and volumes" + echo " help Show this help message" + echo "" + echo "Examples:" + echo " $0 deploy # Full deployment" + echo " $0 logs battery-service # Show battery service logs" + echo " $0 status # Check service health" +} + +# Main script logic +case "${1:-help}" in + setup) + check_dependencies + setup_environment + ;; + build) + check_dependencies + build_services + ;; + start) + check_dependencies + start_services + ;; + stop) + stop_services + ;; + restart) + restart_services + ;; + status) + show_status + ;; + logs) + view_logs $@ + ;; + deploy) + check_dependencies + setup_environment + build_services + start_services + setup_database + show_status + ;; + db-setup) + setup_database + ;; + cleanup) + cleanup + ;; + help|--help|-h) + show_help + ;; + *) + print_error "Unknown command: $1" + show_help + exit 1 + ;; +esac \ No newline at end of file diff --git a/microservices/docker-compose.yml b/microservices/docker-compose.yml new file mode 100644 index 0000000..0139cfe --- /dev/null +++ b/microservices/docker-compose.yml @@ -0,0 +1,193 @@ +version: '3.8' + +services: + # Database Services + mongodb: + image: mongo:5.0 + 
container_name: energy-mongodb + restart: unless-stopped + environment: + MONGO_INITDB_ROOT_USERNAME: admin + MONGO_INITDB_ROOT_PASSWORD: password123 + ports: + - "27017:27017" + volumes: + - mongodb_data:/data/db + - ./init-mongo:/docker-entrypoint-initdb.d + networks: + - energy-network + + redis: + image: redis:7-alpine + container_name: energy-redis + restart: unless-stopped + ports: + - "6379:6379" + volumes: + - redis_data:/data + networks: + - energy-network + + # API Gateway + api-gateway: + build: + context: ./api-gateway + dockerfile: Dockerfile + container_name: energy-api-gateway + restart: unless-stopped + ports: + - "8000:8000" + environment: + - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard?authSource=admin + - REDIS_URL=redis://redis:6379 + - TOKEN_SERVICE_URL=http://token-service:8001 + - BATTERY_SERVICE_URL=http://battery-service:8002 + - DEMAND_RESPONSE_SERVICE_URL=http://demand-response-service:8003 + - P2P_TRADING_SERVICE_URL=http://p2p-trading-service:8004 + - FORECASTING_SERVICE_URL=http://forecasting-service:8005 + - IOT_CONTROL_SERVICE_URL=http://iot-control-service:8006 + depends_on: + - mongodb + - redis + - token-service + - battery-service + - demand-response-service + networks: + - energy-network + + # Token Management Service + token-service: + build: + context: ./token-service + dockerfile: Dockerfile + container_name: energy-token-service + restart: unless-stopped + ports: + - "8001:8001" + environment: + - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_tokens?authSource=admin + - JWT_SECRET_KEY=your-super-secret-jwt-key-change-in-production + depends_on: + - mongodb + networks: + - energy-network + + # Battery Management Service + battery-service: + build: + context: ./battery-service + dockerfile: Dockerfile + container_name: energy-battery-service + restart: unless-stopped + ports: + - "8002:8002" + environment: + - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_batteries?authSource=admin + - REDIS_URL=redis://redis:6379 + depends_on: + - mongodb + - redis + networks: + - energy-network + + # Demand Response Service + demand-response-service: + build: + context: ./demand-response-service + dockerfile: Dockerfile + container_name: energy-demand-response-service + restart: unless-stopped + ports: + - "8003:8003" + environment: + - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_demand_response?authSource=admin + - REDIS_URL=redis://redis:6379 + - IOT_CONTROL_SERVICE_URL=http://iot-control-service:8006 + depends_on: + - mongodb + - redis + networks: + - energy-network + + # P2P Trading Service + p2p-trading-service: + build: + context: ./p2p-trading-service + dockerfile: Dockerfile + container_name: energy-p2p-trading-service + restart: unless-stopped + ports: + - "8004:8004" + environment: + - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_p2p?authSource=admin + - REDIS_URL=redis://redis:6379 + depends_on: + - mongodb + - redis + networks: + - energy-network + + # Forecasting Service + forecasting-service: + build: + context: ./forecasting-service + dockerfile: Dockerfile + container_name: energy-forecasting-service + restart: unless-stopped + ports: + - "8005:8005" + environment: + - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_forecasting?authSource=admin + - REDIS_URL=redis://redis:6379 + depends_on: + - mongodb + - redis + networks: + - energy-network + + # IoT Control Service + iot-control-service: + build: + context: 
./iot-control-service + dockerfile: Dockerfile + container_name: energy-iot-control-service + restart: unless-stopped + ports: + - "8006:8006" + environment: + - MONGO_URL=mongodb://admin:password123@mongodb:27017/energy_dashboard_iot?authSource=admin + - REDIS_URL=redis://redis:6379 + - BATTERY_SERVICE_URL=http://battery-service:8002 + - DEMAND_RESPONSE_SERVICE_URL=http://demand-response-service:8003 + depends_on: + - mongodb + - redis + networks: + - energy-network + + # Monitoring and Management + nginx: + image: nginx:alpine + container_name: energy-nginx + restart: unless-stopped + ports: + - "80:80" + - "443:443" + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf + - ./nginx/ssl:/etc/nginx/ssl + depends_on: + - api-gateway + networks: + - energy-network + +networks: + energy-network: + driver: bridge + name: energy-network + +volumes: + mongodb_data: + name: energy-mongodb-data + redis_data: + name: energy-redis-data \ No newline at end of file diff --git a/microservices/token-service/Dockerfile b/microservices/token-service/Dockerfile new file mode 100644 index 0000000..35f09ad --- /dev/null +++ b/microservices/token-service/Dockerfile @@ -0,0 +1,25 @@ +FROM python:3.9-slim + +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + gcc \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements and install Python dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . . + +# Expose port +EXPOSE 8001 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:8001/health || exit 1 + +# Run the application +CMD ["python", "main.py"] \ No newline at end of file diff --git a/microservices/token-service/database.py b/microservices/token-service/database.py new file mode 100644 index 0000000..5f29e69 --- /dev/null +++ b/microservices/token-service/database.py @@ -0,0 +1,65 @@ +""" +Database connection for Token Service +""" + +from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase +import logging +import os + +logger = logging.getLogger(__name__) + +# Database configuration +MONGO_URL = os.getenv("MONGO_URL", "mongodb://localhost:27017") +DATABASE_NAME = os.getenv("DATABASE_NAME", "energy_dashboard_tokens") + +# Global database client +_client: AsyncIOMotorClient = None +_database: AsyncIOMotorDatabase = None + +async def connect_to_mongo(): + """Create database connection""" + global _client, _database + + try: + _client = AsyncIOMotorClient(MONGO_URL) + _database = _client[DATABASE_NAME] + + # Test connection + await _database.command("ping") + logger.info(f"Connected to MongoDB: {DATABASE_NAME}") + + # Create indexes for performance + await create_indexes() + + except Exception as e: + logger.error(f"Failed to connect to MongoDB: {e}") + raise + +async def close_mongo_connection(): + """Close database connection""" + global _client + + if _client: + _client.close() + logger.info("Disconnected from MongoDB") + +async def get_database() -> AsyncIOMotorDatabase: + """Get database instance""" + global _database + + if _database is None: + raise RuntimeError("Database not initialized. 
Call connect_to_mongo() first.") + + return _database + +async def create_indexes(): + """Create database indexes for performance""" + db = await get_database() + + # Indexes for tokens collection + await db.tokens.create_index("token", unique=True) + await db.tokens.create_index("active") + await db.tokens.create_index("expires_at") + await db.tokens.create_index("name") + + logger.info("Database indexes created") \ No newline at end of file diff --git a/microservices/token-service/main.py b/microservices/token-service/main.py new file mode 100644 index 0000000..37f72f3 --- /dev/null +++ b/microservices/token-service/main.py @@ -0,0 +1,190 @@ +""" +Token Management Microservice +Handles JWT authentication, token generation, validation, and resource access control. +Port: 8001 +""" + +import asyncio +from datetime import datetime +from fastapi import FastAPI, HTTPException, Depends, Security +from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials +from fastapi.middleware.cors import CORSMiddleware +from contextlib import asynccontextmanager +import logging +from typing import List, Optional + +from models import ( + TokenGenerateRequest, TokenResponse, TokenValidationResponse, + TokenListResponse, HealthResponse +) +from database import connect_to_mongo, close_mongo_connection, get_database +from token_service import TokenService + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +security = HTTPBearer() + +@asynccontextmanager +async def lifespan(app: FastAPI): + """Application lifespan manager""" + logger.info("Token Service starting up...") + await connect_to_mongo() + logger.info("Token Service startup complete") + + yield + + logger.info("Token Service shutting down...") + await close_mongo_connection() + logger.info("Token Service shutdown complete") + +app = FastAPI( + title="Token Management Service", + description="JWT authentication and token management microservice", + version="1.0.0", + lifespan=lifespan +) + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# Dependency for database +async def get_db(): + return await get_database() + +@app.get("/health", response_model=HealthResponse) +async def health_check(): + """Health check endpoint""" + try: + db = await get_database() + await db.command("ping") + + return HealthResponse( + service="token-service", + status="healthy", + timestamp=datetime.utcnow(), + version="1.0.0" + ) + except Exception as e: + logger.error(f"Health check failed: {e}") + raise HTTPException(status_code=503, detail="Service Unavailable") + +@app.get("/tokens", response_model=TokenListResponse) +async def get_tokens(db=Depends(get_db)): + """Get all tokens""" + try: + token_service = TokenService(db) + tokens = await token_service.get_tokens() + + return TokenListResponse( + tokens=tokens, + count=len(tokens) + ) + except Exception as e: + logger.error(f"Error getting tokens: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/tokens/generate", response_model=TokenResponse) +async def generate_token(request: TokenGenerateRequest, db=Depends(get_db)): + """Generate a new JWT token""" + try: + token_service = TokenService(db) + token = token_service.generate_token( + name=request.name, + list_of_resources=request.list_of_resources, + data_aggregation=request.data_aggregation, + time_aggregation=request.time_aggregation, + embargo=request.embargo, + 
exp_hours=request.exp_hours + ) + + return TokenResponse(token=token) + except Exception as e: + logger.error(f"Error generating token: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/tokens/validate", response_model=TokenValidationResponse) +async def validate_token(token: str, db=Depends(get_db)): + """Validate and decode a JWT token""" + try: + token_service = TokenService(db) + is_valid = await token_service.is_token_valid(token) + decoded = token_service.decode_token(token) if is_valid else None + + return TokenValidationResponse( + valid=is_valid, + token=token, + decoded=decoded if is_valid and "error" not in (decoded or {}) else None, + error=decoded.get("error") if decoded and "error" in decoded else None + ) + except Exception as e: + logger.error(f"Error validating token: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/tokens/save") +async def save_token(token: str, db=Depends(get_db)): + """Save a token to database""" + try: + token_service = TokenService(db) + result = await token_service.insert_token(token) + return result + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"Error saving token: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.post("/tokens/revoke") +async def revoke_token(token: str, db=Depends(get_db)): + """Revoke a token""" + try: + token_service = TokenService(db) + result = await token_service.revoke_token(token) + return result + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) + except Exception as e: + logger.error(f"Error revoking token: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.get("/tokens/{token}/permissions") +async def get_token_permissions(token: str, db=Depends(get_db)): + """Get permissions for a specific token""" + try: + token_service = TokenService(db) + permissions = await token_service.get_token_permissions(token) + + if permissions: + return {"permissions": permissions} + else: + raise HTTPException(status_code=401, detail="Invalid or expired token") + except HTTPException: + raise + except Exception as e: + logger.error(f"Error getting token permissions: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +@app.delete("/tokens/cleanup") +async def cleanup_expired_tokens(db=Depends(get_db)): + """Clean up expired tokens""" + try: + token_service = TokenService(db) + expired_count = await token_service.cleanup_expired_tokens() + + return { + "message": "Expired tokens cleaned up", + "expired_tokens_removed": expired_count + } + except Exception as e: + logger.error(f"Error cleaning up tokens: {e}") + raise HTTPException(status_code=500, detail="Internal server error") + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8001) \ No newline at end of file diff --git a/microservices/token-service/models.py b/microservices/token-service/models.py new file mode 100644 index 0000000..e9d75db --- /dev/null +++ b/microservices/token-service/models.py @@ -0,0 +1,55 @@ +""" +Pydantic models for Token Management Service +""" + +from pydantic import BaseModel, Field +from typing import List, Optional, Dict, Any +from datetime import datetime + +class TokenGenerateRequest(BaseModel): + """Request model for token generation""" + name: str = Field(..., description="Token owner name") + list_of_resources: List[str] = Field(..., 
description="List of accessible resources") + data_aggregation: bool = Field(default=False, description="Allow data aggregation") + time_aggregation: bool = Field(default=False, description="Allow time aggregation") + embargo: int = Field(default=0, description="Embargo period in seconds") + exp_hours: int = Field(default=24, description="Token expiration in hours") + +class TokenResponse(BaseModel): + """Response model for token operations""" + token: str = Field(..., description="JWT token") + +class TokenValidationResponse(BaseModel): + """Response model for token validation""" + valid: bool = Field(..., description="Whether token is valid") + token: str = Field(..., description="Original token") + decoded: Optional[Dict[str, Any]] = Field(None, description="Decoded token payload") + error: Optional[str] = Field(None, description="Error message if invalid") + +class TokenRecord(BaseModel): + """Token database record model""" + token: str + datetime: str + active: bool + name: str + resources: List[str] + expires_at: str + created_at: str + updated_at: str + +class TokenListResponse(BaseModel): + """Response model for token list""" + tokens: List[Dict[str, Any]] + count: int + +class HealthResponse(BaseModel): + """Health check response""" + service: str + status: str + timestamp: datetime + version: str + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() + } \ No newline at end of file diff --git a/microservices/token-service/requirements.txt b/microservices/token-service/requirements.txt new file mode 100644 index 0000000..3406135 --- /dev/null +++ b/microservices/token-service/requirements.txt @@ -0,0 +1,7 @@ +fastapi +uvicorn[standard] +pymongo +motor +PyJWT +python-dotenv +pydantic \ No newline at end of file diff --git a/microservices/token-service/token_service.py b/microservices/token-service/token_service.py new file mode 100644 index 0000000..3ea291b --- /dev/null +++ b/microservices/token-service/token_service.py @@ -0,0 +1,157 @@ +""" +Token service implementation +""" + +import jwt +import uuid +from datetime import datetime, timedelta +from typing import Dict, List, Optional, Any +from motor.motor_asyncio import AsyncIOMotorDatabase +import os + +class TokenService: + """Service for managing JWT tokens and authentication""" + + def __init__(self, db: AsyncIOMotorDatabase, secret_key: str = None): + self.db = db + self.secret_key = secret_key or os.getenv("JWT_SECRET_KEY", "energy-dashboard-secret-key") + self.tokens_collection = db.tokens + + def generate_token(self, name: str, list_of_resources: List[str], + data_aggregation: bool = False, time_aggregation: bool = False, + embargo: int = 0, exp_hours: int = 24) -> str: + """Generate a new JWT token with specified permissions""" + + # Calculate expiration time + exp_timestamp = int((datetime.utcnow() + timedelta(hours=exp_hours)).timestamp()) + + # Create token payload + payload = { + "name": name, + "list_of_resources": list_of_resources, + "data_aggregation": data_aggregation, + "time_aggregation": time_aggregation, + "embargo": embargo, + "exp": exp_timestamp, + "iat": int(datetime.utcnow().timestamp()), + "jti": str(uuid.uuid4()) # unique token ID + } + + # Generate JWT token + token = jwt.encode(payload, self.secret_key, algorithm="HS256") + return token + + def decode_token(self, token: str) -> Optional[Dict[str, Any]]: + """Decode and verify JWT token""" + try: + payload = jwt.decode(token, self.secret_key, algorithms=["HS256"]) + return payload + except jwt.ExpiredSignatureError: + return 
{"error": "Token has expired"} + except jwt.InvalidTokenError: + return {"error": "Invalid token"} + + async def insert_token(self, token: str) -> Dict[str, Any]: + """Save token to database""" + now = datetime.utcnow() + + # Decode token to verify it's valid + decoded = self.decode_token(token) + if decoded and "error" not in decoded: + token_record = { + "token": token, + "datetime": now, + "active": True, + "created_at": now, + "updated_at": now, + "name": decoded.get("name", ""), + "resources": decoded.get("list_of_resources", []), + "expires_at": datetime.fromtimestamp(decoded.get("exp", 0)) + } + + # Upsert token (update if exists, insert if not) + await self.tokens_collection.replace_one( + {"token": token}, + token_record, + upsert=True + ) + + return { + "token": token, + "datetime": now.isoformat(), + "active": True + } + else: + raise ValueError("Invalid token cannot be saved") + + async def revoke_token(self, token: str) -> Dict[str, Any]: + """Revoke a token by marking it as inactive""" + now = datetime.utcnow() + + result = await self.tokens_collection.update_one( + {"token": token}, + { + "$set": { + "active": False, + "updated_at": now, + "revoked_at": now + } + } + ) + + if result.matched_count > 0: + return { + "token": token, + "datetime": now.isoformat(), + "active": False + } + else: + raise ValueError("Token not found") + + async def get_tokens(self) -> List[Dict[str, Any]]: + """Get all tokens from database""" + cursor = self.tokens_collection.find({}) + tokens = [] + + async for token_record in cursor: + # Convert ObjectId to string and datetime to ISO format + token_record["_id"] = str(token_record["_id"]) + for field in ["datetime", "created_at", "updated_at", "expires_at", "revoked_at"]: + if field in token_record and token_record[field]: + token_record[field] = token_record[field].isoformat() + + tokens.append(token_record) + + return tokens + + async def is_token_valid(self, token: str) -> bool: + """Check if token is valid and active""" + # Check if token exists and is active in database + token_record = await self.tokens_collection.find_one({ + "token": token, + "active": True + }) + + if not token_record: + return False + + # Verify JWT signature and expiration + decoded = self.decode_token(token) + return decoded is not None and "error" not in decoded + + async def get_token_permissions(self, token: str) -> Optional[Dict[str, Any]]: + """Get permissions for a valid token""" + if await self.is_token_valid(token): + return self.decode_token(token) + return None + + async def cleanup_expired_tokens(self) -> int: + """Remove expired tokens from database""" + now = datetime.utcnow() + + # Delete tokens that have expired + result = await self.tokens_collection.delete_many({ + "expires_at": {"$lt": now} + }) + + return result.deleted_count \ No newline at end of file diff --git a/microservices_example.md b/microservices_example.md new file mode 100644 index 0000000..6e6c3c4 --- /dev/null +++ b/microservices_example.md @@ -0,0 +1,84 @@ +# Microservices Architecture Example + +## Service Decomposition + +### 1. Sensor Data Service +**Responsibility**: Sensor data ingestion, validation, and storage +``` +Port: 8001 +Database: sensor_db (MongoDB) +Endpoints: +- POST /sensors/data # Ingest sensor readings +- GET /sensors/{id}/data # Get sensor history +- GET /sensors # List sensors +``` + +### 2. 
Room Management Service +**Responsibility**: Room metrics, aggregations, and space management +``` +Port: 8002 +Database: room_db (MongoDB) +Endpoints: +- GET /rooms # List rooms +- GET /rooms/{id}/metrics # Current room metrics +- GET /rooms/{id}/history # Historical room data +``` + +### 3. Analytics Service +**Responsibility**: Data analysis, reporting, and insights +``` +Port: 8003 +Database: analytics_db (PostgreSQL/ClickHouse) +Endpoints: +- GET /analytics/summary # Dashboard summary +- GET /analytics/trends # Trend analysis +- GET /analytics/reports/{id} # Generated reports +``` + +### 4. Notification Service +**Responsibility**: Alerts, events, and real-time notifications +``` +Port: 8004 +Database: events_db (MongoDB) +Message Queue: RabbitMQ/Kafka +Endpoints: +- POST /notifications/send # Send notification +- GET /events # System events +- WebSocket: /ws/notifications # Real-time alerts +``` + +### 5. API Gateway +**Responsibility**: Request routing, authentication, rate limiting +``` +Port: 8000 +Routes all requests to appropriate services +Handles CORS, authentication, logging +``` + +## Inter-Service Communication + +### Synchronous (HTTP/REST) +```python +# Analytics Service calling Sensor Service +import httpx + +async def get_sensor_data(sensor_id: str): + async with httpx.AsyncClient() as client: + response = await client.get(f"http://sensor-service:8001/sensors/{sensor_id}/data") + return response.json() +``` + +### Asynchronous (Message Queue) +```python +# Sensor Service publishes event +await message_queue.publish("sensor.data.received", { + "sensor_id": "sensor_001", + "timestamp": datetime.utcnow(), + "data": sensor_reading +}) + +# Room Service subscribes to event +@message_queue.subscribe("sensor.data.received") +async def handle_sensor_data(message): + await room_service.update_room_metrics(message.data) +``` \ No newline at end of file diff --git a/models.py b/models.py new file mode 100644 index 0000000..9516612 --- /dev/null +++ b/models.py @@ -0,0 +1,236 @@ +from pydantic import BaseModel, Field +from typing import Optional, List, Dict, Any, Literal +from datetime import datetime +from enum import Enum + +class SensorType(str, Enum): + ENERGY = "energy" + CO2 = "co2" + TEMPERATURE = "temperature" + HUMIDITY = "humidity" + HVAC = "hvac" + LIGHTING = "lighting" + SECURITY = "security" + +class SensorStatus(str, Enum): + ONLINE = "online" + OFFLINE = "offline" + ERROR = "error" + MAINTENANCE = "maintenance" + +class CO2Status(str, Enum): + GOOD = "good" + MODERATE = "moderate" + POOR = "poor" + CRITICAL = "critical" + +class OccupancyLevel(str, Enum): + LOW = "low" + MEDIUM = "medium" + HIGH = "high" + +# Base Models +class SensorReading(BaseModel): + """Individual sensor reading model""" + sensor_id: str = Field(..., description="Unique sensor identifier") + room: Optional[str] = Field(None, description="Room where sensor is located") + sensor_type: SensorType = Field(..., description="Type of sensor") + timestamp: int = Field(..., description="Unix timestamp of reading") + created_at: datetime = Field(default_factory=datetime.utcnow, description="Record creation timestamp") + + # Sensor values + energy: Optional[Dict[str, Any]] = Field(None, description="Energy reading with value and unit") + co2: Optional[Dict[str, Any]] = Field(None, description="CO2 reading with value and unit") + temperature: Optional[Dict[str, Any]] = Field(None, description="Temperature reading with value and unit") + humidity: Optional[Dict[str, Any]] = Field(None, description="Humidity 
reading with value and unit") + motion: Optional[Dict[str, Any]] = Field(None, description="Motion detection reading") + + # Metadata + metadata: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Additional sensor metadata") + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() + } + +class LegacySensorReading(BaseModel): + """Legacy sensor reading format for backward compatibility""" + sensor_id: str = Field(..., alias="sensorId") + timestamp: int + value: float + unit: str + created_at: datetime = Field(default_factory=datetime.utcnow) + + class Config: + allow_population_by_field_name = True + +class SensorMetadata(BaseModel): + """Sensor configuration and metadata""" + sensor_id: str = Field(..., description="Unique sensor identifier") + name: str = Field(..., description="Human-readable sensor name") + sensor_type: SensorType = Field(..., description="Type of sensor") + room: Optional[str] = Field(None, description="Room assignment") + status: SensorStatus = Field(default=SensorStatus.OFFLINE, description="Current sensor status") + + # Physical location and installation details + location: Optional[str] = Field(None, description="Physical location description") + floor: Optional[str] = Field(None, description="Floor level") + building: Optional[str] = Field(None, description="Building identifier") + + # Technical specifications + model: Optional[str] = Field(None, description="Sensor model") + manufacturer: Optional[str] = Field(None, description="Sensor manufacturer") + firmware_version: Optional[str] = Field(None, description="Firmware version") + hardware_version: Optional[str] = Field(None, description="Hardware version") + + # Network and connectivity + ip_address: Optional[str] = Field(None, description="IP address if network connected") + mac_address: Optional[str] = Field(None, description="MAC address") + connection_type: Optional[str] = Field(None, description="Connection type (wifi, ethernet, zigbee, etc.)") + + # Power and maintenance + battery_level: Optional[float] = Field(None, description="Battery level percentage") + last_maintenance: Optional[datetime] = Field(None, description="Last maintenance date") + next_maintenance: Optional[datetime] = Field(None, description="Next scheduled maintenance") + + # Operational settings + sampling_rate: Optional[int] = Field(None, description="Data sampling rate in seconds") + calibration_date: Optional[datetime] = Field(None, description="Last calibration date") + + # Capabilities + monitoring_capabilities: List[str] = Field(default_factory=list, description="List of monitoring capabilities") + control_capabilities: List[str] = Field(default_factory=list, description="List of control capabilities") + + # Timestamps + installed_at: Optional[datetime] = Field(None, description="Installation timestamp") + last_seen: Optional[datetime] = Field(None, description="Last communication timestamp") + created_at: datetime = Field(default_factory=datetime.utcnow, description="Record creation timestamp") + updated_at: datetime = Field(default_factory=datetime.utcnow, description="Record update timestamp") + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() if v else None + } + +class RoomMetrics(BaseModel): + """Aggregated room-level metrics""" + room: str = Field(..., description="Room identifier") + timestamp: int = Field(..., description="Metrics calculation timestamp") + created_at: datetime = Field(default_factory=datetime.utcnow, description="Record creation timestamp") + + # 
Sensor inventory + sensor_count: int = Field(0, description="Total number of sensors in room") + active_sensors: List[str] = Field(default_factory=list, description="List of active sensor IDs") + sensor_types: List[SensorType] = Field(default_factory=list, description="Types of sensors present") + + # Energy metrics + energy: Optional[Dict[str, Any]] = Field(None, description="Energy consumption metrics") + # Format: {"current": float, "total": float, "average": float, "peak": float, "unit": str} + + # Environmental metrics + co2: Optional[Dict[str, Any]] = Field(None, description="CO2 level metrics") + # Format: {"current": float, "average": float, "max": float, "min": float, "status": CO2Status, "unit": str} + + temperature: Optional[Dict[str, Any]] = Field(None, description="Temperature metrics") + # Format: {"current": float, "average": float, "max": float, "min": float, "unit": str} + + humidity: Optional[Dict[str, Any]] = Field(None, description="Humidity metrics") + # Format: {"current": float, "average": float, "max": float, "min": float, "unit": str} + + # Occupancy and usage + occupancy_estimate: OccupancyLevel = Field(default=OccupancyLevel.LOW, description="Estimated occupancy level") + motion_detected: bool = Field(default=False, description="Recent motion detection status") + + # Time-based metrics + last_activity: Optional[datetime] = Field(None, description="Last detected activity timestamp") + daily_usage_hours: Optional[float] = Field(None, description="Estimated daily usage in hours") + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() if v else None + } + +class SystemEvent(BaseModel): + """System events and alerts""" + event_id: str = Field(..., description="Unique event identifier") + event_type: str = Field(..., description="Type of event") + severity: Literal["info", "warning", "error", "critical"] = Field(..., description="Event severity") + timestamp: int = Field(..., description="Event timestamp") + created_at: datetime = Field(default_factory=datetime.utcnow, description="Record creation timestamp") + + # Event details + title: str = Field(..., description="Event title") + description: str = Field(..., description="Event description") + source: Optional[str] = Field(None, description="Event source (sensor_id, system component, etc.)") + + # Context + sensor_id: Optional[str] = Field(None, description="Related sensor ID") + room: Optional[str] = Field(None, description="Related room") + + # Event data + data: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Additional event data") + + # Status tracking + acknowledged: bool = Field(default=False, description="Whether event has been acknowledged") + resolved: bool = Field(default=False, description="Whether event has been resolved") + acknowledged_by: Optional[str] = Field(None, description="Who acknowledged the event") + resolved_by: Optional[str] = Field(None, description="Who resolved the event") + acknowledged_at: Optional[datetime] = Field(None, description="Acknowledgment timestamp") + resolved_at: Optional[datetime] = Field(None, description="Resolution timestamp") + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() if v else None + } + +class DataQuery(BaseModel): + """Data query parameters for historical data retrieval""" + sensor_ids: Optional[List[str]] = Field(None, description="Filter by sensor IDs") + rooms: Optional[List[str]] = Field(None, description="Filter by rooms") + sensor_types: Optional[List[SensorType]] = Field(None, 
description="Filter by sensor types") + + # Time range + start_time: Optional[int] = Field(None, description="Start timestamp (Unix)") + end_time: Optional[int] = Field(None, description="End timestamp (Unix)") + + # Aggregation + aggregate: Optional[str] = Field(None, description="Aggregation method (avg, sum, min, max)") + interval: Optional[str] = Field(None, description="Aggregation interval (1m, 5m, 1h, 1d)") + + # Pagination + limit: int = Field(default=100, description="Maximum number of records to return") + offset: int = Field(default=0, description="Number of records to skip") + + # Sorting + sort_by: str = Field(default="timestamp", description="Field to sort by") + sort_order: Literal["asc", "desc"] = Field(default="desc", description="Sort order") + +class DataResponse(BaseModel): + """Response model for data queries""" + data: List[Dict[str, Any]] = Field(default_factory=list, description="Query results") + total_count: int = Field(0, description="Total number of matching records") + query: DataQuery = Field(..., description="Original query parameters") + execution_time_ms: float = Field(..., description="Query execution time in milliseconds") + +class HealthCheck(BaseModel): + """Health check response model""" + status: str = Field(..., description="Overall system status") + timestamp: datetime = Field(default_factory=datetime.utcnow) + + # Database status + mongodb_connected: bool = Field(..., description="MongoDB connection status") + redis_connected: bool = Field(..., description="Redis connection status") + + # Data statistics + total_sensors: int = Field(0, description="Total number of registered sensors") + active_sensors: int = Field(0, description="Number of active sensors") + total_readings: int = Field(0, description="Total sensor readings in database") + + # System metrics + uptime_seconds: float = Field(..., description="System uptime in seconds") + memory_usage_mb: Optional[float] = Field(None, description="Memory usage in MB") + + class Config: + json_encoders = { + datetime: lambda v: v.isoformat() + } \ No newline at end of file diff --git a/persistence.py b/persistence.py new file mode 100644 index 0000000..11ec778 --- /dev/null +++ b/persistence.py @@ -0,0 +1,448 @@ +import json +import asyncio +from datetime import datetime, timedelta +from typing import Dict, Any, List, Optional +import logging +from pymongo.errors import DuplicateKeyError +import uuid + +from database import get_database, redis_manager +from models import ( + SensorReading, LegacySensorReading, SensorMetadata, RoomMetrics, + SystemEvent, SensorType, SensorStatus, CO2Status, OccupancyLevel +) + +logger = logging.getLogger(__name__) + +class DataPersistenceService: + """Service for persisting sensor data to MongoDB and managing Redis cache""" + + def __init__(self): + self.db = None + self.redis = redis_manager + + async def initialize(self): + """Initialize the persistence service""" + self.db = await get_database() + await self.redis.connect() + logger.info("Data persistence service initialized") + + async def process_sensor_message(self, message_data: str) -> bool: + """Process incoming sensor message and persist data""" + try: + # Parse the message + data = json.loads(message_data) + logger.debug(f"Processing sensor message: {data}") + + # Determine message format and convert to standard format + if self._is_legacy_format(data): + sensor_reading = await self._convert_legacy_data(data) + else: + sensor_reading = SensorReading(**data) + + # Store in MongoDB + await 
self._store_sensor_reading(sensor_reading) + + # Update Redis cache for real-time access + await self._update_redis_cache(sensor_reading) + + # Update sensor metadata + await self._update_sensor_metadata(sensor_reading) + + # Calculate and store room metrics + await self._update_room_metrics(sensor_reading) + + # Check for alerts and anomalies + await self._check_alerts(sensor_reading) + + return True + + except Exception as e: + logger.error(f"Error processing sensor message: {e}") + # Log the error event + await self._log_system_event( + event_type="data_processing_error", + severity="error", + title="Sensor Data Processing Failed", + description=f"Failed to process sensor message: {str(e)}", + data={"raw_message": message_data} + ) + return False + + def _is_legacy_format(self, data: dict) -> bool: + """Check if data is in legacy format""" + legacy_keys = {"sensorId", "timestamp", "value", "unit"} + return legacy_keys.issubset(data.keys()) and "energy" not in data + + async def _convert_legacy_data(self, data: dict) -> SensorReading: + """Convert legacy format to new sensor reading format""" + legacy_reading = LegacySensorReading(**data) + + return SensorReading( + sensor_id=legacy_reading.sensor_id, + sensor_type=SensorType.ENERGY, # Assume legacy data is energy + timestamp=legacy_reading.timestamp, + created_at=legacy_reading.created_at, + energy={ + "value": legacy_reading.value, + "unit": legacy_reading.unit + } + ) + + async def _store_sensor_reading(self, reading: SensorReading): + """Store sensor reading in MongoDB""" + try: + reading_dict = reading.dict() + + # Add document ID for deduplication + reading_dict["_id"] = f"{reading.sensor_id}_{reading.timestamp}" + + await self.db.sensor_readings.insert_one(reading_dict) + logger.debug(f"Stored sensor reading for {reading.sensor_id}") + + except DuplicateKeyError: + logger.debug(f"Duplicate reading ignored for {reading.sensor_id} at {reading.timestamp}") + except Exception as e: + logger.error(f"Error storing sensor reading: {e}") + raise + + async def _update_redis_cache(self, reading: SensorReading): + """Update Redis cache with latest sensor data""" + try: + # Store latest reading for real-time access + await self.redis.set_sensor_data( + reading.sensor_id, + reading.dict(), + expire_time=3600 # 1 hour expiration + ) + + # Store sensor status + status_key = f"sensor:status:{reading.sensor_id}" + await self.redis.redis_client.setex( + status_key, + 1800, # 30 minutes + json.dumps({ + "status": "online", + "last_seen": reading.timestamp, + "room": reading.room + }) + ) + + except Exception as e: + logger.error(f"Error updating Redis cache: {e}") + + async def _update_sensor_metadata(self, reading: SensorReading): + """Update or create sensor metadata""" + try: + # Check if sensor metadata exists + existing = await self.db.sensor_metadata.find_one({"sensor_id": reading.sensor_id}) + + if existing: + # Update existing metadata + await self.db.sensor_metadata.update_one( + {"sensor_id": reading.sensor_id}, + { + "$set": { + "last_seen": datetime.utcnow(), + "status": SensorStatus.ONLINE.value, + "updated_at": datetime.utcnow() + }, + "$addToSet": { + "monitoring_capabilities": reading.sensor_type.value + } + } + ) + else: + # Create new sensor metadata + metadata = SensorMetadata( + sensor_id=reading.sensor_id, + name=f"Sensor {reading.sensor_id}", + sensor_type=reading.sensor_type, + room=reading.room, + status=SensorStatus.ONLINE, + last_seen=datetime.utcnow(), + monitoring_capabilities=[reading.sensor_type.value] + ) + + await 
self.db.sensor_metadata.insert_one(metadata.dict()) + logger.info(f"Created metadata for new sensor: {reading.sensor_id}") + + except Exception as e: + logger.error(f"Error updating sensor metadata: {e}") + + async def _update_room_metrics(self, reading: SensorReading): + """Calculate and store room-level metrics""" + if not reading.room: + return + + try: + # Get recent readings for this room (last 5 minutes) + recent_time = datetime.utcnow() - timedelta(minutes=5) + + # Query recent readings for the room + cursor = self.db.sensor_readings.find({ + "room": reading.room, + "created_at": {"$gte": recent_time} + }) + + recent_readings = await cursor.to_list(length=None) + + if not recent_readings: + return + + # Calculate aggregated metrics + metrics = await self._calculate_room_metrics(reading.room, recent_readings) + + # Store in MongoDB + await self.db.room_metrics.insert_one(metrics.dict()) + + # Cache in Redis + await self.redis.set_room_metrics(reading.room, metrics.dict()) + + logger.debug(f"Updated room metrics for {reading.room}") + + except Exception as e: + logger.error(f"Error updating room metrics: {e}") + + async def _calculate_room_metrics(self, room: str, readings: List[Dict]) -> RoomMetrics: + """Calculate aggregated metrics for a room""" + + # Group readings by sensor + sensors_data = {} + for reading in readings: + sensor_id = reading["sensor_id"] + if sensor_id not in sensors_data: + sensors_data[sensor_id] = [] + sensors_data[sensor_id].append(reading) + + # Initialize metrics + energy_values = [] + co2_values = [] + temperature_values = [] + humidity_values = [] + motion_detected = False + + # Extract values from readings + for sensor_readings in sensors_data.values(): + for reading in sensor_readings: + if reading.get("energy"): + energy_values.append(reading["energy"]["value"]) + if reading.get("co2"): + co2_values.append(reading["co2"]["value"]) + if reading.get("temperature"): + temperature_values.append(reading["temperature"]["value"]) + if reading.get("humidity"): + humidity_values.append(reading["humidity"]["value"]) + if reading.get("motion") and reading["motion"].get("value") == "Detected": + motion_detected = True + + # Calculate aggregated metrics + metrics = RoomMetrics( + room=room, + timestamp=int(datetime.utcnow().timestamp()), + sensor_count=len(sensors_data), + active_sensors=list(sensors_data.keys()), + sensor_types=list(set(reading.get("sensor_type") for reading in readings if reading.get("sensor_type"))), + motion_detected=motion_detected + ) + + # Energy metrics + if energy_values: + metrics.energy = { + "current": sum(energy_values), + "average": sum(energy_values) / len(energy_values), + "total": sum(energy_values), + "peak": max(energy_values), + "unit": "kWh" + } + + # CO2 metrics + if co2_values: + avg_co2 = sum(co2_values) / len(co2_values) + metrics.co2 = { + "current": avg_co2, + "average": avg_co2, + "max": max(co2_values), + "min": min(co2_values), + "status": self._get_co2_status(avg_co2).value, + "unit": "ppm" + } + + # Set occupancy estimate based on CO2 + metrics.occupancy_estimate = self._estimate_occupancy(avg_co2) + + # Temperature metrics + if temperature_values: + metrics.temperature = { + "current": sum(temperature_values) / len(temperature_values), + "average": sum(temperature_values) / len(temperature_values), + "max": max(temperature_values), + "min": min(temperature_values), + "unit": "°C" + } + + # Humidity metrics + if humidity_values: + metrics.humidity = { + "current": sum(humidity_values) / len(humidity_values), + 
"average": sum(humidity_values) / len(humidity_values), + "max": max(humidity_values), + "min": min(humidity_values), + "unit": "%" + } + + return metrics + + def _get_co2_status(self, co2_level: float) -> CO2Status: + """Determine CO2 status based on level""" + if co2_level < 400: + return CO2Status.GOOD + elif co2_level < 1000: + return CO2Status.MODERATE + elif co2_level < 5000: + return CO2Status.POOR + else: + return CO2Status.CRITICAL + + def _estimate_occupancy(self, co2_level: float) -> OccupancyLevel: + """Estimate occupancy level based on CO2""" + if co2_level < 600: + return OccupancyLevel.LOW + elif co2_level < 1200: + return OccupancyLevel.MEDIUM + else: + return OccupancyLevel.HIGH + + async def _check_alerts(self, reading: SensorReading): + """Check for alert conditions and create system events""" + alerts = [] + + # CO2 level alerts + if reading.co2: + co2_level = reading.co2.get("value", 0) + if co2_level > 5000: + alerts.append({ + "event_type": "co2_critical", + "severity": "critical", + "title": "Critical CO2 Level", + "description": f"CO2 level ({co2_level} ppm) exceeds critical threshold in {reading.room or 'unknown room'}" + }) + elif co2_level > 1000: + alerts.append({ + "event_type": "co2_high", + "severity": "warning", + "title": "High CO2 Level", + "description": f"CO2 level ({co2_level} ppm) is above recommended levels in {reading.room or 'unknown room'}" + }) + + # Energy consumption alerts + if reading.energy: + energy_value = reading.energy.get("value", 0) + if energy_value > 10: # Threshold for high energy consumption + alerts.append({ + "event_type": "energy_high", + "severity": "warning", + "title": "High Energy Consumption", + "description": f"Energy consumption ({energy_value} kWh) is unusually high for sensor {reading.sensor_id}" + }) + + # Temperature alerts + if reading.temperature: + temp_value = reading.temperature.get("value", 0) + if temp_value > 30 or temp_value < 15: + alerts.append({ + "event_type": "temperature_extreme", + "severity": "warning", + "title": "Extreme Temperature", + "description": f"Temperature ({temp_value}°C) is outside normal range in {reading.room or 'unknown room'}" + }) + + # Create system events for alerts + for alert in alerts: + await self._log_system_event( + sensor_id=reading.sensor_id, + room=reading.room, + **alert, + data=reading.dict() + ) + + async def _log_system_event(self, event_type: str, severity: str, title: str, description: str, + sensor_id: str = None, room: str = None, source: str = None, data: Dict = None): + """Log a system event""" + try: + event = SystemEvent( + event_id=str(uuid.uuid4()), + event_type=event_type, + severity=severity, + timestamp=int(datetime.utcnow().timestamp()), + title=title, + description=description, + sensor_id=sensor_id, + room=room, + source=source or "data_persistence_service", + data=data or {} + ) + + await self.db.system_events.insert_one(event.dict()) + logger.info(f"System event logged: {event_type} - {title}") + + except Exception as e: + logger.error(f"Error logging system event: {e}") + + async def get_recent_readings(self, sensor_id: str = None, room: str = None, + limit: int = 100, minutes: int = 60) -> List[Dict]: + """Get recent sensor readings""" + try: + # Build query + query = { + "created_at": {"$gte": datetime.utcnow() - timedelta(minutes=minutes)} + } + + if sensor_id: + query["sensor_id"] = sensor_id + if room: + query["room"] = room + + cursor = self.db.sensor_readings.find(query).sort("created_at", -1).limit(limit) + readings = await 
cursor.to_list(length=limit) + + return readings + + except Exception as e: + logger.error(f"Error getting recent readings: {e}") + return [] + + async def get_sensor_statistics(self) -> Dict[str, Any]: + """Get overall sensor statistics""" + try: + stats = {} + + # Total readings count + stats["total_readings"] = await self.db.sensor_readings.count_documents({}) + + # Active sensors (sensors that sent data in last 24 hours) + recent_time = datetime.utcnow() - timedelta(hours=24) + active_sensors = await self.db.sensor_readings.distinct("sensor_id", { + "created_at": {"$gte": recent_time} + }) + stats["active_sensors"] = len(active_sensors) + + # Total registered sensors + stats["total_sensors"] = await self.db.sensor_metadata.count_documents({}) + + # Readings in last 24 hours + stats["recent_readings"] = await self.db.sensor_readings.count_documents({ + "created_at": {"$gte": recent_time} + }) + + # Room count + stats["total_rooms"] = len(await self.db.sensor_readings.distinct("room", {"room": {"$ne": None}})) + + return stats + + except Exception as e: + logger.error(f"Error getting sensor statistics: {e}") + return {} + +# Global persistence service instance +persistence_service = DataPersistenceService() \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..16857a3 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,10 @@ +fastapi +uvicorn[standard] +redis +websockets +pymongo +motor +python-dotenv +pandas +numpy +pydantic \ No newline at end of file diff --git a/services/__init__.py b/services/__init__.py new file mode 100644 index 0000000..71d03af --- /dev/null +++ b/services/__init__.py @@ -0,0 +1 @@ +# Services package for dashboard backend \ No newline at end of file diff --git a/services/token_service.py b/services/token_service.py new file mode 100644 index 0000000..55e6872 --- /dev/null +++ b/services/token_service.py @@ -0,0 +1,174 @@ +""" +Token management service for authentication and resource access control. +Based on the tiocps JWT token implementation with resource-based permissions. 
+""" + +import jwt +import uuid +from datetime import datetime, timedelta +from typing import Dict, List, Optional, Any +from pydantic import BaseModel +from motor.motor_asyncio import AsyncIOMotorDatabase + +class TokenPayload(BaseModel): + """Token payload structure""" + name: str + list_of_resources: List[str] + data_aggregation: bool = False + time_aggregation: bool = False + embargo: int = 0 # embargo period in seconds + exp: int # expiration timestamp + +class TokenRecord(BaseModel): + """Token database record""" + token: str + datetime: datetime + active: bool = True + created_at: datetime + updated_at: datetime + +class TokenService: + """Service for managing JWT tokens and authentication""" + + def __init__(self, db: AsyncIOMotorDatabase, secret_key: str = "dashboard-secret-key"): + self.db = db + self.secret_key = secret_key + self.tokens_collection = db.tokens + + def generate_token(self, name: str, list_of_resources: List[str], + data_aggregation: bool = False, time_aggregation: bool = False, + embargo: int = 0, exp_hours: int = 24) -> str: + """Generate a new JWT token with specified permissions""" + + # Calculate expiration time + exp_timestamp = int((datetime.utcnow() + timedelta(hours=exp_hours)).timestamp()) + + # Create token payload + payload = { + "name": name, + "list_of_resources": list_of_resources, + "data_aggregation": data_aggregation, + "time_aggregation": time_aggregation, + "embargo": embargo, + "exp": exp_timestamp, + "iat": int(datetime.utcnow().timestamp()), + "jti": str(uuid.uuid4()) # unique token ID + } + + # Generate JWT token + token = jwt.encode(payload, self.secret_key, algorithm="HS256") + return token + + def decode_token(self, token: str) -> Optional[Dict[str, Any]]: + """Decode and verify JWT token""" + try: + payload = jwt.decode(token, self.secret_key, algorithms=["HS256"]) + return payload + except jwt.ExpiredSignatureError: + return {"error": "Token has expired"} + except jwt.InvalidTokenError: + return {"error": "Invalid token"} + + async def insert_token(self, token: str) -> Dict[str, Any]: + """Save token to database""" + now = datetime.utcnow() + + # Decode token to verify it's valid + decoded = self.decode_token(token) + if decoded and "error" not in decoded: + token_record = { + "token": token, + "datetime": now, + "active": True, + "created_at": now, + "updated_at": now, + "name": decoded.get("name", ""), + "resources": decoded.get("list_of_resources", []), + "expires_at": datetime.fromtimestamp(decoded.get("exp", 0)) + } + + await self.tokens_collection.insert_one(token_record) + return { + "token": token, + "datetime": now.isoformat(), + "active": True + } + else: + raise ValueError("Invalid token cannot be saved") + + async def revoke_token(self, token: str) -> Dict[str, Any]: + """Revoke a token by marking it as inactive""" + now = datetime.utcnow() + + result = await self.tokens_collection.update_one( + {"token": token}, + { + "$set": { + "active": False, + "updated_at": now, + "revoked_at": now + } + } + ) + + if result.matched_count > 0: + return { + "token": token, + "datetime": now.isoformat(), + "active": False + } + else: + raise ValueError("Token not found") + + async def get_tokens(self) -> List[Dict[str, Any]]: + """Get all tokens from database""" + cursor = self.tokens_collection.find({}) + tokens = [] + + async for token_record in cursor: + # Convert ObjectId to string and datetime to ISO format + token_record["_id"] = str(token_record["_id"]) + for field in ["datetime", "created_at", "updated_at", "expires_at", 
"revoked_at"]: + if field in token_record and token_record[field]: + token_record[field] = token_record[field].isoformat() + + tokens.append(token_record) + + return tokens + + async def is_token_valid(self, token: str) -> bool: + """Check if token is valid and active""" + # Check if token exists and is active in database + token_record = await self.tokens_collection.find_one({ + "token": token, + "active": True + }) + + if not token_record: + return False + + # Verify JWT signature and expiration + decoded = self.decode_token(token) + return decoded is not None and "error" not in decoded + + async def get_token_permissions(self, token: str) -> Optional[Dict[str, Any]]: + """Get permissions for a valid token""" + if await self.is_token_valid(token): + return self.decode_token(token) + return None + + async def cleanup_expired_tokens(self): + """Remove expired tokens from database""" + now = datetime.utcnow() + + # Find tokens that have expired + expired_cursor = self.tokens_collection.find({ + "expires_at": {"$lt": now} + }) + + expired_count = 0 + async for token_record in expired_cursor: + await self.tokens_collection.delete_one({"_id": token_record["_id"]}) + expired_count += 1 + + return expired_count \ No newline at end of file diff --git a/test_structure.py b/test_structure.py new file mode 100644 index 0000000..27e4f76 --- /dev/null +++ b/test_structure.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 +""" +Test script to validate the layered architecture structure +This script checks the structure without requiring all dependencies to be installed +""" +import os +import sys +from pathlib import Path + +def check_file_structure(): + """Check if all expected files exist in the layered structure""" + expected_structure = { + "layers/__init__.py": "Layers package init", + "layers/infrastructure/__init__.py": "Infrastructure layer init", + "layers/infrastructure/database_connection.py": "Database connection management", + "layers/infrastructure/redis_connection.py": "Redis connection management", + "layers/infrastructure/repositories.py": "Data access layer", + "layers/business/__init__.py": "Business layer init", + "layers/business/sensor_service.py": "Sensor business logic", + "layers/business/room_service.py": "Room business logic", + "layers/business/analytics_service.py": "Analytics business logic", + "layers/business/cleanup_service.py": "Cleanup business logic", + "layers/presentation/__init__.py": "Presentation layer init", + "layers/presentation/websocket_handler.py": "WebSocket management", + "layers/presentation/redis_subscriber.py": "Redis pub/sub handling", + "layers/presentation/api_routes.py": "API route definitions", + "main_layered.py": "Main application with layered architecture", + "models.py": "Data models (existing)", + } + + print("🔍 Checking layered architecture file structure...") + print("=" * 60) + + all_files_exist = True + + for file_path, description in expected_structure.items(): + full_path = Path(file_path) + + if full_path.exists(): + size = full_path.stat().st_size + print(f"✅ {file_path:<40} ({size:,} bytes) - {description}") + else: + print(f"❌ {file_path:<40} MISSING - {description}") + all_files_exist = False + + print("=" * 60) + + if all_files_exist: + print("🎉 All files in layered structure exist!") + return True + else: + print("❌ Some files are missing from the layered structure") + return False + +def check_import_structure(): + """Check the logical structure of imports (without actually importing)""" + print("\n📋 Analyzing import 
dependencies...") + print("=" * 60) + + # Define expected dependencies by layer + layer_dependencies = { + "Infrastructure Layer": { + "files": [ + "layers/infrastructure/database_connection.py", + "layers/infrastructure/redis_connection.py", + "layers/infrastructure/repositories.py" + ], + "can_import_from": ["models", "external libraries"], + "should_not_import_from": ["business", "presentation"] + }, + "Business Layer": { + "files": [ + "layers/business/sensor_service.py", + "layers/business/room_service.py", + "layers/business/analytics_service.py", + "layers/business/cleanup_service.py" + ], + "can_import_from": ["models", "infrastructure", "external libraries"], + "should_not_import_from": ["presentation"] + }, + "Presentation Layer": { + "files": [ + "layers/presentation/websocket_handler.py", + "layers/presentation/redis_subscriber.py", + "layers/presentation/api_routes.py" + ], + "can_import_from": ["models", "business", "infrastructure", "external libraries"], + "should_not_import_from": [] + } + } + + violations = [] + + for layer_name, layer_info in layer_dependencies.items(): + print(f"\n{layer_name}:") + + for file_path in layer_info["files"]: + if Path(file_path).exists(): + try: + with open(file_path, 'r') as f: + content = f.read() + + # Check for violations + for forbidden in layer_info["should_not_import_from"]: + if forbidden == "business" and "from ..business" in content: + violations.append(f"{file_path} imports from business layer (violation)") + elif forbidden == "presentation" and "from ..presentation" in content: + violations.append(f"{file_path} imports from presentation layer (violation)") + + print(f" ✅ {Path(file_path).name}") + + except Exception as e: + print(f" ⚠️ {Path(file_path).name} - Could not analyze: {e}") + + if violations: + print(f"\n❌ Found {len(violations)} layering violations:") + for violation in violations: + print(f" - {violation}") + return False + else: + print("\n✅ No layering violations detected!") + return True + +def analyze_code_separation(): + """Analyze how well the code has been separated by responsibility""" + print("\n📊 Analyzing code separation...") + print("=" * 60) + + analysis = { + "Infrastructure Layer": { + "responsibilities": ["Database connections", "Redis connections", "Data repositories"], + "file_count": 0, + "total_lines": 0 + }, + "Business Layer": { + "responsibilities": ["Business logic", "Data processing", "Analytics", "Cleanup"], + "file_count": 0, + "total_lines": 0 + }, + "Presentation Layer": { + "responsibilities": ["HTTP endpoints", "WebSocket handling", "Request/Response"], + "file_count": 0, + "total_lines": 0 + } + } + + layer_paths = { + "Infrastructure Layer": "layers/infrastructure/", + "Business Layer": "layers/business/", + "Presentation Layer": "layers/presentation/" + } + + for layer_name, layer_path in layer_paths.items(): + layer_dir = Path(layer_path) + if layer_dir.exists(): + py_files = list(layer_dir.glob("*.py")) + py_files = [f for f in py_files if f.name != "__init__.py"] + + total_lines = 0 + for py_file in py_files: + try: + with open(py_file, 'r') as f: + lines = len(f.readlines()) + total_lines += lines + except: + pass + + analysis[layer_name]["file_count"] = len(py_files) + analysis[layer_name]["total_lines"] = total_lines + + for layer_name, info in analysis.items(): + print(f"\n{layer_name}:") + print(f" Files: {info['file_count']}") + print(f" Lines of Code: {info['total_lines']:,}") + print(f" Responsibilities: {', '.join(info['responsibilities'])}") + + total_files = 
sum(info["file_count"] for info in analysis.values()) + total_lines = sum(info["total_lines"] for info in analysis.values()) + + print(f"\n📈 Total Separation Metrics:") + print(f" Total Files: {total_files}") + print(f" Total Lines: {total_lines:,}") + print(f" Layers: 3 (Infrastructure, Business, Presentation)") + + return True + +def main(): + """Main test function""" + print("🏗️ LAYERED ARCHITECTURE VALIDATION") + print("=" * 60) + + success = True + + # Check file structure + if not check_file_structure(): + success = False + + # Check import structure + if not check_import_structure(): + success = False + + # Analyze code separation + if not analyze_code_separation(): + success = False + + print("\n" + "=" * 60) + if success: + print("🎉 VALIDATION SUCCESSFUL - Layered architecture is properly structured!") + print("\n✨ Key Benefits Achieved:") + print(" • Clear separation of concerns") + print(" • Infrastructure isolated from business logic") + print(" • Business logic separated from presentation") + print(" • Easy to test individual layers") + print(" • Maintainable and scalable structure") + else: + print("❌ VALIDATION FAILED - Issues found in layered architecture") + + return success + +if __name__ == "__main__": + sys.exit(0 if main() else 1) \ No newline at end of file
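
For reference, a minimal client sketch against the token-service endpoints added above (`/tokens/generate`, `/tokens/save`, `/tokens/validate`, `/tokens/revoke`). It is an illustrative example only, not part of the commit: it assumes the service is reachable at `http://localhost:8001` (the port used in the service's `main.py`) and uses `httpx`, which is not listed in the service's `requirements.txt`. Note that the bare `str` parameters (such as `token`) in those route signatures are sent as query parameters, which is why `params=` is used below.

```python
"""Illustrative client sketch for the token-service endpoints (assumptions noted above)."""
import asyncio

import httpx

BASE_URL = "http://localhost:8001"  # assumed local deployment of the token service


async def demo() -> None:
    async with httpx.AsyncClient(base_url=BASE_URL) as client:
        # Generate a token for two example resources, valid for one hour
        resp = await client.post(
            "/tokens/generate",
            json={
                "name": "dashboard-client",
                "list_of_resources": ["sensor_001", "room_a"],
                "data_aggregation": True,
                "time_aggregation": False,
                "embargo": 0,
                "exp_hours": 1,
            },
        )
        resp.raise_for_status()
        token = resp.json()["token"]

        # Persist the token, then validate it (token travels as a query parameter)
        await client.post("/tokens/save", params={"token": token})
        validation = await client.post("/tokens/validate", params={"token": token})
        print(validation.json())

        # Revoke the token once it is no longer needed
        await client.post("/tokens/revoke", params={"token": token})


if __name__ == "__main__":
    asyncio.run(demo())
```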