Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
105 changes: 89 additions & 16 deletions app/opensense.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
'''Module to get entries from OpenSenseMap API and get the average temperature'''
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
from datetime import datetime, timezone, timedelta
import re
import json
import requests
import redis
from app.config import create_redis_client, CACHE_TTL
Expand All @@ -26,16 +27,43 @@ def classify_temperature(average):

return "Unknown" # Default case

def _parse_partial_json_array(text: str):
"""Parse as many full objects as possible from a (possibly truncated) JSON array."""
decoder = json.JSONDecoder()
items = []
i = text.find('[')
if i == -1:
return items
i += 1 # past '['
n = len(text)
while i < n:
while i < n and text[i].isspace():
i += 1
if i >= n or text[i] == ']':
break
try:
obj, end = decoder.raw_decode(text, i)
except json.JSONDecodeError:
# truncated object at the end; stop with what we have
break
items.append(obj)
i = end
while i < n and text[i].isspace():
i += 1
if i < n and text[i] == ',':
i += 1
return items

def get_temperature():
'''Function to get the average temperature from OpenSenseMap API.'''
if REDIS_AVAILABLE:
try:
cached_data = redis_client.get("temperature_data")
if cached_data:
print("Using cached data from Redis.")
# Return cached data with default stats (since we don't have fresh stats)
cached_result = cached_data.decode('utf-8')
default_stats = {"total_sensors": 0, "null_count": 0}
return cached_data, default_stats
return cached_result, default_stats
except redis.RedisError as e:
print(f"Redis error: {e}. Proceeding without cache.")

Expand All @@ -49,41 +77,86 @@ def get_temperature():
"format": "json"
}

# Streaming configuration
max_mb = 0.5
max_bytes = int(max_mb * 1024 * 1024)

print('Getting data from OpenSenseMap API...')

try:
# Stream the response and count bytes
response = requests.get(
"https://api.opensensemap.org/boxes",
params=params,
timeout=(3, 10)
stream=True,
timeout=(180, 60)
)
print('Data retrieved successfully!')
response.raise_for_status()

downloaded = 0
chunks = []
truncated = False

for chunk in response.iter_content(chunk_size=64 * 1024): # 64 KB
if not chunk:
break
chunks.append(chunk)
downloaded += len(chunk)
if downloaded >= max_bytes:
print(f"Reached {max_mb} MB limit ({downloaded:,} bytes), stopping download")
truncated = True
response.close()
break

print(f'Bytes downloaded: {downloaded:,}')
print('Data retrieved successfully!' + (" (partial)" if truncated else ""))

# Build body and parse JSON
body = b"".join(chunks)
text = body.decode(response.encoding or "utf-8", errors="replace")

try:
data = json.loads(text)
except json.JSONDecodeError:
if not truncated:
print("Warning: Unexpected JSON parse error. Trying partial parse.")
data = _parse_partial_json_array(text)
if not data:
return "Error: Failed to parse JSON and no partial objects found\n", {
"total_sensors": 0,
"null_count": 0
}

except requests.Timeout:
print("API request timed out")
return "Error: API request timed out\n", {"total_sensors": 0, "null_count": 0}
except requests.RequestException as e:
print(f"API request failed: {e}")
return f"Error: API request failed - {e}\n", {"total_sensors": 0, "null_count": 0}

_sensor_stats["total_sensors"] = sum(
1 for line in response.text.splitlines() if re.search(r'^\s*"sensors"\s*:\s*\[', line)
)

res = [d.get('sensors') for d in response.json() if 'sensors' in d]
# Process the data (keeping the existing logic)
_sensor_stats["total_sensors"] = sum(1 for d in data if isinstance(d, dict) and "sensors" in d)
res = [d.get('sensors') for d in data if isinstance(d, dict) and 'sensors' in d]

temp_list = []
_sensor_stats["null_count"] = 0 # Initialize counter for null measurements
_sensor_stats["null_count"] = 0

for sensor_list in res:
for measure in sensor_list:
if measure.get('unit') == "°C" and 'lastMeasurement' in measure:
last_measurement = measure['lastMeasurement']
if last_measurement is not None and 'value' in last_measurement:
last_measurement_int = float(last_measurement['value'])
temp_list.append(last_measurement_int)
last = measure['lastMeasurement']
if last is not None and isinstance(last, dict) and 'value' in last:
try:
temp_list.append(float(last['value']))
except (TypeError, ValueError):
_sensor_stats["null_count"] += 1
else:
_sensor_stats["null_count"] += 1

average = sum(temp_list) / len(temp_list) if temp_list else 0
average = sum(temp_list) / len(temp_list) if temp_list else 0.0

if not temp_list:
print("Warning: No valid temperature readings found")

# Use the dictionary-based classification
status = classify_temperature(average)
Expand Down
67 changes: 67 additions & 0 deletions kustomize/base/cronjob.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
# CronJob that curls the hivebox /store endpoint every 5 minutes so the
# current temperature reading gets persisted by the app.
# NOTE(review): indentation below was reconstructed from a whitespace-mangled
# paste; nesting follows the standard batch/v1 CronJob schema — verify against
# the original file.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: temperature-storage-cronjob
  labels:
    app: hivebox-cronjob
spec:
  schedule: "*/5 * * * *"
  # Forbid overlapping runs: a slow /store call must finish before the next fires.
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          securityContext:
            fsGroup: 1000
            runAsNonRoot: true
            runAsUser: 1000
            runAsGroup: 1000
          initContainers:
            # Block until the hivebox service answers /version, so the main
            # container's single curl does not fail during a rollout.
            - name: wait-for-start
              image: curlimages/curl:8.15.0@sha256:4026b29997dc7c823b51c164b71e2b51e0fd95cce4601f78202c513d97da2922
              command: ["/bin/sh", "-c"]
              args:
                - |
                  set -eu
                  while true; do
                    if curl -sSf -m 3 http://hivebox-service/version >/dev/null; then
                      echo "Hivebox service is up!"
                      exit 0
                    else
                      echo "Waiting for Hivebox service to be available..."
                      sleep 5
                    fi
                  done
              securityContext:
                allowPrivilegeEscalation: false
                readOnlyRootFilesystem: true
                runAsNonRoot: true
                runAsGroup: 1000
                runAsUser: 1000
                capabilities:
                  drop: ["ALL"]
          containers:
            - name: temperature-storage
              image: curlimages/curl:8.15.0@sha256:4026b29997dc7c823b51c164b71e2b51e0fd95cce4601f78202c513d97da2922
              command: ["curl"]
              # -f: fail on HTTP errors so the Job reports failure;
              # -sS: quiet, but still print errors.
              args:
                - "-f"
                - "-s"
                - "-S"
                - "--max-time"
                - "60"
                - "http://hivebox-service/store"
              securityContext:
                allowPrivilegeEscalation: false
                readOnlyRootFilesystem: true
                runAsNonRoot: true
                runAsGroup: 1000
                runAsUser: 1000
                capabilities:
                  drop: ["ALL"]
              resources:
                limits: { memory: "32Mi", cpu: "50m" }
                requests: { memory: "16Mi", cpu: "10m" }
158 changes: 158 additions & 0 deletions kustomize/base/deployment.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
# Hivebox API deployment: two replicas serving on port 5000, wired to the
# redis and minio services via environment variables the app reads.
# NOTE(review): indentation below was reconstructed from a whitespace-mangled
# paste; verify nesting against the original file.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hivebox
  labels:
    app: hivebox
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hivebox
  template:
    metadata:
      labels:
        app: hivebox
    spec:
      securityContext:
        fsGroup: 1000
        runAsNonRoot: true
        runAsUser: 1000
        runAsGroup: 1000
      containers:
        - name: hivebox
          # NOTE(review): tag is :latest, but the digest pin makes the image
          # immutable in practice.
          image: ghcr.io/gabrielpalmar/hivebox:latest@sha256:c731999c3fd9b757e2fd816e3c9dcf645dba56647d8a921cb567ece3cf378dc3
          ports:
            - containerPort: 5000
          env:
            - name: REDIS_HOST
              value: redis-service
            - name: MINIO_HOST
              value: minio-service
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            capabilities:
              drop: ["ALL"]
          resources:
            limits: { memory: "512Mi", cpu: "500m" }
            requests: { memory: "256Mi", cpu: "250m" }
          readinessProbe:
            httpGet:
              path: /readyz
              port: 5000
            initialDelaySeconds: 30
            # NOTE(review): 480s timeout / 600s period is extreme for a
            # readiness probe — presumably /readyz performs the slow upstream
            # fetch. Confirm, and consider a cheaper readiness check.
            timeoutSeconds: 480
            failureThreshold: 3
            periodSeconds: 600
          livenessProbe:
            httpGet:
              path: /version
              port: 5000
            timeoutSeconds: 3
            failureThreshold: 3
            periodSeconds: 60
          volumeMounts:
            # Writable /tmp to satisfy readOnlyRootFilesystem.
            - name: tmp-volume
              mountPath: /tmp
      volumes:
        - name: tmp-volume
          emptyDir: {}
---
# Cache backend: a single Valkey (Redis-compatible) instance, exposed to the
# app as redis-service. Persistence is disabled (--save "" --appendonly no),
# so the cache is intentionally ephemeral.
# NOTE(review): indentation reconstructed from a whitespace-mangled paste;
# verify nesting against the original file.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  labels:
    app: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      securityContext:
        fsGroup: 1000
        runAsNonRoot: true
        runAsUser: 1000
        runAsGroup: 1000
      containers:
        - name: valkey
          image: valkey/valkey:8-alpine3.22@sha256:0d27f0bca0249f61d060029a6aaf2e16b2c417d68d02a508e1dfb763fa2948b4
          ports:
            - containerPort: 6379
          command: ["valkey-server"]
          args: ["--save", "", "--appendonly", "no"]
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsGroup: 1000
            runAsUser: 1000
            capabilities:
              drop: ["ALL"]
          resources:
            limits: { memory: "256Mi", cpu: "250m" }
            requests: { memory: "128Mi", cpu: "100m" }
          volumeMounts:
            # Writable /data to satisfy readOnlyRootFilesystem; emptyDir, so
            # data does not survive pod restarts (consistent with no-persist args).
            - name: valkey-data
              mountPath: /data
      volumes:
        - name: valkey-data
          emptyDir: {}
---
# Object storage: a single MinIO instance exposed to the app as minio-service.
# Backed by emptyDir, so stored objects do not survive pod restarts.
# NOTE(review): indentation reconstructed from a whitespace-mangled paste;
# verify nesting against the original file.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio
  labels:
    app: minio
spec:
  replicas: 1
  selector:
    matchLabels:
      app: minio
  template:
    metadata:
      labels:
        app: minio
    spec:
      securityContext:
        fsGroup: 1000
        runAsNonRoot: true
        runAsUser: 1000
        runAsGroup: 1000
      containers:
        - name: minio
          image: minio/minio:RELEASE.2025-07-23T15-54-02Z@sha256:d249d1fb6966de4d8ad26c04754b545205ff15a62e4fd19ebd0f26fa5baacbc0
          ports:
            - containerPort: 9000
          command: ["minio", "server", "/data"]
          env:
            # SECURITY NOTE(review): default credentials hardcoded in the
            # manifest. Move these to a Kubernetes Secret (envFrom/secretKeyRef)
            # before any non-local use.
            - name: MINIO_ROOT_USER
              value: minioadmin
            - name: MINIO_ROOT_PASSWORD
              value: minioadmin
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsGroup: 1000
            runAsUser: 1000
            capabilities:
              drop: ["ALL"]
          resources:
            limits: { memory: "256Mi", cpu: "250m" }
            requests: { memory: "128Mi", cpu: "100m" }
          volumeMounts:
            - name: minio-data
              mountPath: /data
      volumes:
        - name: minio-data
          emptyDir: {}
Loading