diff --git a/.claude/settings.local.json b/.claude/settings.local.json index 2f1d05b..f3231b6 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -11,7 +11,11 @@ "Bash(python -m py_compile:*)", "WebSearch", "WebFetch(domain:docs.waveshare.com)", - "WebFetch(domain:www.waveshare.com)" + "WebFetch(domain:www.waveshare.com)", + "Bash(npm view:*)", + "WebFetch(domain:raw.githubusercontent.com)", + "Bash(docker ps:*)", + "Bash(python3:*)" ] }, "outputStyle": "iseri", diff --git a/components/dashboard_ui/dashboard_ui.c b/components/dashboard_ui/dashboard_ui.c index e55db60..4ff38b9 100644 --- a/components/dashboard_ui/dashboard_ui.c +++ b/components/dashboard_ui/dashboard_ui.c @@ -369,9 +369,14 @@ void dashboard_ui_update_stats(const pi_stats_t *stats) /* Services table */ s_service_count = stats->service_count; for (int i = 0; i < stats->service_count && i < WS_MAX_SERVICES; i++) { + const char *tag; + switch (stats->services[i].status) { + case SVC_RUNNING: tag = "[RUN]"; break; + case SVC_WARNING: tag = "[WARN]"; break; + default: tag = "[STOP]"; break; + } lv_table_set_cell_value(tbl_services, i, 0, stats->services[i].name); - lv_table_set_cell_value(tbl_services, i, 1, - stats->services[i].running ? 
"[RUN]" : "[STOP]"); + lv_table_set_cell_value(tbl_services, i, 1, tag); } /* Clear unused rows */ for (int i = stats->service_count; i < WS_MAX_SERVICES; i++) { diff --git a/components/user_app/user_app.cpp b/components/user_app/user_app.cpp index 3246754..0bb2395 100644 --- a/components/user_app/user_app.cpp +++ b/components/user_app/user_app.cpp @@ -133,7 +133,7 @@ static void ws_data_cb(const pi_stats_t *stats) } for (int i = 0; i < stats->service_count; i++) { - if (!stats->services[i].running) { + if (stats->services[i].status != SVC_RUNNING) { alert_trigger(ALERT_SERVICE_DOWN); break; } diff --git a/components/ws_client/ws_client.c b/components/ws_client/ws_client.c index 242a591..9708f92 100644 --- a/components/ws_client/ws_client.c +++ b/components/ws_client/ws_client.c @@ -92,7 +92,13 @@ static void parse_stats_json(const char *data, int len) s_stats.services[i].name[WS_SERVICE_NAME_LEN - 1] = '\0'; } if (status && status->valuestring) { - s_stats.services[i].running = (strcmp(status->valuestring, "running") == 0); + if (strcmp(status->valuestring, "running") == 0) { + s_stats.services[i].status = SVC_RUNNING; + } else if (strcmp(status->valuestring, "warning") == 0) { + s_stats.services[i].status = SVC_WARNING; + } else { + s_stats.services[i].status = SVC_STOPPED; + } } } } diff --git a/components/ws_client/ws_client.h b/components/ws_client/ws_client.h index e713dab..322e9bc 100644 --- a/components/ws_client/ws_client.h +++ b/components/ws_client/ws_client.h @@ -8,7 +8,7 @@ extern "C" { #endif -#define WS_MAX_SERVICES 8 +#define WS_MAX_SERVICES 20 #define WS_SERVICE_NAME_LEN 16 typedef enum { @@ -18,9 +18,15 @@ typedef enum { WS_STATE_ERROR, } ws_state_t; +typedef enum { + SVC_STOPPED = 0, + SVC_WARNING, + SVC_RUNNING, +} ws_svc_status_t; + typedef struct { char name[WS_SERVICE_NAME_LEN]; - bool running; + ws_svc_status_t status; } ws_service_t; typedef struct { diff --git a/pi/stats_server.py b/pi/stats_server.py index e17e366..b15363b 100644 --- 
a/pi/stats_server.py +++ b/pi/stats_server.py @@ -7,7 +7,7 @@ same 2s push interval. Services remain mocked until systemd integration is added import asyncio import json -import random +import subprocess import time from datetime import datetime from pathlib import Path @@ -61,16 +61,41 @@ def _get_net_throughput() -> tuple[float, float]: return rx_kbps, tx_kbps -def _mock_services() -> list[dict]: - """Mocked service status — same logic as mock_server.py.""" - return [ - {"name": "docker", "status": random.choice(["running", "running", "running", "stopped"])}, - {"name": "pihole", "status": random.choice(["running", "running", "running", "stopped"])}, - {"name": "nginx", "status": random.choice(["running", "running", "stopped"])}, - {"name": "sshd", "status": "running"}, - {"name": "ph1", "status": "running"}, - {"name": "ph2", "status": "stopped"}, - ] +def _get_docker_services() -> list[dict]: + """Query Docker for real container statuses with ternary status model.""" + try: + result = subprocess.run( + ["docker", "ps", "-a", "--format", "{{.Names}}\t{{.Status}}"], + capture_output=True, text=True, timeout=5, + ) + except (subprocess.TimeoutExpired, FileNotFoundError, OSError): + return [] + + if result.returncode != 0: + return [] + + services = [] + for line in result.stdout.strip().splitlines(): + parts = line.split("\t", 1) + if len(parts) != 2: + continue + name, raw_status = parts + + # "Restarting (...)" never starts with "Up" -- check it first + if raw_status.startswith("Restarting"): + status = "warning" + elif raw_status.startswith("Up"): + status = "warning" if "unhealthy" in raw_status else "running" + else: + status = "stopped" + + services.append({"name": name, "status": status}) + + # Sort: warnings first, then stopped, then running (problems float to top) + order = {"warning": 0, "stopped": 1, "running": 2} + services.sort(key=lambda s: order.get(s["status"], 3)) + + return services def _local_time_fields() -> dict: @@ -100,7 +125,7 @@ def generate_stats() -> dict: "uptime_hrs": round((time.time() - psutil.boot_time()) / 3600, 
1), "net_rx_kbps": rx_kbps / 8, "net_tx_kbps": tx_kbps / 8, # kByte/s for humans - "services": _mock_services(), + "services": _get_docker_services(), "timestamp": int(time.time()), "local_time": _local_time_fields(), }