Compare commits

..

No commits in common. "Server" and "main" have entirely different histories.
Server ... main

2 changed files with 563 additions and 216 deletions

View File

@ -13,19 +13,18 @@ load_dotenv(override=True)
app = Flask(__name__)
# Load-time debug for camera config (safe to leave; only prints on startup)
print(
"[Surveillance] camera_ip_1=" + str(os.getenv("camera_ip_1")) +
" camera_ip_2=" + str(os.getenv("camera_ip_2"))
)
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
USERNAME = os.getenv("username")
PASSWORD = os.getenv("password")
# Dynamically find all camera_ip_N variables in environment
CAMERA_IPS = []
for i in range(1, 101): # Supports up to 100 cameras
ip = os.getenv(f"camera_ip_{i}")
if ip and ip.strip():
CAMERA_IPS.append(ip.strip())
CAMERA_IPS = [os.getenv(f"camera_ip_{i}") for i in range(1, 3)]
CAMERAS = [
{
"id": f"cam{i + 1}",
@ -35,6 +34,7 @@ CAMERAS = [
}
for i, ip in enumerate(CAMERA_IPS)
]
DEFAULT_CAMERA_ID = CAMERAS[0]["id"] if CAMERAS else "cam1"
PROXIMITY_PX = 200 # max pixel distance to consider two people "together"
GROUP_TIME_THRESHOLD = 20 # seconds before an alert fires
@ -44,264 +44,470 @@ YOLO_CONF = 0.25
INFERENCE_DEVICE = 0
# ---------------------------------------------------------------------------
# Shared state
# Shared state (protected by lock)
# ---------------------------------------------------------------------------
lock = threading.Lock()
state = {
"cameras": {}, # camera_id -> dict with frame, metadata, status
"grid_frame": None,
"frame": None,
"people_count": 0,
"groups": [],
"alert_active": False,
"alerts": [],
"total_people_count": 0,
"alert_active": False
"fps": 0,
"stream_status": "connecting",
"selected_camera_id": DEFAULT_CAMERA_ID,
"active_camera_id": DEFAULT_CAMERA_ID,
}
# Initialize camera states
for cam in CAMERAS:
state["cameras"][cam["id"]] = {
"frame": None,
"people_count": 0,
"groups": [],
"alert_active": False,
"fps": 0,
"status": "connecting",
"name": cam["name"]
}
# ---------------------------------------------------------------------------
# YOLO model
# YOLO model (downloaded on first run)
# ---------------------------------------------------------------------------
if not torch.cuda.is_available():
print("[WARNING] CUDA not detected. Using CPU (this will be slow!).")
device = "cpu"
else:
device = f"cuda:{INFERENCE_DEVICE}"
raise RuntimeError(
"CUDA GPU is required but not available. Install a CUDA-enabled PyTorch build "
"and verify NVIDIA drivers."
)
model = YOLO("yolo26m.pt")
model.to(f"cuda:{INFERENCE_DEVICE}")
# Group tracking
_group_trackers: dict = {}
_next_group_id = 0
_last_alert_time = 0.0
model = YOLO("person_detector_best.pt")
model.to(device)
# ---------------------------------------------------------------------------
# Detection Helpers
# Detection helpers
# ---------------------------------------------------------------------------
def _centroids(boxes):
"""Return list of (cx, cy) from xyxy boxes."""
return [((b[0] + b[2]) / 2, (b[1] + b[3]) / 2) for b in boxes]
def _find_groups(centroids, threshold):
    """BFS clustering of person centroids.

    Two people are connected when their centroids lie closer than
    *threshold* pixels; connectivity is transitive (a chain of close
    people forms one cluster).  Returns a list of index lists, keeping
    only clusters with at least MIN_GROUP_SIZE members.

    Fix: the diff rendering duplicated the old compact statements next to
    their reformatted replacements; this is the deduplicated version.
    """
    n = len(centroids)
    if n < MIN_GROUP_SIZE:
        return []
    visited = set()
    groups = []
    for i in range(n):
        if i in visited:
            continue
        cluster = [i]
        visited.add(i)
        queue = [i]
        while queue:
            # FIFO pop; list.pop(0) is O(n) but clusters are small in practice.
            cur = queue.pop(0)
            for j in range(n):
                if j in visited:
                    continue
                dx = centroids[cur][0] - centroids[j][0]
                dy = centroids[cur][1] - centroids[j][1]
                if (dx * dx + dy * dy) ** 0.5 < threshold:
                    cluster.append(j)
                    visited.add(j)
                    queue.append(j)
        if len(cluster) >= MIN_GROUP_SIZE:
            groups.append(cluster)
    return groups
def _group_centroid(centroids, indices):
xs = [centroids[i][0] for i in indices]
ys = [centroids[i][1] for i in indices]
return (sum(xs) / len(xs), sum(ys) / len(ys))
def _get_camera_by_id(camera_id):
    """Return the camera dict with a matching id.

    Falls back to the first configured camera when the id is unknown,
    or None when no cameras are configured at all.
    """
    match = next((c for c in CAMERAS if c["id"] == camera_id), None)
    if match is not None:
        return match
    return CAMERAS[0] if CAMERAS else None
def _reset_tracking():
global _group_trackers, _next_group_id, _last_alert_time
_group_trackers = {}
_next_group_id = 0
_last_alert_time = 0.0
# ---------------------------------------------------------------------------
# Stream Processing (One per camera)
# Main processing loop (runs in background thread)
# ---------------------------------------------------------------------------
def _process_stream():
    """Background worker: read the selected camera, run detection, update state.

    Reconstructed from the diff rendering, which interleaved the old
    one-thread-per-camera implementation with the new single-selected-camera
    implementation; this is the new version (no-arg, started once, switches
    streams when state["selected_camera_id"] changes).

    Per loop iteration:
      1. (Re)open the RTSP capture when the selected camera changed or the
         stream dropped; publish "connecting"/"live"/"error"/"reconnecting"
         via state["stream_status"].
      2. Run YOLO person detection, cluster centroids into groups, and match
         them against the persistent `_group_trackers` dict.
      3. Fire alerts for groups together longer than GROUP_TIME_THRESHOLD
         (rate-limited by ALERT_COOLDOWN), saving one annotated snapshot per
         alerting group under alerts/.
      4. Publish the annotated frame and metadata into the shared `state`
         under `lock`, then sleep to hold the loop to ~1 FPS.
    """
    global _next_group_id, _last_alert_time
    cap = None
    active_camera_id = None
    prev_time = time.time()
    while True:
        with lock:
            selected_camera_id = state["selected_camera_id"]
        selected_camera = _get_camera_by_id(selected_camera_id)
        if not selected_camera:
            with lock:
                state["stream_status"] = "error"
            time.sleep(2)
            continue

        # (Re)open the capture on first run or when the user switched cameras.
        if cap is None or active_camera_id != selected_camera["id"]:
            if cap is not None:
                cap.release()
            cap = cv2.VideoCapture(selected_camera["rtsp_url"])
            active_camera_id = selected_camera["id"]
            _reset_tracking()  # group history is per-camera
            with lock:
                state["active_camera_id"] = active_camera_id
            if not cap.isOpened():
                with lock:
                    state["stream_status"] = "error"
                print(f"[ERROR] Cannot open RTSP stream: {selected_camera['rtsp_url']}")
                time.sleep(2)
                cap = None
                continue
            with lock:
                state["stream_status"] = "live"

        ret, frame = cap.read()
        if not ret:
            # Stream dropped mid-read: try one reconnect, then loop.
            with lock:
                state["stream_status"] = "reconnecting"
            cap.release()
            time.sleep(2)
            cap = cv2.VideoCapture(selected_camera["rtsp_url"])
            if not cap.isOpened():
                with lock:
                    state["stream_status"] = "error"
                cap = None
            else:
                with lock:
                    state["stream_status"] = "live"
            continue

        now = time.time()
        fps = 1.0 / max(now - prev_time, 1e-6)
        prev_time = now

        # --- YOLO inference (person = class 0) ---
        results = model(
            frame,
            classes=[0],
            verbose=False,
            conf=YOLO_CONF,
            device=INFERENCE_DEVICE,
        )
        person_boxes = []
        for r in results:
            for box in r.boxes:
                x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
                conf = float(box.conf[0])
                person_boxes.append((float(x1), float(y1), float(x2), float(y2), conf))

        centroids = _centroids([(b[0], b[1], b[2], b[3]) for b in person_boxes])
        current_groups = _find_groups(centroids, PROXIMITY_PX)

        # --- Match current groups to tracked groups (nearest unmatched tracker) ---
        matched_ids: set = set()
        frame_group_data = []  # (gid, member indices, group centroid)
        for grp_indices in current_groups:
            gc = _group_centroid(centroids, grp_indices)
            best_id, best_dist = None, float("inf")
            for gid, gdata in _group_trackers.items():
                if gid in matched_ids:
                    continue
                dx = gc[0] - gdata["centroid"][0]
                dy = gc[1] - gdata["centroid"][1]
                dist = (dx * dx + dy * dy) ** 0.5
                if dist < PROXIMITY_PX * 2 and dist < best_dist:
                    best_dist = dist
                    best_id = gid
            if best_id is not None:
                _group_trackers[best_id]["centroid"] = gc
                _group_trackers[best_id]["last_seen"] = now
                _group_trackers[best_id]["member_count"] = len(grp_indices)
                matched_ids.add(best_id)
                frame_group_data.append((best_id, grp_indices, gc))
            else:
                gid = _next_group_id
                _next_group_id += 1
                _group_trackers[gid] = {
                    "centroid": gc,
                    "first_seen": now,
                    "last_seen": now,
                    "member_count": len(grp_indices),
                    "alerted": False,
                }
                frame_group_data.append((gid, grp_indices, gc))

        # Remove stale groups (not seen for > 3 s).
        stale = [gid for gid, gd in _group_trackers.items() if now - gd["last_seen"] > 3]
        for gid in stale:
            del _group_trackers[gid]

        # --- Alert logic (mark + metadata; snapshots saved after overlays) ---
        alert_this_frame = False
        pending_alerts = []  # list of (gid, people_count, duration_seconds)
        for gid, gdata in _group_trackers.items():
            duration = gdata["last_seen"] - gdata["first_seen"]
            if duration >= GROUP_TIME_THRESHOLD and not gdata["alerted"]:
                if now - _last_alert_time >= ALERT_COOLDOWN:
                    gdata["alerted"] = True
                    alert_this_frame = True
                    _last_alert_time = now
                    pending_alerts.append((gid, gdata["member_count"], duration))

        # --- Draw overlays on the live frame ---
        display = frame.copy()
        pending_gid_set = {gid for gid, _, _ in pending_alerts}
        alert_person_indices = set()
        for gid, grp_indices, _gc in frame_group_data:
            if gid in pending_gid_set:
                alert_person_indices.update(grp_indices)
        # Live overlay: mark any person that belongs to any alerting group.
        if pending_alerts:
            max_people = max(people for _gid, people, _dur in pending_alerts)
            max_dur = max(dur for _gid, _people, dur in pending_alerts)
            cv2.putText(
                display,
                f"ALERT: {len(pending_alerts)} group(s) | {max_people} people | {int(max_dur)}s",
                (12, 32),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.8,
                (0, 0, 255),
                3,
            )
        for idx, (x1, y1, x2, y2, conf) in enumerate(person_boxes):
            is_alert_person = idx in alert_person_indices
            box_color = (0, 0, 255) if is_alert_person else (0, 255, 0)
            cv2.rectangle(display, (int(x1), int(y1)), (int(x2), int(y2)), box_color, 2)
            cv2.putText(
                display, f"{conf:.0%}",
                (int(x1), int(y1) - 6),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2,
            )
        for gid, grp_indices, gc in frame_group_data:
            gdata = _group_trackers.get(gid)
            if gdata is None:
                continue
            duration = gdata["last_seen"] - gdata["first_seen"]
            radius = int(PROXIMITY_PX * 0.6)
            is_alert = duration >= GROUP_TIME_THRESHOLD
            color = (0, 0, 255) if is_alert else (0, 165, 255)
            cv2.circle(display, (int(gc[0]), int(gc[1])), radius, color, 2)
            label = f"Group: {len(grp_indices)} | {duration:.0f}s"
            cv2.putText(
                display, label,
                (int(gc[0]) - 70, int(gc[1]) - radius - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.55, color, 2,
            )
            if is_alert:
                cv2.putText(
                    display, "ALERT",
                    (int(gc[0]) - 35, int(gc[1]) + radius + 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3,
                )

        # --- Save annotated alert snapshots (one file per alerting group) ---
        if pending_alerts:
            os.makedirs("alerts", exist_ok=True)
            ts = datetime.now().strftime("%Y%m%d_%H%M%S")
            frame_group_by_id = {gid: (grp_indices, gc) for gid, grp_indices, gc in frame_group_data}
            for gid, people, duration in pending_alerts:
                grp_indices, gc = frame_group_by_id.get(gid, (None, None))
                if grp_indices is None:
                    continue
                alert_display = frame.copy()
                alert_person_set = set(grp_indices)
                # Header annotation for this specific alert group.
                cv2.putText(
                    alert_display,
                    f"ALERT GROUP {gid} | {people} people | {int(duration)}s",
                    (12, 32),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.9,
                    (0, 0, 255),
                    3,
                )
                # Highlight only this group's members in red.
                for idx, (x1, y1, x2, y2, conf) in enumerate(person_boxes):
                    is_alert_person = idx in alert_person_set
                    box_color = (0, 0, 255) if is_alert_person else (0, 255, 0)
                    cv2.rectangle(alert_display, (int(x1), int(y1)), (int(x2), int(y2)), box_color, 2)
                    cv2.putText(
                        alert_display,
                        f"{conf:.0%}",
                        (int(x1), int(y1) - 6),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        box_color,
                        2,
                    )
                # Circle/label for this group only.
                radius = int(PROXIMITY_PX * 0.6)
                cv2.circle(alert_display, (int(gc[0]), int(gc[1])), radius, (0, 0, 255), 2)
                cv2.putText(
                    alert_display,
                    f"Group: {len(grp_indices)} | {duration:.0f}s",
                    (int(gc[0]) - 70, int(gc[1]) - radius - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.55,
                    (0, 0, 255),
                    2,
                )
                cv2.putText(
                    alert_display,
                    "ALERT",
                    (int(gc[0]) - 35, int(gc[1]) + radius + 25),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    (0, 0, 255),
                    3,
                )
                # Include group id to avoid collisions when multiple groups alert in one second.
                alert_path = f"alerts/alert_{ts}_gid{gid}.jpg"
                cv2.imwrite(alert_path, alert_display)
                alert_info = {
                    "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    "people": people,
                    "duration": round(duration, 1),
                    "image": alert_path,
                }
                with lock:
                    state["alerts"].insert(0, alert_info)
                    state["alerts"] = state["alerts"][:50]  # bounded history

        # --- Update shared state ---
        groups_json = []
        for gid, gi, gc in frame_group_data:
            gdata = _group_trackers.get(gid)
            if gdata:
                groups_json.append({
                    "id": gid,
                    "count": len(gi),
                    "duration": round(gdata["last_seen"] - gdata["first_seen"], 1),
                })
        with lock:
            state["frame"] = display
            state["people_count"] = len(person_boxes)
            state["groups"] = groups_json
            state["alert_active"] = alert_this_frame or any(
                (gd["last_seen"] - gd["first_seen"]) >= GROUP_TIME_THRESHOLD
                for gd in _group_trackers.values()
            )
            state["fps"] = round(fps, 1)

        # --- ~1 FPS rate limit: sleep off the remainder of this second ---
        elapsed = time.time() - now
        time.sleep(max(0, 1.0 - elapsed))
# ---------------------------------------------------------------------------
# MJPEG generator
# ---------------------------------------------------------------------------
def _generate_frames():
    """Yield multipart JPEG chunks for the /video_feed MJPEG response.

    Reconstructed from the diff rendering (the removed grid-view generator
    was interleaved here).  The lock is held only for the frame read; JPEG
    encoding happens outside it to keep the detection thread unblocked.
    """
    while True:
        with lock:
            frame = state["frame"]
        if frame is not None:
            ok, buf = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
            if ok:
                yield (
                    b"--frame\r\n"
                    b"Content-Type: image/jpeg\r\n\r\n" + buf.tobytes() + b"\r\n"
                )
        time.sleep(0.033)  # cap the HTTP stream at ~30 fps
# ---------------------------------------------------------------------------
# Routes
# ---------------------------------------------------------------------------
@app.route("/")
def dashboard():
    """Serve the single-page dashboard UI (templates/dashboard.html)."""
    return render_template("dashboard.html")
@app.route("/video_feed")
def video_feed():
    """MJPEG stream of the selected camera's annotated frames."""
    return Response(
        _generate_frames(),
        mimetype="multipart/x-mixed-replace; boundary=frame",
    )
@app.route("/api/status")
def api_status():
    """JSON snapshot of detection state for the dashboard poller.

    Read under the lock so counts, groups, and alerts come from one
    consistent frame. Only the newest 20 alerts are returned.
    """
    with lock:
        active_camera = _get_camera_by_id(state["active_camera_id"])
        return jsonify({
            "people_count": state["people_count"],
            "groups": state["groups"],
            "alert_active": state["alert_active"],
            "alerts": state["alerts"][:20],
            "fps": state["fps"],
            "stream_status": state["stream_status"],
            "selected_camera_id": state["selected_camera_id"],
            "active_camera_name": active_camera["name"] if active_camera else "Unknown",
        })
@app.route("/api/cameras")
def api_cameras():
    """List configured cameras (id, display name, IP) for the selector UI."""
    return jsonify({
        "cameras": [{"id": c["id"], "name": c["name"], "ip": c["ip"]} for c in CAMERAS]
    })
@app.route("/api/camera/select", methods=["POST"])
def api_camera_select():
    """Switch the active stream; the worker thread picks up the new id.

    Resets the per-camera UI state under the lock so stale data from the
    previous camera is not shown while the new stream connects.
    """
    payload = request.get_json(silent=True) or {}
    camera = _get_camera_by_id(payload.get("camera_id"))
    if camera is None:
        return jsonify({"ok": False, "error": "Invalid camera id"}), 400
    with lock:
        state.update({
            "selected_camera_id": camera["id"],
            "frame": None,
            "groups": [],
            "people_count": 0,
            "alert_active": False,
            "fps": 0,
            "stream_status": "connecting",
        })
    return jsonify({"ok": True})
@app.route("/alerts/<path:filename>")
def serve_alert_image(filename):
    """Serve saved alert snapshots; send_from_directory blocks path traversal."""
    return send_from_directory("alerts", filename)
# ---------------------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Single background worker: tracks the selected camera and runs detection.
    threading.Thread(target=_process_stream, daemon=True).start()
    # Fix: the original used an f-string with no placeholders.
    print("\n Surveillance Dashboard → http://localhost:5000\n")
    app.run(host="0.0.0.0", port=5000, debug=False, threaded=True)

View File

@ -3,7 +3,7 @@
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Surveillance Dashboard - Multi-Cam Grid</title>
<title>Surveillance Dashboard</title>
<style>
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
@ -78,11 +78,20 @@
@keyframes pulse { 0%,100%{ opacity:1 } 50%{ opacity:.35 } }
.clock { font-size: .85rem; color: var(--text-dim); font-variant-numeric: tabular-nums; }
.camera-select {
background: var(--surface-2);
color: var(--text);
border: 1px solid var(--border);
border-radius: 8px;
padding: 6px 10px;
font-size: .85rem;
min-width: 170px;
}
/* -------- Layout -------- */
.container {
display: grid;
grid-template-columns: 1fr 340px;
grid-template-columns: 1fr 320px;
grid-template-rows: auto 1fr;
gap: 20px;
padding: 20px 28px 28px;
@ -123,7 +132,6 @@
overflow: hidden;
position: relative;
min-height: 480px;
aspect-ratio: 16/9;
}
.video-panel img {
width: 100%;
@ -165,8 +173,10 @@
letter-spacing: .5px;
box-shadow: 0 8px 30px rgba(239,68,68,.35);
z-index: 200;
animation: bannerIn .4s ease-out;
}
.alert-banner.show { display: flex; align-items: center; gap: 10px; }
@keyframes bannerIn { from { opacity:0; transform:translateX(-50%) translateY(-12px); } }
/* -------- Sidebar -------- */
.sidebar {
@ -191,6 +201,7 @@
}
.panel-body { padding: 12px 16px; }
/* Groups panel */
.group-item {
display: flex;
justify-content: space-between;
@ -200,8 +211,8 @@
}
.group-item:last-child { border-bottom: none; }
.group-meta { display: flex; flex-direction: column; gap: 2px; }
.group-meta span:first-child { font-weight: 600; font-size: .95rem; }
.group-meta .cam-name { font-size: .75rem; color: var(--accent); }
.group-meta span:first-child { font-weight: 600; font-size: .9rem; }
.group-meta span:last-child { font-size: .75rem; color: var(--text-dim); }
.group-timer {
font-variant-numeric: tabular-nums;
font-weight: 700;
@ -210,152 +221,282 @@
border-radius: 6px;
background: var(--surface-2);
}
.group-timer.warning { color: var(--orange); background: rgba(245,158,11,.1); }
.group-timer.danger { color: var(--red); background: rgba(239,68,68,.1); }
.no-groups { color: var(--text-dim); font-size: .85rem; padding: 8px 0; }
.alert-log { max-height: 480px; overflow-y: auto; }
/* Alert history */
.alert-log {
max-height: 380px;
overflow-y: auto;
scrollbar-width: thin;
scrollbar-color: var(--border) transparent;
}
.alert-entry {
display: flex;
gap: 12px;
padding: 12px 8px;
padding: 10px 0;
border-bottom: 1px solid var(--border);
cursor: pointer;
transition: background .2s;
border-radius: 6px;
padding: 10px 8px;
}
.alert-entry:hover { background: var(--surface-2); }
.alert-thumb { width: 80px; height: 50px; border-radius: 4px; object-fit: cover; }
.alert-entry:last-child { border-bottom: none; }
.alert-thumb {
width: 64px;
height: 44px;
border-radius: 6px;
object-fit: cover;
border: 1px solid var(--border);
flex-shrink: 0;
}
.alert-info { display: flex; flex-direction: column; gap: 2px; }
.alert-info .time { font-size: .75rem; color: var(--text-dim); }
.alert-info .desc { font-size: .85rem; font-weight: 600; display: block; }
.alert-info .desc { font-size: .82rem; font-weight: 600; }
.alert-info .duration { font-size: .72rem; color: var(--orange); }
/* -------- Lightbox -------- */
.lightbox {
display: none;
position: fixed;
inset: 0;
background: rgba(0,0,0,.85);
z-index: 300;
justify-content: center;
align-items: center;
cursor: pointer;
}
.lightbox.open { display: flex; }
.lightbox img {
max-width: 90vw;
max-height: 85vh;
border-radius: 8px;
box-shadow: 0 12px 60px rgba(0,0,0,.6);
}
/* -------- Responsive -------- */
@media (max-width: 960px) {
.container {
grid-template-columns: 1fr;
}
.kpi-strip {
grid-template-columns: repeat(2, 1fr);
}
}
</style>
</head>
<body>
<!-- Alert banner -->
<div class="alert-banner" id="alertBanner">
<svg width="20" height="20" fill="none" stroke="currentColor" stroke-width="2.2" viewBox="0 0 24 24"><path d="M10.29 3.86L1.82 18a2 2 0 001.71 3h16.94a2 2 0 001.71-3L13.71 3.86a2 2 0 00-3.42 0z"/><line x1="12" y1="9" x2="12" y2="13"/><line x1="12" y1="17" x2="12.01" y2="17"/></svg>
GATHERING DETECTED — Multiple groups active
GATHERING ALERT — People grouped for over 20 seconds
</div>
<header>
<div class="logo">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.8"><path d="M23 19a2 2 0 01-2 2H3a2 2 0 01-2-2V8a2 2 0 012-2h4l2-3h6l2 3h4a2 2 0 012 2z"/><circle cx="12" cy="13" r="4"/></svg>
Surveillance Dash: Multi-Grid
Surveillance Dashboard
</div>
<div class="header-right">
<div class="status-badge live" id="statusBadge">
<select id="cameraSelect" class="camera-select"></select>
<div class="status-badge connecting" id="statusBadge">
<span class="dot"></span>
<span id="statusText">System Live</span>
<span id="statusText">Connecting</span>
</div>
<div class="clock" id="clock"></div>
</div>
</header>
<div class="container">
<!-- KPI strip -->
<div class="kpi-strip">
<div class="kpi">
<span class="kpi-label">People (Total)</span>
<div class="kpi" id="kpiPeople">
<span class="kpi-label">People Detected</span>
<span class="kpi-value accent" id="valPeople">0</span>
</div>
<div class="kpi">
<div class="kpi" id="kpiGroups">
<span class="kpi-label">Active Groups</span>
<span class="kpi-value orange" id="valGroups">0</span>
</div>
<div class="kpi" id="kpiAlerts">
<span class="kpi-label">Global Alerts</span>
<span class="kpi-label">Total Alerts</span>
<span class="kpi-value red" id="valAlerts">0</span>
</div>
<div class="kpi">
<span class="kpi-label">Node Status</span>
<span class="kpi-value green" id="valNodes">Online</span>
<span class="kpi-label">FPS</span>
<span class="kpi-value green" id="valFps">0</span>
</div>
</div>
<!-- Video feed -->
<div class="video-panel">
<img id="videoFeed" src="/video_feed" alt="Simultaneous Feed">
<img id="videoFeed" src="/video_feed" alt="Live Feed">
<div class="video-overlay">
<span class="overlay-tag">GRID VIEW</span>
<span class="overlay-tag" id="overlayPeople">0 People Total</span>
<span class="overlay-tag">LIVE</span>
<span class="overlay-tag" id="overlayPeople">0 People</span>
</div>
</div>
<!-- Sidebar -->
<div class="sidebar">
<!-- Active groups -->
<div class="panel">
<div class="panel-header">Active Groups (All Cams)</div>
<div class="panel-header">Active Groups</div>
<div class="panel-body" id="groupList">
<div style="color:var(--text-dim); font-size:.8rem;">Monitoring...</div>
<div class="no-groups">No groups detected</div>
</div>
</div>
<!-- Alert history -->
<div class="panel" style="flex:1;">
<div class="panel-header">Global Alert History</div>
<div class="panel-header">Alert History</div>
<div class="panel-body alert-log" id="alertLog">
<div style="color:var(--text-dim); font-size:.8rem;">Scanning for events...</div>
<div class="no-groups">No alerts yet</div>
</div>
</div>
</div>
</div>
<!-- Lightbox for alert images -->
<div class="lightbox" id="lightbox" onclick="this.classList.remove('open')">
<img id="lightboxImg" src="" alt="Alert snapshot">
</div>
<script>
// Header clock: date + 24-hour time, refreshed every second.
// (Deduplicated: the diff rendering showed both the old toLocaleTimeString
// call and its toLocaleString replacement.)
function updateClock() {
  const now = new Date();
  document.getElementById('clock').textContent = now.toLocaleString('en-US', {
    hour: '2-digit', minute: '2-digit', second: '2-digit', hour12: false,
    year: 'numeric', month: 'short', day: 'numeric'
  });
}
setInterval(updateClock, 1000);
updateClock();
let prevAlertActive = false;
let bannerTimeout = null;
let camerasLoaded = false;
// Populate the camera <select> from the backend once at startup.
// Sets camerasLoaded so poll() knows it may sync the selector value.
async function loadCameras() {
  const select = document.getElementById('cameraSelect');
  try {
    const res = await fetch('/api/cameras');
    const data = await res.json();
    const options = data.cameras.map(
      c => `<option value="${c.id}">${c.name} (${c.ip})</option>`
    );
    select.innerHTML = options.join('');
    camerasLoaded = true;
  } catch (e) {
    console.error('Failed to load cameras', e);
  }
}
// Ask the backend to switch streams; the UI catches up via polling.
async function selectCamera(cameraId) {
  const requestOptions = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ camera_id: cameraId })
  };
  try {
    await fetch('/api/camera/select', requestOptions);
  } catch (e) {
    console.error('Failed to switch camera', e);
  }
}
// Poll /api/status and refresh every dashboard widget.
// Reconstructed from the diff rendering, which interleaved the old
// multi-camera grid handlers with this single-camera version.
async function poll() {
  try {
    const res = await fetch('/api/status');
    const d = await res.json();

    // KPIs
    document.getElementById('valPeople').textContent = d.people_count;
    document.getElementById('valGroups').textContent = d.groups.length;
    document.getElementById('valAlerts').textContent = d.alerts.length;
    document.getElementById('valFps').textContent = d.fps;
    document.getElementById('overlayPeople').textContent = d.people_count + ' People';

    // Stream status badge
    const badge = document.getElementById('statusBadge');
    const stext = document.getElementById('statusText');
    badge.className = 'status-badge ' + d.stream_status;
    stext.textContent = d.stream_status === 'live' ? 'Live' :
      d.stream_status === 'error' ? 'Error' : 'Connecting';
    if (d.active_camera_name) {
      stext.textContent = `${stext.textContent} - ${d.active_camera_name}`;
    }

    // Keep the selector in sync with the server-side selection.
    if (camerasLoaded) {
      const cameraSelect = document.getElementById('cameraSelect');
      if (cameraSelect.value !== d.selected_camera_id) {
        cameraSelect.value = d.selected_camera_id;
      }
    }

    // Alert KPI glow
    document.getElementById('kpiAlerts').classList.toggle('alert-glow', d.alert_active);

    // Alert banner (shown on rising edge, auto-hides after 8 s)
    const banner = document.getElementById('alertBanner');
    if (d.alert_active && !prevAlertActive) {
      banner.classList.add('show');
      clearTimeout(bannerTimeout);
      bannerTimeout = setTimeout(() => banner.classList.remove('show'), 8000);
    }
    prevAlertActive = d.alert_active;

    // Groups list
    const gl = document.getElementById('groupList');
    if (d.groups.length === 0) {
      gl.innerHTML = '<div class="no-groups">No groups detected</div>';
    } else {
      gl.innerHTML = d.groups.map(g => {
        const cls = g.duration >= 20 ? 'danger' : g.duration >= 10 ? 'warning' : '';
        return `<div class="group-item">
          <div class="group-meta">
            <span>${g.count} People</span>
            <span>Group #${g.id}</span>
          </div>
          <span class="group-timer ${cls}">${g.duration}s</span>
        </div>`;
      }).join('');
    }

    // Alert log (click opens the lightbox)
    const al = document.getElementById('alertLog');
    if (d.alerts.length === 0) {
      al.innerHTML = '<div class="no-groups">No alerts yet</div>';
    } else {
      al.innerHTML = d.alerts.map(a => `
        <div class="alert-entry" onclick="openLightbox('/${a.image}')">
          <img class="alert-thumb" src="/${a.image}" alt="Alert">
          <div class="alert-info">
            <span class="time">${a.time}</span>
            <span class="desc">${a.people} people gathered</span>
            <span class="duration">Duration: ${a.duration}s</span>
          </div>
        </div>
      `).join('');
    }
  } catch (e) {
    console.error('Poll error', e);
  }
}
// Click-to-zoom for alert snapshots.
function openLightbox(src) {
  document.getElementById('lightboxImg').src = src;
  document.getElementById('lightbox').classList.add('open');
}
// Bootstrap: the diff rendering duplicated the old 1500 ms interval and a
// stray poll() call; the new version polls every 1000 ms and runs the first
// poll after the camera list loads.
setInterval(poll, 1000);
document.getElementById('cameraSelect').addEventListener('change', (e) => {
  selectCamera(e.target.value);
});
loadCameras().then(poll);
</script>
</body>