feat: add test script for YOLO inference on surveillance video files

main
bahawal.baloch 2026-04-10 12:45:44 +05:00
parent 3383830c08
commit 6c85ad6ff5
1 changed file with 54 additions and 0 deletions

54
test_video.py Normal file
View File

@ -0,0 +1,54 @@
import os
from ultralytics import YOLO
def main(model_path="person_detector_best.pt", video_path=None):
    """Run YOLO person-detection inference on a surveillance video file.

    Validates that the model weights and target video exist, loads the
    model, then streams inference frame by frame so large (multi-GB)
    recordings are processed without loading everything into memory.
    The annotated video is saved by Ultralytics under 'runs/detect/predict'.

    Args:
        model_path: Path to the trained YOLO weights (.pt) file.
        video_path: Path to the video to analyse. Defaults to the
            bundled sample surveillance recording under 'video_data'.
    """
    # 1. Paths — default to the sample recording when none is supplied.
    if video_path is None:
        video_path = os.path.join(
            "video_data",
            "S3 - 55 40 WJ -7",
            "download_ch18_20260406080008_20260406090008.mp4",
        )

    # 2. Validations — fail fast with a clear message if an input is missing.
    if not os.path.exists(model_path):
        print(f"[!] Error: Model not found at '{model_path}'")
        return
    if not os.path.exists(video_path):
        print(f"[!] Error: Video not found at '{video_path}'")
        return

    # 3. Load Model (Ultralytics YOLO class).
    print(f"[*] Loading model: {model_path}")
    model = YOLO(model_path)

    # 4. Run Inference
    print(f"[*] Running inference on video: {video_path}")
    print("[*] Press 'q' in the video window to stop inference early.")
    # 'stream=True' is memory-efficient and processes the video frame by frame
    # 'save=True' will save the annotated video to 'runs/detect/predict'
    # 'show=True' will display the video as it processes
    results = model.predict(
        source=video_path,
        conf=0.35,    # Confidence threshold (adjust if needed)
        save=True,    # Save the output video
        show=True,    # Show video in real-time
        stream=True,  # Important for large 2GB video files
        imgsz=1280,
    )

    # With stream=True, predict() returns a lazy generator — inference only
    # actually runs as we consume it, so iterate to completion.
    try:
        for _ in results:
            pass
    except KeyboardInterrupt:
        # Best-effort early stop: report the interruption, then fall through
        # to the summary messages below.
        print("\n[!] Inference interrupted by user.")

    print("\n[*] Inference finished.")
    print("[*] Check the 'runs/detect/predict' directory (or similar) for your annotated video.")
# Run the script only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()