import os

from ultralytics import YOLO


def main(model_path="person_detector_best.pt", video_path=None):
    """Run person detection on a video file with a trained YOLO model.

    Streams the video frame-by-frame (memory-efficient for large files),
    displays the annotated frames live, and saves the annotated output
    under ``runs/detect/predict`` (or a similar auto-numbered directory).

    Args:
        model_path: Path to the trained YOLO weights file.
            Defaults to ``person_detector_best.pt``.
        video_path: Path to the input video. When ``None`` (the default),
            falls back to the original hard-coded sample clip, preserving
            the script's previous behavior.
    """
    # 1. Paths — the default video is kept for backward compatibility.
    if video_path is None:
        video_path = os.path.join(
            "video_data",
            "S3 - 55 40 WJ -7",
            "download_ch18_20260406080008_20260406090008.mp4",
        )

    # 2. Validations: fail fast with a clear message instead of letting
    #    YOLO raise a less obvious error deep inside inference.
    if not os.path.exists(model_path):
        print(f"[!] Error: Model not found at '{model_path}'")
        return
    if not os.path.exists(video_path):
        print(f"[!] Error: Video not found at '{video_path}'")
        return

    # 3. Load Model
    print(f"[*] Loading model: {model_path}")
    # Using the Ultralytics YOLO class
    model = YOLO(model_path)

    # 4. Run Inference
    print(f"[*] Running inference on video: {video_path}")
    print("[*] Press 'q' in the video window to stop inference early.")

    # 'stream=True' is memory-efficient and processes the video frame by frame.
    # 'save=True' will save the annotated video to 'runs/detect/predict'.
    # 'show=True' will display the video as it processes.
    results = model.predict(
        source=video_path,
        conf=0.35,       # Confidence threshold (adjust if needed)
        save=True,       # Save the output video
        show=True,       # Show video in real-time
        stream=True,     # Important for large 2GB video files
        imgsz=1280,
    )

    # Since stream=True, predict() returns a lazy generator: inference only
    # executes as we iterate. Ctrl+C is caught so a partial run exits cleanly.
    try:
        for _ in results:
            pass
    except KeyboardInterrupt:
        print("\n[!] Inference interrupted by user.")

    print("\n[*] Inference finished.")
    print("[*] Check the 'runs/detect/predict' directory (or similar) for your annotated video.")


if __name__ == "__main__":
    main()