Unverified commit e2b7bc0b, authored by Ben Milanko, committed by GitHub

YouTube Livestream Detection (#2752)

* Youtube livestream detection * dependancy update to auto install pafy * Remove print * include youtube_dl in deps * PEP8 reformat * youtube url check fix * reduce lines * add comment * update check_requirements * stream framerate fix * Update README.md * cleanup * PEP8 * remove cap.retrieve() failure code Co-authored-by: 's avatarGlenn Jocher <glenn.jocher@ultralytics.com>
Parent commit: 9029759c
...@@ -92,9 +92,8 @@ $ python detect.py --source 0 # webcam ...@@ -92,9 +92,8 @@ $ python detect.py --source 0 # webcam
file.mp4 # video file.mp4 # video
path/ # directory path/ # directory
path/*.jpg # glob path/*.jpg # glob
rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa # rtsp stream 'https://youtu.be/NUsoVlDFqZg' # YouTube video
rtmp://192.168.1.105/live/test # rtmp stream 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream
``` ```
To run inference on example images in `data/images`: To run inference on example images in `data/images`:
......
...@@ -19,7 +19,7 @@ def detect(save_img=False): ...@@ -19,7 +19,7 @@ def detect(save_img=False):
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
save_img = not opt.nosave and not source.endswith('.txt') # save inference images save_img = not opt.nosave and not source.endswith('.txt') # save inference images
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://')) ('rtsp://', 'rtmp://', 'http://', 'https://'))
# Directories # Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
......
...@@ -20,8 +20,8 @@ from PIL import Image, ExifTags ...@@ -20,8 +20,8 @@ from PIL import Image, ExifTags
from torch.utils.data import Dataset from torch.utils.data import Dataset
from tqdm import tqdm from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
clean_str resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first from utils.torch_utils import torch_distributed_zero_first
# Parameters # Parameters
...@@ -275,14 +275,20 @@ class LoadStreams: # multiple IP or RTSP cameras ...@@ -275,14 +275,20 @@ class LoadStreams: # multiple IP or RTSP cameras
for i, s in enumerate(sources): for i, s in enumerate(sources):
# Start the thread to read frames from the video stream # Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='') print(f'{i + 1}/{n}: {s}... ', end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s) url = eval(s) if s.isnumeric() else s
if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
url = pafy.new(url).getbest(preftype="mp4").url
cap = cv2.VideoCapture(url)
assert cap.isOpened(), f'Failed to open {s}' assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100 self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame _, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True) thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).') print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
thread.start() thread.start()
print('') # newline print('') # newline
...@@ -303,7 +309,7 @@ class LoadStreams: # multiple IP or RTSP cameras ...@@ -303,7 +309,7 @@ class LoadStreams: # multiple IP or RTSP cameras
success, im = cap.retrieve() success, im = cap.retrieve()
self.imgs[index] = im if success else self.imgs[index] * 0 self.imgs[index] = im if success else self.imgs[index] * 0
n = 0 n = 0
time.sleep(0.01) # wait time time.sleep(1 / self.fps) # wait time
def __iter__(self): def __iter__(self):
self.count = -1 self.count = -1
...@@ -444,7 +450,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing ...@@ -444,7 +450,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing
gb += self.imgs[i].nbytes gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
pbar.close() pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''): def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes # Cache dataset labels, check images and read shapes
x = {} # dict x = {} # dict
...@@ -489,7 +495,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing ...@@ -489,7 +495,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close() pbar.close()
if nf == 0: if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
...@@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ ...@@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_
b[[1, 3]] = np.clip(b[[1, 3]], 0, h) b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit('../coco128') Usage: from utils.datasets import *; autosplit('../coco128')
......
...@@ -91,17 +91,20 @@ def check_git_status(): ...@@ -91,17 +91,20 @@ def check_git_status():
print(e) print(e)
def check_requirements(file='requirements.txt', exclude=()): def check_requirements(requirements='requirements.txt', exclude=()):
# Check installed dependencies meet requirements # Check installed dependencies meet requirements (pass *.txt file or list of packages)
import pkg_resources as pkg import pkg_resources as pkg
prefix = colorstr('red', 'bold', 'requirements:') prefix = colorstr('red', 'bold', 'requirements:')
file = Path(file) if isinstance(requirements, (str, Path)): # requirements.txt file
if not file.exists(): file = Path(requirements)
print(f"{prefix} {file.resolve()} not found, check failed.") if not file.exists():
return print(f"{prefix} {file.resolve()} not found, check failed.")
return
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
n = 0 # number of packages updates n = 0 # number of packages updates
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
for r in requirements: for r in requirements:
try: try:
pkg.require(r) pkg.require(r)
...@@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()): ...@@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()):
print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())
if n: # if packages updated if n: # if packages updated
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \ source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s)) # emoji-safe print(emojis(s)) # emoji-safe
......
Markdown formatting supported
0%
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment