# inference_webcam_ts_compositing.py

  1. """
  2. Inference on webcams: Use a model on webcam input.
  3. Once launched, the script is in background collection mode.
  4. Press B to toggle between background capture mode and matting mode. The frame shown when B is pressed is used as background for matting.
  5. Press Q to exit.
  6. Example:
  7. python inference_webcam.py \
  8. --model-type mattingrefine \
  9. --model-backbone resnet50 \
  10. --model-checkpoint "PATH_TO_CHECKPOINT" \
  11. --resolution 1280 720
  12. """

import argparse
import time
from threading import Condition, Thread

import cv2
import torch
from PIL import Image
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow
from torchvision.transforms import ToTensor

# --------------- Arguments ---------------

parser = argparse.ArgumentParser(description='Inference from web-cam')
parser.add_argument('--model-type', type=str, required=True, choices=['mattingbase', 'mattingrefine'])
parser.add_argument('--model-backbone', type=str, required=True, choices=['resnet101', 'resnet50', 'mobilenetv2'])
parser.add_argument('--model-backbone-scale', type=float, default=0.25)
parser.add_argument('--model-checkpoint', type=str, required=True)
parser.add_argument('--model-refine-mode', type=str, default='sampling', choices=['full', 'sampling', 'thresholding'])
parser.add_argument('--model-refine-sample-pixels', type=int, default=80_000)
parser.add_argument('--model-refine-threshold', type=float, default=0.7)
parser.add_argument('--hide-fps', action='store_true')
parser.add_argument('--resolution', type=int, nargs=2, metavar=('width', 'height'), default=(1280, 720))
parser.add_argument('--device-id', type=int, default=0)
parser.add_argument('--background-image', type=str, default="")
args = parser.parse_args()
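
# Note: --model-type and --model-backbone are parsed but never referenced below; the
# architecture comes entirely from the TorchScript checkpoint loaded in the main
# section. Only the backbone-scale and refine-* options are applied to that model.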

# ----------- Utility classes -------------

# A wrapper that reads frames from cv2.VideoCapture in its own thread to improve throughput.
# Call .read() in a tight loop to get the newest frame.
class Camera:
    def __init__(self, device_id=0, width=1280, height=720):
        self.capture = cv2.VideoCapture(device_id)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
        self.exposure = self.capture.get(cv2.CAP_PROP_EXPOSURE)
        self.capture.set(cv2.CAP_PROP_BACKLIGHT, 0)
        self.capture.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
        self.frameAvailable = False
        self.success_reading, self.frame = self.capture.read()
        self.cv = Condition()
        self.thread = Thread(target=self.__update, args=())
        self.thread.daemon = True
        self.thread.start()

    def __update(self):
        while self.success_reading:
            grabbed, frame = self.capture.read()
            with self.cv:
                self.success_reading = grabbed
                self.frame = frame
                self.frameAvailable = True
                self.cv.notify()

    def brighter(self):
        if self.exposure < -2:
            self.exposure += 1
            self.capture.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
            print(self.exposure)

    def darker(self):
        if self.exposure > -12:
            self.exposure -= 1
            self.capture.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
            print(self.exposure)

    def read(self):
        with self.cv:
            self.cv.wait_for(lambda: self.frameAvailable)
            frame = self.frame.copy()
            self.frameAvailable = False
        return frame

    def __exit__(self, exec_type, exc_value, traceback):
        self.capture.release()
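
# Note: the clamping in brighter()/darker() assumes a capture backend where
# CAP_PROP_EXPOSURE is a log2-scaled value in roughly the [-12, -2] range (common on
# UVC/DirectShow drivers); other backends may interpret or simply ignore the property.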

# An FPS tracker that computes an exponential moving average of the frame rate.
class FPSTracker:
    def __init__(self, ratio=0.5):
        self._last_tick = None
        self._avg_fps = None
        self.ratio = ratio

    def tick(self):
        if self._last_tick is None:
            self._last_tick = time.time()
            return None
        t_new = time.time()
        fps_sample = 1.0 / (t_new - self._last_tick)
        self._avg_fps = self.ratio * fps_sample + (1 - self.ratio) * self._avg_fps if self._avg_fps is not None else fps_sample
        self._last_tick = t_new
        return self.get()

    def get(self):
        return self._avg_fps
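
# The displayed FPS is smoothed as
#     avg_fps <- ratio * (1 / dt) + (1 - ratio) * avg_fps
# so with the default ratio=0.5 the readout follows the instantaneous frame rate
# quickly while damping single-frame jitter.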

# Wrapper for displaying the stream in a Qt window. step() accepts an image, shows it,
# and returns key-press info for basic interactivity.
# It also tracks FPS and optionally overlays it onto the stream.
class Displayer(QMainWindow):
    def __init__(self, title, width, height, show_info=True):
        QMainWindow.__init__(self)
        self.setWindowTitle(title)
        self.width, self.height = width, height
        self.show_info = show_info
        self.fps_tracker = FPSTracker()
        self.setFixedSize(width, height)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)
        self.image_label = QtWidgets.QLabel(self)
        self.image_label.resize(width, height)
        self.key = None

    def keyPressEvent(self, event):
        self.key = event.text()

    def closeEvent(self, event):
        self.key = 'q'

    # Update the currently showing frame and return the pressed key character, if any.
    def step(self, image):
        fps_estimate = self.fps_tracker.tick()
        if self.show_info and fps_estimate is not None:
            message = f"{int(fps_estimate)} fps | {self.width}x{self.height}"
            cv2.putText(image, message, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
        pix = self.convert_cv_qt(image)
        self.image_label.setPixmap(pix)
        QApplication.processEvents()
        key = self.key
        self.key = None
        return key

    def convert_cv_qt(self, cv_img):
        """Convert an OpenCV image (RGB or RGBA) to a QPixmap."""
        h, w, ch = cv_img.shape
        bytes_per_line = ch * w
        if ch == 3:
            qt_image = QtGui.QImage(cv_img.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        elif ch == 4:
            qt_image = QtGui.QImage(cv_img.data, w, h, bytes_per_line, QtGui.QImage.Format_RGBA8888)
        return QtGui.QPixmap.fromImage(qt_image)
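
# convert_cv_qt() interprets 3-channel input as RGB and 4-channel input as RGBA, so
# OpenCV's native BGR frames must be converted (as done with cv2.cvtColor below)
# before being handed to Displayer.step().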

# --------------- Main ---------------

model = torch.jit.load(args.model_checkpoint)
model.backbone_scale = args.model_backbone_scale
model.refine_mode = args.model_refine_mode
model.refine_sample_pixels = args.model_refine_sample_pixels
model.refine_threshold = args.model_refine_threshold
model = model.to(torch.device('cuda'))

width, height = args.resolution
cam = Camera(device_id=args.device_id, width=width, height=height)
app = QApplication(['MattingV2'])
dsp = Displayer('MattingV2', cam.width, cam.height, show_info=(not args.hide_fps))
dsp.show()
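
# A CUDA-capable GPU is assumed: the model and every input tensor are moved to 'cuda'
# unconditionally, so there is no CPU fallback in this script.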

def cv2_frame_to_cuda(frame):
    """Convert a BGR OpenCV frame to a normalized (1, 3, H, W) RGB tensor on the GPU."""
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    dtype = torch.float16 if 'fp16' in args.model_checkpoint else torch.float32
    return ToTensor()(Image.fromarray(frame)).unsqueeze_(0).to(dtype).cuda()
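
# Half precision is selected purely by looking for the substring "fp16" in the
# checkpoint path; any other file name falls back to float32 inputs.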

with torch.no_grad():
    while True:
        # Background-capture mode: show the raw feed until a background frame is grabbed.
        bgr = None
        while True:
            frame = cam.read()
            frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            key = dsp.step(frameRGB)
            if key == 'b':
                bgr = cv2_frame_to_cuda(frame)
                break
            elif key == 'w':
                cam.brighter()
            elif key == 's':
                cam.darker()
            elif key == 'q':
                exit()

        if args.background_image == "":
            # No replacement image given: composite onto a solid green screen.
            bgImage = torch.zeros_like(bgr)
            bgImage[0, 1] = torch.ones_like(bgr[0, 0])  # set the green channel (RGB order) to 1
        else:
            # Force a 3-channel BGR read so cv2_frame_to_cuda's BGR->RGB conversion succeeds.
            bgImage = cv2.imread(args.background_image, cv2.IMREAD_COLOR)
            bgImage = cv2.resize(bgImage, (frame.shape[1], frame.shape[0]))
            bgImage = cv2_frame_to_cuda(bgImage)

        # Matting mode: predict alpha/foreground against the captured background and composite.
        while True:
            frame = cam.read()
            src = cv2_frame_to_cuda(frame)
            pha, fgr = model(src, bgr)[:2]
            res = pha * fgr + (1 - pha) * bgImage
            res = res.mul(255).byte().cpu().permute(0, 2, 3, 1).numpy()[0]
            key = dsp.step(res.copy())
            if key == 'b':
                break
            elif key == 'w':
                cam.brighter()
            elif key == 's':
                cam.darker()
            elif key == 'q':
                exit()
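
# Example invocation with a custom composited background (the checkpoint and image
# names here are placeholders, not files shipped with this script):
#
#   python inference_webcam_ts_compositing.py \
#       --model-type mattingrefine \
#       --model-backbone resnet50 \
#       --model-checkpoint torchscript_resnet50_fp16.pth \
#       --background-image beach.jpg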