# inference_webcam_ts_compositing.py

  1. """
  2. Inference on webcams: Use a model on webcam input.
  3. Once launched, the script is in background collection mode.
  4. Press B to toggle between background capture mode and matting mode. The frame shown when B is pressed is used as background for matting.
  5. Press Q to exit.
  6. Example:
  7. python inference_webcam.py \
  8. --model-type mattingrefine \
  9. --model-backbone resnet50 \
  10. --model-checkpoint "PATH_TO_CHECKPOINT" \
  11. --resolution 1280 720
  12. """
import argparse

import cv2
import torch
from PIL import Image
from PyQt5.QtWidgets import QApplication
from torchvision.transforms import ToTensor

from inference_webcam import Displayer, Camera
# --------------- Arguments ---------------
parser = argparse.ArgumentParser(description='Inference from web-cam')
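# NOTE: --model-type, --model-backbone, and --fps-limit are parsed but never
# read below; the architecture is baked into the TorchScript checkpoint.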
parser.add_argument('--model-type', type=str, required=True, choices=['mattingbase', 'mattingrefine'])
parser.add_argument('--model-backbone', type=str, required=True, choices=['resnet101', 'resnet50', 'mobilenetv2'])
parser.add_argument('--model-backbone-scale', type=float, default=0.25)
parser.add_argument('--model-checkpoint', type=str, required=True)
parser.add_argument('--model-refine-mode', type=str, default='sampling', choices=['full', 'sampling', 'thresholding'])
parser.add_argument('--model-refine-sample-pixels', type=int, default=80_000)
parser.add_argument('--model-refine-threshold', type=float, default=0.7)
parser.add_argument('--hide-fps', action='store_true')
parser.add_argument('--resolution', type=int, nargs=2, metavar=('width', 'height'), default=(1280, 720))
parser.add_argument('--device-id', type=int, default=0)
parser.add_argument('--background-image', type=str, default="")
parser.add_argument('--fps-limit', type=int, default=1000)
args = parser.parse_args()
# --------------- Main ---------------
default_float_dtype = torch.get_default_dtype()
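
# The checkpoint is a TorchScript archive; load it directly and set the
# refinement hyperparameters as attributes on the scripted module.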
model = torch.jit.load(args.model_checkpoint)
model.backbone_scale = args.model_backbone_scale
model.refine_mode = args.model_refine_mode
model.refine_sample_pixels = args.model_refine_sample_pixels
model.refine_threshold = args.model_refine_threshold
model = model.to(torch.device('cuda'))
width, height = args.resolution
cam = Camera(device_id=args.device_id, width=width, height=height)
app = QApplication(['MattingV2'])
dsp = Displayer('MattingV2', cam.width, cam.height, show_info=(not args.hide_fps))
dsp.show()

def cv2_frame_to_cuda(frame):
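    """Convert a BGR OpenCV frame to a 1x3xHxW float CUDA tensor in [0, 1]."""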
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    if 'fp16' in args.model_checkpoint:
        return ToTensor()(Image.fromarray(frame)).unsqueeze_(0).to(torch.float16).cuda()
    else:
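        # Manual ToTensor: move the raw uint8 buffer to the GPU first and do
        # the float conversion there instead of on the CPU.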
        pic = Image.fromarray(frame)
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        img = img.cuda()
        img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
        img = img.permute((2, 0, 1)).contiguous()
        tmp = img.to(dtype=default_float_dtype).div(255)
        tmp.unsqueeze_(0)
        tmp = tmp.to(torch.float32)
        return tmp
        # Equivalent one-liner, converting on the CPU:
        # return ToTensor()(Image.fromarray(frame)).unsqueeze_(0).to(torch.float32).cuda()
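
# The outer loop alternates between the two modes: the first inner loop
# captures a background frame, the second runs live matting until B is
# pressed again.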
with torch.no_grad():
    while True:
        bgr = None
        while True:  # grab bgr
            frame = cam.read()
            frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            key = dsp.step(frameRGB)
            if key == 'b':
                bgr = cv2_frame_to_cuda(frame)
                break
            elif key == 'w':
                cam.brighter()
            elif key == 's':
                cam.darker()
            elif key == 'q':
                exit()
        if args.background_image == "":
            # Green screen: zero everywhere, with the G channel (index 1 of
            # the 1x3xHxW tensor) set to 1.
            bgImage = torch.zeros_like(bgr)
            bgImage[0, 1] = torch.ones_like(bgr[0, 0])
        else:
            # Read as 3-channel BGR; IMREAD_UNCHANGED could return an extra
            # alpha channel, which cv2_frame_to_cuda and the compositing
            # below cannot handle.
            bgImage = cv2.imread(args.background_image, cv2.IMREAD_COLOR)
            bgImage = cv2.resize(bgImage, (frame.shape[1], frame.shape[0]))
            bgImage = cv2_frame_to_cuda(bgImage)
        while True:  # matting
            frame = cam.read()
            src = cv2_frame_to_cuda(frame)
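            # The model returns (pha, fgr, ...): the alpha matte first, the
            # predicted foreground second. Composite over the new background.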
            pha, fgr = model(src, bgr)[:2]
            res = pha * fgr + (1 - pha) * bgImage
            res = res.mul(255).byte().cpu().permute(0, 2, 3, 1).numpy()[0]
            key = dsp.step(res.copy())
            if key == 'b':
                break  # back to background capture mode
            elif key == 'w':
                cam.brighter()
            elif key == 's':
                cam.darker()
            elif key == 'q':
                exit()