new file mode 100644
...
@@ -0,0 +1,158 @@
+#!/usr/bin/env python3
+
+"""
+Zoom in on video motion.
+"""
+
+import pyffstream
+import numpy as np
+import PIL.Image
+import cv2
+import scipy.signal
+
+
+def args_pre(parser):
+    # Add arguments.
+    parser.add_argument(
+        '--margins', metavar=('L', 'R', 'T', 'B'), type=float, nargs=4,
+        default=[0, 0, 0, 0],
+        help="""
+            margins (left, right, top, bottom, in percent) of output video
+            (default: %(default)s)
+        """)
+    parser.add_argument(
+        '--blur-factor', metavar='F', type=float, default=0.05,
+        help="blur size factor (default: %(default)s)")
+    parser.add_argument(
+        '--blur-threshold', metavar='T', type=int, default=32,
+        help="blur threshold (default: %(default)s)")
+    parser.add_argument(
+        '--lowpass-factor', metavar='F', type=float, default=0.00015,
+        help="low-pass filter cutoff frequency factor (default: %(default)s)")
+
+
+def init(args):
+    # Set arguments.
+    args.history = int(30 * args.output_fps)
+    args.blur_size = (
+        int(np.ceil(args.working_width * args.blur_factor)) // 2 * 2 + 1
+    )
+    args.lost_size = args.working_width * args.working_height * 0.001
+    args.b, args.a = scipy.signal.butter(
+        1, args.working_width / args.output_fps * args.lowpass_factor
+    )
+    args.history_track_ratio = 0.025
+    args.resample_ratios = [
+        # (1.0, PIL.Image.NEAREST),
+        (0.5, PIL.Image.BILINEAR),
+        (0.2, PIL.Image.BICUBIC),
+        (0.0, PIL.Image.LANCZOS),
+    ]
+
+    # Set state.
+    class State:
+        pass
+    state = State()
+    state.background_subtractor = cv2.createBackgroundSubtractorMOG2(
+        history=args.history
+    )
+    state.filter_state = []
+    return state
+
+
+def process(args, state, frame, frame_num):
+    # Create debug frame.
+    if args.debug:
+        debug_frame = frame.copy()
+    else:
+        debug_frame = None
+
+    # Subtract background, blur and threshold.
+    foreground = state.background_subtractor.apply(frame)
+    mask = cv2.compare(
+        cv2.GaussianBlur(foreground, (args.blur_size, args.blur_size), 0),
+        args.blur_threshold,
+        cv2.CMP_GE,
+    )
+    if args.debug:
+        debug_frame[mask > 0] = (0, 255, 0)
+        debug_frame[foreground > 0] = (255, 0, 0)
+
+    # Nothing interesting?
+    if np.count_nonzero(mask) < args.lost_size:
+        # Reset rectangle.
+        x, y, w, h = 0, 0, args.working_width, args.working_height
+    else:
+        # Find bounding rectangle.
+        x, y, w, h = cv2.boundingRect(mask)
+        if args.debug:
+            cv2.rectangle(
+                debug_frame, (x, y), (x+w, y+h), (0, 255, 0), 2 * args.thickness
+            )
+
+    # Add rectangle margins.
+    ml, mr, mt, mb = args.margins
+    m = max(w, h)
+    x = max(x - int(m * ml / 100), 0)
+    y = max(y - int(m * mt / 100), 0)
+    w = min(w + int(m * (ml+mr) / 100), args.working_width - x)
+    h = min(h + int(m * (mt+mb) / 100), args.working_height - y)
+    if args.debug:
+        cv2.rectangle(
+            debug_frame, (x, y), (x+w, y+h), (0, 0, 255), 2 * args.thickness
+        )
+
+    # Filter rectangle.
+    x1, y1, x2, y2 = x, y, x+w, y+h
+    if frame_num == args.start_frame:
+        state.filter_state = [
+            coord * scipy.signal.lfilter_zi(args.b, args.a)
+            for coord in (x1, y1, x2, y2)
+        ]
+    (x1, y1, x2, y2), filter_state_next = zip(*(
+        scipy.signal.lfilter(args.b, args.a, [coord], zi=zi)
+        for coord, zi in
+        zip((x1, y1, x2, y2), state.filter_state)
+    ))
+    if frame_num >= args.start_frame + args.history * args.history_track_ratio:
+        state.filter_state = filter_state_next
+    x1, y1, x2, y2 = [int(coord[0]) for coord in (x1, y1, x2, y2)]
+    x, y, w, h = x1, y1, x2-x1, y2-y1
+    if args.debug:
+        cv2.rectangle(
+            debug_frame, (x, y), (x+w, y+h), (255, 0, 255), 2 * args.thickness
+        )
+
+    # Fix rectangle.
+    x, y, w, h = pyffstream.fix_rect(args, x, y, w, h)
+
+    # Determine resampling method.
+    for i, (ratio, resample) in enumerate(args.resample_ratios):
+        if min(w / args.output_width, h / args.output_height) >= ratio:
+            break
+    if args.debug:
+        color_coeff = i / max(1, len(args.resample_ratios) - 1)
+        color = (
+            255 * (0 + color_coeff),
+            255 * (1 - color_coeff),
+            0,
+        )
+        cv2.rectangle(
+            debug_frame, (x, y), (x+w, y+h), color, 2 * args.thickness,
+        )
+
+    # Cut and resize.
+    output_frame = pyffstream.resize(
+        frame[y:y+h, x:x+w], args.output_width, args.output_height, resample
+    )
+
+    # Return.
+    return output_frame, debug_frame
+
+
+def main():
+    pyffstream.run(__doc__, process, init, args_pre)
+
+
+if __name__ == '__main__':
+    main()
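
Note (illustration, not part of the patch): a minimal standalone sketch of the motion-mask step used in process(), i.e. MOG2 background subtraction, Gaussian blur, threshold, and bounding rectangle, run on synthetic frames. The frame size, blur kernel, thresholds, and the cv2.threshold call standing in for cv2.compare are assumptions for the demo only.

import cv2
import numpy as np

subtractor = cv2.createBackgroundSubtractorMOG2(history=100)

for t in range(50):
    # Static grey background with a small white square moving left to right.
    frame = np.full((240, 320, 3), 64, dtype=np.uint8)
    x = 10 + 4 * t
    cv2.rectangle(frame, (x, 100), (x + 30, 130), (255, 255, 255), -1)

    foreground = subtractor.apply(frame)                 # per-pixel motion mask
    blurred = cv2.GaussianBlur(foreground, (21, 21), 0)  # merge nearby blobs
    _, mask = cv2.threshold(blurred, 32, 255, cv2.THRESH_BINARY)
    if cv2.countNonZero(mask) > 50:                      # enough motion detected?
        bx, by, bw, bh = cv2.boundingRect(mask)          # tight box around motion
        print(t, bx, by, bw, bh)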
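
Another illustration, also not part of the patch: the rectangle-smoothing idea behind the "Filter rectangle" step. Each coordinate is run through a first-order Butterworth low-pass filter one sample per frame, with the state seeded via scipy.signal.lfilter_zi so the first frame starts at steady state instead of ramping up from zero. The cutoff value and the synthetic noisy coordinate stream below are assumptions for illustration.

import numpy as np
import scipy.signal

b, a = scipy.signal.butter(1, 0.01)          # very low normalized cutoff

rng = np.random.default_rng(0)
coords = 200 + rng.normal(0, 20, size=300)   # jittery x-coordinate, one per frame

# Seed the filter state so a constant input equal to coords[0] would pass
# through unchanged (no startup transient).
zi = coords[0] * scipy.signal.lfilter_zi(b, a)

smoothed = []
for coord in coords:
    out, zi = scipy.signal.lfilter(b, a, [coord], zi=zi)  # one sample at a time
    smoothed.append(out[0])

print(smoothed[0], smoothed[-1])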