MogensR committed on
Commit f7c6a9c · 1 Parent(s): 157d197

Update utils/cv_processing.py

Files changed (1):
  1. utils/cv_processing.py +292 -50
utils/cv_processing.py CHANGED
@@ -1,72 +1,302 @@
 #!/usr/bin/env python3
 """
-cv_processing.py · slim orchestrator layer
+cv_processing.py · slim orchestrator layer (self-contained)
 ──────────────────────────────────────────────────────────────────────────────
-Keeps the public API (segment_person_hq, refine_mask_hq, replace_background_hq,
-create_professional_background, validate_video_file) exactly the same so that
-existing callers do **not** need to change their imports.
-
-All heavy-lifting implementations live in:
-    utils.segmentation
-    utils.refinement
-    utils.compositing
-    utils.background_factory
-    utils.background_presets
+Public API (unchanged):
+  - segment_person_hq(frame, predictor=None, fallback_enabled=True) -> mask (H,W) float32 [0..1]
+  - segment_person_hq_original(...) -> alias of segment_person_hq (back-compat)
+  - refine_mask_hq(frame, mask, matanyone=None, fallback_enabled=True) -> mask (H,W) float32 [0..1]
+  - replace_background_hq(frame, mask, background, fallback_enabled=True) -> frame uint8 (H,W,3)
+  - create_professional_background(key_or_cfg, width, height) -> RGB uint8 (H,W,3)
+  - validate_video_file(video_path) -> (bool, reason)
+
+Design:
+  * NO imports from other utils.* modules → avoids circular imports.
+  * Torch & diffusers imported lazily inside functions.
+  * All masks are single-channel float32 in [0..1] at boundaries between stages.
+  * MatAnyOne step() is fed (N,C,H,W); no 5D tensors.
 """
 
 from __future__ import annotations
 
-# ── std / 3rd-party ────────────────────────────────────────────────────────
-import os, logging, cv2, numpy as np
+import logging
 from pathlib import Path
-from typing import Tuple, Dict, Any, Optional
-
-# ── project helpers (new modules we split out) ─────────────────────────────
-from utils.segmentation import (
-    segment_person_hq,
-    segment_person_hq_original,
-    SegmentationError,
-)
-from utils.refinement import (
-    refine_mask_hq, MaskRefinementError,
-)
-from .compositing import (
-    replace_background_hq, BackgroundReplacementError,
-)
-from utils.background_factory import create_professional_background
-from utils.background_presets import PROFESSIONAL_BACKGROUNDS  # still used in the UI
+from typing import Any, Dict, Optional, Tuple
+
+import cv2
+import numpy as np
 
 logger = logging.getLogger(__name__)
 
 # ----------------------------------------------------------------------------
-# LIGHT CONFIG – only what the UI still needs
+# Background presets (minimal set; callers can keep their own catalog if needed)
+# ----------------------------------------------------------------------------
+PROFESSIONAL_BACKGROUNDS_LOCAL: Dict[str, Dict[str, Any]] = {
+    "office": {"color": (240, 248, 255), "gradient": True},
+    "studio": {"color": (32, 32, 32), "gradient": False},
+    "nature": {"color": (34, 139, 34), "gradient": True},
+    "abstract": {"color": (75, 0, 130), "gradient": True},
+    "white": {"color": (255, 255, 255), "gradient": False},
+    "black": {"color": (0, 0, 0), "gradient": False},
+}
+
+# ----------------------------------------------------------------------------
+# Helpers
 # ----------------------------------------------------------------------------
-USE_AUTO_TEMPORAL_CONSISTENCY = True  # placeholder for future smoothing
+def _ensure_rgb(img: np.ndarray) -> np.ndarray:
+    """Convert BGR→RGB if looks like BGR; otherwise pass-through."""
+    if img is None:
+        return img
+    if img.ndim == 3 and img.shape[2] == 3:
+        # Heuristic: assume OpenCV BGR
+        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    return img
 
-# Validator soft-limits (kept here because validate_video_file still lives here)
-MIN_AREA_RATIO = 0.015
-MAX_AREA_RATIO = 0.97
+def _ensure_bgr(img: np.ndarray) -> np.ndarray:
+    """Convert RGB→BGR if looks like RGB; otherwise pass-through."""
+    if img is None:
+        return img
+    if img.ndim == 3 and img.shape[2] == 3:
+        # Heuristic: assume non-OpenCV images are RGB
+        return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+    return img
+
+def _to_mask01(m: np.ndarray) -> np.ndarray:
+    """Ensure single-channel float32 [0..1]."""
+    if m is None:
+        return None
+    if m.ndim == 3:
+        m = m[..., 0]
+    m = m.astype(np.float32)
+    if m.max() > 1.0:
+        m = m / 255.0
+    return np.clip(m, 0.0, 1.0)
+
+def _feather(mask01: np.ndarray, k: int = 2) -> np.ndarray:
+    """Small Gaussian feather for cleaner edges."""
+    if mask01.ndim == 3:
+        mask01 = mask01[..., 0]
+    k = max(1, int(k) * 2 + 1)
+    m = cv2.GaussianBlur((mask01 * 255.0).astype(np.uint8), (k, k), 0)
+    return (m.astype(np.float32) / 255.0)
+
+def _vertical_gradient(top: Tuple[int,int,int], bottom: Tuple[int,int,int], width: int, height: int) -> np.ndarray:
+    bg = np.zeros((height, width, 3), dtype=np.uint8)
+    for y in range(height):
+        t = y / max(1, height - 1)
+        r = int(top[0] * (1 - t) + bottom[0] * t)
+        g = int(top[1] * (1 - t) + bottom[1] * t)
+        b = int(top[2] * (1 - t) + bottom[2] * t)
+        bg[y, :] = (r, g, b)
+    return bg
 
 # ----------------------------------------------------------------------------
-# PUBLIC 1-LINERS to keep old call-sites working
+# Background creation (kept here to match public API)
 # ----------------------------------------------------------------------------
-# (They're just re-exports from their new homes.)
+def create_professional_background(key_or_cfg: Any, width: int, height: int) -> np.ndarray:
+    """
+    Accepts:
+      - key: str in local preset dict
+      - cfg: {"color": (r,g,b), "gradient": bool}
+    Returns RGB uint8 image (H,W,3).
+    """
+    if isinstance(key_or_cfg, str):
+        cfg = PROFESSIONAL_BACKGROUNDS_LOCAL.get(key_or_cfg, PROFESSIONAL_BACKGROUNDS_LOCAL["office"])
+    elif isinstance(key_or_cfg, dict):
+        cfg = key_or_cfg
+    else:
+        cfg = PROFESSIONAL_BACKGROUNDS_LOCAL["office"]
 
-__all__ = [
-    "segment_person_hq",
-    "segment_person_hq_original",
-    "refine_mask_hq",
-    "replace_background_hq",
-    "create_professional_background",
-    "validate_video_file",
-    "SegmentationError",
-    "MaskRefinementError",
-    "BackgroundReplacementError",
-    "PROFESSIONAL_BACKGROUNDS",
-]
+    color = tuple(int(x) for x in cfg.get("color", (255, 255, 255)))
+    use_grad = bool(cfg.get("gradient", False))
+
+    if not use_grad:
+        return np.full((height, width, 3), color, dtype=np.uint8)
+
+    # Simple vertical gradient dark->base color
+    dark = (int(color[0]*0.7), int(color[1]*0.7), int(color[2]*0.7))
+    bg = _vertical_gradient(dark, color, width, height)
+    return bg  # already RGB by convention
+
+# ----------------------------------------------------------------------------
+# Segmentation
+# ----------------------------------------------------------------------------
+def _simple_person_segmentation(frame_bgr: np.ndarray) -> np.ndarray:
+    """
+    Very simple fallback segmentation by suppressing green/white backgrounds.
+    Returns mask01 (H,W) float32.
+    """
+    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
+
+    lower_green = np.array([40, 40, 40], dtype=np.uint8)
+    upper_green = np.array([80, 255, 255], dtype=np.uint8)
+    green_mask = cv2.inRange(hsv, lower_green, upper_green)
+
+    lower_white = np.array([0, 0, 200], dtype=np.uint8)
+    upper_white = np.array([180, 30, 255], dtype=np.uint8)
+    white_mask = cv2.inRange(hsv, lower_white, upper_white)
+
+    bg_mask = cv2.bitwise_or(green_mask, white_mask)
+    person_mask = cv2.bitwise_not(bg_mask)
+
+    kernel = np.ones((5, 5), np.uint8)
+    person_mask = cv2.morphologyEx(person_mask, cv2.MORPH_CLOSE, kernel)
+    person_mask = cv2.morphologyEx(person_mask, cv2.MORPH_OPEN, kernel)
+
+    return (person_mask.astype(np.float32) / 255.0)
+
+def segment_person_hq(frame: np.ndarray, predictor: Optional[Any] = None, fallback_enabled: bool = True) -> np.ndarray:
+    """
+    Try SAM2 predictor if available; return single-channel float32 mask in [0..1].
+      - predictor.set_image expects RGB
+      - predictor.predict returns masks with shapes (N,H,W) or (H,W)
+    """
+    try:
+        if predictor is not None and hasattr(predictor, "set_image") and hasattr(predictor, "predict"):
+            rgb = _ensure_rgb(frame)
+            predictor.set_image(rgb)
+            h, w = rgb.shape[:2]
+            center = np.array([[w // 2, h // 2]])
+            labels = np.array([1])
+            masks, scores, _ = predictor.predict(
+                point_coords=center,
+                point_labels=labels,
+                multimask_output=True
+            )
+
+            # Normalize and pick best
+            if isinstance(masks, np.ndarray):
+                m = masks
+            else:
+                m = np.array(masks)
+
+            if m.ndim == 3:  # N,H,W
+                idx = int(np.argmax(scores)) if scores is not None else 0
+                m = m[idx]
+            elif m.ndim == 2:  # H,W
+                pass
+            else:
+                raise RuntimeError(f"Unexpected SAM2 mask shape: {m.shape}")
+
+            return _to_mask01(m)
+
+    except Exception as e:
+        logger.warning("SAM2 segmentation failed: %s", e)
+
+    return _simple_person_segmentation(frame) if fallback_enabled else np.ones(frame.shape[:2], dtype=np.float32)
+
+# Back-compat alias (some code may import this)
+segment_person_hq_original = segment_person_hq
+
+# ----------------------------------------------------------------------------
+# Refinement (MatAnyOne)
+# ----------------------------------------------------------------------------
+def _to_tensor_chw(img_uint8_bgr: np.ndarray) -> "torch.Tensor":
+    import torch
+    rgb = cv2.cvtColor(img_uint8_bgr, cv2.COLOR_BGR2RGB)
+    t = torch.from_numpy(rgb).permute(2, 0, 1).contiguous().float() / 255.0  # (3,H,W)
+    return t
+
+def _mask_to_tensor01(mask01: np.ndarray) -> "torch.Tensor":
+    import torch
+    m = torch.from_numpy(mask01.astype(np.float32)).unsqueeze(0).unsqueeze(0)  # (1,1,H,W)
+    return m
+
+def _tensor_to_mask01(t: "torch.Tensor") -> np.ndarray:
+    import torch
+    if t.ndim == 4:
+        t = t[0, 0]
+    elif t.ndim == 3:
+        t = t[0]
+    return np.clip(t.detach().float().cpu().numpy(), 0.0, 1.0)
+
+def _simple_mask_refinement(mask01: np.ndarray) -> np.ndarray:
+    m = (mask01 * 255.0).astype(np.uint8)
+    m = cv2.GaussianBlur(m, (5, 5), 0)
+    m = cv2.bilateralFilter(m, 9, 75, 75)
+    return (m.astype(np.float32) / 255.0)
+
+def refine_mask_hq(frame: np.ndarray, mask: np.ndarray, matanyone: Optional[Any] = None, fallback_enabled: bool = True) -> np.ndarray:
+    """
+    If MatAnyOne processor is available, refine the mask (single-channel).
+      - Converts inputs to tensors with shapes:
+          image: (1,3,H,W)
+          mask:  (1,1,H,W)
+      - No 5D tensors; avoids conv2d errors like [1,1,3,720,1280].
+    """
+    H, W = frame.shape[:2]
+    mask01 = _to_mask01(mask)
+
+    try:
+        if matanyone is not None:
+            import torch
+
+            img_t = _to_tensor_chw(frame).unsqueeze(0)  # (1,3,H,W)
+            mask_t = _mask_to_tensor01(mask01)          # (1,1,H,W)
+
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+            img_t = img_t.to(device)
+            mask_t = mask_t.to(device)
+
+            if hasattr(matanyone, "step"):
+                with torch.inference_mode():
+                    out = matanyone.step(
+                        image_tensor=img_t,
+                        mask_tensor=mask_t,
+                        objects=None,
+                        first_frame_pred=True
+                    )
+                # out should be (1,1,H,W)
+                if hasattr(matanyone, "output_prob_to_mask"):
+                    out = matanyone.output_prob_to_mask(out)
+                return _tensor_to_mask01(out)
+
+            elif hasattr(matanyone, "process"):
+                # Generic .process(image, mask) path; accepts numpy/PIL
+                refined = matanyone.process(frame, mask01)
+                refined = np.asarray(refined).astype(np.float32)
+                return _to_mask01(refined)
+
+            else:
+                logger.warning("MatAnyOne provided but no 'step' or 'process' method found.")
+
+    except Exception as e:
+        logger.warning("MatAnyOne refinement failed: %s", e)
+
+    return _simple_mask_refinement(mask01) if fallback_enabled else mask01
 
 # ----------------------------------------------------------------------------
-# VIDEO VALIDATION (unchanged)
+# Compositing
+# ----------------------------------------------------------------------------
+def replace_background_hq(frame: np.ndarray, mask01: np.ndarray, background: np.ndarray, fallback_enabled: bool = True) -> np.ndarray:
+    """
+    Composite frame over background using feathered mask.
+    Inputs:
+      - frame: (H,W,3) uint8 (BGR or RGB, doesn't matter for linear blend)
+      - mask01: (H,W) or (H,W,1) float32 in [0..1]
+      - background: (H,W,3) uint8
+    Returns:
+      - composited frame (H,W,3) uint8 (same channel order as inputs)
+    """
+    try:
+        H, W = frame.shape[:2]
+        if background.shape[:2] != (H, W):
+            background = cv2.resize(background, (W, H), interpolation=cv2.INTER_LANCZOS4)
+
+        m = _to_mask01(mask01)
+        m = _feather(m, k=2)
+        m3 = np.repeat(m[:, :, None], 3, axis=2)
+
+        comp = frame.astype(np.float32) * m3 + background.astype(np.float32) * (1.0 - m3)
+        return np.clip(comp, 0, 255).astype(np.uint8)
+    except Exception as e:
+        if fallback_enabled:
+            logger.warning("Compositing failed (%s) – returning original frame", e)
+            return frame
+        raise
+
+# ----------------------------------------------------------------------------
+# Video validation (detailed)
 # ----------------------------------------------------------------------------
 def validate_video_file(video_path: str) -> Tuple[bool, str]:
     """
@@ -108,4 +338,16 @@ def validate_video_file(video_path: str) -> Tuple[bool, str]:
 
     except Exception as e:
         logger.error(f"validate_video_file: {e}")
-        return False, f"Validation error: {e}"
+        return False, f"Validation error: {e}"
+
+# ----------------------------------------------------------------------------
+# Public symbols
+# ----------------------------------------------------------------------------
+__all__ = [
+    "segment_person_hq",
+    "segment_person_hq_original",
+    "refine_mask_hq",
+    "replace_background_hq",
+    "create_professional_background",
+    "validate_video_file",
+]
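
For orientation, here is a minimal usage sketch that drives the committed public API end to end. It is not part of the commit: it assumes the module is importable as utils.cv_processing, uses placeholder file names, and passes predictor=None / matanyone=None so the built-in fallbacks run instead of SAM2 or MatAnyOne.

# Minimal end-to-end sketch (assumptions: module importable as
# utils.cv_processing; "input.mp4" is a placeholder path; predictor and
# matanyone are None, which exercises the HSV/blur fallback paths).
import cv2

from utils.cv_processing import (
    create_professional_background,
    refine_mask_hq,
    replace_background_hq,
    segment_person_hq,
    validate_video_file,
)

ok, reason = validate_video_file("input.mp4")
if not ok:
    raise SystemExit(f"Invalid video: {reason}")

cap = cv2.VideoCapture("input.mp4")
ret, frame = cap.read()                     # BGR uint8 (H,W,3)
cap.release()
if not ret:
    raise SystemExit("Could not read a frame")

h, w = frame.shape[:2]
bg = create_professional_background("studio", w, h)   # RGB by module convention
bg = cv2.cvtColor(bg, cv2.COLOR_RGB2BGR)              # match the frame's BGR order

mask = segment_person_hq(frame, predictor=None)       # (H,W) float32 in [0..1]
mask = refine_mask_hq(frame, mask, matanyone=None)    # Gaussian/bilateral fallback
out = replace_background_hq(frame, mask, bg)          # (H,W,3) uint8

cv2.imwrite("composited.png", out)

Note the explicit RGB-to-BGR conversion: create_professional_background returns RGB by the module's stated convention, while replace_background_hq blends channels linearly in whatever order it is given, so the background must be converted to the frame's channel order first.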