hongyu12321 committed
Commit e7cd452 · verified · 1 Parent(s): 8fc4073

Upload face_utils.py

Files changed (1)
  face_utils.py +43 -0
face_utils.py ADDED
@@ -0,0 +1,43 @@
+ import numpy as np
+ from PIL import Image, ImageDraw
+ from facenet_pytorch import MTCNN
+
+ class FaceCropper:
+     """Detect faces and return (cropped_face, annotated_image, metadata)."""
+     def __init__(self, device: str | None = None, image_size: int = 224):
+         import torch
+         self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
+         self.mtcnn = MTCNN(keep_all=True, device=self.device)
+         self.image_size = image_size
+
+     def _to_pil(self, img):
+         if isinstance(img, Image.Image):
+             return img.convert("RGB")
+         return Image.fromarray(img).convert("RGB")
+
+     def detect_and_crop(self, img, select="largest"):
+         pil = self._to_pil(img)
+         boxes, probs = self.mtcnn.detect(pil)
+
+         annotated = pil.copy()
+         draw = ImageDraw.Draw(annotated)
+
+         if boxes is None or len(boxes) == 0:
+             return None, annotated, {"boxes": np.empty((0, 4)), "scores": np.empty((0,))}
+
+         for b, p in zip(boxes, probs):
+             x1, y1, x2, y2 = map(float, b)
+             draw.rectangle([x1, y1, x2, y2], outline=(255, 0, 0), width=3)
+             draw.text((x1, y1 - 10), f"{p:.2f}", fill=(255, 0, 0))
+
+         idx = 0
+         if select == "largest":
+             areas = [(b[2] - b[0]) * (b[3] - b[1]) for b in boxes]
+             idx = int(np.argmax(areas))
+         elif isinstance(select, int) and 0 <= select < len(boxes):
+             idx = select
+
+         x1, y1, x2, y2 = boxes[idx].astype(int)
+         face = pil.crop((x1, y1, x2, y2))
+
+         return face, annotated, {"boxes": boxes, "scores": probs}
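
Minimal usage sketch, assuming facenet_pytorch, torch, and Pillow are installed; the input and output filenames ("example.jpg", "face_crop.jpg", "annotated.jpg") are placeholders for illustration, not part of the commit:

from PIL import Image
from face_utils import FaceCropper

cropper = FaceCropper()                        # uses CUDA when available, else CPU
face, annotated, meta = cropper.detect_and_crop(Image.open("example.jpg"))

if face is None:
    print("no face detected")
else:
    print(f"{len(meta['boxes'])} face(s), best score {meta['scores'].max():.2f}")
    face.save("face_crop.jpg")                 # crop of the selected (largest) face
    annotated.save("annotated.jpg")            # all detections boxed and scored in red

detect_and_crop also accepts an H×W×3 NumPy array directly, and passing an integer such as select=0 crops that specific detection instead of the largest one.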