hysts (HF Staff) committed
Commit 401a860 · Parent(s): 2343867
Files changed (3):
  1. README.md +1 -1
  2. app.py +13 -16
  3. requirements.txt +9 -9
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 💻
 colorFrom: pink
 colorTo: yellow
 sdk: gradio
-sdk_version: 3.19.1
+sdk_version: 3.36.1
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -3,14 +3,7 @@
 from __future__ import annotations
 
 import functools
-import os
 import pathlib
-import subprocess
-
-if os.environ.get('SYSTEM') == 'spaces':
-    subprocess.call('pip uninstall -y opencv-python'.split())
-    subprocess.call('pip uninstall -y opencv-python-headless'.split())
-    subprocess.call('pip install opencv-python-headless==4.5.5.62'.split())
 
 import cv2
 import face_alignment
@@ -19,7 +12,9 @@ import numpy as np
 import torch
 
 TITLE = 'face-alignment'
-DESCRIPTION = 'This is an unofficial demo for https://github.com/1adrianb/face-alignment.'
+DESCRIPTION = 'https://github.com/1adrianb/face-alignment'
+
+MAX_IMAGE_SIZE = 1800
 
 
 def detect(
@@ -27,12 +22,14 @@ def detect(
     detector,
     device: torch.device,
 ) -> np.ndarray:
-    preds = detector.get_landmarks(image)
-    if len(preds) == 0:
-        raise RuntimeError('No face was found')
+    landmarks, _, boxes = detector.get_landmarks(image, return_bboxes=True)
+    if landmarks is None:
+        return image
 
     res = image.copy()
-    for pts in preds:
+    for pts, box in zip(landmarks, boxes):
+        box = np.round(box[:4]).astype(int)
+        cv2.rectangle(res, tuple(box[:2]), tuple(box[2:]), (0, 255, 0), 2)
         tl = pts.min(axis=0)
         br = pts.max(axis=0)
         size = (br - tl).max()
@@ -43,18 +40,18 @@
 
 
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
+detector = face_alignment.FaceAlignment(face_alignment.LandmarksType.TWO_D,
                                         device=device.type)
-func = functools.partial(detect, detector=detector, device=device)
+fn = functools.partial(detect, detector=detector, device=device)
 
 image_paths = sorted(pathlib.Path('images').glob('*.jpg'))
 examples = [[path.as_posix()] for path in image_paths]
 
 gr.Interface(
-    fn=func,
+    fn=fn,
     inputs=gr.Image(label='Input', type='numpy'),
     outputs=gr.Image(label='Output', type='numpy'),
     examples=examples,
     title=TITLE,
     description=DESCRIPTION,
-).launch(show_api=False)
+).queue().launch()
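The functional change in app.py is the move to the newer face-alignment API: LandmarksType.TWO_D replaces LandmarksType._2D, and get_landmarks is called with return_bboxes=True so the detected boxes can be drawn alongside the landmarks. A minimal sketch of that call pattern outside Gradio, assuming face-alignment 1.4.0 and a placeholder image path:

import cv2
import face_alignment
import numpy as np

# Same constructor call as the updated app.py; device='cpu' is just for illustration.
detector = face_alignment.FaceAlignment(face_alignment.LandmarksType.TWO_D,
                                        device='cpu')

# 'sample.jpg' is a placeholder path, not a file shipped with this Space.
image = cv2.cvtColor(cv2.imread('sample.jpg'), cv2.COLOR_BGR2RGB)

# With return_bboxes=True the call returns a (landmarks, scores, boxes) tuple;
# landmarks is None when no face is found, which the demo now handles by
# returning the input image unchanged instead of raising.
landmarks, _, boxes = detector.get_landmarks(image, return_bboxes=True)
if landmarks is not None:
    for pts, box in zip(landmarks, boxes):
        box = np.round(box[:4]).astype(int)
        cv2.rectangle(image, tuple(box[:2]), tuple(box[2:]), (0, 255, 0), 2)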
requirements.txt CHANGED
@@ -1,9 +1,9 @@
-face-alignment==1.3.5
-numba==0.55.1
-numpy==1.21.5
-opencv-python-headless==4.5.5.62
-Pillow==9.0.1
-scikit-image==0.19.2
-scipy==1.8.0
-torch==1.11.0
-torchvision==0.12.0
+face-alignment==1.4.0
+numba==0.57.1
+numpy==1.24.4
+opencv-python-headless==4.8.0.74
+Pillow==10.0.0
+scikit-image==0.21.0
+scipy==1.10.1
+torch==2.0.1
+torchvision==0.15.2
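With opencv-python-headless now pinned directly in requirements.txt (4.8.0.74), the runtime uninstall/reinstall of OpenCV that app.py previously performed on Spaces is no longer needed, which is why that block is removed above; the pinned environment can be reproduced locally with pip install -r requirements.txt.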