This repository has been archived by the owner on Jun 15, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 93
/
Copy pathdemo.py
60 lines (50 loc) · 2.6 KB
/
demo.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import argparse

import cv2
import chainer

from entity import params
from pose_detector import PoseDetector, draw_person_pose
from face_detector import FaceDetector, draw_face_keypoints
from hand_detector import HandDetector, draw_hand_keypoints

# BUG FIX: `chainer.using_config(...)` returns a context manager; calling it
# bare (without `with`) never enters the context, so backprop was NOT actually
# disabled.  Assigning the global config flag disables it for the whole
# inference script, which is what the original author intended.
chainer.global_config.enable_backprop = False
if __name__ == '__main__':
    # Run pose, face, and hand keypoint estimation on a single image and
    # save the annotated result to result.png.
    parser = argparse.ArgumentParser(description='Pose detector')
    # `required=True`: without it a missing --img produced an opaque crash
    # inside cv2.imread(None) instead of a usage message.
    parser.add_argument('--img', required=True, help='image file path')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    # load model
    pose_detector = PoseDetector("posenet", "models/coco_posenet.npz", device=args.gpu)
    hand_detector = HandDetector("handnet", "models/handnet.npz", device=args.gpu)
    face_detector = FaceDetector("facenet", "models/facenet.npz", device=args.gpu)

    # read image
    img = cv2.imread(args.img)
    # cv2.imread returns None (no exception) on a missing/unreadable file;
    # fail fast with a clear message instead of crashing in the detector.
    if img is None:
        raise SystemExit('Could not read image: {}'.format(args.img))

    # inference
    print("Estimating pose...")
    person_pose_array, _ = pose_detector(img)
    # Blend the skeleton overlay with the source image (60% image, 40% overlay).
    res_img = cv2.addWeighted(img, 0.6, draw_person_pose(img, person_pose_array), 0.4, 0)

    # each person detected
    for person_pose in person_pose_array:
        unit_length = pose_detector.get_unit_length(person_pose)

        # face estimation
        print("Estimating face keypoints...")
        cropped_face_img, bbox = pose_detector.crop_face(img, person_pose, unit_length)
        if cropped_face_img is not None:
            face_keypoints = face_detector(cropped_face_img)
            res_img = draw_face_keypoints(res_img, face_keypoints, (bbox[0], bbox[1]))
            cv2.rectangle(res_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 255, 255), 1)

        # hands estimation — the left/right branches were identical except for
        # the dict key / hand_type argument, so handle both in one loop.
        print("Estimating hands keypoints...")
        hands = pose_detector.crop_hands(img, person_pose, unit_length)
        for hand_type in ('left', 'right'):
            hand = hands[hand_type]
            if hand is None:
                continue
            bbox = hand["bbox"]
            hand_keypoints = hand_detector(hand["img"], hand_type=hand_type)
            res_img = draw_hand_keypoints(res_img, hand_keypoints, (bbox[0], bbox[1]))
            cv2.rectangle(res_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 255, 255), 1)

    print('Saving result into result.png...')
    cv2.imwrite('result.png', res_img)