# --- Source: hand_pose.py (59 lines, 44 loc, 1.61 KB) ---
# (GitHub page chrome and copied line-number gutter removed; the script begins below.)
"""
#Author : Arijit Mukherjee
#Date : June 2016
#B.P. Poddar Institute of Management and Technology
#Inteligent Human-Computer Interaction with depth prediction using normal webcam and IR leds
#Inspired by : http://research.microsoft.com/pubs/220845/depth4free_SIGGRAPH.pdf
Demo application to predict hand-pose from a set of test data
"""
#Importing Opencv and Numpy
import cv2
import numpy as np
#Importing our dependencies
import util as ut
import svm_train as st
import time
#create and train SVM model each time coz bug in opencv 3.1.0 svm.load() https://github.com/Itseez/opencv/issues/4969
model=st.trainSVM(9,20,'TrainData2')
move_text={'1':'GRAB','2':'Bless','3':'Rock','4':'Stop','5':'ThumbsUp','6':'Victory','7':'Stop2','8':'Left','9':'Right'}
#Camera and font initialization
cam=int(raw_input("Enter Camera Index : "))
cap=cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX
#The main event loop
while(cap.isOpened()):
move=''
t=time.time()
_,img=cap.read()
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,th1 = cv2.threshold(gray.copy(),150,255,cv2.THRESH_TOZERO)
cv2.imshow('thresh',th1)
_,contours,hierarchy = cv2.findContours(th1.copy(),cv2.RETR_EXTERNAL, 2)
cnt=ut.getMaxContour(contours,4000)
if cnt!=None:
gesture,res=ut.getGestureImg(cnt,img,th1,model)
cv2.imshow('PredictedGesture',cv2.imread('TrainData2/'+res+'_1.jpg'))
move=' '+move_text[res]
fps=int(1/(time.time()-t))
cv2.putText(img,"FPS: "+str(fps)+move,(50,50), font,1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('Frame',img)
k = 0xFF & cv2.waitKey(10)
if k == 27:
break
cap.release()
cv2.destroyAllWindows()