-
Notifications
You must be signed in to change notification settings - Fork 19
/
Copy path_video.py
executable file
·73 lines (56 loc) · 1.64 KB
/
_video.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
#!/usr/bin/env python2
# Extracts OpenFace face embeddings from a video file (path given as argv[1])
# and pickles the collected vectors next to this script. Requires the
# openface Python package plus its dlib/Torch model files at the paths below.
import os
import sys
import pickle
import numpy as np
np.set_printoptions(precision=2)
from sklearn.cluster import DBSCAN
import cv2
import openface
# Hard-coded model locations — assumes the container layout used by the
# openface Docker image; adjust if the models live elsewhere.
dlibFacePredictor='/root/openface/openface/models/dlib/shape_predictor_68_face_landmarks.dat'
networkModel='/root/openface/openface/models/openface/nn4.small2.v1.t7'
imgDim=96  # input size (pixels) expected by the nn4.small2.v1 network
# Loading these models is slow; done once at import time and shared by
# getRep/getReps below.
align = openface.AlignDlib(dlibFacePredictor)
net = openface.TorchNeuralNet(networkModel, imgDim)
def getReps(bgrImg):
    """Return a list of 128-d OpenFace embeddings, one per detected face.

    Args:
        bgrImg: frame in OpenCV's BGR channel order (as read by VideoCapture).

    Returns:
        A (possibly empty) list of embeddings, or None if detection or
        embedding raised — callers treat None as "skip this frame".
    """
    try:
        rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
        bb = align.getAllFaceBoundingBoxes(rgbImg)
        alignedFaces = [align.align(imgDim, rgbImg, box,
                                    landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
                        for box in bb]
        reps = [net.forward(alignedFace) for alignedFace in alignedFaces]
        return reps
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit, making the script impossible to Ctrl-C during a bad
        # frame. Catch Exception so real errors are still tolerated per-frame
        # but the process remains interruptible.
        return None
def getRep(bgrImg):
    """Return the 128-d OpenFace embedding of the largest face in the frame.

    Args:
        bgrImg: frame in OpenCV's BGR channel order.

    Returns:
        The embedding vector, or None when no face is found or any step fails.
    """
    try:
        rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
        bb = align.getLargestFaceBoundingBox(rgbImg)
        if bb is None:
            # No face detected — return None explicitly instead of letting
            # align/forward raise and be masked by the handler below.
            return None
        alignedFace = align.align(imgDim, rgbImg, bb,
                                  landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        if alignedFace is None:
            # align.align can fail to align even with a bounding box.
            return None
        rep = net.forward(alignedFace)
        return rep
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any real per-frame failure maps to None.
        return None
print("***Video Processing***")
idx = 0
vectors = []
cap = cv2.VideoCapture(sys.argv[1])
try:
    while True:
        ret, frame = cap.read()
        # `not ret` is the idiomatic end-of-stream check (was `ret is False`,
        # which relies on identity with the False singleton).
        if not ret:
            break
        idx += 1
        # Sample one frame in every 20 to keep embedding cost manageable.
        if idx % 20 != 0:
            continue
        reps = getReps(frame)
        # getReps returns None on failure and [] when no faces were found;
        # a non-empty truthy list means we got embeddings for this frame.
        if reps:
            print(len(reps))
            vectors.extend(reps)
        # cv2.imshow('', frame)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break
        # print(idx)
finally:
    # Release the capture device/file handle even if processing raises.
    cap.release()
fileDir = os.path.dirname(os.path.realpath(__file__))
# Output pickle is "<video basename>.pk" placed next to this script.
pkFile = os.path.join(fileDir, os.path.basename(sys.argv[1]) + '.pk')
# 'wb': pickle data is binary, and the original leaked the file handle by
# never closing it — the `with` block guarantees flush + close.
with open(pkFile, 'wb') as f:
    pickle.dump(vectors, f)