-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathface_detect.py
123 lines (106 loc) · 4.01 KB
/
face_detect.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
""" Experiment with face detection and image filtering using OpenCV """
import cv2
import numpy as np
import random
def webcam(cap):
    """Display the raw webcam feed in a window until 'q' is pressed.

    Args:
        cap: an opened cv2.VideoCapture to read frames from.

    Returns None. Does not release `cap`; the caller owns it.
    """
    while True:
        # Capture frame-by-frame; ret is False when the grab fails
        # (camera unplugged, end of stream).
        ret, frame = cap.read()
        if not ret:
            return
        # NOTE: the original ran face_cascade.detectMultiScale here but
        # never used the result — the dead per-frame detection is removed.
        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return
def faces_webcam(cap):
    """Show the webcam feed with a red rectangle around each detected face.

    Uses the module-level `face_cascade` Haar classifier. Loops until 'q'
    is pressed or a frame read fails.

    Args:
        cap: an opened cv2.VideoCapture to read frames from.
    """
    while True:
        ret, frame = cap.read()
        # Guard against a failed read: detectMultiScale would raise on None.
        if not ret:
            return
        faces = face_cascade.detectMultiScale(frame, scaleFactor=1.2, minSize=(20, 20))
        for (x, y, w, h) in faces:
            # Outline each face; (0, 0, 255) is red in OpenCV's BGR order.
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255))
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return
def blur_faces_webcam(cap, x_blur, y_blur):
    """Show the webcam feed with each detected face obscured by dilation.

    Each face region is run through cv2.dilate with an all-ones kernel,
    which smears pixel values and anonymizes the face. Loops until 'q'
    is pressed or a frame read fails.

    Args:
        cap: an opened cv2.VideoCapture to read frames from.
        x_blur: kernel height in pixels (larger => stronger blur).
        y_blur: kernel width in pixels.
    """
    # The kernel depends only on the arguments, so build it once instead
    # of re-allocating it on every frame (loop-invariant hoisting).
    kernel = np.ones((x_blur, y_blur), 'uint8')
    while True:
        ret, frame = cap.read()
        # Guard against a failed read: detectMultiScale would raise on None.
        if not ret:
            return
        faces = face_cascade.detectMultiScale(frame, scaleFactor=1.2, minSize=(20, 20))
        for (x, y, w, h) in faces:
            # Dilate only the face sub-image, in place in the frame.
            frame[y:y+h, x:x+w, :] = cv2.dilate(frame[y:y+h, x:x+w, :], kernel)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return
def draw_face(frame, x, y, h, w):
    """Draw a cartoon face (eyes, pupils, smile) inside a face bounding box.

    Mutates `frame` in place using cv2.ellipse. Positions are fractions of
    the box so the cartoon scales with the detected face size.

    Args:
        frame: BGR image (numpy array) to draw on.
        x, y: top-left corner of the face bounding box.
        h, w: height and width of the bounding box (note the h-before-w
            order in this signature, unlike the (x, y, w, h) detection
            tuples — callers must swap).
    """
    # Shared ellipse parameters: full sweep, filled (-1 thickness).
    angle = 0
    startAngle = 0
    endAngle = 360
    thickness = -1
    # eyes: two filled white ellipses at ~27.5% / 72.5% across, 35% down.
    center_coordinates1 = (int(x+w*.275), int(y+h*.35))
    center_coordinates2 = (int(x+w*.725), int(y+h*.35))
    axesLength = (w//10, h//10)
    color = (255, 255, 255)
    cv2.ellipse(frame, center_coordinates1, axesLength, angle,
                startAngle, endAngle, color, thickness)
    cv2.ellipse(frame, center_coordinates2, axesLength, angle,
                startAngle, endAngle, color, thickness)
    # pupils: smaller black ellipses slightly below the eye centers.
    center_coordinates1 = (int(x+w*.275), int(y+h*.375))
    center_coordinates2 = (int(x+w*.725), int(y+h*.375))
    axesLength = (w//32, h//32)
    color = (0, 0, 0)
    cv2.ellipse(frame, center_coordinates1, axesLength, angle,
                startAngle, endAngle, color, thickness)
    cv2.ellipse(frame, center_coordinates2, axesLength, angle,
                startAngle, endAngle, color, thickness)
    # mouth: a 15°–165° black arc (an un-filled smile, thickness 10).
    # The original reused the pupil `color` here; kept explicit black
    # to preserve that behavior.
    center_coordinates = (x+w//2, int(y+h*.7))
    axesLength = (w//4, h//8)
    angle = 0
    startAngle = 15
    endAngle = 165
    thickness = 10
    cv2.ellipse(frame, center_coordinates, axesLength, angle,
                startAngle, endAngle, color, thickness)
def draw_on_faces_webcam(cap, x_blur, y_blur):
    """Show the webcam feed with each detected face blurred and overdrawn
    with a cartoon face (see draw_face).

    Loops until 'q' is pressed or a frame read fails.

    Args:
        cap: an opened cv2.VideoCapture to read frames from.
        x_blur: dilation kernel height in pixels.
        y_blur: dilation kernel width in pixels.
    """
    # Kernel depends only on the arguments — build once, not per frame.
    kernel = np.ones((x_blur, y_blur), 'uint8')
    while True:
        ret, frame = cap.read()
        # Guard against a failed read: detectMultiScale would raise on None.
        if not ret:
            return
        faces = face_cascade.detectMultiScale(frame, scaleFactor=1.2, minSize=(20, 20))
        for (x, y, w, h) in faces:
            # First anonymize the real face, then draw the cartoon on top.
            frame[y:y+h, x:x+w, :] = cv2.dilate(frame[y:y+h, x:x+w, :], kernel)
            # draw_face takes h before w — mind the swapped order.
            draw_face(frame, x, y, h, w)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return
# Script entry: open the default camera (device 0) and load the Haar
# cascade from the working directory. NOTE(review): neither open is
# checked — a missing camera or XML file surfaces later as a crash
# inside the chosen mode; confirm the cascade file ships alongside
# this script.
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
print('PRESS Q TO QUIT!')
# pick one mode at a time to use program with:
# webcam(cap)
# faces_webcam(cap)
# blur_faces_webcam(cap, 20,20)
draw_on_faces_webcam(cap, 20, 20)
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()