Skip to content

Commit

Permalink
Merge pull request #1103 from serengil/feat-task-1103-bugs-and-enhancements
Browse files Browse the repository at this point in the history

Feat task 1103 bugs and enhancements
  • Loading branch information
serengil authored Mar 11, 2024
2 parents 65d6312 + 820ba59 commit bef2cc9
Show file tree
Hide file tree
Showing 2 changed files with 48 additions and 40 deletions.
23 changes: 13 additions & 10 deletions deepface/detectors/YuNet.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,15 @@ def build_model(self) -> Any:
"""

opencv_version = cv2.__version__.split(".")
if not len(opencv_version) >= 2:
raise ValueError(
f"OpenCv's version must have major and minor values but it is {opencv_version}"
)

opencv_version_major = int(opencv_version[0])
opencv_version_minor = int(opencv_version[1])

if len(opencv_version) > 2 and int(opencv_version[0]) == 4 and int(opencv_version[1]) < 8:
if opencv_version_major < 4 or (opencv_version_major == 4 and opencv_version_minor < 8):
# min requirement: https://github.com/opencv/opencv_zoo/issues/172
raise ValueError(f"YuNet requires opencv-python >= 4.8 but you have {cv2.__version__}")

Expand Down Expand Up @@ -67,9 +74,9 @@ def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
# resize image if it is too large (Yunet fails to detect faces on large input sometimes)
# I picked 640 as a threshold because it is the default value of max_size in Yunet.
resized = False
r = 1 # resize factor
if height > 640 or width > 640:
r = 640.0 / max(height, width)
original_image = img.copy()
img = cv2.resize(img, (int(width * r), int(height * r)))
height, width = img.shape[0], img.shape[1]
resized = True
Expand All @@ -93,16 +100,12 @@ def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
left eye, nose tip, the right corner and left corner of the mouth respectively.
"""
(x, y, w, h, x_re, y_re, x_le, y_le) = list(map(int, face[:8]))
left_eye = (x_re, y_re)
right_eye = (x_le, y_le)

# Yunet returns negative coordinates if it thinks part of
# the detected face is outside the frame.
# We set the coordinate to 0 if they are negative.
# YuNet returns negative coordinates if it thinks part of the detected face
# is outside the frame.
x = max(x, 0)
y = max(y, 0)
if resized:
img = original_image
x, y, w, h = int(x / r), int(y / r), int(w / r), int(h / r)
x_re, y_re, x_le, y_le = (
int(x_re / r),
Expand All @@ -118,8 +121,8 @@ def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
w=w,
h=h,
confidence=confidence,
left_eye=left_eye,
right_eye=right_eye,
left_eye=(x_re, y_re),
right_eye=(x_le, y_le),
)
resp.append(facial_area)
return resp
65 changes: 35 additions & 30 deletions deepface/modules/verification.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ def verify(
model: FacialRecognition = modeling.build_model(model_name)
dims = model.output_shape

# extract faces from img1
if isinstance(img1_path, list):
# given image is already pre-calculated embedding
if not all(isinstance(dim, float) for dim in img1_path):
Expand All @@ -115,16 +116,20 @@ def verify(
img1_embeddings = [img1_path]
img1_facial_areas = [None]
else:
img1_embeddings, img1_facial_areas = __extract_faces_and_embeddings(
img_path=img1_path,
model_name=model_name,
detector_backend=detector_backend,
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
normalization=normalization,
)
try:
img1_embeddings, img1_facial_areas = __extract_faces_and_embeddings(
img_path=img1_path,
model_name=model_name,
detector_backend=detector_backend,
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
normalization=normalization,
)
except ValueError as err:
raise ValueError("Exception while processing img1_path") from err

# extract faces from img2
if isinstance(img2_path, list):
# given image is already pre-calculated embedding
if not all(isinstance(dim, float) for dim in img2_path):
Expand All @@ -147,15 +152,18 @@ def verify(
img2_embeddings = [img2_path]
img2_facial_areas = [None]
else:
img2_embeddings, img2_facial_areas = __extract_faces_and_embeddings(
img_path=img2_path,
model_name=model_name,
detector_backend=detector_backend,
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
normalization=normalization,
)
try:
img2_embeddings, img2_facial_areas = __extract_faces_and_embeddings(
img_path=img2_path,
model_name=model_name,
detector_backend=detector_backend,
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
normalization=normalization,
)
except ValueError as err:
raise ValueError("Exception while processing img2_path") from err

no_facial_area = {
"x": None,
Expand Down Expand Up @@ -218,18 +226,15 @@ def __extract_faces_and_embeddings(
model: FacialRecognition = modeling.build_model(model_name)
target_size = model.input_shape

try:
img_objs = detection.extract_faces(
img_path=img_path,
target_size=target_size,
detector_backend=detector_backend,
grayscale=False,
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
)
except ValueError as err:
raise ValueError("Exception while processing img1_path") from err
img_objs = detection.extract_faces(
img_path=img_path,
target_size=target_size,
detector_backend=detector_backend,
grayscale=False,
enforce_detection=enforce_detection,
align=align,
expand_percentage=expand_percentage,
)

# find embeddings for each face
for img_obj in img_objs:
Expand Down

0 comments on commit bef2cc9

Please sign in to comment.