Commit

Merge pull request #911 from serengil/feat-task-0712-some-improvements

Feat task 0712 some improvements

serengil authored Dec 8, 2023
2 parents 0b22c54 + 2cc5f39 commit 5696d27

Showing 10 changed files with 168 additions and 95 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
@@ -1,4 +1,4 @@
-name: Tests
+name: Tests and Linting

on:
push:
44 changes: 34 additions & 10 deletions deepface/DeepFace.py
@@ -30,6 +30,8 @@
from deepface.commons import functions, realtime, distance as dst
from deepface.commons.logger import Logger

# pylint: disable=no-else-raise

logger = Logger(module="DeepFace")

# -----------------------------------
@@ -465,8 +467,16 @@ def find(
file_name = f"representations_{model_name}.pkl"
file_name = file_name.replace("-", "_").lower()

if path.exists(db_path + "/" + file_name):
df_cols = [
"identity",
f"{model_name}_representation",
"target_x",
"target_y",
"target_w",
"target_h",
]

if path.exists(db_path + "/" + file_name):
if not silent:
logger.warn(
f"Representations for images in {db_path} folder were previously stored"
@@ -477,6 +487,12 @@ def find(
with open(f"{db_path}/{file_name}", "rb") as f:
representations = pickle.load(f)

if len(representations) > 0 and len(representations[0]) != len(df_cols):
raise ValueError(
f"Seems existing {db_path}/{file_name} is out-of-the-date."
"Delete it and re-run."
)

if not silent:
logger.info(f"There are {len(representations)} representations found in {file_name}")

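The new df_cols list also acts as a schema guard for the cached pickle: if representations_<model>.pkl on disk was written by an older version with fewer fields per row, find() now refuses it and asks for a rebuild. A minimal standalone sketch of that idea (the file path and column names below are illustrative, not DeepFace's API):

import os
import pickle

# Expected fields per cached row, mirroring the df_cols idea in find().
EXPECTED_COLS = ["identity", "embedding", "target_x", "target_y", "target_w", "target_h"]

def load_cached_rows(pkl_path):
    """Load cached rows, refusing a stale file whose per-row schema no longer matches."""
    if not os.path.exists(pkl_path):
        return []  # nothing cached yet; the caller will rebuild it
    with open(pkl_path, "rb") as f:
        rows = pickle.load(f)
    if len(rows) > 0 and len(rows[0]) != len(EXPECTED_COLS):
        # The cache was written with a different schema (e.g. before the
        # target_x/y/w/h columns were added), so it cannot be trusted.
        raise ValueError(f"{pkl_path} seems out of date. Delete it and re-run.")
    return rows
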
@@ -523,7 +539,7 @@ def find(
align=align,
)

for img_content, _, _ in img_objs:
for img_content, img_region, _ in img_objs:
embedding_obj = represent(
img_path=img_content,
model_name=model_name,
@@ -538,6 +554,10 @@ def find(
instance = []
instance.append(employee)
instance.append(img_representation)
instance.append(img_region["x"])
instance.append(img_region["y"])
instance.append(img_region["w"])
instance.append(img_region["h"])
representations.append(instance)

# -------------------------------
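With this change each cached row stores the detected facial area next to the embedding, so a later lookup can report where the matched face sits inside the database image. A rough sketch of the row layout, with hypothetical values:

def build_row(identity, embedding, region):
    # region is a dict with x/y/w/h keys, as returned by the face detector.
    return [
        identity,
        embedding,
        region["x"],
        region["y"],
        region["w"],
        region["h"],
    ]

row = build_row("db/alice/1.jpg", [0.12, -0.34, 0.56], {"x": 10, "y": 20, "w": 96, "h": 96})
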
@@ -553,10 +573,13 @@ def find(

# ----------------------------
# now, we got representations for facial database
df = pd.DataFrame(representations, columns=["identity", f"{model_name}_representation"])
df = pd.DataFrame(
representations,
columns=df_cols,
)

# img path might have more than once face
target_objs = functions.extract_faces(
source_objs = functions.extract_faces(
img=img_path,
target_size=target_size,
detector_backend=detector_backend,
@@ -567,9 +590,9 @@ def find(

resp_obj = []

for target_img, target_region, _ in target_objs:
for source_img, source_region, _ in source_objs:
target_embedding_obj = represent(
img_path=target_img,
img_path=source_img,
model_name=model_name,
enforce_detection=enforce_detection,
detector_backend="skip",
@@ -580,10 +603,10 @@ def find(
target_representation = target_embedding_obj[0]["embedding"]

result_df = df.copy() # df will be filtered in each img
result_df["source_x"] = target_region["x"]
result_df["source_y"] = target_region["y"]
result_df["source_w"] = target_region["w"]
result_df["source_h"] = target_region["h"]
result_df["source_x"] = source_region["x"]
result_df["source_y"] = source_region["y"]
result_df["source_w"] = source_region["w"]
result_df["source_h"] = source_region["h"]

distances = []
for index, instance in df.iterrows():
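The rename from target_* to source_* separates the two coordinate sets that find() now returns: target_x/y/w/h come from the cached database rows, while source_x/y/w/h describe where each probe face was detected in the input image. A simplified sketch of the per-face matching step — not DeepFace's exact implementation; the "embedding" column, the cosine metric and the 0.4 threshold are illustrative:

import numpy as np
import pandas as pd

def cosine_distance(a, b):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return 1.0 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

def match_one_face(df, source_embedding, source_region, threshold=0.4):
    """Return database rows whose embedding is closer than the threshold to one probe face."""
    result_df = df.copy()  # df holds identity, embedding and the target_* columns
    # Record where this probe face sits in the input image.
    result_df["source_x"] = source_region["x"]
    result_df["source_y"] = source_region["y"]
    result_df["source_w"] = source_region["w"]
    result_df["source_h"] = source_region["h"]
    result_df["distance"] = [cosine_distance(source_embedding, e) for e in df["embedding"]]
    return result_df[result_df["distance"] <= threshold].sort_values(by="distance")
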
@@ -815,6 +838,7 @@ def extract_faces(
"""

resp_objs = []

img_objs = functions.extract_faces(
img=img_path,
target_size=target_size,
129 changes: 70 additions & 59 deletions deepface/commons/functions.py
@@ -16,6 +16,8 @@

logger = Logger(module="commons.functions")

# pylint: disable=no-else-raise

# --------------------------------------------------
# configurations of dependencies

@@ -73,49 +75,52 @@ def loadBase64Img(uri):
"""
encoded_data = uri.split(",")[1]
nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return img
img_bgr = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
return img_bgr


def load_image(img):
"""Load image from path, url, base64 or numpy array.
"""
Load image from path, url, base64 or numpy array.
Args:
img: a path, url, base64 or numpy array.
Raises:
ValueError: if the image path does not exist.
Returns:
numpy array: the loaded image.
image (numpy array): the loaded image in BGR format
image name (str): image name itself
"""

# The image is already a numpy array
if type(img).__module__ == np.__name__:
return img
return img, None

# The image is a base64 string
if img.startswith("data:image/"):
return loadBase64Img(img)
return loadBase64Img(img), None

# The image is a url
if img.startswith("http"):
return np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB"))[
:, :, ::-1
]
return (
np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("BGR"))[
:, :, ::-1
],
# return url as image name
img,
)

# The image is a path
if os.path.isfile(img) is not True:
raise ValueError(f"Confirm that {img} exists")

# For reading images with unicode names
with open(img, "rb") as img_f:
chunk = img_f.read()
chunk_arr = np.frombuffer(chunk, dtype=np.uint8)
img = cv2.imdecode(chunk_arr, cv2.IMREAD_COLOR)
return img
# image must be a file on the system then

# This causes troubles when reading files with non english names
# return cv2.imread(img)
# image name must have english characters
if img.isascii() is False:
raise ValueError(f"Input image must not have non-english characters - {img}")

img_obj_bgr = cv2.imread(img)
# img_obj_rgb = cv2.cvtColor(img_obj_bgr, cv2.COLOR_BGR2RGB)
return img_obj_bgr, img


# --------------------------------------------------
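The reworked load_image returns an (image, name) pair so that callers such as extract_faces can name the offending file in error messages, and it now rejects non-ASCII paths that cv2.imread cannot read reliably. One caveat worth noting: Pillow's Image.convert() has no "BGR" mode, so the usual way to get a BGR array from a URL is to convert to "RGB" and reverse the channel axis. A simplified sketch under that assumption (the base64 branch is omitted for brevity):

import os
import cv2
import numpy as np
import requests
from PIL import Image

def load_image(img):
    """Return (bgr_image, name) from a numpy array, a URL or a file path (illustrative sketch)."""
    if isinstance(img, np.ndarray):
        return img, None  # already decoded, so there is no name to report

    if img.startswith("http"):
        # Pillow has no "BGR" mode: convert to RGB, then reverse the channel axis for OpenCV.
        rgb = np.array(Image.open(requests.get(img, stream=True, timeout=60).raw).convert("RGB"))
        return rgb[:, :, ::-1], img  # use the URL itself as the image name

    if not os.path.isfile(img):
        raise ValueError(f"Confirm that {img} exists")

    if not img.isascii():
        # cv2.imread can fail silently on some non-ASCII paths, so fail loudly instead.
        raise ValueError(f"Input image must not have non-english characters - {img}")

    return cv2.imread(img), img  # OpenCV loads images in BGR order
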
@@ -152,7 +157,7 @@ def extract_faces(
extracted_faces = []

# img might be path, base64 or numpy array. Convert it to numpy whatever it is.
img = load_image(img)
img, img_name = load_image(img)
img_region = [0, 0, img.shape[1], img.shape[0]]

if detector_backend == "skip":
@@ -163,10 +168,17 @@

# in case of no face found
if len(face_objs) == 0 and enforce_detection is True:
raise ValueError(
"Face could not be detected. Please confirm that the picture is a face photo "
+ "or consider to set enforce_detection param to False."
)
if img_name is not None:
raise ValueError(
f"Face could not be detected in {img_name}."
"Please confirm that the picture is a face photo "
"or consider to set enforce_detection param to False."
)
else:
raise ValueError(
"Face could not be detected. Please confirm that the picture is a face photo "
"or consider to set enforce_detection param to False."
)

if len(face_objs) == 0 and enforce_detection is False:
face_objs = [(img, img_region, 0)]
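Because load_image now supplies an image name, the enforce_detection error can tell the user which input had no detectable face. A hedged usage sketch of how a caller might react to that error (the file name is a placeholder):

from deepface import DeepFace

try:
    # enforce_detection defaults to True and raises when no face is found;
    # the message now includes the offending file name when one is known.
    faces = DeepFace.extract_faces(img_path="group_photo.jpg")
except ValueError as err:
    print(f"Detection failed: {err}")
    # Fall back to a best-effort crop of the whole frame instead of failing hard.
    faces = DeepFace.extract_faces(img_path="group_photo.jpg", enforce_detection=False)
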
@@ -177,39 +189,38 @@
current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)

# resize and padding
if current_img.shape[0] > 0 and current_img.shape[1] > 0:
factor_0 = target_size[0] / current_img.shape[0]
factor_1 = target_size[1] / current_img.shape[1]
factor = min(factor_0, factor_1)

dsize = (
int(current_img.shape[1] * factor),
int(current_img.shape[0] * factor),
factor_0 = target_size[0] / current_img.shape[0]
factor_1 = target_size[1] / current_img.shape[1]
factor = min(factor_0, factor_1)

dsize = (
int(current_img.shape[1] * factor),
int(current_img.shape[0] * factor),
)
current_img = cv2.resize(current_img, dsize)

diff_0 = target_size[0] - current_img.shape[0]
diff_1 = target_size[1] - current_img.shape[1]
if grayscale is False:
# Put the base image in the middle of the padded image
current_img = np.pad(
current_img,
(
(diff_0 // 2, diff_0 - diff_0 // 2),
(diff_1 // 2, diff_1 - diff_1 // 2),
(0, 0),
),
"constant",
)
else:
current_img = np.pad(
current_img,
(
(diff_0 // 2, diff_0 - diff_0 // 2),
(diff_1 // 2, diff_1 - diff_1 // 2),
),
"constant",
)
current_img = cv2.resize(current_img, dsize)

diff_0 = target_size[0] - current_img.shape[0]
diff_1 = target_size[1] - current_img.shape[1]
if grayscale is False:
# Put the base image in the middle of the padded image
current_img = np.pad(
current_img,
(
(diff_0 // 2, diff_0 - diff_0 // 2),
(diff_1 // 2, diff_1 - diff_1 // 2),
(0, 0),
),
"constant",
)
else:
current_img = np.pad(
current_img,
(
(diff_0 // 2, diff_0 - diff_0 // 2),
(diff_1 // 2, diff_1 - diff_1 // 2),
),
"constant",
)

# double check: if target image is not still the same size with target.
if current_img.shape[0:2] != target_size:
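This hunk mainly drops the outer shape check and dedents the block; the resize-and-pad logic itself is unchanged: the crop is scaled by the limiting factor so it fits inside target_size, then centred with zero padding. A standalone sketch of that letterbox step, assuming a square target size:

import cv2
import numpy as np

def resize_and_pad(face, target_size=(224, 224)):
    """Shrink a face crop to fit target_size, keep its aspect ratio, and zero-pad to centre it."""
    factor = min(target_size[0] / face.shape[0], target_size[1] / face.shape[1])
    # cv2.resize takes (width, height), hence the swapped shape indices.
    resized = cv2.resize(face, (int(face.shape[1] * factor), int(face.shape[0] * factor)))

    diff_0 = target_size[0] - resized.shape[0]
    diff_1 = target_size[1] - resized.shape[1]
    pad = (
        (diff_0 // 2, diff_0 - diff_0 // 2),  # top / bottom
        (diff_1 // 2, diff_1 - diff_1 // 2),  # left / right
    )
    if resized.ndim == 3:  # colour image: leave the channel axis unpadded
        pad = pad + ((0, 0),)
    padded = np.pad(resized, pad, "constant")

    # Double check: rounding can leave the shape one pixel off.
    if padded.shape[0:2] != target_size:
        padded = cv2.resize(padded, target_size)
    return padded
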
5 changes: 3 additions & 2 deletions deepface/commons/logger.py
@@ -1,5 +1,6 @@
import os
import logging
from datetime import datetime

# pylint: disable=broad-except
class Logger:
@@ -17,7 +18,7 @@ def __init__(self, module=None):

def info(self, message):
if self.log_level <= logging.INFO:
self.dump_log(message)
self.dump_log(f"{message}")

def debug(self, message):
if self.log_level <= logging.DEBUG:
@@ -36,4 +37,4 @@ def critical(self, message):
self.dump_log(f"💥 {message}")

def dump_log(self, message):
print(message)
print(f"{str(datetime.now())[2:-7]} - {message}")
19 changes: 17 additions & 2 deletions deepface/detectors/DlibWrapper.py
@@ -6,11 +6,19 @@

logger = Logger(module="detectors.DlibWrapper")


def build_model():

home = functions.get_deepface_home()

import dlib # this requirement is not a must that's why imported here
# this is not a must dependency. do not import it in the global level.
try:
import dlib
except ModuleNotFoundError as e:
raise ImportError(
"Dlib is an optional detector, ensure the library is installed."
"Please install using 'pip install dlib' "
) from e

# check required file exists in the home/.deepface/weights folder
if os.path.isfile(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat") != True:
@@ -40,7 +48,14 @@ def build_model():

def detect_face(detector, img, align=True):

import dlib # this requirement is not a must that's why imported here
# this is not a must dependency. do not import it in the global level.
try:
import dlib
except ModuleNotFoundError as e:
raise ImportError(
"Dlib is an optional detector, ensure the library is installed."
"Please install using 'pip install dlib' "
) from e

resp = []

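The dlib import moves behind a try/except so that dlib stays an optional dependency: users only pay for it when they actually pick the dlib backend, and a missing install produces an ImportError with a pip hint instead of a bare ModuleNotFoundError. A generic sketch of that lazy-import pattern (detect_faces below is illustrative, not DeepFace's wrapper):

def _load_dlib():
    """Import dlib lazily so it remains an optional dependency of this backend."""
    try:
        import dlib  # heavy optional dependency, only needed for this detector
    except ModuleNotFoundError as err:
        raise ImportError(
            "dlib is an optional detector backend. "
            "Install it with 'pip install dlib' to use it."
        ) from err
    return dlib

def detect_faces(gray_img):
    dlib = _load_dlib()
    detector = dlib.get_frontal_face_detector()  # dlib's stock HOG-based detector
    return detector(gray_img, 1)  # 1 = upsample the image once before detecting
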
