diff --git a/src/bma_client_lib/bma_client.py b/src/bma_client_lib/bma_client.py index 1704b6c..0be90c8 100644 --- a/src/bma_client_lib/bma_client.py +++ b/src/bma_client_lib/bma_client.py @@ -25,6 +25,7 @@ ThumbnailJob, ThumbnailSourceJob, ) +from .pillow_resize_and_crop import transform_image logger = logging.getLogger("bma_client") @@ -248,7 +249,7 @@ def write_and_upload_result(self, job: Job, result: "JobResult", filename: str) metadata: dict[str, int | str] = {} if isinstance(job, ImageConversionJob | ThumbnailJob): image, exif = result - if not isinstance(image, Image.Image) or not isinstance(exif, Image.Exif): + if not isinstance(image[0], Image.Image) or not isinstance(exif, Image.Exif): raise TypeError("Fuck") # apply format specific encoding options kwargs = {} @@ -258,7 +259,11 @@ def write_and_upload_result(self, job: Job, result: "JobResult", filename: str) logger.debug(f"Format {job.mimetype} has custom encoding settings, kwargs is now: {kwargs}") else: logger.debug(f"No custom settings for format {job.mimetype}") - image.save(buf, format=job.filetype, exif=exif, **kwargs) + # sequence? + if len(image) > 1: + kwargs["append_images"] = image[1:] + kwargs["save_all"] = True + image[0].save(buf, format=job.filetype, exif=exif, **kwargs) elif isinstance(job, ImageExifExtractionJob): logger.debug(f"Got exif data {result}") @@ -266,10 +271,18 @@ def write_and_upload_result(self, job: Job, result: "JobResult", filename: str) elif isinstance(job, ThumbnailSourceJob): image, exif = result - if not isinstance(image, Image.Image) or not isinstance(exif, Image.Exif): + if not isinstance(image[0], Image.Image) or not isinstance(exif, Image.Exif): raise TypeError("Fuck") - image.save(buf, format="WEBP", lossless=True, quality=1) - metadata = {"width": 500, "height": image.height, "mimetype": "image/webp"} + kwargs = {} + # thumbnailsources are always WEBP + if "image/webp" in self.settings["encoding"]["images"]: + kwargs.update(self.settings["encoding"]["images"]["image/webp"]) + # sequence? + if len(image) > 1: + kwargs["append_images"] = image[1:] + kwargs["save_all"] = True + image[0].save(buf, format="WEBP", **kwargs) + metadata = {"width": 500, "height": image[0].height, "mimetype": "image/webp"} else: logger.error("Unsupported job type") @@ -315,15 +328,11 @@ def handle_image_conversion_job( logger.debug(f"Desired image size is {size}, aspect ratio: {ratio} ({orig_str}), converting image...") start = time.time() - # custom AR or not? - if job.custom_aspect_ratio: - image = ImageOps.fit(image=image, size=size, method=Image.Resampling.LANCZOS, centering=crop_center) # type: ignore[assignment] - else: - image.thumbnail(size=size, resample=Image.Resampling.LANCZOS) + images = transform_image(original_img=image, crop_w=size[0], crop_h=size[1]) logger.debug(f"Converting image size and AR took {time.time() - start} seconds") logger.debug("Done, returning result...") - return image, exif + return images, exif def upload_job_result( self, diff --git a/src/bma_client_lib/datastructures.py b/src/bma_client_lib/datastructures.py index 6813ea0..6f52cd0 100644 --- a/src/bma_client_lib/datastructures.py +++ b/src/bma_client_lib/datastructures.py @@ -4,7 +4,7 @@ from dataclasses import dataclass from typing import TypeAlias -from PIL import Image +from PIL import Image, ImageFile @dataclass @@ -20,6 +20,7 @@ class BaseJob: finished: bool source_url: str source_filename: str + source_mimetype: str schema_name: str @@ -54,7 +55,7 @@ class ThumbnailJob(ImageConversionJob): "ThumbnailJob": ThumbnailJob, } -ImageConversionJobResult: TypeAlias = tuple[Image.Image, Image.Exif] +ImageConversionJobResult: TypeAlias = tuple[list[Image.Image | ImageFile.ImageFile], Image.Exif] ThumbnailSourceJobResult: TypeAlias = ImageConversionJobResult ExifExtractionJobResult: TypeAlias = dict[str, dict[str, str]] JobResult: TypeAlias = ImageConversionJobResult | ExifExtractionJobResult | ThumbnailSourceJobResult diff --git a/src/bma_client_lib/pillow_resize_and_crop.py b/src/bma_client_lib/pillow_resize_and_crop.py new file mode 100644 index 0000000..90123ce --- /dev/null +++ b/src/bma_client_lib/pillow_resize_and_crop.py @@ -0,0 +1,66 @@ +"""Pillow cropping with sequence (gif, webp) support. + +Borrowed from https://gist.github.com/muratgozel/ce1aa99f97fc1a99b3f3ec90cf77e5f5 +""" + +from math import fabs, floor + +from PIL import Image, ImageFile, ImageSequence + + +def transform_image(original_img: Image.Image, crop_w: int, crop_h: int) -> list[Image.Image | ImageFile.ImageFile]: + """Resizes and crops the image to the specified crop_w and crop_h if necessary. + + Works with multi frame gif and webp images. + + Args: + original_img(Image.Image): is the image instance created by pillow ( Image.open(filepath) ) + crop_w(int): is the width in pixels for the image that will be resized and cropped + crop_h(int): is the height in pixels for the image that will be resized and cropped + + returns: + Instance of an Image or list of frames which they are instances of an Image individually + """ + img_w, img_h = (original_img.size[0], original_img.size[1]) + n_frames = getattr(original_img, "n_frames", 1) + + def transform_frame(frame: Image.Image) -> Image.Image | ImageFile.ImageFile: + """Resizes and crops the individual frame in the image.""" + # resize the image to the specified height if crop_w is null in the recipe + if crop_w is None: + if crop_h == img_h: + return frame + new_w = floor(img_w * crop_h / img_h) + new_h = crop_h + return frame.resize((new_w, new_h), resample=Image.Resampling.LANCZOS) + + # return the original image if crop size is equal to img size + if crop_w == img_w and crop_h == img_h: + return frame + + # first resize to get most visible area of the image and then crop + w_diff = fabs(crop_w - img_w) + h_diff = fabs(crop_h - img_h) + enlarge_image = bool(crop_w > img_w or crop_h > img_h) + shrink_image = bool(crop_w < img_w or crop_h < img_h) + + if enlarge_image is True: + new_w = floor(crop_h * img_w / img_h) if h_diff > w_diff else crop_w + new_h = floor(crop_w * img_h / img_w) if h_diff < w_diff else crop_h + + if shrink_image is True: + new_w = crop_w if h_diff > w_diff else floor(crop_h * img_w / img_h) + new_h = crop_h if h_diff < w_diff else floor(crop_w * img_h / img_w) + + left = (new_w - crop_w) // 2 + right = left + crop_w + top = (new_h - crop_h) // 2 + bottom = top + crop_h + + return frame.resize((new_w, new_h), resample=Image.Resampling.LANCZOS).crop((left, top, right, bottom)) + + # single frame image + if n_frames == 1: + return [transform_frame(original_img)] + # in the case of a multiframe image + return [transform_frame(frame) for frame in ImageSequence.Iterator(original_img)]