diff --git a/Makefile b/Makefile
index afa941a..c46b81c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,4 @@
+# Multi-platform build and execution of the services
 .ONESHELL:
 SHELL := /bin/bash
 
@@ -28,7 +29,9 @@ build-push-dimensionality-reduction:
 	docker buildx build --platform linux/amd64,linux/arm64 -t ifcacomputing/dimensionality-reduction-api --push dimensionality_reduction_api
 
 build-push-model-inference:
-	docker buildx build --platform linux/amd64,linux/arm64 -t ifcacomputing/model-inference-api:latest --push model_inference_api
+	docker build -t ghcr.io/grycap/mls-arm-api model_inference_api
+	docker push ghcr.io/grycap/mls-arm-api
+	docker buildx build --platform linux/amd64,linux/arm64 -t ghcr.io/grycap/mls-arm-api --push model_inference_api
 
 run-data-drift-detector:
 	docker run --name data-drift-detection -p 5001:8000 ifcacomputing/data-drift-detection-api
@@ -37,4 +40,24 @@ run-dimensionality-reduction:
 	docker run --name dimensionality-reduction -p 5002:8000 ifcacomputing/dimensionality-reduction-api
 
 run-model-inference:
-	docker run --name model-inference -p 5003:8000 ifcacomputing/model-inference-api
\ No newline at end of file
+	docker run --name model-inference -p 5003:8000 ifcacomputing/model-inference-api
+
+mls:
+	docker build -t ghcr.io/grycap/mls-arm-api model_inference_api
+	docker push ghcr.io/grycap/mls-arm-api
+	docker buildx build --platform linux/amd64,linux/arm64 -t ghcr.io/grycap/mls-arm-api --push model_inference_api
+
+dds:
+	docker build -t ghcr.io/grycap/dds-arm-api detector_api
+	docker push ghcr.io/grycap/dds-arm-api
+	docker buildx build --platform linux/amd64,linux/arm64 -t ghcr.io/grycap/dds-arm-api --push detector_api
+
+emc:
+	docker build -t ghcr.io/grycap/emc-arm-api embedding_matrix
+	docker push ghcr.io/grycap/emc-arm-api
+	docker buildx build --platform linux/amd64,linux/arm64 -t ghcr.io/grycap/emc-arm-api --push embedding_matrix
+
+drs:
+	docker build -t ghcr.io/grycap/drs-arm-api dimensionality_reduction_api
+	docker push ghcr.io/grycap/drs-arm-api
+	docker buildx build --platform linux/amd64,linux/arm64 -t ghcr.io/grycap/drs-arm-api --push dimensionality_reduction_api
diff --git a/creation_service/ML_inference/ml_inference.yaml b/creation_service/ML_inference/ml_inference.yaml
new file mode 100644
index 0000000..62fb98a
--- /dev/null
+++ b/creation_service/ML_inference/ml_inference.yaml
@@ -0,0 +1,30 @@
+functions:
+  oscar:
+  - oscar-cluster:
+      name: mls-service
+      memory: 2Gi
+      cpu: '2'
+      image: ghcr.io/grycap/mls-arm-api
+      script: script.sh
+      log_level: INFO
+      input:
+      - storage_provider: minio.default
+        path: images/input
+      output:
+      - suffix:
+        - txt
+        storage_provider: minio.default
+        path: mls/output
+      - suffix:
+        - png
+        storage_provider: minio.minio1
+        path: drs/input
+
+storage_providers:
+  minio:
+    minio1:
+      endpoint: 'https://minio.frosty-grothendieck5.im.grycap.net'
+      region: us-east-1
+      access_key: minio
+      secret_key: minio123
+      verify: false
diff --git a/creation_service/ML_inference/script.sh b/creation_service/ML_inference/script.sh
new file mode 100644
index 0000000..1ed53d9
--- /dev/null
+++ b/creation_service/ML_inference/script.sh
@@ -0,0 +1,4 @@
+FILE_NAME=$(basename "$INPUT_FILE_PATH")
+OUTPUT_FILE="$TMP_OUTPUT_DIR/$FILE_NAME"
+cp "$INPUT_FILE_PATH" "$OUTPUT_FILE"
+python3 main-service.py "$INPUT_FILE_PATH" "$OUTPUT_FILE"
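For a quick local check of the MLS wrapper above, the two variables that OSCAR injects into the job environment (INPUT_FILE_PATH and TMP_OUTPUT_DIR, the same ones script.sh reads) can be faked by hand. A minimal sketch, assuming it runs inside the mls image where main-service.py sits in the working directory, and that sample.png is an illustrative input:

"""Sketch: local dry run of the MLS job wrapper."""
import os
import subprocess
import tempfile

# Fake the environment that OSCAR would provide to the job
env = dict(
    os.environ,
    INPUT_FILE_PATH="sample.png",         # illustrative test image
    TMP_OUTPUT_DIR=tempfile.mkdtemp(),    # stands in for the OSCAR output dir
)
subprocess.run(["bash", "script.sh"], env=env, check=True)
print("outputs:", os.listdir(env["TMP_OUTPUT_DIR"]))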
diff --git a/creation_service/dimensionality_reduction/dimensional_reduction.yaml b/creation_service/dimensionality_reduction/dimensional_reduction.yaml
new file mode 100644
index 0000000..efcff84
--- /dev/null
+++ b/creation_service/dimensionality_reduction/dimensional_reduction.yaml
@@ -0,0 +1,16 @@
+functions:
+  oscar:
+  - oscar-cluster1:
+      name: drs-service
+      memory: 2Gi
+      cpu: '2'
+      image: ghcr.io/grycap/drs-arm-api
+      script: script.sh
+      log_level: INFO
+      input:
+      - storage_provider: minio
+        path: drs/input
+      output:
+      - storage_provider: minio
+        path: drs/output
+
diff --git a/creation_service/dimensionality_reduction/script.sh b/creation_service/dimensionality_reduction/script.sh
new file mode 100644
index 0000000..ab66d2e
--- /dev/null
+++ b/creation_service/dimensionality_reduction/script.sh
@@ -0,0 +1,4 @@
+FILE_NAME=$(basename "$INPUT_FILE_PATH")
+OUTPUT_FILE="$TMP_OUTPUT_DIR/$FILE_NAME"
+
+python3 main-service.py "$INPUT_FILE_PATH" "$OUTPUT_FILE"
diff --git a/creation_service/drift_detection/drift_detection.yaml b/creation_service/drift_detection/drift_detection.yaml
new file mode 100644
index 0000000..b12df71
--- /dev/null
+++ b/creation_service/drift_detection/drift_detection.yaml
@@ -0,0 +1,25 @@
+functions:
+  oscar:
+  - oscar-cluster1:
+      name: dds-service
+      memory: 2Gi
+      cpu: '2'
+      image: ghcr.io/grycap/dds-arm-api
+      script: script.sh
+      log_level: INFO
+      input:
+      - storage_provider: minio
+        path: emc/output
+      output:
+      - storage_provider: minio.minio-rasp
+        path: dds/output
+      - storage_provider: minio.default
+        path: dds/output
+storage_providers:
+  minio:
+    minio-rasp:
+      endpoint: 'https://minio.graspi.im.grycap.net'
+      region: us-east-1
+      access_key: minio
+      secret_key: minio-aisprint-2021
+      verify: false
diff --git a/creation_service/drift_detection/script.sh b/creation_service/drift_detection/script.sh
new file mode 100644
index 0000000..d283b4d
--- /dev/null
+++ b/creation_service/drift_detection/script.sh
@@ -0,0 +1,3 @@
+FILE_NAME=$(basename "$INPUT_FILE_PATH")
+OUTPUT_FILE="$TMP_OUTPUT_DIR/$FILE_NAME"
+python3 main-service.py "$INPUT_FILE_PATH" "$OUTPUT_FILE"
\ No newline at end of file
diff --git a/creation_service/embedding_matrix/embedding_matrix.yaml b/creation_service/embedding_matrix/embedding_matrix.yaml
new file mode 100644
index 0000000..8d3e4bb
--- /dev/null
+++ b/creation_service/embedding_matrix/embedding_matrix.yaml
@@ -0,0 +1,15 @@
+functions:
+  oscar:
+  - oscar-cluster1:
+      name: emc-service
+      memory: 2Gi
+      cpu: '2'
+      image: ghcr.io/grycap/emc-arm-api
+      script: script.sh
+      log_level: INFO
+      input:
+      - storage_provider: minio
+        path: drs/output
+      output:
+      - storage_provider: minio
+        path: emc/output
diff --git a/creation_service/embedding_matrix/script.sh b/creation_service/embedding_matrix/script.sh
new file mode 100644
index 0000000..5903ce2
--- /dev/null
+++ b/creation_service/embedding_matrix/script.sh
@@ -0,0 +1,3 @@
+FILE_NAME=$(basename "$INPUT_FILE_PATH")
+OUTPUT_FILE="$TMP_OUTPUT_DIR/$FILE_NAME"
+python3 main-service.py "$INPUT_FILE_PATH" "$OUTPUT_FILE"
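The hand-off between the EMC and DDS services is a single JSON object. A sketch of that contract, with field names matching what detector_api/app/main-service.py (below) reads and what the EMC service writes; the embedding values are placeholders:

"""Sketch: the JSON payload the EMC service hands to the DDS service."""
import json

payload = {
    "alpha": 0.05,                      # significance level for the drift test
    "values": [[0.12, -0.34, 0.56]],    # the N embeddings produced by the DRS service
    "return_input_values": True,        # echo the embeddings back in the response
}

with open("emc-output.json", "w") as f:  # illustrative file name
    json.dump(payload, f)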
diff --git a/detector_api/app/detector/detector.pkl b/detector_api/app/detector/detector.pkl
index 1eaf262..e86b391 100644
Binary files a/detector_api/app/detector/detector.pkl and b/detector_api/app/detector/detector.pkl differ
diff --git a/detector_api/app/main-service.py b/detector_api/app/main-service.py
new file mode 100644
index 0000000..079559f
--- /dev/null
+++ b/detector_api/app/main-service.py
@@ -0,0 +1,61 @@
+"""Entry point for the DDS (Drift Detection Service)."""
+
+import json
+import logging
+import sys
+
+import numpy as np
+from detector import Detector
+from schemas import DistanceBasedResponse
+from settings import detector_settings
+
+detector = Detector(
+    settings=detector_settings,
+)
+
+
+def check_drift(data: dict, out: str) -> None:
+    """Check whether drift is present and write the result to the output bucket.
+
+    :param data: input data (alpha, values, return_input_values)
+    :type data: dict
+    :param out: output file path, without extension
+    :type out: str
+    """
+    logging.info("Checking drift...")
+    check_drift_result = detector.check_drift(
+        # `values` must be an array holding the N embeddings
+        values=np.array(data["values"]),
+        alpha=data["alpha"],
+    )
+    return_input_values = data["return_input_values"]
+    values = np.array(data["values"])
+    check_drift_result["distance"] = check_drift_result["distance"].distance
+    if return_input_values:
+        check_drift_result["values"] = values.tolist()  # noqa: PD011
+    result = DistanceBasedResponse(
+        **check_drift_result,
+    )
+    # Save the result of the algorithm in the output bucket (dds/output)
+    with open(str(out) + ".txt", "a") as f:
+        f.write(str(result))
+
+
+# Read the JSON object with the data produced by the EMC service
+with open(sys.argv[1]) as f:
+    data = json.load(f)
+print(data)
+
+# Pass the data and the output path to the drift detection algorithm
+check_drift(data, sys.argv[2])
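The DDS entry point above takes the payload file and an output prefix as positional arguments. A sketch of invoking it by hand inside the dds container, reusing the emc-output.json from the previous example (drift-result is an illustrative output prefix):

"""Sketch: running the DDS entry point manually."""
import subprocess

# Assumes the working directory contains detector_api/app/main-service.py
subprocess.run(
    ["python3", "main-service.py", "emc-output.json", "drift-result"],
    check=True,
)
# The service appends its DistanceBasedResponse to drift-result.txt,
# which OSCAR then syncs to the dds/output bucket(s).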
diff --git a/dimensionality_reduction_api/Dockerfile b/dimensionality_reduction_api/Dockerfile
index 503f95b..39dd3f1 100644
--- a/dimensionality_reduction_api/Dockerfile
+++ b/dimensionality_reduction_api/Dockerfile
@@ -15,4 +15,4 @@ COPY app ./
 
 USER app
 
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "1"]
\ No newline at end of file
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "1"]
diff --git a/dimensionality_reduction_api/app/main-service.py b/dimensionality_reduction_api/app/main-service.py
new file mode 100644
index 0000000..d898de5
--- /dev/null
+++ b/dimensionality_reduction_api/app/main-service.py
@@ -0,0 +1,51 @@
+"""Entry point for the DRS (Dimensionality Reduction Service)."""
+
+import logging
+import sys
+
+import numpy as np
+from PIL import Image
+
+from dr import DimensionalityReduction
+from settings import (
+    encoder_settings,
+    transformer_settings,
+)
+
+dr = DimensionalityReduction(
+    settings_encoder=encoder_settings,
+    settings_transformer=transformer_settings,
+)
+
+
+def dim_red(image: Image.Image, out: str) -> None:
+    """Reduce the dimensionality of an image and write the embedding to the output bucket."""
+    logging.info("Transforming image...")
+    transformed_image = dr.transform(
+        data=image,
+    )
+    logging.info("Image transformed.")
+    logging.info("Encoding image...")
+    reduced_image = dr.encode(
+        data=transformed_image,
+    )
+    logging.info("Image encoded.")
+
+    # Write the embedding to the output bucket
+    with open(str(out) + ".json", "a") as f:
+        f.write(str(np.squeeze(reduced_image).tolist()))
+
+
+# Read the image from the input bucket (drs/input) and pass its content and
+# the output path to the dimensionality reduction function
+img = Image.open(sys.argv[1])
+dim_red(img, sys.argv[2])
diff --git a/dimensionality_reduction_api/app/objects/encoder.pt b/dimensionality_reduction_api/app/objects/encoder.pt
index 70a4e36..55e858a 100644
Binary files a/dimensionality_reduction_api/app/objects/encoder.pt and b/dimensionality_reduction_api/app/objects/encoder.pt differ
diff --git a/dimensionality_reduction_api/app/objects/transformer.pt b/dimensionality_reduction_api/app/objects/transformer.pt
index a1f9f71..b183195 100644
Binary files a/dimensionality_reduction_api/app/objects/transformer.pt and b/dimensionality_reduction_api/app/objects/transformer.pt differ
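The DRS entry point above serialises the squeezed embedding as a Python-literal list, which the EMC service later parses back with ast.literal_eval. A sketch of that round-trip under an assumed local file name:

"""Sketch: reading back a DRS embedding file the way the EMC service does."""
import ast

with open("drs-output.json") as f:   # illustrative object fetched from drs/output
    raw = f.read().strip("\n")

embedding = ast.literal_eval(raw)    # e.g. [0.12, -0.34, 0.56, ...]
print(len(embedding), "dimensions")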
diff --git a/embedding_matrix_api/Dockerfile b/embedding_matrix_api/Dockerfile
new file mode 100644
index 0000000..73462b1
--- /dev/null
+++ b/embedding_matrix_api/Dockerfile
@@ -0,0 +1,20 @@
+FROM python:3.10-slim
+LABEL author="Vicente Rodriguez Benitez"
+
+WORKDIR /app
+
+COPY requirements/requirements.txt .
+
+RUN apt-get -y update && \
+    addgroup --gid 1001 --system app && \
+    adduser --no-create-home --shell /bin/false --disabled-password --uid 1001 --system --group app && \
+    pip install --upgrade -r requirements.txt --no-cache-dir && \
+    rm requirements.txt
+
+
+
+COPY app ./
+
+USER app
+
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "4"]
diff --git a/embedding_matrix_api/__init__.py b/embedding_matrix_api/__init__.py
new file mode 100644
index 0000000..9fc9b63
--- /dev/null
+++ b/embedding_matrix_api/__init__.py
@@ -0,0 +1 @@
+"""Embedding matrix API __init__."""
diff --git a/embedding_matrix_api/app/__init__.py b/embedding_matrix_api/app/__init__.py
new file mode 100644
index 0000000..ade0be5
--- /dev/null
+++ b/embedding_matrix_api/app/__init__.py
@@ -0,0 +1 @@
+"""App __init__."""
diff --git a/embedding_matrix_api/app/api.py b/embedding_matrix_api/app/api.py
new file mode 100644
index 0000000..544865d
--- /dev/null
+++ b/embedding_matrix_api/app/api.py
@@ -0,0 +1,85 @@
+"""API module."""
+
+import logging
+
+from litestar import Router, get, post
+from litestar.enums import RequestEncodingType
+from litestar.params import Body
+from litestar.status_codes import (
+    HTTP_200_OK,
+)
+
+from model import Model
+from schemas import (
+    HealthResponse,
+    ModelInputData,
+    PredictResponse,
+)
+from settings import (
+    api_settings,
+    model_settings,
+    transformer_settings,
+)
+
+model = Model(
+    settings_model=model_settings,
+    settings_transformer=transformer_settings,
+)
+
+
+@post(
+    path="/predict",
+    status_code=HTTP_200_OK,
+)
+async def predict(
+    data: ModelInputData = Body(
+        media_type=RequestEncodingType.MULTI_PART,
+    ),
+) -> PredictResponse:
+    """Predict function.
+
+    :param data: model input data
+    :type data: ModelInputData
+    :return: prediction response
+    :rtype: PredictResponse
+    """
+    image = await data.image
+    logging.info("Transforming image...")
+    transformed_image = model.transform(
+        data=image,
+    )
+    logging.info("Image transformed.")
+    logging.info("Predicting...")
+    prediction = model.predict(
+        data=transformed_image,
+    )
+    logging.info("Prediction made.")
+    return PredictResponse(
+        **prediction,
+    )
+
+
+@get(
+    path="/health",
+    status_code=HTTP_200_OK,
+)
+async def health() -> HealthResponse:
+    """Health check function.
+
+    :return: health check response
+    :rtype: HealthResponse
+    """
+    return HealthResponse(
+        name=api_settings.PROJECT_NAME,
+        api_version=api_settings.VERSION,
+    )
+
+
+api_router = Router(
+    path="/api",
+    tags=["API"],
+    route_handlers=[
+        predict,
+        health,
+    ],
+)
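The api.py above also keeps the HTTP path alive alongside the bucket-driven entry point. A sketch of exercising it with requests, assuming the container's port 8000 is published on a hypothetical host port 5004 and digit.png is an illustrative file:

"""Sketch: calling the embedding-matrix API over HTTP."""
import requests

with open("digit.png", "rb") as f:
    resp = requests.post(
        "http://localhost:5004/api/predict",
        files={"image": f},   # multipart field matching ModelInputData.image
        timeout=30,
    )
resp.raise_for_status()
print(resp.json())            # e.g. {"datetime": "...", "prediction": 7}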
diff --git a/embedding_matrix_api/app/main-service.py b/embedding_matrix_api/app/main-service.py
new file mode 100644
index 0000000..c7b731e
--- /dev/null
+++ b/embedding_matrix_api/app/main-service.py
@@ -0,0 +1,91 @@
+"""Entry point for the EMC (Embedding Matrix Generator) service.
+
+The service lists the output bucket of the DRS service to see whether N
+embeddings have been produced. If fewer than N results are present, it does
+nothing; once N embeddings exist, it collects them into a single array (a
+JSON object) with the results of the DRS service.
+"""
+
+import ast
+import json
+import sys
+import time
+
+from minio import Minio
+
+
+class DataJson:
+    """Container serialised into the JSON payload expected by the DDS service."""
+
+    def __init__(self, alpha, values, return_input_values):
+        self.alpha = alpha
+        self.values = values
+        self.return_input_values = return_input_values
+
+
+print("Running service ...")
+
+# MinIO client that connects to the MinIO server
+client = Minio(
+    "minio.frosty-grothendieck5.im.grycap.net",
+    access_key="minio",
+    secret_key="minio123",
+    secure=True,
+    region="us-east-1",
+)
+
+# Number of embeddings (N) to collect
+obj_count = 890
+
+# List the output bucket of the DRS service
+objects = client.list_objects("drs", recursive=True, prefix="output/")
+obj_list = 0
+
+# Count the number of objects in the bucket
+for obj in objects:
+    obj_list += 1
+
+# Proceed only once more than obj_count + 1 objects are in the bucket
+if obj_list > obj_count + 1:
+    data_files = []
+    objects = client.list_objects("drs", recursive=True, prefix="output/")
+    blank_found = 0
+    for obj in objects:
+        print(obj)
+        # Read the object content from the bucket
+        response = client.get_object(
+            bucket_name="drs",
+            object_name=obj.object_name,
+        )
+        # Convert to string format ('utf-8')
+        content = str(response.read(), "utf-8")
+        print(content)
+        # Skip empty objects (works around the remote MinIO server leaving a
+        # blank object on the first run)
+        if content != "":
+            # Parse the embedding into the input format for the DDS service
+            # and append it to the array of values
+            data_files.append(ast.literal_eval(content.strip("\n")))
+            blank_found = 0
+        else:
+            blank_found = 1
+    # Continue only if no blank object was found
+    if blank_found == 0:
+        # Create the JSON object with the data for the DDS service
+        payload = DataJson(0.05, data_files, True)
+        data = json.dumps(payload.__dict__)
+
+        # Write the file with the N embeddings to the service output bucket
+        out = sys.argv[2]
+        with open(str(out) + ".json", "w") as f:
+            f.write(str(data))
+
+        # Delete the objects in the input bucket (drs/output) so that the
+        # next collection of N embeddings can start
+        objects = client.list_objects("drs", recursive=True, prefix="output/")
+        for obj in objects:
+            client.remove_object("drs", obj.object_name)
+        time.sleep(1)  # give the bucket time to settle
+
+    print("Process completed ...")
+else:
+    print("There are not enough objects in the bucket")
diff --git a/embedding_matrix_api/app/main.py b/embedding_matrix_api/app/main.py
new file mode 100644
index 0000000..def63c7
--- /dev/null
+++ b/embedding_matrix_api/app/main.py
@@ -0,0 +1,46 @@
+"""Main module."""
+
+from api import api_router
+from litestar import Litestar
+from litestar.app import OpenAPIConfig
+from litestar.logging import LoggingConfig
+from litestar.openapi import OpenAPIController
+from settings import api_settings
+
+
+class CustomOpenAPIController(OpenAPIController):
+    """Custom OpenAPI controller."""
+
+    path = "/docs"
+
+
+openapi_config = OpenAPIConfig(
+    title=api_settings.PROJECT_NAME,
+    version=api_settings.VERSION,
+    openapi_controller=CustomOpenAPIController,
+)
+
+logging_config = LoggingConfig(
+    loggers={
+        "app": {
+            "level": "INFO",
+            "handlers": ["queue_listener"],
+        },
+    },
+)
+
+app = Litestar(
+    route_handlers=[api_router],
+    openapi_config=openapi_config,
+    logging_config=logging_config,
+)
+
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run(
+        app,
+        host="0.0.0.0",  # noqa: S104
+        port=5001,
+        log_level="debug",
+    )
diff --git a/embedding_matrix_api/app/model.py b/embedding_matrix_api/app/model.py
new file mode 100644
index 0000000..9e8f786
--- /dev/null
+++ b/embedding_matrix_api/app/model.py
@@ -0,0 +1,162 @@
+"""Model module."""
+
+import datetime
+import logging
+
+import PIL
+import torch
+import torch.nn as nn
+import torchvision
+
+from settings import ModelSettings, TransformerSettings
+from utils import SingletonMeta
+
+
+class CNN(nn.Module):
+    """Convolutional network used for classification."""
+
+    def __init__(self) -> None:
+        super().__init__()
+
+        self.conv1 = nn.Sequential(
+            nn.Conv2d(
+                in_channels=1,
+                out_channels=16,
+                kernel_size=5,
+                stride=1,
+                padding=2,
+            ),
+            nn.ReLU(),
+            nn.MaxPool2d(kernel_size=2),
+        )
+        self.conv2 = nn.Sequential(
+            nn.Conv2d(16, 32, 5, 1, 2),
+            nn.ReLU(),
+            nn.MaxPool2d(2),
+        )
+
+        # fully connected layer, output 10 classes
+        self.out = nn.Linear(32 * 7 * 7, 10)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.conv2(x)
+
+        # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
+        x = x.view(x.size(0), -1)
+        output = self.out(x)
+        return output
+
+
+class Model(metaclass=SingletonMeta):
+    """Model class."""
+
+    def __init__(
+        self: "Model",
+        settings_model: ModelSettings,
+        settings_transformer: TransformerSettings,
+    ) -> None:
+        """Init method."""
+        logging.info("Loading model...")
+        self.model = self.load_model(
+            settings=settings_model,
+        )
+        logging.info("Model loaded.")
+        logging.info("Loading transformer...")
+        self.transformer = self.load_transformer(
+            settings=settings_transformer,
+        )
+        logging.info("Transformer loaded.")
+
+    def load_model(
+        self: "Model",
+        settings: ModelSettings,
+    ) -> CNN:
+        """Load model.
+
+        :return: model
+        :rtype: CNN
+        """
+        model = self._load_model(
+            settings=settings,
+        )
+        return model
+
+    def load_transformer(
+        self: "Model",
+        settings: TransformerSettings,
+    ) -> torchvision.transforms.Compose:
+        """Load transformer.
+
+        :return: transformer
+        :rtype: torchvision.transforms.Compose
+        """
+        transformer = self._load_transformer(
+            settings=settings,
+        )
+        return transformer
+
+    def predict(
+        self: "Model",
+        data: torch.Tensor,
+    ) -> dict[str, str | int | float]:
+        """Predict.
+
+        :param data: data tensor
+        :type data: torch.Tensor
+        :return: prediction information
+        :rtype: dict[str, str | int | float]
+        """
+        with torch.no_grad():
+            pred = self.model(data)
+            class_pred = torch.max(pred, dim=1).indices.item()
+
+        return {
+            "datetime": datetime.datetime.now(tz=datetime.timezone.utc).strftime(
+                "%d/%m/%Y %H:%M:%S.%f",
+            ),
+            "prediction": class_pred,
+        }
+
+    def transform(self, data: PIL.Image) -> torch.Tensor:
+        """Transform data.
+
+        :param data: data
+        :type data: PIL.Image
+        :return: transformed data
+        :rtype: torch.Tensor
+        """
+        transformed = self.transformer(data).unsqueeze(0)
+        return transformed
+
+    @staticmethod
+    def _load_model(settings: ModelSettings) -> CNN:
+        model = CNN()
+        model.eval()
+        model_state_dict = torch.load(
+            f=settings.FILE_PATH,
+        )
+        for k, _v in model_state_dict.copy().items():
+            model_state_dict[k.removeprefix("_orig_mod.")] = model_state_dict.pop(k)
+        model.load_state_dict(state_dict=model_state_dict)
+        return model
+
+    @staticmethod
+    def _load_transformer(
+        settings: TransformerSettings,
+    ) -> torchvision.transforms.Compose:
+        transformer = torch.load(
+            f=settings.FILE_PATH,
+        )
+        return transformer
diff --git a/embedding_matrix_api/app/objects/model.pt b/embedding_matrix_api/app/objects/model.pt
new file mode 100644
index 0000000..9ff0e60
Binary files /dev/null and b/embedding_matrix_api/app/objects/model.pt differ
diff --git a/embedding_matrix_api/app/objects/transformer.pt b/embedding_matrix_api/app/objects/transformer.pt
new file mode 100644
index 0000000..b183195
Binary files /dev/null and b/embedding_matrix_api/app/objects/transformer.pt differ
diff --git a/embedding_matrix_api/app/schemas/__init__.py b/embedding_matrix_api/app/schemas/__init__.py
new file mode 100644
index 0000000..18477e4
--- /dev/null
+++ b/embedding_matrix_api/app/schemas/__init__.py
@@ -0,0 +1,13 @@
+"""Schemas __init__."""
+
+from .health import HealthResponse
+from .model import (
+    ModelInputData,
+    PredictResponse,
+)
+
+__all__ = [
+    "HealthResponse",
+    "ModelInputData",
+    "PredictResponse",
+]
diff --git a/embedding_matrix_api/app/schemas/health.py b/embedding_matrix_api/app/schemas/health.py
new file mode 100644
index 0000000..2552371
--- /dev/null
+++ b/embedding_matrix_api/app/schemas/health.py
@@ -0,0 +1,10 @@
+"""Health schema."""
+
+from pydantic import BaseModel
+
+
+class HealthResponse(BaseModel):
+    """Health schema."""
+
+    name: str
+    api_version: str
diff --git a/embedding_matrix_api/app/schemas/model.py b/embedding_matrix_api/app/schemas/model.py
new file mode 100644
index 0000000..c76c7eb
--- /dev/null
+++ b/embedding_matrix_api/app/schemas/model.py
@@ -0,0 +1,42 @@
+"""Model schemas."""
+
+from io import BytesIO
+
+from PIL import Image, ImageOps
+from litestar.datastructures import UploadFile
+from pydantic import BaseConfig, BaseModel, validator
+
+
+class ModelInputData(BaseModel):
+    """Model input data class."""
+
+    image: UploadFile
+
+    @validator("image", pre=False)
+    async def parse_image(cls, data: UploadFile) -> Image:
+        """Parse image.
+
+        :param data: data
+        :type data: UploadFile
+        :return: image
+        :rtype: Image
+        """
+        data = await data.read()
+        image = ImageOps.grayscale(
+            image=Image.open(
+                fp=BytesIO(
+                    initial_bytes=data,
+                ),
+            ),
+        ).resize((28, 28))
+        return image
+
+    class Config(BaseConfig):
+        arbitrary_types_allowed = True
+
+
+class PredictResponse(BaseModel):
+    """Predict response class."""
+
+    datetime: str
+    prediction: int
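Because Model uses SingletonMeta, it can also be driven in-process, without the HTTP layer. A sketch, assuming it runs from embedding_matrix_api/app with the objects/*.pt files in place, and an illustrative digit.png pre-processed the same way the ModelInputData validator does:

"""Sketch: direct, in-process use of the Model singleton."""
from PIL import Image, ImageOps

from model import Model
from settings import model_settings, transformer_settings

model = Model(
    settings_model=model_settings,
    settings_transformer=transformer_settings,
)
# Thanks to SingletonMeta, a second Model(...) call returns the same instance.

image = ImageOps.grayscale(Image.open("digit.png")).resize((28, 28))
tensor = model.transform(data=image)   # torchvision transform + batch dimension
print(model.predict(data=tensor))      # {'datetime': ..., 'prediction': ...}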
diff --git a/embedding_matrix_api/app/settings.py b/embedding_matrix_api/app/settings.py
new file mode 100644
index 0000000..a722e96
--- /dev/null
+++ b/embedding_matrix_api/app/settings.py
@@ -0,0 +1,36 @@
+"""Settings module."""
+
+from pathlib import Path
+
+from pydantic import BaseSettings
+
+
+class APISettings(BaseSettings):
+    """API settings class.
+
+    Sets the API variables to be used.
+    """
+
+    API_V1_STR: str = "/api/v1"
+    PROJECT_NAME: str = "Embedding Matrix service"
+    VERSION: str = "1.0.0"
+
+
+class ModelSettings(BaseSettings):
+    """Model settings class.
+
+    Sets the model variables to be used.
+    """
+
+    FILE_PATH: Path = Path("objects/model.pt")
+
+
+class TransformerSettings(BaseSettings):
+    """Transformer settings class."""
+
+    FILE_PATH: Path = Path("objects/transformer.pt")
+
+
+api_settings = APISettings()
+model_settings = ModelSettings()
+transformer_settings = TransformerSettings()
diff --git a/embedding_matrix_api/app/utils.py b/embedding_matrix_api/app/utils.py
new file mode 100644
index 0000000..3b642cc
--- /dev/null
+++ b/embedding_matrix_api/app/utils.py
@@ -0,0 +1,14 @@
+"""Utility functions for the app."""
+
+
+class SingletonMeta(type):
+    """Singleton metaclass."""
+
+    _instances = {}
+
+    def __call__(cls, *args, **kwargs):  # noqa: ANN204, ANN101, ANN002, ANN003
+        """Return the existing instance if one exists, otherwise create it."""
+        if cls not in cls._instances:
+            instance = super().__call__(*args, **kwargs)
+            cls._instances[cls] = instance
+        return cls._instances[cls]
diff --git a/embedding_matrix_api/requirements/requirements.txt b/embedding_matrix_api/requirements/requirements.txt
new file mode 100644
index 0000000..14d5311
--- /dev/null
+++ b/embedding_matrix_api/requirements/requirements.txt
@@ -0,0 +1,9 @@
+litestar[standard]==2.0.0a7
+numpy==1.24.3
+--extra-index-url https://download.pytorch.org/whl/cpu
+torch==2.0.1
+torchvision==0.15.2
+Pillow==9.5.0
+minio==7.1.15
+urllib3==2.0.2
+
diff --git a/model_inference_api/app/main-service.py b/model_inference_api/app/main-service.py
new file mode 100644
index 0000000..9e17c63
--- /dev/null
+++ b/model_inference_api/app/main-service.py
@@ -0,0 +1,68 @@
+"""Entry point for the MLS (Model Inference Service)."""
+
+import logging
+import sys
+
+from PIL import Image
+
+from model import Model
+from schemas import PredictResponse
+from settings import (
+    model_settings,
+    transformer_settings,
+)
+
+model = Model(
+    settings_model=model_settings,
+    settings_transformer=transformer_settings,
+)
+
+
+def predict(data: Image.Image) -> PredictResponse:
+    """Predict function.
+
+    :param data: input image
+    :type data: Image.Image
+    :return: prediction response
+    :rtype: PredictResponse
+    """
+    logging.info("Transforming image...")
+    transformed_image = model.transform(
+        data=data,
+    )
+    logging.info("Image transformed.")
+    logging.info("Predicting...")
+    prediction = model.predict(
+        data=transformed_image,
+    )
+    logging.info("Prediction made.")
+    return PredictResponse(
+        **prediction,
+    )
+
+
+# Load the image from the input bucket
+img = Image.open(sys.argv[1])
+
+# Run the inference model through the predict function
+result = predict(img)
+
+# Save the result to a .txt file in the output bucket
+with open(str(sys.argv[2]) + ".txt", "a") as f:
+    f.write(str(result))
diff --git a/model_inference_api/app/objects/transformer.pt b/model_inference_api/app/objects/transformer.pt
index a1f9f71..b183195 100644
Binary files a/model_inference_api/app/objects/transformer.pt and b/model_inference_api/app/objects/transformer.pt differ
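Finally, the whole chain is event-driven: dropping an image into the MLS input bucket triggers everything downstream. A sketch using the endpoint and credentials from ml_inference.yaml (sample.png is an illustrative file):

"""Sketch: triggering the full pipeline by uploading an image to images/input."""
from minio import Minio

client = Minio(
    "minio.frosty-grothendieck5.im.grycap.net",
    access_key="minio",
    secret_key="minio123",
    secure=True,
    region="us-east-1",
)

# The upload event fires mls-service; its .png output lands in drs/input,
# DRS embeddings accumulate in drs/output, EMC batches them into emc/output,
# and DDS finally writes the drift verdict to dds/output.
client.fput_object("images", "input/sample.png", "sample.png")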