Skip to content

Commit

Permalink
Fix wrong model loading: rename Vary-OPT classes (varyOptConfig, varyOPTModel, varyOPTForCausalLM, VaryOptImageProcessor) to their SAM-OPT equivalents and register them under the "sam_opt" model type
Browse files Browse the repository at this point in the history
  • Loading branch information
Matteo Omenetti [email protected] committed Jan 10, 2025
1 parent fa8420b commit b06cc6f
Show file tree
Hide file tree
Showing 3 changed files with 17 additions and 17 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@

from docling_ibm_models.code_formula_model.utils.conversations import conv_v1

from docling_ibm_models.code_formula_model.models.vary_opt import varyOPTForCausalLM
from docling_ibm_models.code_formula_model.models.vary_opt_image_processor import VaryOptImageProcessor
from docling_ibm_models.code_formula_model.models.sam_opt import SamOPTForCausalLM
from docling_ibm_models.code_formula_model.models.sam_opt_image_processor import SamOptImageProcessor


_log = logging.getLogger(__name__)
Expand Down Expand Up @@ -68,10 +68,10 @@ def __init__(
torch.set_num_threads(self._num_threads)

self._tokenizer = AutoTokenizer.from_pretrained(artifacts_path, use_fast=True, padding_side='left')
self._model = varyOPTForCausalLM.from_pretrained(artifacts_path).to(self._device)
self._model = SamOPTForCausalLM.from_pretrained(artifacts_path).to(self._device)
self._model.eval()

self._image_processor = VaryOptImageProcessor.from_pretrained(artifacts_path)
self._image_processor = SamOptImageProcessor.from_pretrained(artifacts_path)

_log.debug("CodeFormulaModel settings: {}".format(self.info()))

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,8 @@
from transformers import OPTConfig, OPTModel, OPTForCausalLM


class varyOptConfig(OPTConfig):
model_type = "vary_opt"
class SamOptConfig(OPTConfig):
model_type = "sam_opt"

def __init__(self, sam_image_size=1024, sam_mm_projector_in=1024, sam_mm_projector_out=768, **kwargs):
super().__init__(**kwargs)
Expand All @@ -39,11 +39,11 @@ def __init__(self, sam_image_size=1024, sam_mm_projector_in=1024, sam_mm_project
self.sam_mm_projector_out = sam_mm_projector_out


class varyOPTModel(OPTModel):
config_class = varyOptConfig
class SamOPTModel(OPTModel):
config_class = SamOptConfig

def __init__(self, config: OPTConfig):
super(varyOPTModel, self).__init__(config)
super(SamOPTModel, self).__init__(config)
self.vision_tower = build_sam_vit_b(image_size=config.sam_image_size)

self.mm_projector = nn.Linear(
Expand Down Expand Up @@ -97,7 +97,7 @@ def forward(

inputs_embeds = torch.stack(new_input_embeds, dim=0)

return super(varyOPTModel, self).forward(
return super(SamOPTModel, self).forward(
input_ids=None,
attention_mask=attention_mask,
past_key_values=past_key_values,
Expand All @@ -109,12 +109,12 @@ def forward(
)


class varyOPTForCausalLM(OPTForCausalLM):
config_class = varyOptConfig
class SamOPTForCausalLM(OPTForCausalLM):
config_class = SamOptConfig

def __init__(self, config):
super(OPTForCausalLM, self).__init__(config)
self.model = varyOPTModel(config)
self.model = SamOPTModel(config)

self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

Expand Down Expand Up @@ -213,5 +213,5 @@ def prepare_inputs_for_generation(
return model_inputs


AutoConfig.register("vary_opt", varyOptConfig)
AutoModelForCausalLM.register(varyOptConfig, varyOPTForCausalLM)
AutoConfig.register("sam_opt", SamOptConfig)
AutoModelForCausalLM.register(SamOptConfig, SamOPTForCausalLM)
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from transformers import AutoImageProcessor


class VaryOptImageProcessor(ImageProcessingMixin):
class SamOptImageProcessor(ImageProcessingMixin):

def __init__(self, size=(1024, 1024), mean=None, std=None, **kwargs):
super().__init__(**kwargs)
Expand All @@ -32,4 +32,4 @@ def __call__(self, image):
return image


AutoImageProcessor.register(VaryOptImageProcessor, VaryOptImageProcessor)
AutoImageProcessor.register(SamOptImageProcessor, SamOptImageProcessor)

0 comments on commit b06cc6f

Please sign in to comment.