
Commit 591fa88

yonigozlan authored and BernardZach committed

uniformize processor Mllama (huggingface#33876)

* uniformize processor Mllama
* nit syntax
* nit
1 parent 206e2a9 commit 591fa88

File tree

2 files changed: +59 -30 lines


src/transformers/models/mllama/processing_mllama.py (+6 -5)

@@ -23,7 +23,6 @@
 from ...image_utils import ImageInput
 from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
 from ...tokenization_utils_base import (
-    BatchEncoding,
     PreTokenizedInput,
     TextInput,
 )
@@ -226,8 +225,10 @@ def __call__(
         self,
         images: Optional[ImageInput] = None,
         text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+        audio=None,
+        videos=None,
         **kwargs: Unpack[MllamaProcessorKwargs],
-    ) -> BatchEncoding:
+    ) -> BatchFeature:
         """
         Main method to prepare text(s) and image(s) to be fed as input to the model. This method forwards the `text`
         arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
@@ -250,7 +251,7 @@ def __call__(
             - `'np'`: Return NumPy `np.ndarray` objects.
             - `'jax'`: Return JAX `jnp.ndarray` objects.
         Returns:
-            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

             - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
             - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
@@ -323,9 +324,9 @@ def __call__(
             data["cross_attention_mask"] = cross_attention_mask

         return_tensors = common_kwargs.pop("return_tensors", None)
-        batch_encoding = BatchFeature(data=data, tensor_type=return_tensors)
+        batch_feature = BatchFeature(data=data, tensor_type=return_tensors)

-        return batch_encoding
+        return batch_feature

     def batch_decode(self, *args, **kwargs):
         """

tests/models/mllama/test_processor_mllama.py (+53 -25)
@@ -13,32 +13,44 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import shutil
+import tempfile
 import unittest
+from typing import Optional

 import numpy as np

 from transformers import MllamaProcessor
 from transformers.testing_utils import require_torch, require_vision
 from transformers.utils import is_vision_available

+from ...test_processing_common import ProcessorTesterMixin
+

 if is_vision_available():
     from PIL import Image


 @require_torch
 @require_vision
-class MllamaProcessorTest(unittest.TestCase):
+class MllamaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
+    processor_class = MllamaProcessor
+
     def setUp(self):
-        self.checkpoint = "hf-internal-testing/mllama-11b"  # TODO: change
-        self.processor = MllamaProcessor.from_pretrained(self.checkpoint)
+        self.checkpoint = "hf-internal-testing/mllama-11b"
+        processor = MllamaProcessor.from_pretrained(self.checkpoint)
         self.image1 = Image.new("RGB", (224, 220))
         self.image2 = Image.new("RGB", (512, 128))
-        self.image_token = self.processor.image_token
-        self.image_token_id = self.processor.image_token_id
-        self.pad_token_id = self.processor.tokenizer.pad_token_id
-        self.bos_token = self.processor.bos_token
-        self.bos_token_id = self.processor.tokenizer.bos_token_id
+        self.image_token = processor.image_token
+        self.image_token_id = processor.image_token_id
+        self.pad_token_id = processor.tokenizer.pad_token_id
+        self.bos_token = processor.bos_token
+        self.bos_token_id = processor.tokenizer.bos_token_id
+        self.tmpdirname = tempfile.mkdtemp()
+        processor.save_pretrained(self.tmpdirname)
+
+    def tearDown(self):
+        shutil.rmtree(self.tmpdirname)
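Instead of keeping a shared `self.processor`, `setUp` now saves the processor to a temporary directory and each test reloads it from there, so the inherited `ProcessorTesterMixin` tests also exercise the save/load path. A minimal sketch of that round-trip, assuming access to the same checkpoint:

import shutil
import tempfile

from transformers import MllamaProcessor

tmpdir = tempfile.mkdtemp()
MllamaProcessor.from_pretrained("hf-internal-testing/mllama-11b").save_pretrained(tmpdir)

processor = MllamaProcessor.from_pretrained(tmpdir)  # each test reloads from disk
shutil.rmtree(tmpdir)  # mirrors tearDown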

     def test_apply_chat_template(self):
         # Message contains content which a mix of lists with images and image urls and string
@@ -64,8 +76,8 @@ def test_apply_chat_template(self):
                 ],
             },
         ]
-
-        rendered = self.processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
+        processor = MllamaProcessor.from_pretrained(self.tmpdirname)
+        rendered = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

         expected_rendered = (
             "<|begin_of_text|>"
@@ -96,7 +108,7 @@ def test_apply_chat_template(self):
                 ],
             },
         ]
-        input_ids = self.processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
+        input_ids = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
         expected_ids = [
             128000,  # <|begin_of_text|>
             128006,  # <|start_header_id|>
@@ -142,15 +154,15 @@ def test_apply_chat_template(self):
             }
         ]

-        rendered = self.processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
+        rendered = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
         expected_rendered = (
             "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
             "Describe this image in two sentences<|image|> Test sentence <|image|>ok\n<|eot_id|>"
             "<|start_header_id|>assistant<|end_header_id|>\n\n"
         )
         self.assertEqual(rendered, expected_rendered)

-        input_ids = self.processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
+        input_ids = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
         # fmt: off
         expected_ids = [
             128000, 128006, 882, 128007, 271, 75885, 420, 2217, 304, 1403, 23719, 128256,
@@ -176,18 +188,19 @@ def test_apply_chat_template(self):
             }
         ]

-        rendered_list = self.processor.apply_chat_template(messages_list, add_generation_prompt=True, tokenize=False)
-        rendered_str = self.processor.apply_chat_template(messages_str, add_generation_prompt=True, tokenize=False)
+        rendered_list = processor.apply_chat_template(messages_list, add_generation_prompt=True, tokenize=False)
+        rendered_str = processor.apply_chat_template(messages_str, add_generation_prompt=True, tokenize=False)
         self.assertEqual(rendered_list, rendered_str)

     def test_process_interleaved_images_prompts_image_splitting(self):
+        processor = MllamaProcessor.from_pretrained(self.tmpdirname)
         # Test that a single image is processed correctly
-        inputs = self.processor(images=self.image2, size={"width": 224, "height": 224})
+        inputs = processor(images=self.image2, size={"width": 224, "height": 224})
         self.assertEqual(inputs["pixel_values"].shape, (1, 1, 4, 3, 224, 224))

         # Test that text is processed correctly
         text = "<|begin_of_text|>This is a test sentence.<|end_of_text|>"
-        inputs = self.processor(text=text)
+        inputs = processor(text=text)
         expected_ids = [128000, 2028, 374, 264, 1296, 11914, 13, 128001]
         self.assertEqual(inputs["input_ids"][0], expected_ids)
         self.assertEqual(inputs["attention_mask"][0], [1] * len(expected_ids))
@@ -197,7 +210,7 @@ def test_process_interleaved_images_prompts_image_splitting(self):
         image_str = "<|image|>"
         text_str = "This is a test sentence."
         text = image_str + text_str
-        inputs = self.processor(
+        inputs = processor(
             text=text,
             images=self.image1,
             size={"width": 128, "height": 128},
@@ -225,7 +238,7 @@ def test_process_interleaved_images_prompts_image_splitting(self):
         ]
         # fmt: onn
         images = [[self.image1], [self.image1, self.image2]]
-        inputs = self.processor(text=text, images=images, padding=True, size={"width": 256, "height": 256})
+        inputs = processor(text=text, images=images, padding=True, size={"width": 256, "height": 256})

         self.assertEqual(inputs["pixel_values"].shape, (2, 2, 4, 3, 256, 256))
         for input_ids_i, attention_mask_i, expected_ids_i in zip(inputs["input_ids"], inputs["attention_mask"], expected_ids):
@@ -264,34 +277,49 @@ def test_process_interleaved_images_prompts_image_error(self):
             "This is a test sentence.",
             "In this other sentence we try some good things",
         ]
-        inputs = self.processor(text=text, images=None, padding=True)
+        processor = MllamaProcessor.from_pretrained(self.tmpdirname)
+        inputs = processor(text=text, images=None, padding=True)
         self.assertIsNotNone(inputs["input_ids"])

         text = [
             "This is a test sentence.<|image|>",
             "In this other sentence we try some good things",
         ]
         with self.assertRaises(ValueError):
-            self.processor(text=text, images=None, padding=True)
+            processor(text=text, images=None, padding=True)

         images = [[self.image1], []]
         with self.assertRaises(ValueError):
-            self.processor(text=text, images=images, padding=True)
+            processor(text=text, images=images, padding=True)

         text = [
             "This is a test sentence.<|image|>",
             "In this other sentence we try some good things<|image|>",
         ]
         with self.assertRaises(ValueError):
-            self.processor(text=text, images=None, padding=True)
+            processor(text=text, images=None, padding=True)

         text = [
             "This is a test sentence.<|image|>",
             "In this other sentence we try some good things<|image|>",
         ]
         images = [[self.image1], [self.image2]]
-        inputs = self.processor(text=text, images=images, padding=True)
+        inputs = processor(text=text, images=images, padding=True)

         images = [[self.image1, self.image2], []]
         with self.assertRaises(ValueError):
-            self.processor(text=text, images=None, padding=True)
+            processor(text=text, images=None, padding=True)
+
+    # Override as MllamaProcessor needs image tokens in prompts
+    def prepare_text_inputs(self, batch_size: Optional[int] = None):
+        if batch_size is None:
+            return "lower newer <|image|>"
+
+        if batch_size < 1:
+            raise ValueError("batch_size must be greater than 0")
+
+        if batch_size == 1:
+            return ["lower newer <|image|>"]
+        return ["lower newer <|image|>", "<|image|> upper older longer string"] + ["<|image|> lower newer"] * (
+            batch_size - 2
+        )
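The `prepare_text_inputs` override guarantees that every prompt generated for the shared mixin tests contains an `<|image|>` token, since the processor raises a `ValueError` for images without matching image tokens (as the error tests above verify). For illustration, the expected outputs of the override:

# prepare_text_inputs()   -> "lower newer <|image|>"
# prepare_text_inputs(1)  -> ["lower newer <|image|>"]
# prepare_text_inputs(3)  -> ["lower newer <|image|>",
#                             "<|image|> upper older longer string",
#                             "<|image|> lower newer"]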
