diff --git a/xinference/model/llm/llm_family.json b/xinference/model/llm/llm_family.json
index 5de369b444..799fb472de 100644
--- a/xinference/model/llm/llm_family.json
+++ b/xinference/model/llm/llm_family.json
@@ -10948,6 +10948,53 @@
             "<|im_end|>"
         ]
     },
+    {
+        "version": 1,
+        "context_length": 131072,
+        "model_name": "mPLUG-Owl3",
+        "model_lang": [
+            "en",
+            "zh"
+        ],
+        "model_ability": [
+            "chat",
+            "vision"
+        ],
+        "model_description": "mPLUG-Owl3: Towards Long Image-Sequence Understanding in Multi-Modal Large Language Models",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 1,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mPLUG/mPLUG-Owl3-1B-241014"
+            },
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 2,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mPLUG/mPLUG-Owl3-2B-241014"
+            },
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "mPLUG/mPLUG-Owl3-7B-241101"
+            }
+        ],
+        "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+        "stop_token_ids": [
+            151645
+        ],
+        "stop": [
+            "<|im_end|>"
+        ]
+    },
     {
         "version": 1,
         "context_length": 4096,
diff --git a/xinference/model/llm/llm_family_modelscope.json b/xinference/model/llm/llm_family_modelscope.json
index 2b45e16964..9a1649036d 100644
--- a/xinference/model/llm/llm_family_modelscope.json
+++ b/xinference/model/llm/llm_family_modelscope.json
@@ -8724,6 +8724,56 @@
             "<|im_end|>"
         ]
     },
+    {
+        "version": 1,
+        "context_length": 131072,
+        "model_name": "mPLUG-Owl3",
+        "model_lang": [
+            "en",
+            "zh"
+        ],
+        "model_ability": [
+            "chat",
+            "vision"
+        ],
+        "model_description": "mPLUG-Owl3: Towards Long Image-Sequence Understanding in Multi-Modal Large Language Models",
+        "model_specs": [
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 1,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "iic/mPLUG-Owl3-1B-241014",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 2,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "iic/mPLUG-Owl3-2B-241014",
+                "model_hub": "modelscope"
+            },
+            {
+                "model_format": "pytorch",
+                "model_size_in_billions": 7,
+                "quantizations": [
+                    "none"
+                ],
+                "model_id": "iic/mPLUG-Owl3-7B-241101",
+                "model_hub": "modelscope"
+            }
+        ],
+        "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+        "stop_token_ids": [
+            151645
+        ],
+        "stop": [
+            "<|im_end|>"
+        ]
+    },
     {
         "version": 1,
         "context_length": 4096,
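
For a quick smoke test of the new entries, a minimal sketch using the Python client. This is not part of the patch: it assumes a local xinference server on the default port 9997, that the pytorch-only spec runs via the Transformers engine, and uses a placeholder image URL.

```python
# Minimal smoke-test sketch (assumptions noted above; not part of the patch).
from xinference.client import Client

client = Client("http://127.0.0.1:9997")

# Launch the 7B variant; model_format/quantization mirror the
# "pytorch"/"none" fields declared in the new spec.
model_uid = client.launch_model(
    model_name="mPLUG-Owl3",
    model_engine="transformers",  # assumption: pytorch specs serve via Transformers
    model_format="pytorch",
    model_size_in_billions=7,
    quantization="none",
)

model = client.get_model(model_uid)

# OpenAI-style multimodal message, exercising the "vision" ability.
response = model.chat(
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image."},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://example.com/sample.png"},  # placeholder
                },
            ],
        }
    ]
)
print(response["choices"][0]["message"]["content"])
```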