Skip to content

Commit 3d0c70c

Browse files
committed
ready for open-sourcing; adjust the local paths
1 parent 1e22b8b commit 3d0c70c

File tree

8 files changed

+6
-13
lines changed

8 files changed

+6
-13
lines changed

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ Our code supports several archs
5050
- qwen2vl: for Qwen2-VL
5151
- mllama: for Llama3.2-Vision
5252

53+
If you are using the OpenAI APIs, please remember to specify the model_name and your customized api_key and api_base in [load_openai.py](./models/load_openai.py)
5354

5455
First, download the [dataset](https://huggingface.co/datasets/Foreshhh/vlsbench) in huggingface and specify the downloaded dir as the `ROOT_DIR`.
5556

eval.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ def eval_main(data, data_root, model_inference, output_path):
7272
if __name__ == "__main__":
7373
parser = argparse.ArgumentParser()
7474
parser.add_argument("--arch", type=str, default="llava") # if you specify the openai model, you need to specify the api name in load_openai.py
75-
parser.add_argument("--data_root", type=str, default='/mnt/hwfile/trustai/huxuhao/vlsbench')
75+
parser.add_argument("--data_root", type=str, default='~/vlsbench')
7676
parser.add_argument("--output_dir", type=str, default='./outputs')
7777
args = parser.parse_args()
7878

eval_utils.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,6 @@ class VLSBENCHOUTPUT(BaseModel):
5959
def get_openai_structered_eval_answer(
6060
data_messages, model_name, response_class=VLSBENCHOUTPUT
6161
):
62-
os.environ["http_proxy"] = "http://10.1.20.57:23128"
63-
os.environ["https_proxy"] = "http://10.1.20.57:23128"
6462
api_base = f"https://api.openai.com/v1"
6563
api_key = os.environ.get("OPENAI_API_KEY")
6664
client = OpenAI(api_key=api_key, base_url=api_base)

models/load_llava.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ def load_image(image_file):
3434

3535
# load_model
3636
print(f"[INFO] Load llava from origin!")
37-
model_path = "/mnt/hwfile/trustai/huxuhao/models/llava-v1.5-7b"
37+
model_path = "liuhaotian/llava-v1.5-7b"
3838
model_name = get_model_name_from_path(model_path)
3939
tokenizer, model, image_processor, context_len = load_pretrained_model(
4040
model_path, None, model_name

models/load_llava_hf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919
# load_model
2020
print(f"[INFO] Load llavaforcontionalgeneration")
21-
model_path = "/mnt/hwfile/trustai/huxuhao/models/llava-1.5-7b-hf"
21+
model_path = "llava_hf/llava-1.5-7b-hf"
2222
model = LlavaForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map='cuda')
2323
processor = LlavaProcessor.from_pretrained(model_path)
2424

models/load_mllama.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111

1212
# load model
1313
print(f"[INFO] Load MllamaForConditionalGeneration")
14-
model_path = "/mnt/hwfile/trustai/huxuhao/models/Llama-3.2-11B-Vision-Instruct"
14+
model_path = "meta-llama/Llama-3.2-11B-Vision-Instruct"
1515
model = MllamaForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map='auto').to("cuda")
1616
processor = AutoProcessor.from_pretrained(model_path)
1717

models/load_openai.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,19 +17,13 @@
1717
api_base = ""
1818

1919
if model_name == 'local':
20-
if "http_proxy" in os.environ:
21-
del os.environ["http_proxy"]
22-
if 'HTTP_PROXY' in os.environ:
23-
del os.environ["HTTP_PROXY"]
2420
client = OpenAI(api_key=api_key, base_url=api_base)
2521
model_name = client.models.list().data[0].id
2622
else:
2723
assert model_name in CLOSE_API_NAME
2824
if not api_key and not api_base: # not set, then default
2925
api_key = os.environ.get("OPENAI_API_KEY")
3026
api_base = f"https://api.openai.com/v1"
31-
os.environ['http_proxy'] = "http://10.1.20.57:23128"
32-
os.environ['https_proxy'] = "http://10.1.20.57:23128"
3327
client = OpenAI(api_key=api_key, base_url=api_base)
3428
print(model_name)
3529

models/load_qwen2vl.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111

1212
# load model
1313
print(f"[INFO] Load qwenvl2forconditionalgeneration")
14-
model_path = "/mnt/hwfile/trustai/huxuhao/models/Qwen2-VL-7B-Instruct"
14+
model_path = "Qwen/Qwen2-VL-7B-Instruct"
1515
model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.bfloat16).to("cuda")
1616
processor = AutoProcessor.from_pretrained(model_path)
1717

0 commit comments

Comments
 (0)