-# launch server
-# python -m sglang.launch_server --model mistralai/Mistral-7B-Instruct-v0.3 --lora-paths /home/ying/test_lora lora1=/home/ying/test_lora_1 lora2=/home/ying/test_lora_2 --disable-radix --disable-cuda-graph --max-loras-per-batch 4
-
-# send requests
-# lora_path[i] specifies the LoRA used for text[i], so make sure they have the same length
-# use None to send a prompt to the base model only, e.g. "lora_path": [None, "/home/ying/test_lora"]
-import json
-
-import requests
-
-url = "http://127.0.0.1:30000"
-json_data = {
-    "text": [
-        "prompt 1",
-        "prompt 2",
-        "prompt 3",
-        "prompt 4",
-        "prompt 5",
-        "prompt 6",
-        "prompt 7",
-    ],
-    "sampling_params": {"max_new_tokens": 32},
-    "lora_path": [
-        "/home/ying/test_lora",
-        "lora1",
-        "lora2",
-        "lora1",
-        "lora2",
-        None,
-        None,
-    ],
-}
-response = requests.post(
-    url + "/generate",
-    json=json_data,
-)
-print(json.dumps(response.json()))
| 1 | +""" |
| 2 | +OpenAI-compatible LoRA adapter usage with SGLang. |
| 3 | +
|
| 4 | +Server Setup: |
| 5 | + python -m sglang.launch_server \\ |
| 6 | + --model meta-llama/Llama-3.1-8B-Instruct \\ |
| 7 | + --enable-lora \\ |
| 8 | + --lora-paths sql=/path/to/sql python=/path/to/python |
| 9 | +""" |
| 10 | + |
| 11 | +import openai |
| 12 | + |
| 13 | +client = openai.Client(base_url="http://127.0.0.1:30000/v1", api_key="EMPTY") |
| 14 | + |
| 15 | + |
| 16 | +def main(): |
| 17 | + print("SGLang OpenAI-Compatible LoRA Examples\n") |
| 18 | + |
| 19 | + # Example 1: NEW - Adapter in model parameter (OpenAI-compatible) |
| 20 | + print("1. Chat with LoRA adapter in model parameter:") |
| 21 | + response = client.chat.completions.create( |
+        model="meta-llama/Llama-3.1-8B-Instruct:sql",  # ← "<model>:<adapter>" syntax
+        messages=[{"role": "user", "content": "Convert to SQL: show all users"}],
+        max_tokens=50,
+    )
+    print(f" Response: {response.choices[0].message.content}\n")
+
+    # Example 2: Completions API with adapter
+    print("2. Completion with LoRA adapter:")
+    response = client.completions.create(
+        model="meta-llama/Llama-3.1-8B-Instruct:python",
+        prompt="def fibonacci(n):",
+        max_tokens=50,
+    )
+    print(f" Response: {response.choices[0].text}\n")
+
+    # Example 3: OLD - Backward compatible with explicit lora_path
+    print("3. Backward compatible (explicit lora_path):")
+    response = client.chat.completions.create(
+        model="meta-llama/Llama-3.1-8B-Instruct",
+        messages=[{"role": "user", "content": "Convert to SQL: show all users"}],
+        extra_body={"lora_path": "sql"},
+        max_tokens=50,
+    )
+    print(f" Response: {response.choices[0].message.content}\n")
+
+    # Example 4: Base model (no adapter)
+    print("4. Base model without adapter:")
+    response = client.chat.completions.create(
+        model="meta-llama/Llama-3.1-8B-Instruct",
+        messages=[{"role": "user", "content": "Hello!"}],
+        max_tokens=30,
+    )
+    print(f" Response: {response.choices[0].message.content}\n")
+
+    print("All examples completed!")
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as e:
+        print(f"Error: {e}")
+        print(
+            "\nEnsure server is running:\n"
+            " python -m sglang.launch_server --model ... --enable-lora --lora-paths ..."
+        )
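
The same adapter selection can also be checked without the openai client, by posting directly to the server's OpenAI-compatible chat completions route, much like the removed /generate example did with requests. A minimal sketch, assuming the server from the docstring above is running locally on port 30000 with the "sql" adapter registered:

import requests

# Same model:adapter selection as Example 1, expressed as a raw HTTP request
# against the OpenAI-compatible endpoint served by SGLang.
payload = {
    "model": "meta-llama/Llama-3.1-8B-Instruct:sql",
    "messages": [{"role": "user", "content": "Convert to SQL: show all users"}],
    "max_tokens": 50,
}
resp = requests.post("http://127.0.0.1:30000/v1/chat/completions", json=payload)
print(resp.json()["choices"][0]["message"]["content"])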