diff --git a/humanscript b/humanscript
index 364202c..cf72c69 100755
--- a/humanscript
+++ b/humanscript
@@ -13,7 +13,7 @@ source "${CONFIG_FILE}"
 # Defaults
 HUMANSCRIPT_API_KEY="${HUMANSCRIPT_API_KEY:-"sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}"
 HUMANSCRIPT_MODEL="${HUMANSCRIPT_MODEL:-"gpt-4"}"
-HUMANSCRIPT_API="${HUMANSCRIPT_API:-"https://api.openai.com"}"
+HUMANSCRIPT_API="${HUMANSCRIPT_API:-"https://api.openai.com/v1"}"
 HUMANSCRIPT_EXECUTE="${HUMANSCRIPT_EXECUTE:-"true"}"
 HUMANSCRIPT_REGENERATE="${HUMANSCRIPT_REGENERATE:-"false"}"
 
@@ -67,7 +67,7 @@ data=$(echo '{
 }' | jq)
 
 # Send request and stream chunked responses back
-curl --silent --show-error "${HUMANSCRIPT_API}/v1/chat/completions" \
+curl --silent --show-error "${HUMANSCRIPT_API}/chat/completions" \
     --header "Content-Type: application/json" \
     --header "Authorization: Bearer ${HUMANSCRIPT_API_KEY}" \
     --data "${data}" | \
diff --git a/readme.md b/readme.md
index 80ecc34..356e9fe 100644
--- a/readme.md
+++ b/readme.md
@@ -160,14 +160,14 @@
 $ HUMANSCRIPT_REGENERATE="true" ./asciiman
 ```
 
 ### `HUMANSCRIPT_API`
 
-Default: `https://api.openai.com`
+Default: `https://api.openai.com/v1`
 
 A server following OpenAI's Chat Completion API. Many local proxies exist that implement this API in front of locally running LLMs like Llama 2. [LM Studio](https://lmstudio.ai/) is a good option.
 
 ```shell
-HUMANSCRIPT_API="http://localhost:1234"
+HUMANSCRIPT_API="http://localhost:1234/v1"
 ```
 
 ### `HUMANSCRIPT_API_KEY`