-
Notifications
You must be signed in to change notification settings - Fork 11
/
Copy pathopenai-sample-script.R
88 lines (71 loc) · 2.77 KB
/
openai-sample-script.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
library(httr2)
##
## Update the values below to match your OpenAI deployment
##
# Read the API key from a local file (kept out of version control).
# Take only the first line and strip surrounding whitespace: readLines()
# with no `n` returns every line of the file, so a trailing blank line
# would make OPENAI_KEY a length-2 vector and break Sys.setenv().
OPENAI_KEY <- trimws(readLines("openai_key.txt", n = 1L))
Sys.setenv(OPENAI_API_KEY = OPENAI_KEY)
# Edit the following line with the endpoint of the API.
# You can find this in the Azure portal under "Keys and Endpoint".
ENDPOINT <- "https://openai-test-20230120.openai.azure.com/"
# Edit the following line with the model you want to use.
# You can find options in the Azure portal under "Model Deployments"
DEPLOYMENT <- "text-davinci-003"
##
## Manually generate a response via the OpenAI API
##
# Build the Azure OpenAI completions URL:
#   <endpoint>/openai/deployments/<deployment>/completions?api-version=...
# req_url_path_append() accepts multiple path components in one call.
api_version <- "2022-12-01"
req <- request(ENDPOINT) %>%
  req_url_path_append("openai/deployments", DEPLOYMENT, "completions") %>%
  req_url_query(`api-version` = api_version)
# Request body for the completions endpoint, written as a raw JSON string.
# "max_tokens" caps the length of the generated text; "temperature" and
# "top_p" control sampling randomness; "stop": null means no stop sequence.
payload <- '{
"prompt": "Hello world",
"max_tokens": 100,
"temperature": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"top_p": 0.5,
"best_of": 1,
"stop": null
}'
# Attach the API key header and the JSON body, then perform the request.
# Azure OpenAI authenticates with an "api-key" header (not a Bearer token).
req <- req %>%
  req_headers(`api-key` = Sys.getenv("OPENAI_API_KEY")) %>%
  req_body_raw(payload, "application/json")
result <- req %>% req_perform()
# Parse the JSON response; the generated text lives in choices[[1]]$text.
completion <- resp_body_json(result)
cat(completion$choices[[1]]$text, "\n")
# Use the openai() helper function to automate the request/response cycle
source("openai.R")

# Basic usage with the default model (text-davinci-003)
openai("Tell me a joke")
openai("Tell me a dad joke")

# Results are non-deterministic, and not necessarily factual
openai("What is the date today?")
openai("What is the date today?")

# Exploring Codex, the code-generation model used by Copilot
openai("fibonacci <- function (n)", model = "code-davinci-002")
openai("# read data from file", model = "code-davinci-002")
openai("# read data from file\ndata <- ", model = "code-davinci-002")
openai("# read data from file penguins.csv\ndata <- ", model = "code-davinci-002")

# davinci-003 is more powerful than davinci-002, which beats curie
openai("Write a limerick about the python language", model = "text-davinci-003")
openai("Write a limerick about the python language", model = "text-davinci-002")
openai("Write a limerick about the python language", model = "text-curie-001")

# Exploring token generation: vary max_tokens to see output grow token by token
openai("An unusual cat name is:", model = "text-davinci-002")
openai("An unusual cat name is:", model = "text-davinci-002", max_tokens = 1)
openai("An unusual cat name is:", model = "text-davinci-002", max_tokens = 2)
openai("An unusual cat name is:", model = "text-davinci-002", max_tokens = 3)
openai("An unusual cat name is:", model = "text-davinci-002", max_tokens = 4)
# Feed each generated token back into the prompt and regenerate,
# extending the completion one token per iteration (10 tokens total).
newprompt <- "An unusually long name for a cat is: C"
for (step in seq_len(10)) {
  next_token <- openai(
    newprompt,
    model = "text-davinci-002",
    max_tokens = 1,
    print.it = FALSE
  )
  newprompt <- paste0(newprompt, next_token)
  print(newprompt)
}