Commit 3a9f003: Make minor changes

hauselin committed Jul 1, 2024
1 parent e1c5cc4

Showing 5 changed files with 46 additions and 23 deletions.
32 changes: 26 additions & 6 deletions R/ollama.R
@@ -8,12 +8,28 @@ package_config <- list(
#' Create a httr2 request object.
#'
#' Creates a httr2 request object with base URL, headers and endpoint. Used by other functions in the package and not intended to be used directly.
#'
#' @param endpoint The endpoint to create the request
-#' @param address The base URL to use. Default is NULL, which uses http://127.0.0.1:11434
+#' @param host The base URL to use. Default is NULL, which uses http://127.0.0.1:11434
#'
#' @return A httr2 request object.
#' @export
@@ -22,10 +38,13 @@ package_config <- list(
#' create_request("/api/tags")
#' create_request("/api/chat")
#' create_request("/api/embeddings")
-create_request <- function(endpoint, address = NULL) {
+create_request <- function(endpoint, host = NULL) {

-url <- package_config$baseurls[1] # use default base URL
-if (!is.null(address)) url <- address # use custom base URL
+if (is.null(host)) {
+  url <- package_config$baseurls[1] # use default base URL
+} else {
+  url <- host # use custom base URL
+}
url <- httr2::url_parse(url)
url$path <- endpoint
req <- httr2::request(httr2::url_build(url))
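For orientation, a minimal sketch of how the updated signature can be called; the second address below is a placeholder chosen for illustration, not a value taken from this commit:

req <- create_request("/api/tags") # default server, http://127.0.0.1:11434
req <- create_request("/api/tags", host = "http://192.168.1.10:11434") # placeholder custom host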
@@ -42,6 +61,7 @@ create_request <- function(endpoint, address = NULL) {
#'
#' @param output The output format. Default is "df". Other options are "resp", "jsonlist", "raw", "text".
#' @param endpoint The endpoint to get the models. Default is "/api/tags".
+#' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
#'
#' @return A response in the format specified in the output parameter.
#' @export
@@ -52,12 +72,12 @@ create_request <- function(endpoint, address = NULL) {
#' list_models("resp") # httr2 response object
#' list_models("jsonlist")
#' list_models("raw")
-list_models <- function(output = c("df", "resp", "jsonlist", "raw", "text"), endpoint = "/api/tags") {
+list_models <- function(output = c("df", "resp", "jsonlist", "raw", "text"), endpoint = "/api/tags", host = NULL) {

if (!output[1] %in% c("df", "resp", "jsonlist", "raw", "text")) {
stop("Invalid output format specified. Supported formats are 'df', 'resp', 'jsonlist', 'raw', 'text'.")
}
-req <- create_request(endpoint)
+req <- create_request(endpoint, host)
req <- httr2::req_method(req, "GET")
tryCatch({
resp <- httr2::req_perform(req)
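Likewise, a sketch of how the new host argument to list_models() might be used, assuming it is simply forwarded to create_request() as in the hunk above; the remote address is again a placeholder:

models <- list_models("df") # query the default local server
models <- list_models("df", host = "http://192.168.1.10:11434") # same query against a placeholder custom host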
14 changes: 7 additions & 7 deletions README.Rmd
@@ -47,7 +47,7 @@ If it doesn't work or you don't have `devtools` installed, please run `install.p
library(ollamar)
test_connection() # test connection to Ollama server; returns a httr2 response object
-# Ollama local server running
+# Ollama local server running successfully
# <httr2_response>
list_models() # list available models (models you've pulled/downloaded)
@@ -102,10 +102,10 @@ messages <- list(
)
resp <- chat("llama3", messages) # default returns httr2 response object
resp # <httr2_response>
-chat("llama3", messages, "df") # data frame/tibble
-chat("llama3", messages, "raw") # raw string
-chat("llama3", messages, "jsonlist") # list
-chat("llama3", messages, "text") # text vector
+chat("llama3", messages, output = "df") # data frame/tibble
+chat("llama3", messages, output = "raw") # raw string
+chat("llama3", messages, output = "jsonlist") # list
+chat("llama3", messages, output = "text", stream = FALSE) # text vector
messages <- list(
list(role = "user", content = "Hello!"),
@@ -114,7 +114,7 @@ messages <- list(
list(role = "assistant", content = "Rishi Sunak"),
list(role = "user", content = "List all the previous messages.")
)
-chat("llama3", messages, "text")
+chat("llama3", messages, output = "df")
```

#### Streaming responses
@@ -129,7 +129,7 @@ messages <- list(
)
# use "llama3" model, provide list of messages, return text/vector output, and stream the output
-chat("llama3", messages, "text", TRUE)
+chat("llama3", messages, output = "text", stream = TRUE)
# chat(model = "llama3", messages = messages, output = "text", stream = TRUE) # same as above
```

14 changes: 7 additions & 7 deletions README.md
@@ -52,7 +52,7 @@ section](#notes) below for more information.
library(ollamar)

test_connection() # test connection to Ollama server; returns a httr2 response object
-# Ollama local server running
+# Ollama local server running successfully
# <httr2_response>

list_models() # list available models (models you've pulled/downloaded)
@@ -117,10 +117,10 @@ messages <- list(
)
resp <- chat("llama3", messages) # default returns httr2 response object
resp # <httr2_response>
-chat("llama3", messages, "df") # data frame/tibble
-chat("llama3", messages, "raw") # raw string
-chat("llama3", messages, "jsonlist") # list
-chat("llama3", messages, "text") # text vector
+chat("llama3", messages, output = "df") # data frame/tibble
+chat("llama3", messages, output = "raw") # raw string
+chat("llama3", messages, output = "jsonlist") # list
+chat("llama3", messages, output = "text", stream = FALSE) # text vector

messages <- list(
list(role = "user", content = "Hello!"),
@@ -129,7 +129,7 @@ messages <- list(
list(role = "assistant", content = "Rishi Sunak"),
list(role = "user", content = "List all the previous messages.")
)
-chat("llama3", messages, "text")
+chat("llama3", messages, output = "df")
```

#### Streaming responses
@@ -144,7 +144,7 @@ messages <- list(
)

# use "llama3" model, provide list of messages, return text/vector output, and stream the output
-chat("llama3", messages, "text", TRUE)
+chat("llama3", messages, output = "text", stream = TRUE)
# chat(model = "llama3", messages = messages, output = "text", stream = TRUE) # same as above
```

4 changes: 2 additions & 2 deletions man/create_request.Rd

Generated file; diff not rendered by default.

5 changes: 4 additions & 1 deletion man/list_models.Rd

Generated file; diff not rendered by default.
