diff --git a/NEWS.md b/NEWS.md
index 58ea07e..9beec2b 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,7 +1,7 @@
# ollamar (development version)
- `generate()` and `chat()` support [structured output](https://ollama.com/blog/structured-outputs) via `format` parameter.
-- `test_connection()` returns boolean instead of `httr2` object. #29
+- `test_connection()` returns an `httr2::response` object by default, but also supports returning a logical value. #29
- `chat()` supports [tool calling](https://ollama.com/blog/tool-support) via `tools` parameter. Added `get_tool_calls()` helper function to process tools. #30
- Simplify README and add Get started vignette with more examples.
@@ -29,8 +29,8 @@
## Bug fixes
-- Fixed invalid URLs.
-- Updated title and description.
+- Fixed invalid URLs.
+- Updated title and description.
# ollamar 1.0.0
@@ -38,8 +38,8 @@
## New features
-- Integrate R with Ollama to run language models locally on your own machine.
-- Include `test_connection()` function to test connection to Ollama server.
+- Integrate R with Ollama to run language models locally on your own machine.
+- Include `test_connection()` function to test connection to Ollama server.
- Include `list_models()` function to list available models.
- Include `pull()` function to pull a model from Ollama server.
- Include `delete()` function to delete a model from Ollama server.
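
For context on the structured output and tool calling entries above, a minimal sketch of the `format` and `tools` parameters (assumes a running Ollama server, a pulled `llama3.1` model, and `tool1` defined as in the Get started vignette, i.e. a plain R function registered as a tool):

```r
library(ollamar)

# structured output: a JSON schema expressed as an R list constrains the reply
format <- list(
  type = "object",
  properties = list(
    name = list(type = "string"),
    capital = list(type = "string")
  ),
  required = list("name", "capital")
)
generate("llama3.1", "Tell me about Canada.", format = format, output = "text")

# tool calling: the model picks among the supplied tools; invoke with do.call()
msg <- create_message("add three plus four")
resp <- chat("llama3.1", msg, tools = list(tool1), output = "tools")
do.call(resp[[1]]$name, resp[[1]]$arguments)
```
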
diff --git a/R/ollama.R b/R/ollama.R
index 77d8045..e3f82b6 100644
--- a/R/ollama.R
+++ b/R/ollama.R
@@ -76,7 +76,7 @@ create_request <- function(endpoint, host = NULL) {
#' @references
#' [API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-completion)
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' # text prompt
#' generate("llama3", "The sky is...", stream = FALSE, output = "df")
#' # stream and increase temperature
@@ -187,7 +187,7 @@ generate <- function(model, prompt, suffix = "", images = "", format = list(), s
#' @return A response in the format specified in the output parameter.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' # one message
#' messages <- list(
#' list(role = "user", content = "How are you doing?")
@@ -306,7 +306,7 @@ chat <- function(model, messages, tools = list(), stream = FALSE, format = list(
#' @return A response in the format specified in the output parameter.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' create("mario", "FROM llama3\nSYSTEM You are mario from Super Mario Bros.")
#' generate("mario", "who are you?", output = "text") # model should say it's Mario
#' delete("mario") # delete the model created above
@@ -388,7 +388,7 @@ create <- function(name, modelfile = NULL, stream = FALSE, path = NULL, endpoint
#' @return A response in the format specified in the output parameter.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' list_models() # returns dataframe
#' list_models("df") # returns dataframe
#' list_models("resp") # httr2 response object
@@ -435,7 +435,7 @@ list_models <- function(output = c("df", "resp", "jsonlist", "raw", "text"), end
#' @return A response in the format specified in the output parameter.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' # show("llama3") # returns jsonlist
#' show("llama3", output = "resp") # returns response object
show <- function(name, verbose = FALSE, output = c("jsonlist", "resp", "raw"), endpoint = "/api/show", host = NULL) {
@@ -482,7 +482,7 @@ show <- function(name, verbose = FALSE, output = c("jsonlist", "resp", "raw"), e
#' @return A httr2 response object.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' copy("llama3", "llama3_copy")
#' delete("llama3_copy") # delete the model was just got copied
copy <- function(source, destination, endpoint = "/api/copy", host = NULL) {
@@ -576,7 +576,7 @@ delete <- function(name, endpoint = "/api/delete", host = NULL) {
#' @return A httr2 response object.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' pull("llama3")
#' pull("all-minilm", stream = FALSE)
pull <- function(name, stream = FALSE, insecure = FALSE, endpoint = "/api/pull", host = NULL) {
@@ -644,7 +644,7 @@ pull <- function(name, stream = FALSE, insecure = FALSE, endpoint = "/api/pull",
#' @return A httr2 response object.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' push("mattw/pygmalion:latest")
push <- function(name, insecure = FALSE, stream = FALSE, output = c("resp", "jsonlist", "raw", "text", "df"), endpoint = "/api/push", host = NULL) {
@@ -744,7 +744,7 @@ normalize <- function(x) {
#' @return A numeric matrix of the embedding. Each column is the embedding for one input.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' embed("nomic-embed-text:latest", "The quick brown fox jumps over the lazy dog.")
#' # pass multiple inputs
#' embed("nomic-embed-text:latest", c("Good bye", "Bye", "See you."))
@@ -816,7 +816,7 @@ embed <- function(model, input, truncate = TRUE, normalize = TRUE, keep_alive =
#' @return A numeric vector of the embedding.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' embeddings("nomic-embed-text:latest", "The quick brown fox jumps over the lazy dog.")
#' # pass model options to the model
#' embeddings("nomic-embed-text:latest", "Hello!", temperature = 0.1, num_predict = 3)
@@ -869,7 +869,7 @@ embeddings <- function(model, prompt, normalize = TRUE, keep_alive = "5m", endpo
#' @return A response in the format specified in the output parameter.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' ps("text")
ps <- function(output = c("df", "resp", "jsonlist", "raw", "text"), endpoint = "/api/ps", host = NULL) {
output <- output[1]
@@ -915,7 +915,7 @@ ps <- function(output = c("df", "resp", "jsonlist", "raw", "text"), endpoint = "
#' @return Does not return anything. It prints the conversation in the console.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' ohelp(first_prompt = "quit")
#' # regular usage: ohelp()
ohelp <- function(model = "codegemma:7b", ...) {
@@ -964,7 +964,7 @@ ohelp <- function(model = "codegemma:7b", ...) {
#' @return A logical value indicating if the model exists.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' model_avail("codegemma:7b")
#' model_avail("abc")
#' model_avail("llama3")
diff --git a/R/utils.R b/R/utils.R
index d7898ab..b8e6089 100644
--- a/R/utils.R
+++ b/R/utils.R
@@ -1,30 +1,39 @@
#' Test connection to Ollama server
#'
#' @description
-#' `test_connection()` tests whether the Ollama server is running or not.
+#' Tests whether the Ollama server is running or not.
#'
#' @param url The URL of the Ollama server. Default is http://localhost:11434
+#' @param logical Logical. If TRUE, returns a logical value instead of an httr2 response object. Default is FALSE.
#'
-#' @return Boolean TRUE if the server is running, otherwise FALSE.
+#' @return A logical value if `logical = TRUE`; otherwise an httr2 response object with status_code 200 (success) or 503 (error).
#' @export
#'
#' @examples
-#' test_connection()
+#' test_connection(logical = TRUE)
#' test_connection("http://localhost:11434") # default url
#' test_connection("http://127.0.0.1:11434")
-test_connection <- function(url = "http://localhost:11434") {
+test_connection <- function(url = "http://localhost:11434", logical = FALSE) {
req <- httr2::request(url)
req <- httr2::req_method(req, "GET")
+
tryCatch(
{
resp <- httr2::req_perform(req)
message("Ollama local server running")
- return(TRUE)
+ if (logical) {
+ return(TRUE)
+ } else {
+ return(resp)
+ }
},
error = function(e) {
message("Ollama local server not running or wrong server.\nDownload and launch Ollama app to run the server. Visit https://ollama.com or https://github.com/ollama/ollama")
- req$status_code <- 503
- return(FALSE)
+ if (logical) {
+ return(FALSE)
+ } else {
+ return(httr2::response(status_code = 503, url = url))
+ }
}
)
}
@@ -117,7 +126,7 @@ get_tool_calls <- function(resp) {
#' @return A data frame, json list, raw or httr2 response object.
#' @export
#'
-#' @examplesIf test_connection()
+#' @examplesIf test_connection(logical = TRUE)
#' resp <- list_models("resp")
#' resp_process(resp, "df") # parse response to dataframe/tibble
#' resp_process(resp, "jsonlist") # parse response to list
diff --git a/README.Rmd b/README.Rmd
index 6659116..a3b4ec5 100644
--- a/README.Rmd
+++ b/README.Rmd
@@ -21,7 +21,7 @@ knitr::opts_chunk$set(
[![CRAN status](https://www.r-pkg.org/badges/version/ollamar)](https://cran.r-project.org/package=ollamar)
-The [Ollama R library](https://hauselin.github.io/ollama-r/) is the easiest way to integrate R with [Ollama](https://ollama.com/), which lets you run language models locally on your own machine.
+The [Ollama R library](https://hauselin.github.io/ollama-r/) is the easiest way to integrate R with [Ollama](https://ollama.com/), which lets you run language models locally on your own machine.
The library also makes it easy to work with data structures (e.g., conversational/chat histories) that are standard for different LLMs (such as those provided by OpenAI and Anthropic). It also lets you specify different output formats (e.g., dataframes, text/vector, lists) that best suit your needs, allowing easy integration with other libraries/tools and parallelization via the `httr2` library.
@@ -44,11 +44,11 @@ This library has been inspired by the official [Ollama Python](https://github.co
- Linux: `curl -fsSL https://ollama.com/install.sh | sh`
- [Docker image](https://hub.docker.com/r/ollama/ollama)
-2. Open/launch the Ollama app to start the local server.
+2. Open/launch the Ollama app to start the local server.
3. Install either the stable or latest/development version of `ollamar`.
-Stable version:
+Stable version:
```{r eval=FALSE}
install.packages("ollamar")
@@ -65,7 +65,7 @@ remotes::install_github("hauselin/ollamar")
Below is a basic demonstration of how to use the library. For details, see the [getting started vignette](https://hauselin.github.io/ollama-r/articles/ollamar.html) on our [main page](https://hauselin.github.io/ollama-r/).
-`ollamar` uses the [`httr2` library](https://httr2.r-lib.org/index.html) to make HTTP requests to the Ollama server, so many functions in this library returns an `httr2_response` object by default. If the response object says `Status: 200 OK`, then the request was successful.
+`ollamar` uses the [`httr2` library](https://httr2.r-lib.org/index.html) to make HTTP requests to the Ollama server, so many functions in this library return an `httr2_response` object by default. If the response object says `Status: 200 OK`, then the request was successful.
```{r eval=FALSE}
library(ollamar)
@@ -77,7 +77,7 @@ test_connection() # test connection to Ollama server
pull("llama3.1") # download a model (equivalent bash code: ollama run llama3.1)
# generate a response/text based on a prompt; returns an httr2 response by default
-resp <- generate("llama3.1", "tell me a 5-word story")
+resp <- generate("llama3.1", "tell me a 5-word story")
resp
#' interpret httr2 response object
@@ -88,15 +88,15 @@ resp
#' Body: In memory (414 bytes)
# get just the text from the response object
-resp_process(resp, "text")
+resp_process(resp, "text")
# get the text as a tibble dataframe
-resp_process(resp, "df")
+resp_process(resp, "df")
# alternatively, specify the output type when calling the function initially
txt <- generate("llama3.1", "tell me a 5-word story", output = "text")
# list available models (models you've pulled/downloaded)
-list_models()
+list_models()
name size parameter_size quantization_level modified
1 codegemma:7b 5 GB 9B Q4_0 2024-07-27T23:44:10
2 llama3.1:latest 4.7 GB 8.0B Q4_0 2024-07-31T07:44:33
diff --git a/README.md b/README.md
index a0b9b28..5a2b1c9 100644
--- a/README.md
+++ b/README.md
@@ -101,7 +101,7 @@ test_connection() # test connection to Ollama server
pull("llama3.1") # download a model (equivalent bash code: ollama run llama3.1)
# generate a response/text based on a prompt; returns an httr2 response by default
-resp <- generate("llama3.1", "tell me a 5-word story")
+resp <- generate("llama3.1", "tell me a 5-word story")
resp
#' interpret httr2 response object
@@ -112,15 +112,15 @@ resp
#' Body: In memory (414 bytes)
# get just the text from the response object
-resp_process(resp, "text")
+resp_process(resp, "text")
# get the text as a tibble dataframe
-resp_process(resp, "df")
+resp_process(resp, "df")
# alternatively, specify the output type when calling the function initially
txt <- generate("llama3.1", "tell me a 5-word story", output = "text")
# list available models (models you've pulled/downloaded)
-list_models()
+list_models()
name size parameter_size quantization_level modified
1 codegemma:7b 5 GB 9B Q4_0 2024-07-27T23:44:10
2 llama3.1:latest 4.7 GB 8.0B Q4_0 2024-07-31T07:44:33
diff --git a/man/chat.Rd b/man/chat.Rd
index 8cb96a8..5a652a3 100644
--- a/man/chat.Rd
+++ b/man/chat.Rd
@@ -45,7 +45,7 @@ A response in the format specified in the output parameter.
Generate a chat completion with message history
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# one message
messages <- list(
list(role = "user", content = "How are you doing?")
diff --git a/man/copy.Rd b/man/copy.Rd
index 5e24730..0a0387d 100644
--- a/man/copy.Rd
+++ b/man/copy.Rd
@@ -22,7 +22,7 @@ A httr2 response object.
Creates a model with another name from an existing model.
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
copy("llama3", "llama3_copy")
delete("llama3_copy") # delete the model was just got copied
\dontshow{\}) # examplesIf}
diff --git a/man/create.Rd b/man/create.Rd
index c156e51..cdb729b 100644
--- a/man/create.Rd
+++ b/man/create.Rd
@@ -33,7 +33,7 @@ A response in the format specified in the output parameter.
It is recommended to set \code{modelfile} to the content of the Modelfile rather than just setting the path.
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
create("mario", "FROM llama3\nSYSTEM You are mario from Super Mario Bros.")
generate("mario", "who are you?", output = "text") # model should say it's Mario
delete("mario") # delete the model created above
diff --git a/man/embed.Rd b/man/embed.Rd
index 6c2935a..e844627 100644
--- a/man/embed.Rd
+++ b/man/embed.Rd
@@ -39,7 +39,7 @@ A numeric matrix of the embedding. Each column is the embedding for one input.
Supersedes the \code{embeddings()} function.
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
embed("nomic-embed-text:latest", "The quick brown fox jumps over the lazy dog.")
# pass multiple inputs
embed("nomic-embed-text:latest", c("Good bye", "Bye", "See you."))
diff --git a/man/embeddings.Rd b/man/embeddings.Rd
index edb6b81..dac27f1 100644
--- a/man/embeddings.Rd
+++ b/man/embeddings.Rd
@@ -36,7 +36,7 @@ A numeric vector of the embedding.
This function will be deprecated over time and has been superseded by \code{embed()}. See \code{embed()} for more details.
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
embeddings("nomic-embed-text:latest", "The quick brown fox jumps over the lazy dog.")
# pass model options to the model
embeddings("nomic-embed-text:latest", "Hello!", temperature = 0.1, num_predict = 3)
diff --git a/man/generate.Rd b/man/generate.Rd
index 7cc8e44..a12eaa3 100644
--- a/man/generate.Rd
+++ b/man/generate.Rd
@@ -60,7 +60,7 @@ A response in the format specified in the output parameter.
Generate a response for a given prompt
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# text prompt
generate("llama3", "The sky is...", stream = FALSE, output = "df")
# stream and increase temperature
diff --git a/man/list_models.Rd b/man/list_models.Rd
index 9bff4d9..00008b6 100644
--- a/man/list_models.Rd
+++ b/man/list_models.Rd
@@ -24,7 +24,7 @@ A response in the format specified in the output parameter.
List models that are available locally
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
list_models() # returns dataframe
list_models("df") # returns dataframe
list_models("resp") # httr2 response object
diff --git a/man/model_avail.Rd b/man/model_avail.Rd
index bdfd263..168e4b6 100644
--- a/man/model_avail.Rd
+++ b/man/model_avail.Rd
@@ -16,7 +16,7 @@ A logical value indicating if the model exists.
Check if model is available locally
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
model_avail("codegemma:7b")
model_avail("abc")
model_avail("llama3")
diff --git a/man/ohelp.Rd b/man/ohelp.Rd
index 20d394b..e94fb29 100644
--- a/man/ohelp.Rd
+++ b/man/ohelp.Rd
@@ -18,7 +18,7 @@ Does not return anything. It prints the conversation in the console.
Chat with a model in real-time in R console
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
ohelp(first_prompt = "quit")
# regular usage: ohelp()
\dontshow{\}) # examplesIf}
diff --git a/man/ps.Rd b/man/ps.Rd
index f5fdc7c..9a85f91 100644
--- a/man/ps.Rd
+++ b/man/ps.Rd
@@ -24,7 +24,7 @@ A response in the format specified in the output parameter.
List models that are currently loaded into memory
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
ps("text")
\dontshow{\}) # examplesIf}
}
diff --git a/man/pull.Rd b/man/pull.Rd
index 3c766b0..ca48272 100644
--- a/man/pull.Rd
+++ b/man/pull.Rd
@@ -30,7 +30,7 @@ A httr2 response object.
See https://ollama.com/library for a list of available models. Use the list_models() function to get the list of models already downloaded/installed on your machine. Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress.
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
pull("llama3")
pull("all-minilm", stream = FALSE)
\dontshow{\}) # examplesIf}
diff --git a/man/push.Rd b/man/push.Rd
index d094dfa..40ee1e1 100644
--- a/man/push.Rd
+++ b/man/push.Rd
@@ -33,7 +33,7 @@ A httr2 response object.
Push or upload a model to an Ollama model library. Requires registering for ollama.ai and adding a public key first.
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
push("mattw/pygmalion:latest")
\dontshow{\}) # examplesIf}
}
diff --git a/man/resp_process.Rd b/man/resp_process.Rd
index 3f4fc27..2a94d08 100644
--- a/man/resp_process.Rd
+++ b/man/resp_process.Rd
@@ -21,7 +21,7 @@ A data frame, json list, raw or httr2 response object.
Process httr2 response object
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
resp <- list_models("resp")
resp_process(resp, "df") # parse response to dataframe/tibble
resp_process(resp, "jsonlist") # parse response to list
diff --git a/man/show.Rd b/man/show.Rd
index e6cb826..fde175d 100644
--- a/man/show.Rd
+++ b/man/show.Rd
@@ -30,7 +30,7 @@ A response in the format specified in the output parameter.
Model information includes details, modelfile, template, parameters, license, system prompt.
}
\examples{
-\dontshow{if (test_connection()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# show("llama3") # returns jsonlist
show("llama3", output = "resp") # returns response object
\dontshow{\}) # examplesIf}
diff --git a/man/test_connection.Rd b/man/test_connection.Rd
index bf0740e..722b281 100644
--- a/man/test_connection.Rd
+++ b/man/test_connection.Rd
@@ -4,19 +4,21 @@
\alias{test_connection}
\title{Test connection to Ollama server}
\usage{
-test_connection(url = "http://localhost:11434")
+test_connection(url = "http://localhost:11434", logical = FALSE)
}
\arguments{
\item{url}{The URL of the Ollama server. Default is http://localhost:11434}
+
+\item{logical}{Logical. If TRUE, returns a logical value instead of an httr2 response object. Default is FALSE.}
}
\value{
-Boolean TRUE if the server is running, otherwise FALSE.
+A logical value if \code{logical = TRUE}; otherwise an httr2 response object with status_code 200 (success) or 503 (error).
}
\description{
-\code{test_connection()} tests whether the Ollama server is running or not.
+Tests whether the Ollama server is running or not.
}
\examples{
-test_connection()
+test_connection(logical = TRUE)
test_connection("http://localhost:11434") # default url
test_connection("http://127.0.0.1:11434")
}
diff --git a/revdep/README.md b/revdep/README.md
index 8a907b9..cf42f96 100644
--- a/revdep/README.md
+++ b/revdep/README.md
@@ -9,8 +9,8 @@
|language |(EN) |
|collate |en_US.UTF-8 |
|ctype |en_US.UTF-8 |
-|tz |America/New_York |
-|date |2024-08-24 |
+|tz |America/Toronto |
+|date |2024-12-29 |
|rstudio |2024.04.2+764 Chocolate Cosmos (desktop) |
|pandoc |3.1.11 @ /Applications/RStudio.app/Contents/Resources/app/quarto/bin/tools/aarch64/ (via rmarkdown) |
@@ -18,8 +18,17 @@
|package |old |new |Δ |
|:-------|:-----|:----------|:--|
-|ollamar |1.2.0 |1.2.0.9000 |* |
-|httr2 |NA |1.0.3 |* |
+|ollamar |1.2.1 |1.2.1.9000 |* |
+|askpass |NA |1.2.1 |* |
+|openssl |NA |2.3.0 |* |
+|pillar |NA |1.10.0 |* |
+|sys |NA |3.4.3 |* |
# Revdeps
+## New problems (1)
+
+|package |version |error |warning |note |
+|:-------|:-------|:------|:-------|:----|
+|[mall](problems.md#mall)|0.1.0 |__+1__ | | |
+
diff --git a/revdep/cran.md b/revdep/cran.md
index 782ef68..8b6f23c 100644
--- a/revdep/cran.md
+++ b/revdep/cran.md
@@ -1,7 +1,15 @@
## revdepcheck results
-We checked 1 reverse dependencies, comparing R CMD check results across CRAN and dev versions of this package.
+We checked 2 reverse dependencies, comparing R CMD check results across CRAN and dev versions of this package.
- * We saw 0 new problems
+ * We saw 1 new problem
* We failed to check 0 packages
+Issues with CRAN packages are summarised below.
+
+### New problems
+(This reports the first line of each new failure)
+
+* mall
+ checking tests ...
+
diff --git a/revdep/problems.md b/revdep/problems.md
index 9a20736..b967f05 100644
--- a/revdep/problems.md
+++ b/revdep/problems.md
@@ -1 +1,39 @@
-*Wow, no problems at all. :)*
\ No newline at end of file
+# mall
+
+
+
+* Version: 0.1.0
+* GitHub: NA
+* Source code: https://github.com/cran/mall
+* Date/Publication: 2024-10-24 14:30:02 UTC
+* Number of recursive dependencies: 49
+
+Run `revdepcheck::revdep_details(, "mall")` for more info
+
+
+
+## Newly broken
+
+* checking tests ...
+ ```
+ Running ‘testthat.R’
+ ERROR
+ Running the tests in ‘tests/testthat.R’ failed.
+ Last 13 lines of output:
+ Error in `con$status_code`: $ operator is invalid for atomic vectors
+ Backtrace:
+ ▆
+ 1. └─mall:::skip_if_no_ollama() at test-llm-verify.R:42:3
+ 2. └─mall:::ollama_is_present() at tests/testthat/helper-ollama.R:20:3
+ ── Error ('test-zzz-cache.R:2:3'): Ollama cache exists and delete ──────────────
+ Error in `con$status_code`: $ operator is invalid for atomic vectors
+ Backtrace:
+ ▆
+ 1. └─mall:::skip_if_no_ollama() at test-zzz-cache.R:2:3
+ 2. └─mall:::ollama_is_present() at tests/testthat/helper-ollama.R:20:3
+
+ [ FAIL 8 | WARN 0 | SKIP 7 | PASS 38 ]
+ Error: Test failures
+ Execution halted
+ ```
+
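
The mall failure above is consistent with calling `$status_code` on a logical value. A hypothetical downstream helper (the `ollama_is_present()` name comes from the traceback; this is not mall's actual code) could tolerate both return forms:

```r
ollama_is_present <- function() {
  con <- ollamar::test_connection()
  if (is.logical(con)) return(isTRUE(con))  # logical form (logical = TRUE)
  con$status_code == 200                    # httr2 response form (default)
}
```
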
diff --git a/tests/testthat/test-chat.R b/tests/testthat/test-chat.R
index 208f18f..890a061 100644
--- a/tests/testthat/test-chat.R
+++ b/tests/testthat/test-chat.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("chat function works with basic input", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
messages <- list(
list(role = "user", content = "Tell me a 5-word story.")
@@ -56,7 +56,7 @@ test_that("chat function works with basic input", {
})
test_that("chat function handles streaming correctly", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
messages <- list(
list(role = "user", content = "Count to 5")
@@ -70,7 +70,7 @@ test_that("chat function handles streaming correctly", {
test_that("chat function handles multiple messages", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
messages <- list(
list(role = "user", content = "Hello!"),
@@ -86,7 +86,7 @@ test_that("chat function handles multiple messages", {
})
test_that("chat function handles additional options", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
messages <- list(
list(role = "user", content = "Tell me a very short joke")
@@ -102,7 +102,7 @@ test_that("chat function handles additional options", {
test_that("chat function handles images in messages", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
skip_if_not(model_avail("benzie/llava-phi-3"), "benzie/llava-phi-3 model not available")
images <- c(file.path(system.file("extdata", package = "ollamar"), "image1.png"),
@@ -131,7 +131,7 @@ test_that("chat function handles images in messages", {
test_that("chat function tool calling", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
add_two_numbers <- function(x, y) {
return(x + y)
@@ -239,7 +239,7 @@ test_that("chat function tool calling", {
test_that("structured output", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
format <- list(
type = "object",
diff --git a/tests/testthat/test-copy.R b/tests/testthat/test-copy.R
index 860d3c9..bf66ba3 100644
--- a/tests/testthat/test-copy.R
+++ b/tests/testthat/test-copy.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("copy function works with basic input", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
copy("llama3", "llama3-BACKUP")
expect_true(model_avail("llama3-BACKUP"))
diff --git a/tests/testthat/test-create.R b/tests/testthat/test-create.R
index fd3ad9b..71acbda 100644
--- a/tests/testthat/test-create.R
+++ b/tests/testthat/test-create.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("create function works with basic input", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
expect_error(create("mario"))
expect_error(create("mario", modelfile = "abc", path = "abc"))
diff --git a/tests/testthat/test-delete.R b/tests/testthat/test-delete.R
index 3f908cd..823ed9e 100644
--- a/tests/testthat/test-delete.R
+++ b/tests/testthat/test-delete.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("delete function works", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
# wrong model
expect_invisible(delete("sdafds"))
diff --git a/tests/testthat/test-embed.R b/tests/testthat/test-embed.R
index cc56e96..47ecdc1 100644
--- a/tests/testthat/test-embed.R
+++ b/tests/testthat/test-embed.R
@@ -4,7 +4,7 @@ library(ollamar)
# Note for the following test to work you need to make sure the "all-minilm" model exists locally
test_that("embed function works with basic input", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
# one input
result <- embed("all-minilm", "hello")
diff --git a/tests/testthat/test-embeddings.R b/tests/testthat/test-embeddings.R
index a56a85c..b5bdb27 100644
--- a/tests/testthat/test-embeddings.R
+++ b/tests/testthat/test-embeddings.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("embeddings function works with basic input", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
result <- embeddings("all-minilm", "hello")
expect_type(result, "double")
diff --git a/tests/testthat/test-generate.R b/tests/testthat/test-generate.R
index aca5bcc..3eafa47 100644
--- a/tests/testthat/test-generate.R
+++ b/tests/testthat/test-generate.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("generate function works with different outputs and resp_process", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
# incorrect output type
expect_error(generate("llama3", "The sky is...", output = "abc"))
@@ -46,7 +46,7 @@ test_that("generate function works with different outputs and resp_process", {
})
test_that("generate function works with additional options", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
expect_s3_class(generate("llama3", "The sky is...", num_predict = 1, temperature = 0), "httr2_response")
expect_error(generate("llama3", "The sky is...", abc = 1, sdf = 2))
@@ -55,7 +55,7 @@ test_that("generate function works with additional options", {
test_that("generate function works with images", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
skip_if_not(model_avail("benzie/llava-phi-3"), "benzie/llava-phi-3 model not available")
image_path <- file.path(system.file("extdata", package = "ollamar"), "image1.png")
@@ -83,7 +83,7 @@ test_that("generate function works with images", {
test_that("structured output", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
format <- list(
type = "object",
diff --git a/tests/testthat/test-list_models.R b/tests/testthat/test-list_models.R
index 47ab6cc..9fdfd3b 100644
--- a/tests/testthat/test-list_models.R
+++ b/tests/testthat/test-list_models.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("list_models function works", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
# incorrect output type
expect_error(list_models("sdf"))
diff --git a/tests/testthat/test-model_avail.R b/tests/testthat/test-model_avail.R
index f44313d..c2300f0 100644
--- a/tests/testthat/test-model_avail.R
+++ b/tests/testthat/test-model_avail.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("model_avail function works with basic input", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
expect_false(model_avail("test"))
expect_true(model_avail("llama3"))
diff --git a/tests/testthat/test-ps.R b/tests/testthat/test-ps.R
index 7fa4160..727cbb8 100644
--- a/tests/testthat/test-ps.R
+++ b/tests/testthat/test-ps.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("ps list running models endpoint", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
# load models first
g1 <- generate('llama3', "tell me a 5 word story")
diff --git a/tests/testthat/test-pull.R b/tests/testthat/test-pull.R
index 6a79b53..2d6d274 100644
--- a/tests/testthat/test-pull.R
+++ b/tests/testthat/test-pull.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("pull function works", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
# streaming is FALSE by default
# wrong model
diff --git a/tests/testthat/test-push.R b/tests/testthat/test-push.R
index 37e4173..5d6f569 100644
--- a/tests/testthat/test-push.R
+++ b/tests/testthat/test-push.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("push function works with basic input", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
expect_s3_class(push("mattw/pygmalion:latest"), "httr2_response")
diff --git a/tests/testthat/test-show.R b/tests/testthat/test-show.R
index e885d09..a2f373e 100644
--- a/tests/testthat/test-show.R
+++ b/tests/testthat/test-show.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("show function works", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
result <- show("llama3", output = "resp")
expect_s3_class(result, "httr2_response")
diff --git a/tests/testthat/test-test_connection.R b/tests/testthat/test-test_connection.R
index 731b149..3eed4b9 100644
--- a/tests/testthat/test-test_connection.R
+++ b/tests/testthat/test-test_connection.R
@@ -2,7 +2,9 @@ library(testthat)
library(ollamar)
test_that("test_connection function works", {
- skip_if_not(test_connection(), "Ollama server not available")
- expect_equal(test_connection(), TRUE)
- expect_equal(test_connection(url = "dsfdsf"), FALSE)
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
+
+ expect_equal(test_connection()$status_code, 200)
+ expect_equal(test_connection(logical = TRUE), TRUE)
+ expect_equal(test_connection(url = "dsfdsf")$status_code, 503)
})
diff --git a/tests/testthat/test-utils.R b/tests/testthat/test-utils.R
index 1061157..f28e026 100644
--- a/tests/testthat/test-utils.R
+++ b/tests/testthat/test-utils.R
@@ -2,7 +2,7 @@ library(testthat)
library(ollamar)
test_that("copy function works with basic input", {
- skip_if_not(test_connection(), "Ollama server not available")
+ skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
x <- rnorm(5)
expect_true(vector_norm(x) == sqrt(sum(x^2)))
diff --git a/vignettes/ollamar.Rmd b/vignettes/ollamar.Rmd
index f883924..8703b4b 100644
--- a/vignettes/ollamar.Rmd
+++ b/vignettes/ollamar.Rmd
@@ -18,11 +18,11 @@ ollamar is the easiest way to integrate R with [Ollama](https://ollama.com/), wh
- Linux: `curl -fsSL https://ollama.com/install.sh | sh`
- [Docker image](https://hub.docker.com/r/ollama/ollama)
-2. Open/launch the Ollama app to start the local server.
+2. Open/launch the Ollama app to start the local server.
3. Install either the stable or latest/development version of `ollamar`.
-Stable version:
+Stable version:
```{r eval=FALSE}
install.packages("ollamar")
@@ -46,7 +46,7 @@ test_connection() # test connection to Ollama server
# if you see "Ollama local server not running or wrong server," Ollama app/server isn't running
# generate a response/text based on a prompt; returns an httr2 response by default
-resp <- generate("llama3.1", "tell me a 5-word story")
+resp <- generate("llama3.1", "tell me a 5-word story")
resp
#' interpret httr2 response object
@@ -56,15 +56,15 @@ resp
#' Body: In memory (414 bytes)
# get just the text from the response object
-resp_process(resp, "text")
+resp_process(resp, "text")
# get the text as a tibble dataframe
-resp_process(resp, "df")
+resp_process(resp, "df")
# alternatively, specify the output type when calling the function initially
txt <- generate("llama3.1", "tell me a 5-word story", output = "text")
# list available models (models you've pulled/downloaded)
-list_models()
+list_models()
name size parameter_size quantization_level modified
1 codegemma:7b 5 GB 9B Q4_0 2024-07-27T23:44:10
2 llama3.1:latest 4.7 GB 8.0B Q4_0 2024-07-31T07:44:33
@@ -212,10 +212,10 @@ messages <- create_messages(
You can convert `data.frame`, `tibble` or `data.table` objects to `list()` of messages and vice versa with functions from base R or other popular libraries.
```{r eval=FALSE}
-# create a list of messages
+# create a list of messages
messages <- create_messages(
create_message("You're a knowledgeable tour guide.", role = "system"),
- create_message("What is the capital of Australia?")
+ create_message("What is the capital of Australia?")
)
# convert to dataframe
@@ -336,7 +336,7 @@ resp <- chat("llama3.1", msg, tools = list(tool1), output = "tools")
tool <- resp[[1]] # get the first tool/function
# call the tool function with arguments: add_two_numbers(3, 1)
-do.call(tool$name, tool$arguments)
+do.call(tool$name, tool$arguments)
```
Pass in multiple tools. The model will pick the best tool to use based on the context of the message.
@@ -347,16 +347,16 @@ resp <- chat("llama3.1", msg, tools = list(tool1, tool2), output = "tools")
tool <- resp[[1]] # get the first tool/function
# call the tool function with arguments: multiply_two_numbers(3, 4)
-do.call(tool$name, tool$arguments)
+do.call(tool$name, tool$arguments)
```
-Pass in multiple tools and get the model to use multiple tools. Note that LLM responses are inherently stochastic, so sometimes the model might choose to call only one tool, and sometimes might call tools multiple times.
+Pass in multiple tools and get the model to use multiple tools. Note that LLM responses are inherently stochastic, so sometimes the model might choose to call only one tool, and sometimes might call tools multiple times.
```{r eval=FALSE}
msg <- create_message("add three plus four. then multiply by ten")
resp <- chat("llama3.1", msg, tools = list(tool1, tool2), output = "tools")
-# first tool/function: add_two_numbers(3, 4)
+# first tool/function: add_two_numbers(3, 4)
do.call(resp[[1]]$name, resp[[1]]$arguments) # 7
# second tool/function: multiply_two_numbers(7, 10)
do.call(resp[[2]]$name, resp[[2]]$arguments) # 70
@@ -364,16 +364,16 @@ do.call(resp[[2]]$name, resp[[2]]$arguments) # 70
### Structured outputs
-The `chat()` and `generate()` functions support [structured outputs](https://ollama.com/blog/structured-outputs), making it possible to constrain a model's output to a specified format defined by a JSON schema (R list).
+The `chat()` and `generate()` functions support [structured outputs](https://ollama.com/blog/structured-outputs), making it possible to constrain a model's output to a specified format defined by a JSON schema (R list).
-```{r eval=FALSE}
+```{r eval=FALSE}
# define a JSON schema as a list to constrain a model's output
format <- list(
type = "object",
properties = list(
name = list(type = "string"),
capital = list(type = "string"),
- languages = list(type = "array",
+ languages = list(type = "array",
items = list(type = "string")
)
),
@@ -410,9 +410,9 @@ resps <- req_perform_parallel(reqs) # list of httr2_request objects
# process the responses
sapply(resps, resp_process, "text") # get responses as text
-# [1] "She found him in Paris." "She found the key upstairs."
-# [3] "She found her long-lost sister." "She found love on Mars."
-# [5] "She found the diamond ring."
+# [1] "She found him in Paris." "She found the key upstairs."
+# [3] "She found her long-lost sister." "She found love on Mars."
+# [5] "She found the diamond ring."
```
@@ -437,7 +437,7 @@ resps <- req_perform_parallel(reqs) # list of httr2_request objects
# process the responses
sapply(resps, resp_process, "text") # get responses as text
-# [1] "Positive" "Negative."
+# [1] "Positive" "Negative."
# [3] "'neutral' translates to... 'other'."
@@ -467,8 +467,8 @@ resps <- req_perform_parallel(reqs) # list of httr2_request objects
# process the responses
bind_rows(lapply(resps, resp_process, "df")) # get responses as dataframes
# # A tibble: 3 × 4
-# model    role      content  created_at
-# <chr>    <chr>     <chr>    <chr>
+# model    role      content  created_at
+# <chr>    <chr>     <chr>    <chr>
# 1 llama3.1 assistant Positive 2024-08-05T17:54:27.758618Z
# 2 llama3.1 assistant negative 2024-08-05T17:54:27.657525Z
# 3 llama3.1 assistant other 2024-08-05T17:54:27.657067Z