From 77fdeea0c3d4368ace61be96080d62f4a545e21e Mon Sep 17 00:00:00 2001
From: Hause Lin
Date: Mon, 27 Jan 2025 23:23:44 -0500
Subject: [PATCH] Add version endpoint and update create

---
 NAMESPACE                      |  1 +
 NEWS.md                        |  3 ++
 R/ollama.R                     | 74 ++++++++++++++++++++++------------
 R/utils.R                      |  2 +
 _pkgdown.yml                   |  1 +
 man/create.Rd                  | 23 ++++++-----
 man/ver.Rd                     | 27 +++++++++++++
 tests/testthat/test-chat.R     |  2 +-
 tests/testthat/test-create.R   |  6 +--
 tests/testthat/test-generate.R |  2 +-
 tests/testthat/test-ps.R       |  2 +-
 tests/testthat/test-pull.R     |  4 +-
 tests/testthat/test-ver.R      |  8 ++++
 13 files changed, 110 insertions(+), 45 deletions(-)
 create mode 100644 man/ver.Rd
 create mode 100644 tests/testthat/test-ver.R

diff --git a/NAMESPACE b/NAMESPACE
index 22ca7a4..a8c9d7d 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -31,6 +31,7 @@ export(test_connection)
 export(validate_message)
 export(validate_messages)
 export(validate_options)
+export(ver)
 importFrom(crayon,green)
 importFrom(crayon,red)
 importFrom(glue,glue)
diff --git a/NEWS.md b/NEWS.md
index fcf5764..361dcc0 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,5 +1,8 @@
 # ollamar (development version)
 
+- Add `ver()` function to [retrieve Ollama version](https://github.com/ollama/ollama/blob/main/docs/api.md#version).
+- Update `create()` to follow the new Ollama API: a model is now created from an existing model (`from`) with an optional `system` prompt, replacing the `modelfile` and `path` arguments.
+
 # ollamar 1.2.2
 
 - `generate()` and `chat()` support [structured output](https://ollama.com/blog/structured-outputs) via `format` parameter.
diff --git a/R/ollama.R b/R/ollama.R
index e3f82b6..347976c 100644
--- a/R/ollama.R
+++ b/R/ollama.R
@@ -289,14 +289,14 @@ chat <- function(model, messages, tools = list(), stream = FALSE, format = list(
 
 
 
-#' Create a model from a Modelfile
+#' Create a model
 #'
-#' It is recommended to set `modelfile` to the content of the Modelfile rather than just set path.
+#' Create a model from another model, a safetensors directory (not implemented), or a GGUF file (not implemented).
 #'
-#' @param name Name of the model to create.
-#' @param modelfile Contents of the Modelfile as character string. Default is NULL.
+#' @param model Name of the model to create.
+#' @param from Name of an existing model to create the new model from.
+#' @param system System prompt for the model. Default is NULL.
 #' @param stream Enable response streaming. Default is FALSE.
-#' @param path The path to the Modelfile. Default is NULL.
 #' @param endpoint The endpoint to create the model. Default is "/api/create".
 #' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
 #'
@@ -307,34 +307,22 @@ chat <- function(model, messages, tools = list(), stream = FALSE, format = list(
 #' @export
 #'
 #' @examplesIf test_connection(logical = TRUE)
-#' create("mario", "FROM llama3\nSYSTEM You are mario from Super Mario Bros.")
+#' create("mario", "deepseek-r1:1.5b", system = "You are Mario from Super Mario Bros.")
+#' model_avail("mario") # check mario model has been created
+#' list_models() # mario model has been created
 #' generate("mario", "who are you?", output = "text") # model should say it's Mario
 #' delete("mario") # delete the model created above
-create <- function(name, modelfile = NULL, stream = FALSE, path = NULL, endpoint = "/api/create", host = NULL) {
-
-    if (is.null(modelfile) && is.null(path)) {
-        stop("Either modelfile or path must be provided. Using modelfile is recommended.")
-    }
-
-    if (!is.null(modelfile) && !is.null(path)) {
-        stop("Only one of modelfile or path should be provided.")
-    }
-
-    if (!is.null(path)) {
-        if (file.exists(path)) {
-            modelfile <- paste0(readLines(path, warn = FALSE), collapse = "\n")
-            cat(paste0("Modelfile\n", modelfile, "\n"))
-        } else {
-            stop("The path provided does not exist.")
-        }
-    }
+#' model_avail("mario") # model no longer exists
+create <- function(model, from, system = NULL, stream = FALSE, endpoint = "/api/create", host = NULL) {
 
     req <- create_request(endpoint, host)
     req <- httr2::req_method(req, "POST")
 
+    # TODO: add other parameters
     body_json <- list(
-        name = name,
-        modelfile = modelfile,
+        model = model,
+        from = from,
+        system = system,
         stream = stream
     )
 
@@ -896,6 +884,40 @@ ps <- function(output = c("df", "resp", "jsonlist", "raw", "text"), endpoint = "
 
 
 
+#' Retrieve Ollama version
+#'
+#' @param endpoint The endpoint to get the Ollama version. Default is "/api/version".
+#' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
+#'
+#' @references
+#' [API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md#version)
+#'
+#' @return A character string of the Ollama version.
+#' @export
+#'
+#' @examplesIf test_connection(logical = TRUE)
+#' ver()
+ver <- function(endpoint = "/api/version", host = NULL) {
+    req <- create_request(endpoint, host)
+    req <- httr2::req_method(req, "GET")
+    tryCatch(
+        {
+            resp <- httr2::req_perform(req)
+            return(resp_process(resp = resp, output = "text"))
+        },
+        error = function(e) {
+            stop(e)
+        }
+    )
+}
+
+
+
+
+
+
+
+
 
 
 
diff --git a/R/utils.R b/R/utils.R
index b8e6089..61c0b78 100644
--- a/R/utils.R
+++ b/R/utils.R
@@ -275,6 +275,8 @@ resp_process <- function(resp, output = c("df", "jsonlist", "raw", "resp", "text
         } else if (output == "text") {
             return(df_response$name)
         }
+    } else if (grepl("api/version", resp$url)) {
+        return(httr2::resp_body_json(resp)$version)
     }
 }
 
diff --git a/_pkgdown.yml b/_pkgdown.yml
index 719ff65..4c89b75 100644
--- a/_pkgdown.yml
+++ b/_pkgdown.yml
@@ -45,6 +45,7 @@ reference:
     - embed
     - embeddings
     - ps
+    - ver
 
 - subtitle: API helpers
   desc: Work with and extend the Ollama API.
diff --git a/man/create.Rd b/man/create.Rd
index cdb729b..415b7d6 100644
--- a/man/create.Rd
+++ b/man/create.Rd
@@ -2,25 +2,25 @@
 % Please edit documentation in R/ollama.R
 \name{create}
 \alias{create}
-\title{Create a model from a Modelfile}
+\title{Create a model}
 \usage{
 create(
-  name,
-  modelfile = NULL,
+  model,
+  from,
+  system = NULL,
   stream = FALSE,
-  path = NULL,
   endpoint = "/api/create",
   host = NULL
 )
 }
 \arguments{
-\item{name}{Name of the model to create.}
+\item{model}{Name of the model to create.}
 
-\item{modelfile}{Contents of the Modelfile as character string. Default is NULL.}
+\item{from}{Name of an existing model to create the new model from.}
 
-\item{stream}{Enable response streaming. Default is FALSE.}
+\item{system}{System prompt for the model. Default is NULL.}
 
-\item{path}{The path to the Modelfile. Default is NULL.}
+\item{stream}{Enable response streaming. Default is FALSE.}
 
 \item{endpoint}{The endpoint to create the model. Default is "/api/create".}
 
@@ -30,13 +30,16 @@ create(
 A response in the format specified in the output parameter.
 }
 \description{
-It is recommended to set \code{modelfile} to the content of the Modelfile rather than just set path.
+Create a model from another model, a safetensors directory (not implemented), or a GGUF file (not implemented).
 }
 \examples{
 \dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
-create("mario", "FROM llama3\nSYSTEM You are mario from Super Mario Bros.")
+create("mario", "deepseek-r1:1.5b", system = "You are Mario from Super Mario Bros.")
+model_avail("mario") # check mario model has been created
+list_models() # mario model has been created
 generate("mario", "who are you?", output = "text") # model should say it's Mario
 delete("mario") # delete the model created above
+model_avail("mario") # model no longer exists
 \dontshow{\}) # examplesIf}
 }
 \references{
diff --git a/man/ver.Rd b/man/ver.Rd
new file mode 100644
index 0000000..5e8f00d
--- /dev/null
+++ b/man/ver.Rd
@@ -0,0 +1,27 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/ollama.R
+\name{ver}
+\alias{ver}
+\title{Retrieve Ollama version}
+\usage{
+ver(endpoint = "/api/version", host = NULL)
+}
+\arguments{
+\item{endpoint}{The endpoint to get the Ollama version. Default is "/api/version".}
+
+\item{host}{The base URL to use. Default is NULL, which uses Ollama's default base URL.}
+}
+\value{
+A character string of the Ollama version.
+}
+\description{
+Retrieve Ollama version
+}
+\examples{
+\dontshow{if (test_connection(logical = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
+ver()
+\dontshow{\}) # examplesIf}
+}
+\references{
+\href{https://github.com/ollama/ollama/blob/main/docs/api.md#version}{API documentation}
+}
diff --git a/tests/testthat/test-chat.R b/tests/testthat/test-chat.R
index f41b328..d3f526a 100644
--- a/tests/testthat/test-chat.R
+++ b/tests/testthat/test-chat.R
@@ -262,7 +262,7 @@ test_that("structured output", {
     )
 
     msg <- create_message("tell me about canada")
-    resp <- chat("llama3.1", msg, format = format)
+    resp <- chat("llama3.1:8b", msg, format = format)
     # content <- httr2::resp_body_json(resp)$message$content
     structured_output <- resp_process(resp, "structured")
     expect_equal(tolower(structured_output$name), "canada")
diff --git a/tests/testthat/test-create.R b/tests/testthat/test-create.R
index 71acbda..6597fe8 100644
--- a/tests/testthat/test-create.R
+++ b/tests/testthat/test-create.R
@@ -4,11 +4,7 @@ library(ollamar)
 test_that("create function works with basic input", {
     skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
 
-    expect_error(create("mario"))
-    expect_error(create("mario", modelfile = "abc", path = "abc"))
-    expect_error(create("mario", path = "abc"))
-
-    resp <- create("mario", "FROM llama3\nSYSTEM You are mario from Super Mario Bros.")
+    resp <- create("mario", "deepseek-r1:1.5b")
     expect_s3_class(resp, "httr2_response")
     expect_equal(resp$status_code, 200)
     expect_true(model_avail("mario"))
diff --git a/tests/testthat/test-generate.R b/tests/testthat/test-generate.R
index 3eafa47..3618265 100644
--- a/tests/testthat/test-generate.R
+++ b/tests/testthat/test-generate.R
@@ -105,7 +105,7 @@ test_that("structured output", {
     )
 
     msg <- "tell me about canada"
-    resp <- generate("llama3.1", prompt = msg, format = format)
+    resp <- generate("llama3.1:8b", prompt = msg, format = format)
     # response <- httr2::resp_body_json(resp)$response
     structured_output <- resp_process(resp, "structured")
     expect_equal(tolower(structured_output$name), "canada")
diff --git a/tests/testthat/test-ps.R b/tests/testthat/test-ps.R
index 727cbb8..6755e21 100644
--- a/tests/testthat/test-ps.R
+++ b/tests/testthat/test-ps.R
@@ -8,7 +8,7 @@ test_that("ps list running models endpoint", {
     g1 <- generate('llama3', "tell me a 5 word story")
 
     result <- ps()
-    expect_true(nrow(result) > 1)
+    expect_true(nrow(result) >= 1)
     expect_true(all(c("name", "size", "parameter_size", "quantization_level", "digest", "expires_at") %in% names(result)))
     expect_s3_class(ps("df"), "data.frame")
     expect_s3_class(ps("resp"), "httr2_response")
diff --git a/tests/testthat/test-pull.R b/tests/testthat/test-pull.R
index 2d6d274..747b3d7 100644
--- a/tests/testthat/test-pull.R
+++ b/tests/testthat/test-pull.R
@@ -31,7 +31,7 @@ test_that("pull function works", {
     expect_vector(result$body)
 
     # correct model
-    result <- pull('llama3', stream = TRUE)
+    result <- pull('snowflake-arctic-embed:22m', stream = TRUE)
     # for this endpoint, even when stream = FALSE, the response is chunked)
     expect_true(httr2::resp_headers(result)$`Transfer-Encoding` == "chunked")
     expect_s3_class(result, "httr2_response")
@@ -51,5 +51,7 @@ test_that("pull function works", {
     expect_s3_class(pull('sdafd', stream = TRUE, insecure = TRUE), "httr2_response")
     expect_s3_class(pull('sdafd', stream = TRUE, insecure = FALSE), "httr2_response")
 
+    delete("snowflake-arctic-embed:22m")
+
 })
 
diff --git a/tests/testthat/test-ver.R b/tests/testthat/test-ver.R
new file mode 100644
index 0000000..0be5695
--- /dev/null
+++ b/tests/testthat/test-ver.R
@@ -0,0 +1,8 @@
+library(testthat)
+library(ollamar)
+
+test_that("ver returns Ollama version", {
+    skip_if_not(test_connection(logical = TRUE), "Ollama server not available")
+
+    expect_type(ver(), "character")
+})
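-- 
Quick smoke test for the new API (run manually; assumes a local Ollama server
is reachable and that `deepseek-r1:1.5b` has already been pulled; the model
name and system prompt below are illustrative):

    library(ollamar)

    ver()  # Ollama version as a character string, e.g. "0.5.7"

    # create a model from an existing model, with an optional system prompt
    create("mario", from = "deepseek-r1:1.5b",
           system = "You are Mario from Super Mario Bros.")
    model_avail("mario")  # TRUE once the model has been created
    generate("mario", "who are you?", output = "text")  # should answer as Mario
    delete("mario")  # clean up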