Skip to content

Commit

Permalink
Refactor delete endpoint
Browse files Browse the repository at this point in the history
  • Loading branch information
hauselin committed Jul 28, 2024
1 parent 9395c52 commit 4226f33
Show file tree
Hide file tree
Showing 7 changed files with 50 additions and 25 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/R-CMD-check.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
on:
push:
branches: [main, master, show]
branches: [main, master]
pull_request:
branches: [main, master, show]
branches: [main, master]

name: R-CMD-check

Expand Down
20 changes: 11 additions & 9 deletions R/ollama.R
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,6 @@ create_request <- function(endpoint, host = NULL) {
#' generate("llama3", "The sky is...", stream = TRUE, output = "text", temperature = 2.0)
#' generate("llama3", "The sky is...", stream = FALSE, output = "jsonlist")
generate <- function(model, prompt, suffix = "", images = "", system = "", template = "", context = list(), stream = FALSE, raw = FALSE, keep_alive = "5m", output = c("resp", "jsonlist", "raw", "df", "text"), endpoint = "/api/generate", host = NULL, ...) {

output <- output[1]
if (!output %in% c("df", "resp", "jsonlist", "raw", "text")) {
stop("Invalid output format specified. Supported formats: 'df', 'resp', 'jsonlist', 'raw', 'text'")
Expand Down Expand Up @@ -195,7 +194,6 @@ generate <- function(model, prompt, suffix = "", images = "", system = "", templ
#' )
#' chat("llama3", messages, stream = TRUE)
chat <- function(model, messages, tools = list(), stream = FALSE, keep_alive = "5m", output = c("resp", "jsonlist", "raw", "df", "text"), endpoint = "/api/chat", host = NULL, ...) {

output <- output[1]
if (!output %in% c("df", "resp", "jsonlist", "raw", "text")) {
stop("Invalid output format specified. Supported formats: 'df', 'resp', 'jsonlist', 'raw', 'text'")
Expand Down Expand Up @@ -328,7 +326,6 @@ list_models <- function(output = c("df", "resp", "jsonlist", "raw", "text"), end
#' # show("llama3") # returns jsonlist
#' show("llama3", output = "resp") # returns response object
show <- function(name, verbose = FALSE, output = c("jsonlist", "resp", "raw"), endpoint = "/api/show", host = NULL) {

output <- output[1]
if (!output %in% c("resp", "jsonlist", "raw")) {
stop("Invalid output format specified. Supported formats: 'resp', 'jsonlist', 'raw'")
Expand All @@ -350,7 +347,6 @@ show <- function(name, verbose = FALSE, output = c("jsonlist", "resp", "raw"), e
stop(e)
}
)

}


Expand All @@ -365,7 +361,7 @@ show <- function(name, verbose = FALSE, output = c("jsonlist", "resp", "raw"), e
#'
#' Delete a model from your local machine that you downloaded using the pull() function. To see which models are available, use the list_models() function.
#'
#' @param model A character string of the model name such as "llama3".
#' @param name A character string of the model name such as "llama3".
#' @param endpoint The endpoint to delete the model. Default is "/api/delete".
#' @param host The base URL to use. Default is NULL, which uses Ollama's default base URL.
#'
Expand All @@ -378,10 +374,16 @@ show <- function(name, verbose = FALSE, output = c("jsonlist", "resp", "raw"), e
#' \dontrun{
#' delete("llama3")
#' }
delete <- function(model, endpoint = "/api/delete", host = NULL) {
delete <- function(name, endpoint = "/api/delete", host = NULL) {
if (!model_avail(name)) {
message("Available models listed below.")
print(list_models(output = 'text', host = host))
return(invisible())
}

req <- create_request(endpoint, host)
req <- httr2::req_method(req, "DELETE")
body_json <- list(model = model)
body_json <- list(name = name)
req <- httr2::req_body_json(req, body_json)

tryCatch(
Expand All @@ -390,7 +392,7 @@ delete <- function(model, endpoint = "/api/delete", host = NULL) {
return(resp)
},
error = function(e) {
message("Model not found and cannot be deleted. Please check the model name with list_models() and try again.")
stop("Model not found and cannot be deleted. Please check the model name with list_models() and try again.")
}
)
}
Expand Down Expand Up @@ -685,7 +687,7 @@ model_avail <- function(model) {
}
}
if (!exist) {
cat(paste("Model", model, "does not exist. Please check available models with list_models() or download the model with pull().\n"))
message(paste("Model", model, "does not exist.\nPlease check available models with list_models() or download the model with pull()."))
}
return(exist)
}
24 changes: 15 additions & 9 deletions README.Rmd
Original file line number Diff line number Diff line change
Expand Up @@ -23,21 +23,27 @@ The [Ollama R library](https://hauselin.github.io/ollama-r/) provides the easies

> Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
See [Ollama's Github page](https://github.com/ollama/ollama) for more information. See also the [Ollama API documentation and endpoints](https://github.com/ollama/ollama/blob/main/docs/api.md). For Ollama Python, see [ollama-python](https://github.com/ollama/ollama-python). You'll need to have the [Ollama](https://ollama.com/) app installed on your computer to use this library.
See [Ollama's Github page](https://github.com/ollama/ollama) for more information. See also the [Ollama API documentation and endpoints](https://github.com/ollama/ollama/blob/main/docs/api.md). For Ollama Python, see [ollama-python](https://github.com/ollama/ollama-python). You'll need to have the [Ollama](https://ollama.com/) app installed on your computer to use this library.

## Installation

1. You should have the Ollama app installed on your computer. Download it from [Ollama](https://ollama.com/).

2. Open/launch the Ollama app to start the local server. You can then run your language models locally, on your own machine/computer.
2. Open/launch the Ollama app to start the local server. You can then run your language models locally, on your own machine/computer.

3. Install the development version of `ollamar` R library like so:
3. Install the **stable** version like so:

```r
install.packages("ollamar")
```

4. Alternatively, for the **latest/development** version with more/latest features, you can install it like so:

``` r
devtools::install_github("hauselin/ollamar")
```

If it doesn't work or you don't have `devtools` installed, please run `install.packages("devtools")` in R or RStudio first.
If it doesn't work or you don't have `devtools` installed, please run `install.packages("devtools")` in R or RStudio first.

## Usage

Expand All @@ -51,8 +57,8 @@ test_connection() # test connection to Ollama server; returns a httr2 response
# <httr2_response>
list_models() # list available models (models you've pulled/downloaded)
name size parameter_size quantization_level modified
<chr> <chr> <chr> <chr> <chr>
name size parameter_size quantization_level modified
<chr> <chr> <chr> <chr> <chr>
1 llama3:latest 4.7 GB 8B Q4_0 2024-05-01T21:01:00
2 mistral-openorca:latest 4.1 GB 7B Q4_0 2024-04-25T16:45:00
```
Expand All @@ -69,7 +75,7 @@ list_models() # verify you've pulled/downloaded the model

### Delete a model

Delete a model and its data (see [API doc](https://github.com/ollama/ollama/blob/main/docs/api.md#delete-a-model)). You can see what models you've downloaded with `list_models()`. To download a model, specify the name of the model.
Delete a model and its data (see [API doc](https://github.com/ollama/ollama/blob/main/docs/api.md#delete-a-model)). You can see what models you've downloaded with `list_models()`. To delete a model, specify the name of the model.

```{r eval=FALSE}
list_models() # see the models you've pulled/downloaded
Expand Down Expand Up @@ -177,7 +183,7 @@ test_connection()

#### Parsing `httr2_response` objects with `resp_process()`

`ollamar` uses the [`httr2` library](https://httr2.r-lib.org/index.html) to make HTTP requests to the Ollama server, so many functions in this library returns an `httr2_response` object by default.
`ollamar` uses the [`httr2` library](https://httr2.r-lib.org/index.html) to make HTTP requests to the Ollama server, so many functions in this library return an `httr2_response` object by default.

You can either parse the output with `resp_process()` or use the `output` parameter in the function to specify the output format. Generally, the `output` parameter can be one of `"df"`, `"jsonlist"`, `"raw"`, `"resp"`, or `"text"`.

Expand All @@ -204,7 +210,7 @@ resp_process(resp, "text") # text vector

#### Utility/helper functions to format and prepare messages for the `chat()` function

Internally, messages are represented as a `list` of many distinct `list` messages. Each list/message object has two elements: `role` (can be `"user"` or `"assistant"` or `"system"`) and `content` (the message text). The example below shows how the messages/lists are presented.
Internally, messages are represented as a `list` of many distinct `list` messages. Each list/message object has two elements: `role` (can be `"user"` or `"assistant"` or `"system"`) and `content` (the message text). The example below shows how the messages/lists are presented.

```{r eval=FALSE}
list( # main list containing all the messages
Expand Down
13 changes: 10 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,14 @@ use this library.
2. Open/launch the Ollama app to start the local server. You can then
run your language models locally, on your own machine/computer.

3. Install the development version of `ollamar` R library like so:
3. Install the **stable** version like so:

``` r
install.packages("ollamar")
```

4. Alternatively, for the **latest/development** version with
more/latest features, you can install it like so:

``` r
devtools::install_github("hauselin/ollamar")
Expand All @@ -56,8 +63,8 @@ test_connection() # test connection to Ollama server; returns a httr2 response
# <httr2_response>

list_models() # list available models (models you've pulled/downloaded)
name size parameter_size quantization_level modified
<chr> <chr> <chr> <chr> <chr>
name size parameter_size quantization_level modified
<chr> <chr> <chr> <chr> <chr>
1 llama3:latest 4.7 GB 8B Q4_0 2024-05-01T21:01:00
2 mistral-openorca:latest 4.1 GB 7B Q4_0 2024-04-25T16:45:00
```
Expand Down
4 changes: 2 additions & 2 deletions man/delete.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

10 changes: 10 additions & 0 deletions tests/testthat/test-delete.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Tests for delete(): removing a model from the local Ollama server.
# These tests require a reachable Ollama instance and are skipped otherwise.
library(testthat)
library(ollamar)

test_that("delete function works", {
# Skip when no local Ollama server responds (test_connection() must return HTTP 200).
skip_if_not(test_connection()$status_code == 200, "Ollama server not available")

# wrong model: a nonexistent model name should not error — delete() is expected
# to message about available models and return invisibly.
expect_invisible(delete("sdafds"))
})

File renamed without changes.

0 comments on commit 4226f33

Please sign in to comment.