Interface for Large Language Model APIs in R


LLMR

CRAN status
CRAN downloads
License: MIT
R-CMD-check
Lifecycle: stable
GitHub issues

LLMR offers a unified interface for interacting with multiple Large Language Model APIs in R.

Installation

install.packages("LLMR")        # CRAN (preferred)
# Development version:
# remotes::install_github("asanaei/LLMR")

Quick Start

Configuration

Configuration is designed so the same code can be reused across multiple providers, models, and parameter settings.

# Build a reusable provider/model configuration.
# NOTE: keep real API keys out of source files; read them from the
# environment (see Sys.getenv() in the examples below).
llm_config(
  provider     = "openai",
  model        = "gpt-4o-mini",
  api_key      = "YOUR API KEY", # never write it directly
  temperature  = 0,
  max_tokens   = 256             # no trailing comma: R errors on empty arguments
)

One‑shot text generation (call_llm())

library(LLMR)

# Reusable configuration; `cfg` is shared by the later examples.
# temperature = 0.7 allows some creative variation in the output.
cfg <- llm_config(
  provider = "openai",
  model    = "gpt-4o",
  api_key  = Sys.getenv("OPENAI_API_KEY"),
  temperature = 0.7)

# call_llm() sends a single request; the names in `messages`
# ("system", "user") set each message's chat role.
slogan <- call_llm( 
  config   = cfg,
  messages = c(
    system = "You are a branding expert.",
    user   = "Six‑word catch‑phrase for eco‑friendly balloons." )
)

cat(slogan)

Short embeddings

# Named input sentences; the names identify rows in the result.
sentences <- c(
  one="Quiet rivers mirror bright skies.",
  two="Thunder shakes the mountain path.",
  three="Water is the juice of life!")

# Embedding models use the same llm_config() interface as chat models.
emb_cfg <- llm_config(
  provider = "voyage",
  model    = "voyage-large-2",
  api_key  = Sys.getenv("VOYAGE_KEY") )

# parse_embeddings() converts the raw API response into a numeric matrix.
emb <- call_llm(emb_cfg, sentences) |> parse_embeddings()

dim(emb)
cor(t(emb)) # correlations between the sentence embeddings

# also see get_batched_embeddings

Conversation when you need memory (chat())

# chat_session() keeps the message history so follow-up
# prompts are answered in context. Reuses `cfg` from above.
chat <- chat_session(
  config = cfg,
  system = "You teach statistics tersely.")

chat$send( "Explain p‑values in 12 words.")
chat$send( "Now give a three‑word analogy.")  # "analogy" of what is resolved from history
print(chat)

Functional Mapping (llm_fn())

# Vectorized mapping: llm_fn() applies one prompt template ({x} is
# substituted) over each element of a character vector.
movies <- c("Inception", "Spirited Away", "Parasite")

taglines <- llm_fn(
  x       = movies,
  prompt  = "One‑line playful tagline for the film {x}",
  .config = cfg)

# base data.frame() so the snippet runs without attaching tibble/dplyr
data.frame(movies, taglines)

Data‑Frame Helper (llm_mutate())

library(dplyr)

# Two tracks to annotate; llm_mutate() adds one LLM-generated
# column per call, prompting once per row.
songs <- tibble(
  title  = c("Blue in Green", "Giant Steps"),
  artist = c("Miles Davis", "John Coltrane")
)

songs |>
  llm_mutate(
    .config        = cfg,
    output         = two_word,
    .system_prompt = "answer in exactly two words",
    prompt         = "Guess the jazz sub‑genre for '{title}' (two words)."
  ) |>
  mutate(title_and_artist = paste(title, artist)) |>
  llm_mutate(
    .config        = cfg,
    output         = three_word,
    .system_prompt = "answer in exactly three words",
    prompt         = "Guess the jazz sub‑genre for '{title_and_artist}' (three words)."
  )
  

Multimodal in One Short Request

# Create a small example image to send with the prompt.
tmp <- tempfile(fileext = ".png")
png(tmp)
plot(sort(rnorm(10000)), pch = ".", col = "blue", ann = FALSE)
grid()
dev.off()

# A vision-capable model configuration.
vision_cfg <- llm_config(
  provider = "openai",
  model    = "gpt-4.1",
  api_key  = Sys.getenv("OPENAI_API_KEY")
)

# A message named "file" attaches a local image to the request.
call_llm(
  vision_cfg,
  c(user = "Describe this picture in five words.",
    file = tmp)  # tmp is the path for the example plot created above
)

  • Provider‑specific settings (e.g., model, endpoint) are forwarded automatically.
  • Raw response & token usage: attributes(result)$raw_json and attributes(result)$usage.

Removed Legacy Objects

Agent class and LLMConversation were removed to give LLMR better focus.

Contributions

Pull requests and issues welcome—please include a minimal reproducible example.