Interface for Large Language Model APIs in R
LLMR offers a unified interface for interacting with multiple Large Language Model APIs in R.
install.packages("LLMR") # CRAN (preferred)
# Development version:
# remotes::install_github("asanaei/LLMR")
Configuration is decoupled from the call itself, so the same code can be reused across providers, models, and parameter settings.
llm_config(
  provider    = "openai",
  model       = "gpt-4o-mini",
  api_key     = "YOUR_API_KEY", # never hard-code a real key; read it from an environment variable
  temperature = 0,
  max_tokens  = 256
)
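Because the config object is separate from the call, switching providers is a one-line change. A minimal sketch, assuming an Anthropic key is stored in ANTHROPIC_API_KEY (the model name below is illustrative; check the provider's current model list):

library(LLMR)

claude_cfg <- llm_config(
  provider   = "anthropic",
  model      = "claude-3-5-sonnet-20241022", # illustrative model name
  api_key    = Sys.getenv("ANTHROPIC_API_KEY"),
  max_tokens = 256
)

# The calling code stays identical; only the config changes.
call_llm(claude_cfg, c(user = "Say hello in five words."))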
call_llm()
library(LLMR)
cfg <- llm_config(
  provider    = "openai",
  model       = "gpt-4o",
  api_key     = Sys.getenv("OPENAI_API_KEY"),
  temperature = 0.7
)

slogan <- call_llm(
  config   = cfg,
  messages = c(
    system = "You are a branding expert.",
    user   = "Six-word catch-phrase for eco-friendly balloons."
  )
)
cat(slogan)
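For scripts and batch jobs, LLMR also provides call_llm_robust(), a retrying wrapper around call_llm(). The retry arguments below (tries, wait_seconds, backoff_factor) are assumptions about the current signature, so check ?call_llm_robust before relying on them:

# Retry on transient failures such as rate limits.
# Argument names are assumptions; see ?call_llm_robust.
slogan <- call_llm_robust(
  config         = cfg,
  messages       = c(user = "Six-word catch-phrase for eco-friendly balloons."),
  tries          = 5,
  wait_seconds   = 10,
  backoff_factor = 5
)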
Embeddings
sentences <- c(
  one   = "Quiet rivers mirror bright skies.",
  two   = "Thunder shakes the mountain path.",
  three = "Water is the juice of life!"
)

emb_cfg <- llm_config(
  provider = "voyage",
  model    = "voyage-large-2",
  api_key  = Sys.getenv("VOYAGE_KEY")
)

emb <- call_llm(emb_cfg, sentences) |> parse_embeddings()
dim(emb)
cor(t(emb)) # pairwise similarity of the three sentences
# also see get_batched_embeddings
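For larger corpora, get_batched_embeddings() sends the texts in chunks. A minimal sketch, assuming it takes the texts, an embedding config, and a batch size; argument names may differ, so check ?get_batched_embeddings:

big_corpus <- rep(sentences, 100) # stand-in for a large text vector

emb_big <- get_batched_embeddings(
  texts        = big_corpus,
  embed_config = emb_cfg,
  batch_size   = 50 # assumed argument; tune to the provider's limits
)
dim(emb_big)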
chat_session()
chat <- chat_session(
  config = cfg,
  system = "You teach statistics tersely."
)
chat$send("Explain p-values in 12 words.")
chat$send("Now give a three-word analogy.")
print(chat)
llm_fn()
movies <- c("Inception", "Spirited Away", "Parasite")

taglines <- llm_fn(
  x       = movies,
  prompt  = "One-line playful tagline for the film {x}",
  .config = cfg
)
tibble::tibble(movies, taglines)
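llm_fn() is vectorized over x. If your installed version also accepts a data frame (check ?llm_fn), columns can be referenced by name in the glue template; a sketch under that assumption:

films <- tibble::tibble(
  film = c("Inception", "Parasite"),
  year = c(2010, 2019)
)

# assumes llm_fn() interpolates data-frame columns by name
llm_fn(
  x       = films,
  prompt  = "One-line tagline for {film} ({year}).",
  .config = cfg
)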
llm_mutate()
library(dplyr)

songs <- tibble(
  title  = c("Blue in Green", "Giant Steps"),
  artist = c("Miles Davis", "John Coltrane")
)

songs |>
  llm_mutate(
    .config        = cfg,
    output         = two_word,
    .system_prompt = "answer in exactly two words",
    prompt         = "Guess the jazz sub-genre for '{title}' (two words)."
  ) |>
  mutate(title_and_artist = paste(title, artist)) |>
  llm_mutate(
    .config        = cfg,
    output         = three_word,
    .system_prompt = "answer in exactly three words",
    prompt         = "Guess the jazz sub-genre for '{title_and_artist}' (three words)."
  )
Multimodal input
# Create an example plot to send to a multimodal model
png(tmp <- tempfile(fileext = ".png"))
plot(sort(rnorm(10000)), pch = ".", col = "blue", ann = FALSE)
grid()
dev.off()

vision_cfg <- llm_config(
  provider = "openai",
  model    = "gpt-4.1",
  api_key  = Sys.getenv("OPENAI_API_KEY")
)

call_llm(
  vision_cfg,
  c(user = "Describe this picture in five words.",
    file = tmp) # tmp is the path to the example plot created above
)
Extra options set in the config (e.g., model, endpoint) are forwarded to the provider automatically. Each result carries its full API response in attributes(result)$raw_json and its token counts in attributes(result)$usage. The Agent class and LLMConversation were removed to give LLMR better focus.
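To inspect these attributes on a result, for example (the exact fields inside usage vary by provider):

result <- call_llm(cfg, c(user = "One sentence on LLMs in R."))

cat(result)                   # the text of the reply
str(attributes(result)$usage) # token counts; fields vary by provider
# attributes(result)$raw_json # full provider response as a JSON string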
Pull requests and issues are welcome; please include a minimal reproducible example.