-
Notifications
You must be signed in to change notification settings - Fork 76
Expand file tree
/
Copy pathresilient_example.rs
More file actions
31 lines (25 loc) · 845 Bytes
/
resilient_example.rs
File metadata and controls
31 lines (25 loc) · 845 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
//! Example demonstrating the ResilientLLM wrapper with retry/backoff.
//!
//! Run with:
//! `cargo run --example resilient_example --features openai`
use llm::builder::{LLMBackend, LLMBuilder};
use llm::chat::ChatMessage;
/// Example entry point: builds an OpenAI-backed client with the resilience
/// wrapper enabled (retry with backoff), sends a single chat message, and
/// prints the model's reply.
///
/// # Errors
/// Returns an error if `OPENAI_API_KEY` is unset, if the builder fails, or
/// if the chat request ultimately fails after all retry attempts.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Enable the crate's logging so retry/backoff activity is visible.
    llm::init_logging();

    // Fail fast with a clear message when the key is missing, instead of
    // silently sending an empty key and getting an opaque auth error later.
    let api_key = std::env::var("OPENAI_API_KEY")
        .map_err(|_| "OPENAI_API_KEY environment variable must be set")?;

    let llm = LLMBuilder::new()
        .backend(LLMBackend::OpenAI)
        .api_key(api_key)
        .model("gpt-4o-mini")
        .resilient(true)
        // Up to 3 attempts; backoff bounds 200–2000 (presumably milliseconds —
        // NOTE(review): confirm units against the LLMBuilder documentation).
        .resilient_attempts(3)
        .resilient_backoff(200, 2000)
        .build()?;

    let messages = vec![ChatMessage::user()
        .content("Reply with a single short greeting.")
        .build()];

    // The resilient wrapper retries transient failures transparently here.
    let response = llm.chat(&messages).await?;
    println!("{response}");
    Ok(())
}