Skip to content

Commit 45338f5

Browse files
RoyLin
authored and committed
feat(llm): add dedicated GlmClient for Zhipu AI
Extract GLM-specific defaults (base_url, chat path) from factory into a proper GlmClient struct wrapping OpenAiClient. Factory now instantiates GlmClient directly instead of manually configuring OpenAiClient.
1 parent d7b1163 commit 45338f5

3 files changed

Lines changed: 83 additions & 5 deletions

File tree

core/src/llm/factory.rs

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
//! LLM client factory
22
33
use super::anthropic::AnthropicClient;
4+
use super::glm::GlmClient;
45
use super::openai::OpenAiClient;
56
use super::types::SecretString;
67
use super::LlmClient;
@@ -130,10 +131,7 @@ pub fn create_client_with_config(config: LlmConfig) -> Arc<dyn LlmClient> {
130131
Arc::new(client)
131132
}
132133
"glm" | "zhipu" | "bigmodel" => {
133-
let mut client = OpenAiClient::new(api_key, config.model)
134-
.with_provider_name(config.provider.clone())
135-
.with_retry_config(retry)
136-
.with_chat_completions_path("/api/paas/v4/chat/completions");
134+
let mut client = GlmClient::new(api_key, config.model).with_retry_config(retry);
137135
if let Some(base_url) = config.base_url {
138136
client = client.with_base_url(base_url);
139137
}

core/src/llm/glm.rs

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
//! Zhipu AI (GLM) LLM client
2+
//!
3+
//! GLM uses an OpenAI-compatible API but with a different endpoint path.
4+
//! This client wraps `OpenAiClient` with the correct GLM defaults.
5+
6+
use super::openai::OpenAiClient;
7+
use super::types::*;
8+
use super::LlmClient;
9+
use crate::retry::RetryConfig;
10+
use anyhow::Result;
11+
use async_trait::async_trait;
12+
use tokio::sync::mpsc;
13+
#[cfg(test)]
14+
use {super::http::HttpClient, std::sync::Arc};
15+
16+
const GLM_BASE_URL: &str = "https://open.bigmodel.cn";
17+
const GLM_CHAT_PATH: &str = "/api/paas/v4/chat/completions";
18+
19+
/// Zhipu AI (GLM) client.
///
/// Thin newtype over [`OpenAiClient`]: GLM exposes an OpenAI-compatible
/// API, so all request/response handling is delegated to the wrapped
/// client, which `GlmClient::new` pre-configures with the GLM base URL
/// and chat-completions path.
pub struct GlmClient(OpenAiClient);
21+
22+
impl GlmClient {
23+
pub fn new(api_key: String, model: String) -> Self {
24+
Self(
25+
OpenAiClient::new(api_key, model)
26+
.with_provider_name("glm")
27+
.with_base_url(GLM_BASE_URL.to_string())
28+
.with_chat_completions_path(GLM_CHAT_PATH),
29+
)
30+
}
31+
32+
pub fn with_temperature(mut self, temperature: f32) -> Self {
33+
self.0 = self.0.with_temperature(temperature);
34+
self
35+
}
36+
37+
pub fn with_max_tokens(mut self, max_tokens: usize) -> Self {
38+
self.0 = self.0.with_max_tokens(max_tokens);
39+
self
40+
}
41+
42+
pub fn with_base_url(mut self, base_url: String) -> Self {
43+
self.0 = self.0.with_base_url(base_url);
44+
self
45+
}
46+
47+
pub fn with_retry_config(mut self, retry_config: RetryConfig) -> Self {
48+
self.0 = self.0.with_retry_config(retry_config);
49+
self
50+
}
51+
52+
#[cfg(test)]
53+
pub fn with_http_client(mut self, http: Arc<dyn HttpClient>) -> Self {
54+
self.0 = self.0.with_http_client(http);
55+
self
56+
}
57+
}
58+
59+
#[async_trait]
impl LlmClient for GlmClient {
    /// Forward a non-streaming completion request to the wrapped
    /// OpenAI-compatible client; GLM-specific routing (base URL, chat
    /// path) was fixed at construction time.
    async fn complete(
        &self,
        messages: &[Message],
        system: Option<&str>,
        tools: &[ToolDefinition],
    ) -> Result<LlmResponse> {
        self.0.complete(messages, system, tools).await
    }

    /// Forward a streaming completion request; stream events arrive on
    /// the returned channel receiver.
    async fn complete_streaming(
        &self,
        messages: &[Message],
        system: Option<&str>,
        tools: &[ToolDefinition],
    ) -> Result<mpsc::Receiver<StreamEvent>> {
        self.0.complete_streaming(messages, system, tools).await
    }
}

core/src/llm/mod.rs

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,19 @@
11
//! LLM client abstraction layer
22
//!
33
//! Provides a unified interface for interacting with LLM providers
4-
//! (Anthropic Claude, OpenAI, and OpenAI-compatible providers).
4+
//! (Anthropic Claude, OpenAI, Zhipu AI GLM, and OpenAI-compatible providers).
55
66
pub mod anthropic;
77
pub mod factory;
8+
pub mod glm;
89
pub mod http;
910
pub mod openai;
1011
mod types;
1112

1213
// Re-export public types
1314
pub use anthropic::AnthropicClient;
1415
pub use factory::{create_client_with_config, LlmConfig};
16+
pub use glm::GlmClient;
1517
pub use http::{default_http_client, HttpClient, HttpResponse, StreamingHttpResponse};
1618
pub use openai::OpenAiClient;
1719
pub use types::*;

0 commit comments

Comments (0)