diff --git a/examples/modelconfig-xai.yaml b/examples/modelconfig-xai.yaml
new file mode 100644
index 000000000..28b961e99
--- /dev/null
+++ b/examples/modelconfig-xai.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: xai-api-key
+ namespace: default
+type: Opaque
+stringData:
+ apiKey: your-xai-api-key-here
+---
+apiVersion: kagent.dev/v1alpha2
+kind: ModelConfig
+metadata:
+ name: grok-4-1-fast-reasoning
+ namespace: default
+spec:
+ provider: XAI
+ model: grok-4-1-fast-reasoning
+ apiKeySecret: xai-api-key
+ apiKeySecretKey: apiKey
+ xAI:
+ temperature: "0.7"
+ maxTokens: 4096
+ topP: "0.9"
+ tools:
+ - web_search
+ - x_search
+ - code_execution
+ liveSearchMode: auto
diff --git a/go/api/v1alpha1/modelconfig_types.go b/go/api/v1alpha1/modelconfig_types.go
index 4a08803f7..0d1c9b3b7 100644
--- a/go/api/v1alpha1/modelconfig_types.go
+++ b/go/api/v1alpha1/modelconfig_types.go
@@ -25,7 +25,7 @@ const (
)
// ModelProvider represents the model provider type
-// +kubebuilder:validation:Enum=Anthropic;OpenAI;AzureOpenAI;Ollama;Gemini;GeminiVertexAI;AnthropicVertexAI
+// +kubebuilder:validation:Enum=Anthropic;OpenAI;AzureOpenAI;Ollama;Gemini;GeminiVertexAI;AnthropicVertexAI;XAI
type ModelProvider string
const (
@@ -36,6 +36,7 @@ const (
ModelProviderGemini ModelProvider = "Gemini"
ModelProviderGeminiVertexAI ModelProvider = "GeminiVertexAI"
ModelProviderAnthropicVertexAI ModelProvider = "AnthropicVertexAI"
+ ModelProviderXAI ModelProvider = "XAI"
)
type BaseVertexAIConfig struct {
@@ -202,6 +203,19 @@ type OllamaConfig struct {
type GeminiConfig struct{}
+// XAIConfig contains xAI-specific configuration options
+type XAIConfig struct {
+ OpenAIConfig `json:",inline"`
+
+ // Server-side tools to enable
+ // +optional
+ Tools []string `json:"tools,omitempty"`
+
+ // Live search mode for real-time data retrieval
+ // +optional
+ LiveSearchMode string `json:"liveSearchMode,omitempty"`
+}
+
// ModelConfigSpec defines the desired state of ModelConfig.
//
// +kubebuilder:validation:XValidation:message="provider.openAI must be nil if the provider is not OpenAI",rule="!(has(self.openAI) && self.provider != 'OpenAI')"
@@ -211,6 +225,7 @@ type GeminiConfig struct{}
// +kubebuilder:validation:XValidation:message="provider.gemini must be nil if the provider is not Gemini",rule="!(has(self.gemini) && self.provider != 'Gemini')"
// +kubebuilder:validation:XValidation:message="provider.geminiVertexAI must be nil if the provider is not GeminiVertexAI",rule="!(has(self.geminiVertexAI) && self.provider != 'GeminiVertexAI')"
// +kubebuilder:validation:XValidation:message="provider.anthropicVertexAI must be nil if the provider is not AnthropicVertexAI",rule="!(has(self.anthropicVertexAI) && self.provider != 'AnthropicVertexAI')"
+// +kubebuilder:validation:XValidation:message="provider.xAI must be nil if the provider is not XAI",rule="!(has(self.xAI) && self.provider != 'XAI')"
type ModelConfigSpec struct {
Model string `json:"model"`
@@ -262,6 +277,10 @@ type ModelConfigSpec struct {
// Anthropic-specific configuration
// +optional
AnthropicVertexAI *AnthropicVertexAIConfig `json:"anthropicVertexAI,omitempty"`
+
+ // xAI-specific configuration
+ // +optional
+ XAI *XAIConfig `json:"xAI,omitempty"`
}
// Model Configurations
diff --git a/go/api/v1alpha1/zz_generated.deepcopy.go b/go/api/v1alpha1/zz_generated.deepcopy.go
index f887a5ff2..eb6b17c61 100644
--- a/go/api/v1alpha1/zz_generated.deepcopy.go
+++ b/go/api/v1alpha1/zz_generated.deepcopy.go
@@ -722,6 +722,11 @@ func (in *ModelConfigSpec) DeepCopyInto(out *ModelConfigSpec) {
*out = new(AnthropicVertexAIConfig)
(*in).DeepCopyInto(*out)
}
+ if in.XAI != nil {
+ in, out := &in.XAI, &out.XAI
+ *out = new(XAIConfig)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelConfigSpec.
@@ -1111,3 +1116,24 @@ func (in *ValueSource) DeepCopy() *ValueSource {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *XAIConfig) DeepCopyInto(out *XAIConfig) {
+ *out = *in
+ in.OpenAIConfig.DeepCopyInto(&out.OpenAIConfig)
+ if in.Tools != nil {
+ in, out := &in.Tools, &out.Tools
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XAIConfig.
+func (in *XAIConfig) DeepCopy() *XAIConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(XAIConfig)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/go/api/v1alpha2/modelconfig_types.go b/go/api/v1alpha2/modelconfig_types.go
index 4e9315730..5268eee9b 100644
--- a/go/api/v1alpha2/modelconfig_types.go
+++ b/go/api/v1alpha2/modelconfig_types.go
@@ -25,7 +25,7 @@ const (
)
// ModelProvider represents the model provider type
-// +kubebuilder:validation:Enum=Anthropic;OpenAI;AzureOpenAI;Ollama;Gemini;GeminiVertexAI;AnthropicVertexAI
+// +kubebuilder:validation:Enum=Anthropic;OpenAI;AzureOpenAI;Ollama;Gemini;GeminiVertexAI;AnthropicVertexAI;XAI
type ModelProvider string
const (
@@ -36,6 +36,7 @@ const (
ModelProviderGemini ModelProvider = "Gemini"
ModelProviderGeminiVertexAI ModelProvider = "GeminiVertexAI"
ModelProviderAnthropicVertexAI ModelProvider = "AnthropicVertexAI"
+ ModelProviderXAI ModelProvider = "XAI"
)
type BaseVertexAIConfig struct {
@@ -210,6 +211,19 @@ type OllamaConfig struct {
type GeminiConfig struct{}
+// XAIConfig contains xAI-specific configuration options
+type XAIConfig struct {
+ OpenAIConfig `json:",inline"`
+
+ // Server-side tools to enable
+ // +optional
+ Tools []string `json:"tools,omitempty"`
+
+ // Live search mode for real-time data retrieval
+ // +optional
+ LiveSearchMode string `json:"liveSearchMode,omitempty"`
+}
+
// TLSConfig contains TLS/SSL configuration options for model provider connections.
// This enables agents to connect to internal LiteLLM gateways or other providers
// that use self-signed certificates or custom certificate authorities.
@@ -255,6 +269,7 @@ type TLSConfig struct {
// +kubebuilder:validation:XValidation:message="provider.gemini must be nil if the provider is not Gemini",rule="!(has(self.gemini) && self.provider != 'Gemini')"
// +kubebuilder:validation:XValidation:message="provider.geminiVertexAI must be nil if the provider is not GeminiVertexAI",rule="!(has(self.geminiVertexAI) && self.provider != 'GeminiVertexAI')"
// +kubebuilder:validation:XValidation:message="provider.anthropicVertexAI must be nil if the provider is not AnthropicVertexAI",rule="!(has(self.anthropicVertexAI) && self.provider != 'AnthropicVertexAI')"
+// +kubebuilder:validation:XValidation:message="provider.xAI must be nil if the provider is not XAI",rule="!(has(self.xAI) && self.provider != 'XAI')"
// +kubebuilder:validation:XValidation:message="apiKeySecret must be set if apiKeySecretKey is set",rule="!(has(self.apiKeySecretKey) && !has(self.apiKeySecret))"
// +kubebuilder:validation:XValidation:message="apiKeySecretKey must be set if apiKeySecret is set",rule="!(has(self.apiKeySecret) && !has(self.apiKeySecretKey))"
// +kubebuilder:validation:XValidation:message="caCertSecretKey requires caCertSecretRef",rule="!(has(self.tls) && has(self.tls.caCertSecretKey) && size(self.tls.caCertSecretKey) > 0 && (!has(self.tls.caCertSecretRef) || size(self.tls.caCertSecretRef) == 0))"
@@ -306,6 +321,10 @@ type ModelConfigSpec struct {
// +optional
AnthropicVertexAI *AnthropicVertexAIConfig `json:"anthropicVertexAI,omitempty"`
+ // xAI-specific configuration
+ // +optional
+ XAI *XAIConfig `json:"xAI,omitempty"`
+
// TLS configuration for provider connections.
// Enables agents to connect to internal LiteLLM gateways or other providers
// that use self-signed certificates or custom certificate authorities.
diff --git a/go/api/v1alpha2/zz_generated.deepcopy.go b/go/api/v1alpha2/zz_generated.deepcopy.go
index 694c281ee..9f647cedf 100644
--- a/go/api/v1alpha2/zz_generated.deepcopy.go
+++ b/go/api/v1alpha2/zz_generated.deepcopy.go
@@ -554,6 +554,11 @@ func (in *ModelConfigSpec) DeepCopyInto(out *ModelConfigSpec) {
*out = new(AnthropicVertexAIConfig)
(*in).DeepCopyInto(*out)
}
+ if in.XAI != nil {
+ in, out := &in.XAI, &out.XAI
+ *out = new(XAIConfig)
+ (*in).DeepCopyInto(*out)
+ }
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = new(TLSConfig)
@@ -989,3 +994,24 @@ func (in *ValueSource) DeepCopy() *ValueSource {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *XAIConfig) DeepCopyInto(out *XAIConfig) {
+ *out = *in
+ in.OpenAIConfig.DeepCopyInto(&out.OpenAIConfig)
+ if in.Tools != nil {
+ in, out := &in.Tools, &out.Tools
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XAIConfig.
+func (in *XAIConfig) DeepCopy() *XAIConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(XAIConfig)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/go/cli/internal/cli/agent/const.go b/go/cli/internal/cli/agent/const.go
index f982d4f18..a4dc3efa0 100644
--- a/go/cli/internal/cli/agent/const.go
+++ b/go/cli/internal/cli/agent/const.go
@@ -16,6 +16,7 @@ const (
OPENAI_API_KEY = "OPENAI_API_KEY"
ANTHROPIC_API_KEY = "ANTHROPIC_API_KEY"
AZUREOPENAI_API_KEY = "AZUREOPENAI_API_KEY"
+ XAI_API_KEY = "XAI_API_KEY"
// kagent env variables
KAGENT_DEFAULT_MODEL_PROVIDER = "KAGENT_DEFAULT_MODEL_PROVIDER"
@@ -39,6 +40,8 @@ func GetModelProvider() v1alpha2.ModelProvider {
return v1alpha2.ModelProviderAnthropic
case GetModelProviderHelmValuesKey(v1alpha2.ModelProviderAzureOpenAI):
return v1alpha2.ModelProviderAzureOpenAI
+ case GetModelProviderHelmValuesKey(v1alpha2.ModelProviderXAI):
+ return v1alpha2.ModelProviderXAI
default:
return v1alpha2.ModelProviderOpenAI
}
@@ -62,6 +65,8 @@ func GetProviderAPIKey(provider v1alpha2.ModelProvider) string {
return ANTHROPIC_API_KEY
case v1alpha2.ModelProviderAzureOpenAI:
return AZUREOPENAI_API_KEY
+ case v1alpha2.ModelProviderXAI:
+ return XAI_API_KEY
default:
return ""
}
diff --git a/go/config/crd/bases/kagent.dev_modelconfigs.yaml b/go/config/crd/bases/kagent.dev_modelconfigs.yaml
index 74153526d..18af7911f 100644
--- a/go/config/crd/bases/kagent.dev_modelconfigs.yaml
+++ b/go/config/crd/bases/kagent.dev_modelconfigs.yaml
@@ -256,7 +256,50 @@ spec:
- Gemini
- GeminiVertexAI
- AnthropicVertexAI
+ - XAI
type: string
+ xAI:
+ description: xAI-specific configuration
+ properties:
+ baseUrl:
+ description: Base URL for the OpenAI API (overrides default)
+ type: string
+ frequencyPenalty:
+ description: Frequency penalty
+ type: string
+ liveSearchMode:
+ description: Live search mode for real-time data retrieval
+ type: string
+ maxTokens:
+ description: Maximum tokens to generate
+ type: integer
+ "n":
+ description: N value
+ type: integer
+ organization:
+ description: Organization ID for the OpenAI API
+ type: string
+ presencePenalty:
+ description: Presence penalty
+ type: string
+ seed:
+ description: Seed value
+ type: integer
+ temperature:
+ description: Temperature for sampling
+ type: string
+ timeout:
+ description: Timeout
+ type: integer
+ tools:
+ description: Server-side tools to enable
+ items:
+ type: string
+ type: array
+ topP:
+ description: Top-p sampling parameter
+ type: string
+ type: object
required:
- model
- provider
@@ -278,6 +321,8 @@ spec:
- message: provider.anthropicVertexAI must be nil if the provider is not
AnthropicVertexAI
rule: '!(has(self.anthropicVertexAI) && self.provider != ''AnthropicVertexAI'')'
+ - message: provider.xAI must be nil if the provider is not XAI
+ rule: '!(has(self.xAI) && self.provider != ''XAI'')'
status:
description: ModelConfigStatus defines the observed state of ModelConfig.
properties:
@@ -576,6 +621,7 @@ spec:
- Gemini
- GeminiVertexAI
- AnthropicVertexAI
+ - XAI
type: string
tls:
description: |-
@@ -615,6 +661,56 @@ spec:
Production deployments MUST use proper certificates.
type: boolean
type: object
+ xAI:
+ description: xAI-specific configuration
+ properties:
+ baseUrl:
+ description: Base URL for the OpenAI API (overrides default)
+ type: string
+ frequencyPenalty:
+ description: Frequency penalty
+ type: string
+ liveSearchMode:
+ description: Live search mode for real-time data retrieval
+ type: string
+ maxTokens:
+ description: Maximum tokens to generate
+ type: integer
+ "n":
+ description: N value
+ type: integer
+ organization:
+ description: Organization ID for the OpenAI API
+ type: string
+ presencePenalty:
+ description: Presence penalty
+ type: string
+ reasoningEffort:
+ description: Reasoning effort
+ enum:
+ - minimal
+ - low
+ - medium
+ - high
+ type: string
+ seed:
+ description: Seed value
+ type: integer
+ temperature:
+ description: Temperature for sampling
+ type: string
+ timeout:
+ description: Timeout
+ type: integer
+ tools:
+ description: Server-side tools to enable
+ items:
+ type: string
+ type: array
+ topP:
+ description: Top-p sampling parameter
+ type: string
+ type: object
required:
- model
- provider
@@ -636,6 +732,8 @@ spec:
- message: provider.anthropicVertexAI must be nil if the provider is not
AnthropicVertexAI
rule: '!(has(self.anthropicVertexAI) && self.provider != ''AnthropicVertexAI'')'
+ - message: provider.xAI must be nil if the provider is not XAI
+ rule: '!(has(self.xAI) && self.provider != ''XAI'')'
- message: apiKeySecret must be set if apiKeySecretKey is set
rule: '!(has(self.apiKeySecretKey) && !has(self.apiKeySecret))'
- message: apiKeySecretKey must be set if apiKeySecret is set
diff --git a/go/internal/adk/types.go b/go/internal/adk/types.go
index ca08f8ab3..562f45655 100644
--- a/go/internal/adk/types.go
+++ b/go/internal/adk/types.go
@@ -69,6 +69,7 @@ const (
ModelTypeGeminiAnthropic = "gemini_anthropic"
ModelTypeOllama = "ollama"
ModelTypeGemini = "gemini"
+ ModelTypeXAI = "xai"
)
func (o *OpenAI) MarshalJSON() ([]byte, error) {
@@ -185,6 +186,52 @@ func (g *Gemini) GetType() string {
return ModelTypeGemini
}
+// XAI uses OpenAI-compatible API but with a different default baseURL and server-side tools support
+type XAI struct {
+ OpenAI
+ // Server-side tools (e.g., web_search, x_search, code_execution, collections_search)
+ Tools []string `json:"tools,omitempty"`
+ // Live search mode for real-time data retrieval ("off", "auto", "on")
+ LiveSearchMode string `json:"live_search_mode,omitempty"`
+}
+
+func (x *XAI) MarshalJSON() ([]byte, error) {
+ // Create a map to ensure we override the embedded BaseModel.Type
+ result := make(map[string]interface{})
+
+ // Marshal the embedded OpenAI to get all its fields
+ openaiBytes, err := json.Marshal(x.OpenAI)
+ if err != nil {
+ return nil, err
+ }
+ var openaiMap map[string]interface{}
+ if err := json.Unmarshal(openaiBytes, &openaiMap); err != nil {
+ return nil, err
+ }
+
+ // Copy OpenAI fields
+ for k, v := range openaiMap {
+ result[k] = v
+ }
+
+ // Override type to xai (this ensures BaseModel.Type is overridden)
+ result["type"] = ModelTypeXAI
+
+ // Add XAI-specific fields
+ if len(x.Tools) > 0 {
+ result["tools"] = x.Tools
+ }
+ if x.LiveSearchMode != "" {
+ result["live_search_mode"] = x.LiveSearchMode
+ }
+
+ return json.Marshal(result)
+}
+
+func (x *XAI) GetType() string {
+ return ModelTypeXAI
+}
+
func ParseModel(bytes []byte) (Model, error) {
var model BaseModel
if err := json.Unmarshal(bytes, &model); err != nil {
@@ -197,6 +244,12 @@ func ParseModel(bytes []byte) (Model, error) {
return nil, err
}
return &gemini, nil
+ case ModelTypeXAI:
+ var xai XAI
+ if err := json.Unmarshal(bytes, &xai); err != nil {
+ return nil, err
+ }
+ return &xai, nil
case ModelTypeAzureOpenAI:
var azureOpenAI AzureOpenAI
if err := json.Unmarshal(bytes, &azureOpenAI); err != nil {
@@ -291,3 +344,24 @@ var _ driver.Valuer = &AgentConfig{}
func (a AgentConfig) Value() (driver.Value, error) {
return json.Marshal(a)
}
+
+// MarshalJSON ensures the Model interface is marshaled via its concrete type's
+// MarshalJSON, propagating marshal errors instead of silently emitting null.
+func (a AgentConfig) MarshalJSON() ([]byte, error) {
+	type Alias AgentConfig
+	var modelJSON json.RawMessage
+	if a.Model != nil {
+		b, err := json.Marshal(a.Model)
+		if err != nil {
+			return nil, err
+		}
+		modelJSON = b
+	}
+	return json.Marshal(&struct {
+		Model json.RawMessage `json:"model"`
+		*Alias
+	}{
+		Model: modelJSON,
+		Alias: (*Alias)(&a),
+	})
+}
diff --git a/go/internal/controller/translator/agent/adk_api_translator.go b/go/internal/controller/translator/agent/adk_api_translator.go
index 0eeafb507..f870ff632 100644
--- a/go/internal/controller/translator/agent/adk_api_translator.go
+++ b/go/internal/controller/translator/agent/adk_api_translator.go
@@ -613,6 +613,47 @@ func populateTLSFields(baseModel *adk.BaseModel, tlsConfig *v1alpha2.TLSConfig)
}
}
+// populateOpenAIFields populates OpenAI-compatible fields from OpenAIConfig
+// into an OpenAI struct. This is used by both OpenAI and XAI providers.
+func populateOpenAIFields(openai *adk.OpenAI, config *v1alpha2.OpenAIConfig, mdd *modelDeploymentData) {
+ if config == nil {
+ return
+ }
+
+ // Only overwrite BaseUrl if a non-empty value is provided (preserves defaults like XAI's https://api.x.ai/v1)
+ if config.BaseURL != "" {
+ openai.BaseUrl = config.BaseURL
+ }
+ openai.Temperature = utils.ParseStringToFloat64(config.Temperature)
+ openai.TopP = utils.ParseStringToFloat64(config.TopP)
+ openai.FrequencyPenalty = utils.ParseStringToFloat64(config.FrequencyPenalty)
+ openai.PresencePenalty = utils.ParseStringToFloat64(config.PresencePenalty)
+
+ if config.MaxTokens > 0 {
+ openai.MaxTokens = &config.MaxTokens
+ }
+ if config.Seed != nil {
+ openai.Seed = config.Seed
+ }
+ if config.N != nil {
+ openai.N = config.N
+ }
+ if config.Timeout != nil {
+ openai.Timeout = config.Timeout
+ }
+ if config.ReasoningEffort != nil {
+ effort := string(*config.ReasoningEffort)
+ openai.ReasoningEffort = &effort
+ }
+
+ if config.Organization != "" {
+ mdd.EnvVars = append(mdd.EnvVars, corev1.EnvVar{
+ Name: "OPENAI_ORGANIZATION",
+ Value: config.Organization,
+ })
+ }
+}
+
// addTLSConfiguration adds TLS certificate volume mounts to modelDeploymentData
// when TLS configuration is present in the ModelConfig.
// Note: TLS configuration fields are now included in agent config JSON via BaseModel,
@@ -690,37 +731,7 @@ func (a *adkApiTranslator) translateModel(ctx context.Context, namespace, modelC
// Populate TLS fields in BaseModel
populateTLSFields(&openai.BaseModel, model.Spec.TLS)
- if model.Spec.OpenAI != nil {
- openai.BaseUrl = model.Spec.OpenAI.BaseURL
- openai.Temperature = utils.ParseStringToFloat64(model.Spec.OpenAI.Temperature)
- openai.TopP = utils.ParseStringToFloat64(model.Spec.OpenAI.TopP)
- openai.FrequencyPenalty = utils.ParseStringToFloat64(model.Spec.OpenAI.FrequencyPenalty)
- openai.PresencePenalty = utils.ParseStringToFloat64(model.Spec.OpenAI.PresencePenalty)
-
- if model.Spec.OpenAI.MaxTokens > 0 {
- openai.MaxTokens = &model.Spec.OpenAI.MaxTokens
- }
- if model.Spec.OpenAI.Seed != nil {
- openai.Seed = model.Spec.OpenAI.Seed
- }
- if model.Spec.OpenAI.N != nil {
- openai.N = model.Spec.OpenAI.N
- }
- if model.Spec.OpenAI.Timeout != nil {
- openai.Timeout = model.Spec.OpenAI.Timeout
- }
- if model.Spec.OpenAI.ReasoningEffort != nil {
- effort := string(*model.Spec.OpenAI.ReasoningEffort)
- openai.ReasoningEffort = &effort
- }
-
- if model.Spec.OpenAI.Organization != "" {
- modelDeploymentData.EnvVars = append(modelDeploymentData.EnvVars, corev1.EnvVar{
- Name: "OPENAI_ORGANIZATION",
- Value: model.Spec.OpenAI.Organization,
- })
- }
- }
+ populateOpenAIFields(openai, model.Spec.OpenAI, modelDeploymentData)
return openai, modelDeploymentData, secretHashBytes, nil
case v1alpha2.ModelProviderAnthropic:
if model.Spec.APIKeySecret != "" {
@@ -915,7 +926,59 @@ func (a *adkApiTranslator) translateModel(ctx context.Context, namespace, modelC
// Populate TLS fields in BaseModel
populateTLSFields(&gemini.BaseModel, model.Spec.TLS)
- return gemini, modelDeploymentData, secretHashBytes, nil
+	return gemini, modelDeploymentData, secretHashBytes, nil
+ case v1alpha2.ModelProviderXAI:
+ if model.Spec.APIKeySecret != "" {
+ modelDeploymentData.EnvVars = append(modelDeploymentData.EnvVars, corev1.EnvVar{
+ Name: "XAI_API_KEY",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: model.Spec.APIKeySecret,
+ },
+ Key: model.Spec.APIKeySecretKey,
+ },
+ },
+ })
+ }
+ xai := &adk.XAI{
+ OpenAI: adk.OpenAI{
+ BaseModel: adk.BaseModel{
+ Model: model.Spec.Model,
+ Headers: model.Spec.DefaultHeaders,
+ },
+ },
+ }
+ // Populate TLS fields in BaseModel
+ populateTLSFields(&xai.BaseModel, model.Spec.TLS)
+
+ if model.Spec.XAI != nil {
+ // Populate OpenAI fields
+ openaiConfig := &v1alpha2.OpenAIConfig{
+ BaseURL: model.Spec.XAI.BaseURL,
+ Organization: model.Spec.XAI.Organization,
+ Temperature: model.Spec.XAI.Temperature,
+ MaxTokens: model.Spec.XAI.MaxTokens,
+ TopP: model.Spec.XAI.TopP,
+ FrequencyPenalty: model.Spec.XAI.FrequencyPenalty,
+ PresencePenalty: model.Spec.XAI.PresencePenalty,
+ Seed: model.Spec.XAI.Seed,
+ N: model.Spec.XAI.N,
+ Timeout: model.Spec.XAI.Timeout,
+ ReasoningEffort: model.Spec.XAI.ReasoningEffort,
+ }
+ populateOpenAIFields(&xai.OpenAI, openaiConfig, modelDeploymentData)
+
+ if len(model.Spec.XAI.Tools) > 0 {
+ xai.Tools = slices.Clone(model.Spec.XAI.Tools)
+ }
+ if model.Spec.XAI.LiveSearchMode != "" {
+ xai.LiveSearchMode = model.Spec.XAI.LiveSearchMode
+ } else {
+ xai.LiveSearchMode = "off"
+ }
+ }
+ return xai, modelDeploymentData, secretHashBytes, nil
}
return nil, nil, nil, fmt.Errorf("unknown model provider: %s", model.Spec.Provider)
diff --git a/go/internal/httpserver/handlers/modelconfig.go b/go/internal/httpserver/handlers/modelconfig.go
index 2c5c6ceee..b38c945b0 100644
--- a/go/internal/httpserver/handlers/modelconfig.go
+++ b/go/internal/httpserver/handlers/modelconfig.go
@@ -168,7 +168,13 @@ func getStructJSONKeys(structType reflect.Type) []string {
for i := 0; i < structType.NumField(); i++ {
field := structType.Field(i)
jsonTag := field.Tag.Get("json")
- if jsonTag != "" && jsonTag != "-" {
+
+ // Handle embedded structs (anonymous fields) with json:",inline"
+ if field.Anonymous && strings.Contains(jsonTag, "inline") {
+ // Recursively get keys from embedded struct
+ embeddedKeys := getStructJSONKeys(field.Type)
+ keys = append(keys, embeddedKeys...)
+ } else if jsonTag != "" && jsonTag != "-" {
tagParts := strings.Split(jsonTag, ",")
keys = append(keys, tagParts[0])
}
@@ -312,6 +318,13 @@ func (h *ModelConfigHandler) HandleCreateModelConfig(w ErrorResponseWriter, r *h
} else {
log.V(1).Info("No AnthropicVertexAI params provided in create.")
}
+ case v1alpha2.ModelProviderXAI:
+ if req.XAIParams != nil {
+ modelConfig.Spec.XAI = req.XAIParams
+ log.V(1).Info("Assigned XAI params to spec")
+ } else {
+ log.V(1).Info("No XAI params provided in create.")
+ }
default:
providerConfigErr = fmt.Errorf("unsupported provider type: %s", req.Provider.Type)
}
@@ -432,6 +445,7 @@ func (h *ModelConfigHandler) HandleUpdateModelConfig(w ErrorResponseWriter, r *h
Gemini: nil,
GeminiVertexAI: nil,
AnthropicVertexAI: nil,
+ XAI: nil,
}
// --- Update Secret if API Key is provided (and not Ollama) ---
@@ -511,6 +525,13 @@ func (h *ModelConfigHandler) HandleUpdateModelConfig(w ErrorResponseWriter, r *h
} else {
log.V(1).Info("No AnthropicVertexAI params provided in update.")
}
+ case v1alpha2.ModelProviderXAI:
+ if req.XAIParams != nil {
+ modelConfig.Spec.XAI = req.XAIParams
+ log.V(1).Info("Assigned updated XAI params to spec")
+ } else {
+ log.V(1).Info("No XAI params provided in update.")
+ }
default:
providerConfigErr = fmt.Errorf("unsupported provider type specified: %s", req.Provider.Type)
}
diff --git a/go/internal/httpserver/handlers/models.go b/go/internal/httpserver/handlers/models.go
index 106f04913..42a31f8f8 100644
--- a/go/internal/httpserver/handlers/models.go
+++ b/go/internal/httpserver/handlers/models.go
@@ -83,6 +83,17 @@ func (h *ModelHandler) HandleListSupportedModels(w ErrorResponseWriter, r *http.
{Name: "claude-sonnet-4@20250514", FunctionCalling: true},
{Name: "claude-3-5-haiku@20241022", FunctionCalling: true},
},
+ v1alpha2.ModelProviderXAI: {
+ {Name: "grok-4-1-fast-reasoning", FunctionCalling: true},
+ {Name: "grok-4-1-fast-non-reasoning", FunctionCalling: true},
+ {Name: "grok-code-fast-1", FunctionCalling: true},
+ {Name: "grok-4-fast-reasoning", FunctionCalling: true},
+ {Name: "grok-4-fast-non-reasoning", FunctionCalling: true},
+ {Name: "grok-4-0709", FunctionCalling: true},
+ {Name: "grok-3-mini", FunctionCalling: true},
+ {Name: "grok-3", FunctionCalling: true},
+ {Name: "grok-2-vision-1212", FunctionCalling: true},
+ },
}
log.Info("Successfully listed supported models", "count", len(supportedModels))
diff --git a/go/internal/httpserver/handlers/providers.go b/go/internal/httpserver/handlers/providers.go
index 1e2f1fdde..20a4c49e8 100644
--- a/go/internal/httpserver/handlers/providers.go
+++ b/go/internal/httpserver/handlers/providers.go
@@ -26,7 +26,7 @@ func getRequiredKeysForModelProvider(providerType v1alpha2.ModelProvider) []stri
case v1alpha2.ModelProviderAzureOpenAI:
// Based on the +required comments in the AzureOpenAIConfig struct definition
return []string{"azureEndpoint", "apiVersion"}
- case v1alpha2.ModelProviderOpenAI, v1alpha2.ModelProviderAnthropic, v1alpha2.ModelProviderOllama:
+ case v1alpha2.ModelProviderOpenAI, v1alpha2.ModelProviderAnthropic, v1alpha2.ModelProviderOllama, v1alpha2.ModelProviderXAI:
// These providers currently have no fields marked as strictly required in the API definition
return []string{}
default:
@@ -101,6 +101,7 @@ func (h *ProviderHandler) HandleListSupportedModelProviders(w ErrorResponseWrite
{v1alpha2.ModelProviderGemini, reflect.TypeFor[v1alpha2.GeminiConfig]()},
{v1alpha2.ModelProviderGeminiVertexAI, reflect.TypeFor[v1alpha2.GeminiVertexAIConfig]()},
{v1alpha2.ModelProviderAnthropicVertexAI, reflect.TypeFor[v1alpha2.AnthropicVertexAIConfig]()},
+ {v1alpha2.ModelProviderXAI, reflect.TypeFor[v1alpha2.XAIConfig]()},
}
providersResponse := []map[string]any{}
diff --git a/go/pkg/client/api/types.go b/go/pkg/client/api/types.go
index a3a4b0e82..6180093ba 100644
--- a/go/pkg/client/api/types.go
+++ b/go/pkg/client/api/types.go
@@ -65,6 +65,7 @@ type CreateModelConfigRequest struct {
GeminiParams *v1alpha2.GeminiConfig `json:"gemini,omitempty"`
GeminiVertexAIParams *v1alpha2.GeminiVertexAIConfig `json:"geminiVertexAI,omitempty"`
AnthropicVertexAIParams *v1alpha2.AnthropicVertexAIConfig `json:"anthropicVertexAI,omitempty"`
+ XAIParams *v1alpha2.XAIConfig `json:"xAI,omitempty"`
}
// UpdateModelConfigRequest represents a request to update a model configuration
@@ -79,6 +80,7 @@ type UpdateModelConfigRequest struct {
GeminiParams *v1alpha2.GeminiConfig `json:"gemini,omitempty"`
GeminiVertexAIParams *v1alpha2.GeminiVertexAIConfig `json:"geminiVertexAI,omitempty"`
AnthropicVertexAIParams *v1alpha2.AnthropicVertexAIConfig `json:"anthropicVertexAI,omitempty"`
+ XAIParams *v1alpha2.XAIConfig `json:"xAI,omitempty"`
}
// Agent types
diff --git a/helm/kagent-crds/templates/kagent.dev_modelconfigs.yaml b/helm/kagent-crds/templates/kagent.dev_modelconfigs.yaml
index 74153526d..18af7911f 100644
--- a/helm/kagent-crds/templates/kagent.dev_modelconfigs.yaml
+++ b/helm/kagent-crds/templates/kagent.dev_modelconfigs.yaml
@@ -256,7 +256,50 @@ spec:
- Gemini
- GeminiVertexAI
- AnthropicVertexAI
+ - XAI
type: string
+ xAI:
+ description: xAI-specific configuration
+ properties:
+ baseUrl:
+ description: Base URL for the OpenAI API (overrides default)
+ type: string
+ frequencyPenalty:
+ description: Frequency penalty
+ type: string
+ liveSearchMode:
+ description: Live search mode for real-time data retrieval
+ type: string
+ maxTokens:
+ description: Maximum tokens to generate
+ type: integer
+ "n":
+ description: N value
+ type: integer
+ organization:
+ description: Organization ID for the OpenAI API
+ type: string
+ presencePenalty:
+ description: Presence penalty
+ type: string
+ seed:
+ description: Seed value
+ type: integer
+ temperature:
+ description: Temperature for sampling
+ type: string
+ timeout:
+ description: Timeout
+ type: integer
+ tools:
+ description: Server-side tools to enable
+ items:
+ type: string
+ type: array
+ topP:
+ description: Top-p sampling parameter
+ type: string
+ type: object
required:
- model
- provider
@@ -278,6 +321,8 @@ spec:
- message: provider.anthropicVertexAI must be nil if the provider is not
AnthropicVertexAI
rule: '!(has(self.anthropicVertexAI) && self.provider != ''AnthropicVertexAI'')'
+ - message: provider.xAI must be nil if the provider is not XAI
+ rule: '!(has(self.xAI) && self.provider != ''XAI'')'
status:
description: ModelConfigStatus defines the observed state of ModelConfig.
properties:
@@ -576,6 +621,7 @@ spec:
- Gemini
- GeminiVertexAI
- AnthropicVertexAI
+ - XAI
type: string
tls:
description: |-
@@ -615,6 +661,56 @@ spec:
Production deployments MUST use proper certificates.
type: boolean
type: object
+ xAI:
+ description: xAI-specific configuration
+ properties:
+ baseUrl:
+ description: Base URL for the OpenAI API (overrides default)
+ type: string
+ frequencyPenalty:
+ description: Frequency penalty
+ type: string
+ liveSearchMode:
+ description: Live search mode for real-time data retrieval
+ type: string
+ maxTokens:
+ description: Maximum tokens to generate
+ type: integer
+ "n":
+ description: N value
+ type: integer
+ organization:
+ description: Organization ID for the OpenAI API
+ type: string
+ presencePenalty:
+ description: Presence penalty
+ type: string
+ reasoningEffort:
+ description: Reasoning effort
+ enum:
+ - minimal
+ - low
+ - medium
+ - high
+ type: string
+ seed:
+ description: Seed value
+ type: integer
+ temperature:
+ description: Temperature for sampling
+ type: string
+ timeout:
+ description: Timeout
+ type: integer
+ tools:
+ description: Server-side tools to enable
+ items:
+ type: string
+ type: array
+ topP:
+ description: Top-p sampling parameter
+ type: string
+ type: object
required:
- model
- provider
@@ -636,6 +732,8 @@ spec:
- message: provider.anthropicVertexAI must be nil if the provider is not
AnthropicVertexAI
rule: '!(has(self.anthropicVertexAI) && self.provider != ''AnthropicVertexAI'')'
+ - message: provider.xAI must be nil if the provider is not XAI
+ rule: '!(has(self.xAI) && self.provider != ''XAI'')'
- message: apiKeySecret must be set if apiKeySecretKey is set
rule: '!(has(self.apiKeySecretKey) && !has(self.apiKeySecret))'
- message: apiKeySecretKey must be set if apiKeySecret is set
diff --git a/python/packages/kagent-adk/src/kagent/adk/models/__init__.py b/python/packages/kagent-adk/src/kagent/adk/models/__init__.py
index 19c7943ba..09810b878 100644
--- a/python/packages/kagent-adk/src/kagent/adk/models/__init__.py
+++ b/python/packages/kagent-adk/src/kagent/adk/models/__init__.py
@@ -1,3 +1,3 @@
-from ._openai import AzureOpenAI, OpenAI
+from ._openai import AzureOpenAI, OpenAI, XAI
-__all__ = ["OpenAI", "AzureOpenAI"]
+__all__ = ["OpenAI", "AzureOpenAI", "XAI"]
diff --git a/python/packages/kagent-adk/src/kagent/adk/models/_openai.py b/python/packages/kagent-adk/src/kagent/adk/models/_openai.py
index 30fda6e9b..36c18da17 100644
--- a/python/packages/kagent-adk/src/kagent/adk/models/_openai.py
+++ b/python/packages/kagent-adk/src/kagent/adk/models/_openai.py
@@ -485,3 +485,38 @@ def _client(self) -> AsyncAzureOpenAI:
default_headers=self.default_headers,
http_client=http_client,
)
+
+
+class XAI(BaseOpenAI):
+ """XAI (xAI Grok) model implementation."""
+
+ type: Literal["xai"]
+ # XAI-specific fields
+ tools: Optional[list[str]] = None
+ live_search_mode: Optional[str] = None
+
+ def __init__(self, **data):
+ """Initialize XAI model with default base URL if not provided."""
+ # Set default base URL if not provided or empty
+ if "base_url" not in data or not data["base_url"]:
+ data["base_url"] = "https://api.x.ai/v1"
+ super().__init__(**data)
+
+ @cached_property
+ def _client(self) -> AsyncOpenAI:
+ """Get the XAI client with optional custom SSL configuration."""
+ http_client = self._create_http_client()
+ api_key = self.api_key or os.environ.get("XAI_API_KEY")
+
+ return AsyncOpenAI(
+ api_key=api_key,
+ base_url=self.base_url or None,
+ default_headers=self.default_headers,
+ timeout=self.timeout,
+ http_client=http_client,
+ )
+
+ @classmethod
+ def supported_models(cls) -> list[str]:
+ """Returns a list of supported models in regex for LlmRegistry."""
+ return [r"grok-.*"]
diff --git a/python/packages/kagent-adk/src/kagent/adk/types.py b/python/packages/kagent-adk/src/kagent/adk/types.py
index 90350e95d..1360b1c31 100644
--- a/python/packages/kagent-adk/src/kagent/adk/types.py
+++ b/python/packages/kagent-adk/src/kagent/adk/types.py
@@ -19,6 +19,7 @@
from .models import AzureOpenAI as OpenAIAzure
from .models import OpenAI as OpenAINative
+from .models import XAI as XAINative
logger = logging.getLogger(__name__)
@@ -66,6 +67,13 @@ class OpenAI(BaseLLM):
type: Literal["openai"]
+class XAI(OpenAI):
+ tools: list[str] | None = None
+ live_search_mode: str | None = None
+
+ type: Literal["xai"]
+
+
class AzureOpenAI(BaseLLM):
type: Literal["azure_openai"]
@@ -93,7 +101,7 @@ class Gemini(BaseLLM):
class AgentConfig(BaseModel):
- model: Union[OpenAI, Anthropic, GeminiVertexAI, GeminiAnthropic, Ollama, AzureOpenAI, Gemini] = Field(
+ model: Union[OpenAI, Anthropic, GeminiVertexAI, GeminiAnthropic, Ollama, AzureOpenAI, Gemini, XAI] = Field(
discriminator="type"
)
description: str
@@ -188,6 +196,29 @@ def to_agent(self, name: str, sts_integration: Optional[ADKTokenPropagationPlugi
)
elif self.model.type == "gemini":
model = self.model.model
+ elif self.model.type == "xai":
+ model = XAINative(
+ type="xai",
+ base_url=self.model.base_url,
+ default_headers=extra_headers,
+ frequency_penalty=self.model.frequency_penalty,
+ max_tokens=self.model.max_tokens,
+ model=self.model.model,
+ n=self.model.n,
+ presence_penalty=self.model.presence_penalty,
+ reasoning_effort=self.model.reasoning_effort,
+ seed=self.model.seed,
+ temperature=self.model.temperature,
+ timeout=self.model.timeout,
+ top_p=self.model.top_p,
+                # XAI-specific fields (plumbed through here; not yet visibly used when building requests)
+ tools=self.model.tools,
+ live_search_mode=self.model.live_search_mode,
+ # TLS configuration
+ tls_disable_verify=self.model.tls_disable_verify,
+ tls_ca_cert_path=self.model.tls_ca_cert_path,
+ tls_disable_system_cas=self.model.tls_disable_system_cas,
+ )
else:
raise ValueError(f"Invalid model type: {self.model.type}")
return Agent(
diff --git a/ui/src/app/models/new/page.tsx b/ui/src/app/models/new/page.tsx
index c2df5853d..7e9e53e84 100644
--- a/ui/src/app/models/new/page.tsx
+++ b/ui/src/app/models/new/page.tsx
@@ -18,7 +18,8 @@ import type {
ProviderModelsResponse,
GeminiConfigPayload,
GeminiVertexAIConfigPayload,
- AnthropicVertexAIConfigPayload
+ AnthropicVertexAIConfigPayload,
+ XAIConfigPayload
} from "@/types";
import { toast } from "sonner";
import { isResourceNameValid, createRFC1123ValidName } from "@/lib/utils";
@@ -69,6 +70,11 @@ const processModelParams = (requiredParams: ModelParam[], optionalParams: ModelP
'stream'
]);
+ // Special handling for array parameters
+ const arrayKeys = new Set([
+ 'tools'
+ ]);
+
Object.entries(allParams).forEach(([key, value]) => {
if (numericKeys.has(key)) {
const numValue = parseFloat(value);
@@ -89,6 +95,10 @@ const processModelParams = (requiredParams: ModelParam[], optionalParams: ModelP
console.warn(`Invalid boolean for parameter '${key}': '${value}'. Treating as false.`);
providerParams[key] = false;
}
+ } else if (arrayKeys.has(key)) {
+ if (value.trim() !== '') {
+ providerParams[key] = value.split(',').map(t => t.trim()).filter(t => t !== '');
+ }
} else {
if (value.trim() !== '') {
providerParams[key] = value;
@@ -231,7 +241,14 @@ function ModelPageContent() {
const initialOptional: ModelParam[] = Object.entries(fetchedParams)
.filter(([key]) => !requiredKeys.includes(key))
.map(([key, value], index) => {
- const displayValue = (value === null || value === undefined) ? "" : String(value);
+ let displayValue = "";
+ if (value !== null && value !== undefined) {
+ if (Array.isArray(value)) {
+ displayValue = value.join(', ');
+ } else {
+ displayValue = String(value);
+ }
+ }
return { id: `fetched-opt-${index}`, key, value: displayValue };
});
@@ -453,6 +470,9 @@ function ModelPageContent() {
case 'AnthropicVertexAI':
payload.anthropicVertexAI = providerParams as AnthropicVertexAIConfigPayload;
break;
+ case 'XAI':
+ payload.xAI = providerParams as XAIConfigPayload;
+ break;
default:
console.error("Unsupported provider type during payload construction:", providerType);
toast.error("Internal error: Unsupported provider type.");
@@ -471,6 +491,10 @@ function ModelPageContent() {
anthropic: payload.anthropic,
azureOpenAI: payload.azureOpenAI,
ollama: payload.ollama,
+ gemini: payload.gemini,
+ geminiVertexAI: payload.geminiVertexAI,
+ anthropicVertexAI: payload.anthropicVertexAI,
+ xAI: payload.xAI,
};
const modelConfigRef = k8sRefUtils.toRef(modelConfigNamespace || '', modelConfigName);
response = await updateModelConfig(modelConfigRef, updatePayload);
@@ -610,8 +634,3 @@ export default function ModelPage() {
);
}
-
-
-
-
-
diff --git a/ui/src/components/ModelProviderCombobox.tsx b/ui/src/components/ModelProviderCombobox.tsx
index 98ede6b02..34a26c9b1 100644
--- a/ui/src/components/ModelProviderCombobox.tsx
+++ b/ui/src/components/ModelProviderCombobox.tsx
@@ -11,6 +11,7 @@ import { Anthropic } from './icons/Anthropic';
import { Ollama } from './icons/Ollama';
import { Azure } from './icons/Azure';
import { Gemini } from './icons/Gemini';
+import { XAI } from './icons/XAI';
interface ComboboxOption {
label: string; // e.g., "OpenAI - gpt-4o"
@@ -64,6 +65,7 @@ export function ModelProviderCombobox({
'Gemini': Gemini,
'GeminiVertexAI': Gemini,
'AnthropicVertexAI': Anthropic,
+ 'XAI': XAI,
};
if (!providerKey || !PROVIDER_ICONS[providerKey]) {
return null;
@@ -199,4 +201,4 @@ export function ModelProviderCombobox({
);
-}
\ No newline at end of file
+}
diff --git a/ui/src/components/icons/XAI.tsx b/ui/src/components/icons/XAI.tsx
new file mode 100644
index 000000000..a99bc444e
--- /dev/null
+++ b/ui/src/components/icons/XAI.tsx
@@ -0,0 +1,12 @@
+export function XAI({ className }: { className?: string }) {
+ return (
+
+ );
+}
diff --git a/ui/src/components/onboarding/steps/ModelConfigStep.tsx b/ui/src/components/onboarding/steps/ModelConfigStep.tsx
index 126ba52e4..c7f1cb476 100644
--- a/ui/src/components/onboarding/steps/ModelConfigStep.tsx
+++ b/ui/src/components/onboarding/steps/ModelConfigStep.tsx
@@ -192,6 +192,7 @@ export function ModelConfigStep({
case 'Gemini': payload.gemini = {}; break;
case 'GeminiVertexAI': payload.geminiVertexAI = {}; break;
case 'AnthropicVertexAI': payload.anthropicVertexAI = {}; break;
+ case 'XAI': payload.xAI = {}; break;
case 'Ollama':
const modelTag = values.modelTag?.trim() || "";
if (modelTag && modelTag !== OLLAMA_DEFAULT_TAG) {
diff --git a/ui/src/lib/providers.ts b/ui/src/lib/providers.ts
index 79246c174..f8ec0c3e5 100644
--- a/ui/src/lib/providers.ts
+++ b/ui/src/lib/providers.ts
@@ -1,6 +1,6 @@
-export type BackendModelProviderType = "OpenAI" | "AzureOpenAI" | "Anthropic" | "Ollama" | "Gemini" | "GeminiVertexAI" | "AnthropicVertexAI";
-export const modelProviders = ["OpenAI", "AzureOpenAI", "Anthropic", "Ollama", "Gemini", "GeminiVertexAI", "AnthropicVertexAI"] as const;
+export type BackendModelProviderType = "OpenAI" | "AzureOpenAI" | "Anthropic" | "Ollama" | "Gemini" | "GeminiVertexAI" | "AnthropicVertexAI" | "XAI";
+export const modelProviders = ["OpenAI", "AzureOpenAI", "Anthropic", "Ollama", "Gemini", "GeminiVertexAI", "AnthropicVertexAI", "XAI"] as const;
export type ModelProviderKey = typeof modelProviders[number];
@@ -62,6 +62,13 @@ export const PROVIDERS_INFO: {
modelDocsLink: "https://cloud.google.com/vertex-ai/docs",
help: "Configure your Google Cloud project and credentials for Vertex AI."
},
+ XAI: {
+ name: "xAI",
+ type: "XAI",
+ apiKeyLink: "https://console.x.ai",
+ modelDocsLink: "https://docs.x.ai/docs/models",
+ help: "Get your API key from xAI."
+ },
};
export const isValidProviderInfoKey = (key: string): key is ModelProviderKey => {
@@ -87,4 +94,4 @@ export const getProviderFormKey = (providerType: BackendModelProviderType): Mode
}
}
return undefined;
-}
\ No newline at end of file
+}
diff --git a/ui/src/types/index.ts b/ui/src/types/index.ts
index 075ae5d88..49f877d26 100644
--- a/ui/src/types/index.ts
+++ b/ui/src/types/index.ts
@@ -108,6 +108,12 @@ export interface AnthropicVertexAIConfigPayload {
topK?: number;
}
+// XAI uses OpenAI-compatible API, so extend OpenAI config and add XAI-specific fields
+export interface XAIConfigPayload extends OpenAIConfigPayload {
+ tools?: string[];
+ liveSearchMode?: string;
+}
+
export interface CreateModelConfigRequest {
ref: string;
provider: Pick;
@@ -120,6 +126,7 @@ export interface CreateModelConfigRequest {
gemini?: GeminiConfigPayload;
geminiVertexAI?: GeminiVertexAIConfigPayload;
anthropicVertexAI?: AnthropicVertexAIConfigPayload;
+ xAI?: XAIConfigPayload;
}
export interface UpdateModelConfigPayload {
@@ -133,6 +140,7 @@ export interface UpdateModelConfigPayload {
gemini?: GeminiConfigPayload;
geminiVertexAI?: GeminiVertexAIConfigPayload;
anthropicVertexAI?: AnthropicVertexAIConfigPayload;
+ xAI?: XAIConfigPayload;
}
/**