Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions crates/coverage-report/src/requests_expected_differences.json
Original file line number Diff line number Diff line change
Expand Up @@ -440,6 +440,15 @@
"fields": [
{ "pattern": "messages.length", "reason": "Parallel tool results grouped in one Tool message expand to separate function_call_output items in Responses API" }
]
},
{
"testCase": "simpleRequestTruncated",
"source": "Google",
"target": "*",
"fields": [
{ "pattern": "messages[*].content", "reason": "Empty assistant content (no parts) from truncated Gemini response gains a placeholder empty text part when roundtripped through other providers" },
{ "pattern": "messages.length", "reason": "Empty assistant content from truncated Gemini response may change message count when roundtripped through providers that split/merge messages" }
]
}
]
}
Original file line number Diff line number Diff line change
Expand Up @@ -283,6 +283,15 @@
"fields": [
{ "pattern": "messages.length", "reason": "Google reasoning content expands to separate output items in Responses format" }
]
},
{
"testCase": "simpleRequestTruncated",
"source": "Google",
"target": "*",
"fields": [
{ "pattern": "messages[*].content", "reason": "Empty assistant content (no parts) from truncated Gemini response gains a placeholder empty text part when roundtripped through other providers" },
{ "pattern": "messages.length", "reason": "Empty assistant content from truncated Gemini response may change message count when roundtripped through providers that split/merge messages" }
]
}
]
}
16 changes: 12 additions & 4 deletions crates/lingua/src/providers/google/convert.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,9 +121,17 @@ impl TryFromLLM<GoogleContent> for Message {
.ok_or(ConvertError::MissingRequiredField {
field: "role".to_string(),
})?;
let parts = content.parts.ok_or(ConvertError::MissingRequiredField {
field: "parts".to_string(),
})?;
// Only allow missing parts for "model" role — Gemini may omit parts when
// maxOutputTokens is exhausted (MAX_TOKENS finish reason). For other roles,
// missing parts means this isn't a Google-format message (e.g. Anthropic
// messages have "content" not "parts").
let parts: Vec<GooglePart> = if role == "model" {
content.parts.unwrap_or_default()
} else {
content.parts.ok_or(ConvertError::MissingRequiredField {
field: "parts".to_string(),
})?
};

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The strict requirement that `parts` always be present was causing this error:

FAILED test_openai.py::test_max_tokens - Failed: gemini-2.5-flash: BadRequestError: Conversion to universal format failed: Missing required field: parts

the new response is now:

{
  "choices": [
    {
      "finish_reason": "length",
      "index": 0,
      "message": {
        "annotations": [],
        "role": "assistant"
      }
    }
  ],
  "created": 0,
  "id": "chatcmpl-transformed",
  "model": "gemini-2.5-flash",
  "object": "chat.completion",
  "usage": {
    "completion_tokens": 0,
    "prompt_tokens": 10,
    "total_tokens": 10
  }
}

match role {
"model" => {
Expand Down Expand Up @@ -454,7 +462,7 @@ impl TryFromLLM<Message> for GoogleContent {

Ok(GoogleContent {
role: Some(role),
parts: Some(parts),
parts: if parts.is_empty() { None } else { Some(parts) },
})
}
}
Expand Down
47 changes: 47 additions & 0 deletions payloads/cases/simple.ts
Original file line number Diff line number Diff line change
Expand Up @@ -270,6 +270,53 @@ export const simpleCases: TestCaseCollection = {
},
},

// Test case that forces a truncated response by capping output at a single
// token. This exercises the empty-assistant-content roundtrip path: Gemini
// may return a candidate with no `parts` at all when maxOutputTokens is
// exhausted (MAX_TOKENS finish reason), and the converters must tolerate it.
simpleRequestTruncated: {
  "chat-completions": {
    model: OPENAI_CHAT_COMPLETIONS_MODEL,
    messages: [
      {
        role: "user",
        content: "Write a very long essay about the ocean.",
      },
    ],
    // Chat Completions name for the output-token cap.
    max_completion_tokens: 1,
  },
  responses: {
    model: OPENAI_RESPONSES_MODEL,
    max_output_tokens: 1,
    input: [
      {
        role: "user",
        content: "Write a very long essay about the ocean.",
      },
    ],
  },
  anthropic: {
    model: ANTHROPIC_MODEL,
    // Anthropic requires max_tokens; 1 guarantees a "max_tokens" stop reason.
    max_tokens: 1,
    messages: [
      {
        role: "user",
        content: "Write a very long essay about the ocean.",
      },
    ],
  },
  google: {
    contents: [
      {
        role: "user",
        parts: [{ text: "Write a very long essay about the ocean." }],
      },
    ],
    generationConfig: {
      // Triggers the MAX_TOKENS finish reason this case exists to cover.
      maxOutputTokens: 1,
    },
  },
  // Providers without a payload for this case are skipped.
  bedrock: null,
  "bedrock-anthropic": null,
  "vertex-anthropic": null,
},

toolCallRequest: {
"chat-completions": {
model: OPENAI_CHAT_COMPLETIONS_MODEL,
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading
Loading