Commit 1f82c4e

ccurme and mdrxy authored
feat(genai): support gemini 3 (#1365)
Co-authored-by: Mason Daugherty <[email protected]>
1 parent 39e4f5a commit 1f82c4e

File tree

10 files changed: +613 −107 lines changed

libs/genai/langchain_google_genai/_common.py

Lines changed: 21 additions & 1 deletion
```diff
@@ -56,6 +56,12 @@ class _BaseGoogleGenerativeAI(BaseModel):
     """Run inference with this temperature.
 
     Must be within `[0.0, 2.0]`.
+
+    !!! warning "Gemini 3.0+ models"
+
+        Setting `temperature < 1.0` for Gemini 3.0+ models can cause infinite loops,
+        degraded reasoning performance, and failure on complex tasks.
+
     """
 
     top_p: float | None = None
@@ -163,7 +169,21 @@ class _BaseGoogleGenerativeAI(BaseModel):
     media_resolution: MediaResolution | None = Field(
         default=None,
     )
-    """Media resolution for the input media."""
+    """Media resolution for the input media.
+
+    May be defined at the individual part level, allowing for mixed-resolution requests
+    (e.g., images and videos of different resolutions in the same request).
+
+    May be `'low'`, `'medium'`, or `'high'`.
+
+    Can be set either per-part or globally for all media inputs in the request. To set
+    globally, set in the `generation_config`.
+
+    !!! warning "Model compatibility"
+
+        Setting per-part media resolution requests to Gemini 2.5 models is not
+        supported.
+    """
 
     thinking_budget: int | None = Field(
         default=None,
```
libs/genai/langchain_google_genai/chat_models.py

Lines changed: 223 additions & 27 deletions
Large diffs are not rendered by default.
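The chat-model diff is collapsed here, but given the commit title, end-to-end usage presumably follows the existing pattern with a Gemini 3 model. A minimal sketch (the model id is assumed for illustration):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-3-pro-preview")  # hypothetical id
response = llm.invoke("Hello!")
print(response.text)  # `.text` collapses block-list content into a plain string
```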

libs/genai/langchain_google_genai/llms.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -124,7 +124,7 @@ def _generate(
             generations.append(
                 [
                     Generation(
-                        text=g.message.content,
+                        text=g.message.text,
                         generation_info={
                             **g.generation_info,
                             "usage_metadata": g.message.usage_metadata,
@@ -148,7 +148,7 @@ def _stream(
             run_manager=run_manager,
             **kwargs,
         ):
-            chunk = GenerationChunk(text=stream_chunk.message.content)
+            chunk = GenerationChunk(text=stream_chunk.message.text)
             yield chunk
             if run_manager:
                 run_manager.on_llm_new_token(
```
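The `.content` → `.text` switch matters because a message's `content` may now arrive as a list of typed content blocks rather than a plain string, while `Generation(text=...)` needs a string; `.text` collapses block-list content to its plain text. A minimal illustration, using the block shape the updated tests below assume:

```python
from langchain_core.messages import AIMessage

msg = AIMessage(content=[{"type": "text", "text": "3^3 = 27"}])
assert isinstance(msg.content, list)  # content can be a list of blocks
assert msg.text == "3^3 = 27"  # .text is always the concatenated plain text
```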

libs/genai/pyproject.toml

Lines changed: 3 additions & 3 deletions
```diff
@@ -9,11 +9,11 @@ license = {text = "MIT"}
 readme = "README.md"
 authors = []
 
-version = "3.0.3"
+version = "3.1.0"
 requires-python = ">=3.10.0,<4.0.0"
 dependencies = [
-    "langchain-core>=1.0.0,<2.0.0",
-    "google-ai-generativelanguage>=0.7.0,<1.0.0",
+    "langchain-core>=1.0.5,<2.0.0",
+    "google-ai-generativelanguage>=0.9.0,<1.0.0",
     "pydantic>=2.0.0,<3.0.0",
     "filetype>=1.2.0,<2.0.0",
 ]
```

libs/genai/tests/integration_tests/test_chat_models.py

Lines changed: 136 additions & 30 deletions
```diff
@@ -75,8 +75,16 @@ def _check_usage_metadata(message: AIMessage) -> None:
 def _check_tool_calls(response: BaseMessage, expected_name: str) -> None:
     """Check tool calls are as expected."""
     assert isinstance(response, AIMessage)
-    assert isinstance(response.content, str)
-    assert response.content == ""
+    if isinstance(response.content, list):
+        text_content = "".join(
+            block.get("text", "")
+            for block in response.content
+            if isinstance(block, dict) and block.get("type") == "text"
+        )
+        assert text_content == ""
+    else:
+        assert isinstance(response.content, str)
+        assert response.content == ""
 
     # function_call
     function_call = response.additional_kwargs.get("function_call")
@@ -120,7 +128,15 @@ async def test_chat_google_genai_batch(is_async: bool, with_tags: bool) -> None:
     result = llm.batch(cast("list", messages), config=config)
 
     for token in result:
-        assert isinstance(token.content, str)
+        if isinstance(token.content, list):
+            text_content = "".join(
+                block.get("text", "")
+                for block in token.content
+                if isinstance(block, dict) and block.get("type") == "text"
+            )
+            assert len(text_content) > 0
+        else:
+            assert isinstance(token.content, str)
 
 
 @pytest.mark.parametrize("is_async", [False, True])
```
```diff
@@ -140,9 +156,19 @@ async def test_chat_google_genai_invoke(is_async: bool) -> None:
         config={"tags": ["foo"]},
         generation_config={"top_k": 2, "top_p": 1, "temperature": 0.7},
     )
+
     assert isinstance(result, AIMessage)
-    assert isinstance(result.content, str)
-    assert not result.content.startswith(" ")
+    if isinstance(result.content, list):
+        text_content = "".join(
+            block.get("text", "")
+            for block in result.content
+            if isinstance(block, dict) and block.get("type") == "text"
+        )
+        assert len(text_content) > 0
+        assert not text_content.startswith(" ")
+    else:
+        assert isinstance(result.content, str)
+        assert not result.content.startswith(" ")
     _check_usage_metadata(result)
@@ -248,7 +274,15 @@ def test_chat_google_genai_invoke_thinking(
     )
 
     assert isinstance(result, AIMessage)
-    assert isinstance(result.content, str)
+    if isinstance(result.content, list):
+        text_content = "".join(
+            block.get("text", "")
+            for block in result.content
+            if isinstance(block, dict) and block.get("type") == "text"
+        )
+        assert len(text_content) > 0
+    else:
+        assert isinstance(result.content, str)
 
     _check_usage_metadata(result)
@@ -265,7 +299,10 @@ def _check_thinking_output(content: list, output_version: str) -> None:
     if output_version == "v0":
         thinking_key = "thinking"
         if content:
-            assert isinstance(content[-1], str)
+            if isinstance(content[-1], dict) and content[-1].get("type") == "text":
+                assert isinstance(content[-1].get("text"), str)
+            else:
+                assert isinstance(content[-1], str)
 
     else:  # v1
         thinking_key = "reasoning"
```
```diff
@@ -540,7 +577,7 @@ def simple_tool(query: str) -> str:
 
 def test_chat_google_genai_invoke_thinking_disabled() -> None:
     """Test invoking a thinking model with zero `thinking_budget`."""
-    llm = ChatGoogleGenerativeAI(model=_THINKING_MODEL, thinking_budget=0)
+    llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", thinking_budget=0)
 
     result = llm.invoke(
         "How many O's are in Google? Please tell me how you double checked the result",
@@ -566,8 +603,17 @@ def test_chat_google_genai_invoke_no_image_generation_without_modalities() -> No
         generation_config={"top_k": 2, "top_p": 1, "temperature": 0.7},
     )
     assert isinstance(result, AIMessage)
-    assert isinstance(result.content, str)
-    assert not result.content.startswith(" ")
+    if isinstance(result.content, list):
+        text_content = "".join(
+            block.get("text", "")
+            for block in result.content
+            if isinstance(block, dict) and block.get("type") == "text"
+        )
+        assert len(text_content) > 0
+        assert not text_content.startswith(" ")
+    else:
+        assert isinstance(result.content, str)
+        assert not result.content.startswith(" ")
     _check_usage_metadata(result)
```
```diff
@@ -617,16 +663,33 @@ def test_chat_google_genai_multimodal(
         any_chunk = False
         for chunk in llm.stream(messages):
             print(chunk)  # noqa: T201
-            assert isinstance(chunk.content, str)
-            if chunk.content:
-                any_chunk = True
+            if isinstance(chunk.content, list):
+                text_content = "".join(
+                    block.get("text", "")
+                    for block in chunk.content
+                    if isinstance(block, dict) and block.get("type") == "text"
+                )
+                if text_content:
+                    any_chunk = True
+            else:
+                assert isinstance(chunk.content, str)
+                if chunk.content:
+                    any_chunk = True
         assert any_chunk
     else:
         # Test invoke
         response = llm.invoke(messages)
         assert isinstance(response, AIMessage)
-        assert isinstance(response.content, str)
-        assert len(response.content.strip()) > 0
+        if isinstance(response.content, list):
+            text_content = "".join(
+                block.get("text", "")
+                for block in response.content
+                if isinstance(block, dict) and block.get("type") == "text"
+            )
+            assert len(text_content.strip()) > 0
+        else:
+            assert isinstance(response.content, str)
+            assert len(response.content.strip()) > 0
 
 
 @pytest.mark.parametrize(
```
```diff
@@ -674,7 +737,15 @@ def test_chat_google_genai_single_call_with_history() -> None:
     message3 = HumanMessage(content=text_question2)
     response = model.invoke([message1, message2, message3])
     assert isinstance(response, AIMessage)
-    assert isinstance(response.content, str)
+    if isinstance(response.content, list):
+        text_content = "".join(
+            block.get("text", "")
+            for block in response.content
+            if isinstance(block, dict) and block.get("type") == "text"
+        )
+        assert len(text_content) > 0
+    else:
+        assert isinstance(response.content, str)
 
 
 @pytest.mark.parametrize(
@@ -700,7 +771,15 @@ def test_chat_google_genai_system_message(
     message3 = HumanMessage(content=text_question2)
     response = model.invoke([system_message, message1, message2, message3])
     assert isinstance(response, AIMessage)
-    assert isinstance(response.content, str)
+    if isinstance(response.content, list):
+        text_content = "".join(
+            block.get("text", "")
+            for block in response.content
+            if isinstance(block, dict) and block.get("type") == "text"
+        )
+        assert len(text_content) > 0
+    else:
+        assert isinstance(response.content, str)
 
 
 def test_generativeai_get_num_tokens_gemini() -> None:
```
```diff
@@ -788,13 +867,28 @@ def search(
     assert len(tool_messages) > 0
     assert len(response.tool_calls) == len(tool_messages)
 
-    result = llm_with_search.invoke([request, response, *tool_messages])
+    follow_up = HumanMessage(
+        content=(
+            "Based on the search results above, what did you find about the bird "
+            "colors?"
+        )
+    )
+    result = llm_with_search.invoke([request, response, *tool_messages, follow_up])
 
     assert isinstance(result, AIMessage)
-    content_str = (
-        result.content if isinstance(result.content, str) else str(result.content)
-    )
-    assert "brown" in content_str.lower()
+
+    if isinstance(result.content, list):
+        text_content = "".join(
+            block.get("text", "")
+            for block in result.content
+            if isinstance(block, dict) and block.get("type") == "text"
+        )
+        assert "brown" in text_content.lower()
+    else:
+        content_str = (
+            result.content if isinstance(result.content, str) else str(result.content)
+        )
+        assert "brown" in content_str.lower()
 
 
 def test_chat_vertexai_gemini_function_calling() -> None:
```
```diff
@@ -862,9 +956,8 @@ def my_tool(name: str, age: int, likes: list[str]) -> None:
     # Test .content_blocks property
     content_blocks = response.content_blocks
     assert isinstance(content_blocks, list)
-    assert len(content_blocks) == 1
-    assert isinstance(content_blocks[0], dict)
-    assert content_blocks[0].get("type") == "tool_call"
+    tool_call_blocks = [b for b in content_blocks if b.get("type") == "tool_call"]
+    assert len(tool_call_blocks) == 1
 
 
 @pytest.mark.flaky(retries=3, delay=1)
@@ -1027,9 +1120,11 @@ def _check_web_search_output(message: AIMessage, output_version: str) -> None:
     # Lazy parsing
     content_blocks = message.content_blocks
     text_blocks = [block for block in content_blocks if block["type"] == "text"]
-    assert len(text_blocks) == 1
-    text_block = text_blocks[0]
-    assert text_block.get("annotations")
+    assert len(text_blocks) >= 1
+
+    # Check that at least one block has annotations
+    text_block = next((b for b in text_blocks if b.get("annotations")), None)
+    assert text_block is not None
 
     if output_version == "v1":
         text_blocks = [block for block in message.content if block["type"] == "text"]  # type: ignore[misc,index]
```
```diff
@@ -1186,8 +1281,19 @@ def test_search_builtin_with_citations(use_streaming: bool) -> None:
 def _check_code_execution_output(message: AIMessage, output_version: str) -> None:
     if output_version == "v0":
         blocks = [block for block in message.content if isinstance(block, dict)]
+        # Find code execution blocks
+        code_blocks = [
+            block
+            for block in blocks
+            if block.get("type") in {"executable_code", "code_execution_result"}
+        ]
+        # For integration test, code execution must happen
+        assert code_blocks, (
+            f"No code execution blocks found in content: "
+            f"{[block.get('type') for block in blocks]}"
+        )
         expected_block_types = {"executable_code", "code_execution_result"}
-        assert {block.get("type") for block in blocks} == expected_block_types
+        assert {block.get("type") for block in code_blocks} == expected_block_types
 
     else:
         # v1
@@ -1207,7 +1313,7 @@ def test_code_execution_builtin(output_version: str) -> None:
     ).bind_tools([{"code_execution": {}}])
     input_message = {
         "role": "user",
-        "content": "What is 3^3?",
+        "content": "Calculate the value of 3^3 using Python code execution.",
     }
 
     full: BaseMessageChunk | None = None
```
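Nearly every updated test repeats the same extract-text-from-block-list pattern. A small helper could consolidate it; this is a sketch of my own, not part of the commit:

```python
from langchain_core.messages import BaseMessage


def _text_content(message: BaseMessage) -> str:
    """Collapse string-or-block-list message content to its plain text."""
    if isinstance(message.content, list):
        return "".join(
            block.get("text", "")
            for block in message.content
            if isinstance(block, dict) and block.get("type") == "text"
        )
    assert isinstance(message.content, str)
    return message.content
```

Each assertion site would then reduce to something like `assert len(_text_content(result)) > 0`.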

libs/genai/tests/integration_tests/test_function_call.py

Lines changed: 0 additions & 4 deletions
```diff
@@ -61,8 +61,6 @@ def search_tool(query: str) -> str:
     llm = ChatGoogleGenerativeAI(model=model_name).bind(functions=[search_tool])
     response = llm.invoke("weather in san francisco")
     assert isinstance(response, AIMessage)
-    assert isinstance(response.content, str)
-    assert response.content == ""
     function_call = response.additional_kwargs.get("function_call")
     assert function_call
     assert function_call["name"] == "search_tool"
@@ -85,8 +83,6 @@ def test_pydantic_call(model_name: str) -> None:
     llm = ChatGoogleGenerativeAI(model=model_name).bind(functions=[MyModel])
     response = llm.invoke("my name is Erick and I am 27 years old")
     assert isinstance(response, AIMessage)
-    assert isinstance(response.content, str)
-    assert response.content == ""
     function_call = response.additional_kwargs.get("function_call")
     assert function_call
     assert function_call["name"] == "MyModel"
```

libs/genai/tests/integration_tests/test_llms.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -102,7 +102,7 @@ def test_safety_settings_gemini(model_name: str) -> None:
     streamed_messages = list(output_stream)
     assert len(streamed_messages) > 0
 
-    # test with safety filters on instantiation
+    # test with safety filters on instantiation
     llm = GoogleGenerativeAI(
         model=model_name,
         safety_settings=safety_settings,
```
