Skip to content

Commit bafc746

Browse files
authored
chore: cleanup docs, extra model_config declaration (#1033)
Fixes #988. Removes the `tunedModels/` prefix since it is no longer supported in GenAI, and ensures the `models/` prefix for LLM and Embedding models (while ensuring it is stripped for LangSmith metadata).
2 parents c1ff72f + e0808ec commit bafc746

File tree

6 files changed

+21
-12
lines changed

6 files changed

+21
-12
lines changed

libs/community/langchain_google_community/bq_storage_vectorstores/_base.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -79,10 +79,6 @@ class BaseBigQueryVectorStore(VectorStore, BaseModel, ABC):
7979
_logger: Any = None
8080
_full_table_id: Optional[str] = None
8181

82-
model_config = ConfigDict(
83-
arbitrary_types_allowed=True,
84-
)
85-
8682
@abstractmethod
8783
def sync_data(self) -> None:
8884
...

libs/genai/langchain_google_genai/__init__.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
.. code-block:: python
2323
pip install -U langchain-google-genai
2424
25-
## Using Chat Models
25+
**Using Chat Models**
2626
2727
After setting up your environment with the required API key, you can interact with the Google Gemini models.
2828
@@ -33,7 +33,7 @@
3333
llm = ChatGoogleGenerativeAI(model="gemini-pro")
3434
llm.invoke("Sing a ballad of LangChain.")
3535
36-
## Using LLMs
36+
**Using LLMs**
3737
3838
The package also supports generating text with Google's models.
3939
@@ -44,7 +44,7 @@
4444
llm = GoogleGenerativeAI(model="gemini-pro")
4545
llm.invoke("Once upon a time, a library called LangChain")
4646
47-
## Embedding Generation
47+
**Embedding Generation**
4848
4949
The package also supports creating embeddings with Google's models, useful for textual similarity and other NLP applications.
5050

libs/genai/langchain_google_genai/chat_models.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1232,9 +1232,7 @@ def validate_environment(self) -> Self:
12321232
if self.top_k is not None and self.top_k <= 0:
12331233
raise ValueError("top_k must be positive")
12341234

1235-
if not any(
1236-
self.model.startswith(prefix) for prefix in ("models/", "tunedModels/")
1237-
):
1235+
if not any(self.model.startswith(prefix) for prefix in ("models/",)):
12381236
self.model = f"models/{self.model}"
12391237

12401238
additional_headers = self.additional_headers or {}

libs/genai/langchain_google_genai/embeddings.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,9 @@ def validate_environment(self) -> Self:
9797
google_api_key = self.google_api_key
9898
client_info = get_client_info("GoogleGenerativeAIEmbeddings")
9999

100+
if not any(self.model.startswith(prefix) for prefix in ("models/",)):
101+
self.model = f"models/{self.model}"
102+
100103
self.client = build_generative_service(
101104
credentials=self.credentials,
102105
api_key=google_api_key,

libs/genai/langchain_google_genai/llms.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,9 @@ def __init__(self, **kwargs: Any) -> None:
6363
def validate_environment(self) -> Self:
6464
"""Validates params and passes them to google-generativeai package."""
6565

66+
if not any(self.model.startswith(prefix) for prefix in ("models/",)):
67+
self.model = f"models/{self.model}"
68+
6669
self.client = ChatGoogleGenerativeAI(
6770
api_key=self.google_api_key,
6871
credentials=self.credentials,
@@ -86,6 +89,15 @@ def _get_ls_params(
8689
"""Get standard params for tracing."""
8790
ls_params = super()._get_ls_params(stop=stop, **kwargs)
8891
ls_params["ls_provider"] = "google_genai"
92+
93+
models_prefix = "models/"
94+
ls_model_name = (
95+
self.model[len(models_prefix) :]
96+
if self.model and self.model.startswith(models_prefix)
97+
else self.model
98+
)
99+
ls_params["ls_model_name"] = ls_model_name
100+
89101
if ls_max_tokens := kwargs.get("max_output_tokens", self.max_output_tokens):
90102
ls_params["ls_max_tokens"] = ls_max_tokens
91103
return ls_params

libs/genai/tests/unit_tests/test_llms.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,9 +38,9 @@ def test_tracing_params() -> None:
3838
"HARM_CATEGORY_DANGEROUS_CONTENT": "BLOCK_LOW_AND_ABOVE"
3939
}, # Invalid arg
4040
)
41-
assert llm.model == "gemini-pro"
41+
assert llm.model == "models/gemini-pro"
4242
ls_params = llm._get_ls_params()
43-
assert ls_params["ls_model_name"] == "gemini-pro"
43+
assert ls_params.get("ls_model_name") == "gemini-pro"
4444
mock_warning.assert_called_once()
4545
call_args = mock_warning.call_args[0][0]
4646
assert "Unexpected argument 'safety_setting'" in call_args

0 commit comments

Comments (0)