diff --git a/ai-integrations/langchain-graphrag.ipynb b/ai-integrations/langchain-graphrag.ipynb
index 8ef1026..b8c7a57 100644
--- a/ai-integrations/langchain-graphrag.ipynb
+++ b/ai-integrations/langchain-graphrag.ipynb
@@ -25,7 +25,7 @@
},
"outputs": [],
"source": [
- "pip install --quiet --upgrade pymongo langchain_community wikipedia langchain_openai langchain_mongodb pyvis"
+ "pip install --quiet --upgrade pymongo langchain_community wikipedia langchain_openai langchain_mongodb langchain-text-splitters pyvis"
]
},
{
@@ -88,7 +88,7 @@
"outputs": [],
"source": [
"from langchain_community.document_loaders import WikipediaLoader\n",
- "from langchain.text_splitter import TokenTextSplitter\n",
+ "from langchain_text_splitters import TokenTextSplitter\n",
"\n",
"# Load Wikipedia pages corresponding to the query \"Sherlock Holmes\"\n",
"wikipedia_pages = WikipediaLoader(query=\"Sherlock Holmes\", load_max_docs=3).load()\n",
diff --git a/ai-integrations/langchain-local-rag.ipynb b/ai-integrations/langchain-local-rag.ipynb
index 190319b..d9e5b3c 100644
--- a/ai-integrations/langchain-local-rag.ipynb
+++ b/ai-integrations/langchain-local-rag.ipynb
@@ -56,7 +56,7 @@
},
"outputs": [],
"source": [
- "pip install --quiet --upgrade pymongo langchain langchain-community langchain-huggingface gpt4all pypdf"
+ "pip install --quiet --upgrade pymongo langchain langchain-community langchain-huggingface langchain-text-splitters gpt4all pypdf"
]
},
{
@@ -103,7 +103,7 @@
"outputs": [],
"source": [
"from langchain_community.document_loaders import PyPDFLoader\n",
- "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+ "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"# Load the PDF\n",
"loader = PyPDFLoader(\"https://investors.mongodb.com/node/13176/pdf\")\n",
diff --git a/ai-integrations/langchain-memory-semantic-cache.ipynb b/ai-integrations/langchain-memory-semantic-cache.ipynb
index ae3b4c1..7c76090 100644
--- a/ai-integrations/langchain-memory-semantic-cache.ipynb
+++ b/ai-integrations/langchain-memory-semantic-cache.ipynb
@@ -15,7 +15,7 @@
"source": [
"This notebook is a companion to the [Memory and Semantic Caching](https://www.mongodb.com/docs/atlas/ai-integrations/langchain/memory-semantic-cache/) tutorial. Refer to the page for set-up instructions and detailed explanations.\n",
"\n",
- "\n",
+ "\n",
"
\n",
""
]
@@ -31,7 +31,7 @@
},
"outputs": [],
"source": [
- "pip install --quiet --upgrade langchain langchain-community langchain-core langchain-mongodb langchain-voyageai langchain-openai pypdf"
+ "pip install --quiet --upgrade langchain langchain-community langchain-core langchain-mongodb langchain-voyageai langchain-openai langchain-text-splitters pypdf"
]
},
{
@@ -85,7 +85,7 @@
"outputs": [],
"source": [
"from langchain_community.document_loaders import PyPDFLoader\n",
- "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+ "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"# Load the PDF\n",
"loader = PyPDFLoader(\"https://investors.mongodb.com/node/13176/pdf\")\n",
diff --git a/ai-integrations/langchain-parent-document-retrieval.ipynb b/ai-integrations/langchain-parent-document-retrieval.ipynb
index 8813dc8..913370f 100644
--- a/ai-integrations/langchain-parent-document-retrieval.ipynb
+++ b/ai-integrations/langchain-parent-document-retrieval.ipynb
@@ -13,7 +13,7 @@
"source": [
"This notebook is a companion to the [Parent Document Retrieval](https://www.mongodb.com/docs/atlas/ai-integrations/langchain/parent-document-retrieval/) page. Refer to the page for set-up instructions and detailed explanations.\n",
"\n",
- "\n",
+ "\n",
"
\n",
""
]
@@ -28,7 +28,7 @@
},
"outputs": [],
"source": [
- "pip install --quiet --upgrade langchain langchain-community langchain-core langchain-mongodb langchain-voyageai langchain-openai pymongo pypdf"
+ "pip install --quiet --upgrade langchain langchain-community langchain-core langchain-mongodb langchain-voyageai langchain-openai langchain-text-splitters pymongo pypdf"
]
},
{
@@ -50,7 +50,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+ "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"from langchain_community.document_loaders import PyPDFLoader\n",
"\n",
"# Load the PDF\n",
diff --git a/ai-integrations/langchain.ipynb b/ai-integrations/langchain.ipynb
index 4779bac..d0d238c 100644
--- a/ai-integrations/langchain.ipynb
+++ b/ai-integrations/langchain.ipynb
@@ -28,7 +28,7 @@
},
"outputs": [],
"source": [
- "pip install --quiet --upgrade langchain langchain-community langchain-core langchain-mongodb langchain-voyageai langchain-openai pymongo pypdf"
+ "pip install --quiet --upgrade langchain langchain-community langchain-core langchain-mongodb langchain-voyageai langchain-openai langchain-text-splitters pymongo pypdf"
]
},
{
@@ -51,7 +51,7 @@
"outputs": [],
"source": [
"from langchain_community.document_loaders import PyPDFLoader\n",
- "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+ "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"# Load the PDF\n",
"loader = PyPDFLoader(\"https://investors.mongodb.com/node/13176/pdf\")\n",
diff --git a/use-cases/rag.ipynb b/use-cases/rag.ipynb
index 479bb45..00e5fb4 100644
--- a/use-cases/rag.ipynb
+++ b/use-cases/rag.ipynb
@@ -30,7 +30,7 @@
},
"outputs": [],
"source": [
- "pip install --quiet --upgrade pymongo sentence_transformers einops langchain langchain_community pypdf huggingface_hub"
+ "pip install --quiet --upgrade pymongo sentence_transformers einops langchain langchain_community langchain-text-splitters pypdf huggingface_hub"
]
},
{
@@ -71,7 +71,7 @@
"outputs": [],
"source": [
"from langchain_community.document_loaders import PyPDFLoader\n",
- "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+ "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"# Load the PDF\n",
"loader = PyPDFLoader(\"https://investors.mongodb.com/node/12236/pdf\")\n",