@@ -13,30 +13,36 @@ For details on available memory operations, see the [Tool Methods](python-sdk.md
1313You can bind memory tools directly to a LangChain LLM:
1414
1515``` python
16+ import asyncio
1617from agent_memory_client import create_memory_client
1718from agent_memory_client.integrations.langchain import get_memory_tools
1819from langchain_openai import ChatOpenAI
1920from langchain_core.tools import StructuredTool
2021
21- # Initialize the memory client
22- memory_client = await create_memory_client(" http://localhost:8000" )
23-
24- # Get memory tools as LangChain StructuredTool instances
25- tools: list[StructuredTool] = get_memory_tools(
26- memory_client = memory_client,
27- session_id = " user_session_123" ,
28- user_id = " alice"
29- )
30-
31- # Bind tools to an LLM
32- llm = ChatOpenAI(model = " gpt-4o" )
33- llm_with_tools = llm.bind_tools(tools)
34-
35- # Use the LLM with memory capabilities
36- response = await llm_with_tools.ainvoke(
37- " Remember that I prefer morning meetings and I work remotely"
38- )
39- print (response)
22+
23+ async def main():
24+     # Initialize the memory client
25+     memory_client = await create_memory_client("http://localhost:8000")
26+
27+     # Get memory tools as LangChain StructuredTool instances
28+     tools: list[StructuredTool] = get_memory_tools(
29+         memory_client=memory_client,
30+         session_id="user_session_123",
31+         user_id="alice"
32+     )
33+
34+     # Bind tools to an LLM
35+     llm = ChatOpenAI(model="gpt-4o")
36+     llm_with_tools = llm.bind_tools(tools)
37+
38+     # Use the LLM with memory capabilities
39+     response = await llm_with_tools.ainvoke(
40+         "Remember that I prefer morning meetings and I work remotely"
41+     )
42+     print(response)
43+
44+
45+ asyncio.run(main())
4046```
4147
4248The LLM can now automatically use memory tools to store and retrieve information during conversations.
@@ -60,83 +66,95 @@ pip install agent-memory-client langchain langchain-openai langgraph
6066Here's a complete example of creating a memory-enabled LangChain agent:
6167
6268``` python
69+ import asyncio
6370from agent_memory_client import create_memory_client
6471from agent_memory_client.integrations.langchain import get_memory_tools
6572from langchain.agents import create_tool_calling_agent, AgentExecutor
6673from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
6774from langchain_core.tools import StructuredTool
6875from langchain_openai import ChatOpenAI
6976
70- # Initialize memory client
71- memory_client = await create_memory_client(" http://localhost:8000" )
72-
73- # Get memory tools
74- tools: list[StructuredTool] = get_memory_tools(
75- memory_client = memory_client,
76- session_id = " my_session" ,
77- user_id = " alice"
78- )
79-
80- # Create LangChain agent
81- llm = ChatOpenAI(model = " gpt-4o" )
82- prompt = ChatPromptTemplate.from_messages([
83- (" system" , " You are a helpful assistant with persistent memory." ),
84- (" human" , " {input} " ),
85- MessagesPlaceholder(" agent_scratchpad" ),
86- ])
87-
88- agent = create_tool_calling_agent(llm, tools, prompt)
89- executor = AgentExecutor(agent = agent, tools = tools)
90-
91- # Use the agent
92- result = await executor.ainvoke({
93- " input" : " Remember that I love pizza and work at TechCorp"
94- })
95- print (result[" output" ])
96-
97- # Later conversation - agent can recall the information
98- result = await executor.ainvoke({
99- " input" : " What do you know about my food preferences?"
100- })
101- print (result[" output" ])
77+
78+ async def main():
79+     # Initialize memory client
80+     memory_client = await create_memory_client("http://localhost:8000")
81+
82+     # Get memory tools
83+     tools: list[StructuredTool] = get_memory_tools(
84+         memory_client=memory_client,
85+         session_id="my_session",
86+         user_id="alice"
87+     )
88+
89+     # Create LangChain agent
90+     llm = ChatOpenAI(model="gpt-4o")
91+     prompt = ChatPromptTemplate.from_messages([
92+         ("system", "You are a helpful assistant with persistent memory."),
93+         ("human", "{input}"),
94+         MessagesPlaceholder("agent_scratchpad"),
95+     ])
96+
97+     agent = create_tool_calling_agent(llm, tools, prompt)
98+     executor = AgentExecutor(agent=agent, tools=tools)
99+
100+     # Use the agent
101+     result = await executor.ainvoke({
102+         "input": "Remember that I love pizza and work at TechCorp"
103+     })
104+     print(result["output"])
105+
106+     # Later conversation - agent can recall the information
107+     result = await executor.ainvoke({
108+         "input": "What do you know about my food preferences?"
109+     })
110+     print(result["output"])
111+
112+
113+ asyncio.run(main())
102114```
103115
104116## Using with LangGraph
105117
106118You can use memory tools in LangGraph workflows:
107119
108120``` python
121+ import asyncio
109122from agent_memory_client import create_memory_client
110123from agent_memory_client.integrations.langchain import get_memory_tools
111124from langchain_core.tools import StructuredTool
112125from langchain_openai import ChatOpenAI
113126from langgraph.prebuilt import create_react_agent
114127
115- # Initialize memory client
116- memory_client = await create_memory_client(" http://localhost:8000" )
117-
118- # Get memory tools
119- tools: list[StructuredTool] = get_memory_tools(
120- memory_client = memory_client,
121- session_id = " langgraph_session" ,
122- user_id = " alice"
123- )
124-
125- # Create a LangGraph agent with memory tools
126- llm = ChatOpenAI(model = " gpt-4o" )
127- graph = create_react_agent(llm, tools)
128-
129- # Use the agent
130- result = await graph.ainvoke({
131- " messages" : [(" user" , " Remember that I'm learning Python and prefer visual examples" )]
132- })
133- print (result[" messages" ][- 1 ].content)
134-
135- # Continue the conversation
136- result = await graph.ainvoke({
137- " messages" : [(" user" , " What programming language am I learning?" )]
138- })
139- print (result[" messages" ][- 1 ].content)
128+
129+ async def main():
130+     # Initialize memory client
131+     memory_client = await create_memory_client("http://localhost:8000")
132+
133+     # Get memory tools
134+     tools: list[StructuredTool] = get_memory_tools(
135+         memory_client=memory_client,
136+         session_id="langgraph_session",
137+         user_id="alice"
138+     )
139+
140+     # Create a LangGraph agent with memory tools
141+     llm = ChatOpenAI(model="gpt-4o")
142+     graph = create_react_agent(llm, tools)
143+
144+     # Use the agent
145+     result = await graph.ainvoke({
146+         "messages": [("user", "Remember that I'm learning Python and prefer visual examples")]
147+     })
148+     print(result["messages"][-1].content)
149+
150+     # Continue the conversation
151+     result = await graph.ainvoke({
152+         "messages": [("user", "What programming language am I learning?")]
153+     })
154+     print(result["messages"][-1].content)
155+
156+
157+ asyncio.run(main())
140158```
141159
142160## Advanced Usage
@@ -146,31 +164,40 @@ print(result["messages"][-1].content)
146164Get only specific tools you need:
147165
148166``` python
167+ import asyncio
168+ from agent_memory_client import create_memory_client
149169from agent_memory_client.integrations.langchain import get_memory_tools
150170from langchain_core.tools import StructuredTool
151171
152- tools: list[StructuredTool] = get_memory_tools(
153- memory_client = client,
154- session_id = " chat_session" ,
155- user_id = " alice" ,
156- tools = [" search_memory" , " create_long_term_memory" ]
157- )
172+
173+ async def main():
174+     # Initialize memory client
175+     memory_client = await create_memory_client("http://localhost:8000")
176+
177+     tools: list[StructuredTool] = get_memory_tools(
178+         memory_client=memory_client,
179+         session_id="chat_session",
180+         user_id="alice",
181+         tools=["search_memory", "create_long_term_memory"]
182+     )
183+
184+
185+ asyncio.run(main())
158186```
159187
160188### Combining with Custom Tools
161189
162190Combine memory tools with your own custom tools:
163191
164192``` python
165- from langchain_core.tools import tool
193+ import asyncio
194+ from agent_memory_client import create_memory_client
166195from agent_memory_client.integrations.langchain import get_memory_tools
196+ from langchain_core.tools import tool
197+ from langchain.agents import create_tool_calling_agent, AgentExecutor
198+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
199+ from langchain_openai import ChatOpenAI
167200
168- # Get memory tools
169- memory_tools = get_memory_tools(
170- memory_client = client,
171- session_id = " session" ,
172- user_id = " user"
173- )
174201
175202# Define custom tools
176203@tool
@@ -184,55 +211,96 @@ async def calculate(expression: str) -> str:
184 211         except (ValueError, SyntaxError):
185 212             return "Error: Invalid expression"
186213
214+
187 215 @tool
188 216 async def get_weather(city: str) -> str:
189 217     """Get weather for a city."""
190 218     # Your weather API logic here
191 219     return f"Weather in {city}: Sunny, 72°F"
192220
193- # Combine all tools
194- all_tools = memory_tools + [calculate, get_weather]
195221
196- # Use with agent
197- agent = create_tool_calling_agent(llm, all_tools, prompt)
198- executor = AgentExecutor(agent = agent, tools = all_tools)
222+ async def main():
223+     # Initialize memory client
224+     memory_client = await create_memory_client("http://localhost:8000")
225+
226+     # Get memory tools
227+     memory_tools = get_memory_tools(
228+         memory_client=memory_client,
229+         session_id="session",
230+         user_id="user"
231+     )
232+
233+     # Combine all tools
234+     all_tools = memory_tools + [calculate, get_weather]
235+
236+     # Create agent with combined tools
237+     llm = ChatOpenAI(model="gpt-4o")
238+     prompt = ChatPromptTemplate.from_messages([
239+         ("system", "You are a helpful assistant with memory and additional capabilities."),
240+         ("human", "{input}"),
241+         MessagesPlaceholder("agent_scratchpad"),
242+     ])
243+
244+     agent = create_tool_calling_agent(llm, all_tools, prompt)
245+     executor = AgentExecutor(agent=agent, tools=all_tools)
246+
247+     # Use the agent
248+     result = await executor.ainvoke({
249+         "input": "What's 2+2? Also remember that I like math."
250+     })
251+     print(result["output"])
252+
253+
254+ asyncio.run(main())
199255```
200256
201257### Multi-User Application
202258
203259Handle multiple users with different sessions:
204260
205261``` python
262+ import asyncio
263+ from agent_memory_client import create_memory_client
206264from agent_memory_client.integrations.langchain import get_memory_tools
207265from langchain_core.tools import StructuredTool
266+ from langchain.agents import create_tool_calling_agent, AgentExecutor
267+ from langchain_openai import ChatOpenAI
268+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
208269
209- async def create_user_agent (user_id : str , session_id : str ):
210- """ Create a memory-enabled agent for a specific user."""
211270
212- tools: list[StructuredTool] = get_memory_tools(
213- memory_client = shared_memory_client,
214- session_id = session_id,
215- user_id = user_id,
216- namespace = f " app: { user_id} " # User-specific namespace
217- )
271+ async def main():
272+     # Initialize shared memory client
273+     shared_memory_client = await create_memory_client("http://localhost:8000")
218274
219- llm = ChatOpenAI(model = " gpt-4o" )
220- prompt = ChatPromptTemplate.from_messages([
221- (" system" , f " You are assisting user { user_id} . " ),
222- (" human" , " {input} " ),
223- MessagesPlaceholder(" agent_scratchpad" ),
224- ])
275+     async def create_user_agent(user_id: str, session_id: str):
276+         """Create a memory-enabled agent for a specific user."""
277+         tools: list[StructuredTool] = get_memory_tools(
278+             memory_client=shared_memory_client,
279+             session_id=session_id,
280+             user_id=user_id,
281+             namespace=f"app:{user_id}"  # User-specific namespace
282+         )
225283
226- agent = create_tool_calling_agent(llm, tools, prompt)
227- return AgentExecutor(agent = agent, tools = tools)
284+         llm = ChatOpenAI(model="gpt-4o")
285+         prompt = ChatPromptTemplate.from_messages([
286+             ("system", f"You are assisting user {user_id}."),
287+             ("human", "{input}"),
288+             MessagesPlaceholder("agent_scratchpad"),
289+         ])
290+
291+         agent = create_tool_calling_agent(llm, tools, prompt)
292+         return AgentExecutor(agent=agent, tools=tools)
293+
294+     # Create agents for different users
295+     alice_agent = await create_user_agent("alice", "alice_session_1")
296+     bob_agent = await create_user_agent("bob", "bob_session_1")
297+
298+     # Each agent has isolated memory
299+     await alice_agent.ainvoke({"input": "I love pizza"})
300+     await bob_agent.ainvoke({"input": "I love sushi"})
228301
229- # Create agents for different users
230- alice_agent = await create_user_agent(" alice" , " alice_session_1" )
231- bob_agent = await create_user_agent(" bob" , " bob_session_1" )
232302
233- # Each agent has isolated memory
234- await alice_agent.ainvoke({" input" : " I love pizza" })
235- await bob_agent.ainvoke({" input" : " I love sushi" })
303+ asyncio.run(main())
236304```
237305
238306## See Also
0 commit comments