Add persistent memory to LangGraph agents and workflows.
LangGraph Integration
CortexDB provides persistent long-term memory for LangGraph agents, complementing LangGraph's built-in short-term state management.
Installation
pip install cortexdb[langgraph]
Setup
from langgraph.graph import END, START, StateGraph, MessagesState
from cortexdb.integrations.langgraph import CortexMemoryNode, cortex_remember, cortex_recall

# Build a graph whose agent is wrapped with CortexDB memory:
# recall context first, let the agent respond, then persist the turn.
builder = StateGraph(MessagesState)

# Recall relevant context before the agent responds
builder.add_node("recall_memory", cortex_recall(
    tenant_id="my-app",
    top_k=10,
))

# Store the conversation turn after the agent responds
builder.add_node("store_memory", cortex_remember(
    tenant_id="my-app",
    episode_type="message",
))

builder.add_node("agent", agent_node)

# Wire the graph: START -> recall_memory -> agent -> store_memory -> END
builder.add_edge(START, "recall_memory")
builder.add_edge("recall_memory", "agent")
builder.add_edge("agent", "store_memory")
builder.add_edge("store_memory", END)  # terminate the graph explicitly

graph = builder.compile()
Memory-Augmented Agent
from cortexdb import Cortex

# Module-level client shared by the memory nodes defined below.
cortex = Cortex(api_key="your-api-key")
def recall_node(state: MessagesState):
    """Recall relevant memories before the agent processes the message.

    Returns a partial state update ({"memory_context": ...}) instead of
    mutating and returning the full state — LangGraph merges node return
    values into the state via each channel's reducer, so returning the
    whole state would re-submit the "messages" channel unnecessarily.
    """
    if not state["messages"]:
        # Nothing to query against yet; leave the state untouched.
        return {}
    last_message = state["messages"][-1].content
    memories = cortex.recall(
        query=last_message,
        tenant_id="my-app",
    )
    return {"memory_context": memories.context}
def remember_node(state: MessagesState):
    """Store the latest conversation turn in CortexDB.

    Persists each of the last two messages (user + assistant) as its own
    episode; the slice is safe even when fewer than two messages exist.
    Returns an empty update because this node's only job is the side
    effect of writing to CortexDB — it makes no state changes.
    """
    for msg in state["messages"][-2:]:  # user + assistant turn
        cortex.remember(
            content=msg.content,
            tenant_id="my-app",
            metadata={
                "source": "langgraph-agent",
                "author": msg.type,  # "human" or "ai"
            },
        )
    return {}
Configuration
| Parameter | Default | Description |
|---|---|---|
| tenant_id | Required | Tenant identifier |
| namespace | None | Memory namespace |
| top_k | 10 | Results per recall |
| episode_type | message | Episode type for stored turns |
| source | langgraph | Source identifier |