from langchain.agents import create_agent from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse from typing importCallable
@wrap_model_call
def state_based_tools(
    request: ModelRequest,
    handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
    """Filter tools based on conversation State.

    Reads the authentication flag and message count from the agent State
    and narrows the tool set before delegating to the model call.

    Args:
        request: The model request, carrying State and the tool list.
        handler: The next step in the middleware chain.

    Returns:
        The ModelResponse produced by the downstream handler.
    """
    # Read from State: check if user has authenticated.
    state = request.state
    is_authenticated = state.get("authenticated", False)
    message_count = len(state["messages"])

    # Only enable sensitive tools after authentication.
    if not is_authenticated:
        tools = [t for t in request.tools if t.name.startswith("public_")]
        request = request.override(tools=tools)
    elif message_count < 5:
        # Limit tools early in conversation.
        tools = [t for t in request.tools if t.name != "advanced_search"]
        request = request.override(tools=tools)

    # BUG FIX: the original never invoked the handler and implicitly
    # returned None despite the ModelResponse return annotation.
    return handler(request)
from dataclasses import dataclass from langchain.agents import create_agent from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse from typing importCallable from langgraph.store.memory import InMemoryStore
@dataclass
class Context:
    """Runtime context schema: identifies the requesting user."""

    user_id: str
@wrap_model_call
def store_based_tools(
    request: ModelRequest,
    handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
    """Filter tools based on Store preferences.

    Looks up the user's enabled features in the long-term Store and keeps
    only the tools whose names are enabled for that user.

    Args:
        request: The model request, carrying runtime context and the Store.
        handler: The next step in the middleware chain.

    Returns:
        The ModelResponse produced by the downstream handler.
    """
    user_id = request.runtime.context.user_id

    # Read from Store: get user's enabled features.
    store = request.runtime.store
    feature_flags = store.get(("features",), user_id)

    if feature_flags:
        enabled_features = feature_flags.value.get("enabled_tools", [])
        # Only include tools that are enabled for this user.
        tools = [t for t in request.tools if t.name in enabled_features]
        request = request.override(tools=tools)

    # BUG FIX: the original never invoked the handler and implicitly
    # returned None despite the ModelResponse return annotation.
    return handler(request)
from dataclasses import dataclass from langchain.agents import create_agent from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse from typing importCallable
@dataclass
class Context:
    """Runtime context schema: the requesting user's permission role."""

    user_role: str
@wrap_model_call
def context_based_tools(
    request: ModelRequest,
    handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
    """Filter tools based on Runtime Context permissions.

    Maps the caller's role (admin/editor/viewer) to an allowed tool set,
    defaulting to the most restrictive role when no context is available.

    Args:
        request: The model request, carrying runtime context and tools.
        handler: The next step in the middleware chain.

    Returns:
        The ModelResponse produced by the downstream handler.
    """
    # Read from Runtime Context: get user role.
    if request.runtime is None or request.runtime.context is None:
        # If no context provided, default to viewer (most restrictive).
        user_role = "viewer"
    else:
        user_role = request.runtime.context.user_role

    if user_role == "admin":
        # Admins get all tools.
        pass
    elif user_role == "editor":
        # Editors can't delete.
        tools = [t for t in request.tools if t.name != "delete_data"]
        request = request.override(tools=tools)
    else:
        # Viewers get read-only tools.
        tools = [t for t in request.tools if t.name.startswith("read_")]
        request = request.override(tools=tools)

    # BUG FIX: the original never invoked the handler and implicitly
    # returned None despite the ModelResponse return annotation.
    return handler(request)
from langchain.agents import create_agent from langchain.agents.middleware import wrap_tool_call from langchain.messages import ToolMessage
@wrap_tool_call
def handle_tool_errors(request, handler):
    """Handle tool execution errors with custom messages.

    Delegates to the wrapped tool call; on any exception, converts the
    failure into a ToolMessage the model can read instead of crashing.
    """
    try:
        return handler(request)
    except Exception as e:
        # Return a custom error message to the model rather than raising.
        return ToolMessage(
            content=f"Tool error: Please check your input and try again. ({str(e)})",
            tool_call_id=request.tool_call["id"],
        )
from langchain.agents import create_agent from langchain.agents.middleware import dynamic_prompt, ModelRequest
# BUG FIX: TypedDict was referenced but never imported in this snippet.
from typing import TypedDict


class Context(TypedDict):
    """Runtime context schema: the requesting user's role."""

    user_role: str
@dynamic_prompt
def user_role_prompt(request: ModelRequest) -> str:
    """Generate system prompt based on user role.

    Args:
        request: The model request; its runtime context supplies
            ``user_role`` (defaulting to ``"user"`` when absent).

    Returns:
        A system prompt string tailored to the user's expertise level.
    """
    user_role = request.runtime.context.get("user_role", "user")
    base_prompt = "You are a helpful assistant."

    if user_role == "expert":
        return f"{base_prompt} Provide detailed technical responses."
    elif user_role == "beginner":
        return f"{base_prompt} Explain concepts simply and avoid jargon."

    # BUG FIX: the original returned None for any other role despite the
    # declared -> str; fall back to the base prompt for default users.
    return base_prompt
# The system prompt will be set dynamically based on context
result = agent.invoke(
    {"messages": [{"role": "user", "content": "Explain machine learning"}]},
    context={"user_role": "expert"},
)