conversation = [
    {"role": "system", "content": "You are a helpful assistant that translates English to French."},
    {"role": "user", "content": "Translate: I love programming."},
    {"role": "assistant", "content": "J'adore la programmation."},
    {"role": "user", "content": "Translate: I love building applications."},
]

response = model.invoke(conversation)
print(response)  # AIMessage("J'adore créer des applications.")
Message objects
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage

conversation = [
    SystemMessage("You are a helpful assistant that translates English to French."),
    HumanMessage("Translate: I love programming."),
    AIMessage("J'adore la programmation."),
    HumanMessage("Translate: I love building applications."),
]

response = model.invoke(conversation)
print(response)  # AIMessage("J'adore créer des applications.")
full = None  # None | AIMessageChunk
for chunk in model.stream("What color is the sky?"):
    full = chunk if full is None else full + chunk
    print(full.text)

# The
# The sky
# The sky is
# The sky is typically
# The sky is typically blue
# ...

print(full.content_blocks)
# [{"type": "text", "text": "The sky is typically blue..."}]
responses = model.batch([
    "Why do parrots have colorful feathers?",
    "How do airplanes fly?",
    "What is quantum computing?",
])
for response in responses:
    print(response)
for response in model.batch_as_completed([
    "Why do parrots have colorful feathers?",
    "How do airplanes fly?",
    "What is quantum computing?",
]):
    print(response)
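Note that completions are yielded as they finish, so results can arrive out of input order. A minimal sketch for restoring the original ordering, assuming each yielded item is an (index, output) pair as in the core Runnable.batch_as_completed API:

questions = [
    "Why do parrots have colorful feathers?",
    "How do airplanes fly?",
    "What is quantum computing?",
]

# Collect results keyed by input index as they complete
results = {}
for idx, response in model.batch_as_completed(questions):
    results[idx] = response

# Read the answers back in input order
for idx, question in enumerate(questions):
    print(question, "->", results[idx].text)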
from langchain.tools import tool

@tool
def get_weather(location: str) -> str:
    """Get the weather at a location."""
    return f"It's sunny in {location}."

model_with_tools = model.bind_tools([get_weather])
response = model_with_tools.invoke("What's the weather like in Boston?")

for tool_call in response.tool_calls:  # View tool calls made by the model
    print(f"Tool: {tool_call['name']}")
    print(f"Args: {tool_call['args']}")
# Bind (potentially multiple) tools to the model
model_with_tools = model.bind_tools([get_weather])

# Step 1: Model generates tool calls
messages = [{"role": "user", "content": "What's the weather in Boston?"}]
ai_msg = model_with_tools.invoke(messages)
messages.append(ai_msg)

# Step 2: Execute tools and collect results
for tool_call in ai_msg.tool_calls:
    # Execute the tool with the generated arguments
    tool_result = get_weather.invoke(tool_call)
    messages.append(tool_result)

# Step 3: Pass results back to model for final response
final_response = model_with_tools.invoke(messages)
print(final_response.text)
# "The current weather in Boston is 72°F and sunny."
model_with_tools = model.bind_tools([get_weather])

response = model_with_tools.invoke("What's the weather in Boston and Tokyo?")

# The model may generate multiple tool calls
print(response.tool_calls)
# [
#     {'name': 'get_weather', 'args': {'location': 'Boston'}, 'id': 'call_1'},
#     {'name': 'get_weather', 'args': {'location': 'Tokyo'}, 'id': 'call_2'},
# ]

# Execute all tools (can be done in parallel with async)
results = []
for tool_call in response.tool_calls:
    if tool_call['name'] == 'get_weather':
        result = get_weather.invoke(tool_call)
        ...
        results.append(result)
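As the comment notes, independent tool calls can run in parallel. A minimal async sketch using asyncio.gather and the tool's ainvoke method (same get_weather tool as above):

import asyncio

async def run_tool_calls(tool_calls):
    # Dispatch all generated calls concurrently and await the results
    return await asyncio.gather(
        *(get_weather.ainvoke(tool_call) for tool_call in tool_calls)
    )

results = asyncio.run(run_tool_calls(response.tool_calls))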
for chunk in model_with_tools.stream("What's the weather in Boston and Tokyo?"):
    # Tool call chunks arrive progressively
    for tool_chunk in chunk.tool_call_chunks:
        if name := tool_chunk.get("name"):
            print(f"Tool: {name}")
        if id_ := tool_chunk.get("id"):
            print(f"ID: {id_}")
        if args := tool_chunk.get("args"):
            print(f"Args: {args}")

# Output:
# Tool: get_weather
# ID: call_SvMlU1TVIZugrFLckFE2ceRE
# Args: {"lo
# Args: catio
# Args: n": "B
# Args: osto
# Args: n"}
# Tool: get_weather
# ID: call_QMZdy6qInx13oWKE7KhuhOLR
# Args: {"lo
# Args: catio
# Args: n": "T
# Args: okyo
# Args: "}
You can accumulate the chunks to build a complete tool call:
Accumulating tool calls
gathered = None
for chunk in model_with_tools.stream("What's the weather in Boston?"):
    gathered = chunk if gathered is None else gathered + chunk
    print(gathered.tool_calls)
from pydantic import BaseModel, Field

class Movie(BaseModel):
    """A movie with details."""
    title: str = Field(..., description="The title of the movie")
    year: int = Field(..., description="The year the movie was released")
    director: str = Field(..., description="The director of the movie")
    rating: float = Field(..., description="The movie's rating out of 10")

model_with_structure = model.with_structured_output(Movie)
response = model_with_structure.invoke("Provide details about the movie Inception")
print(response)
# Movie(title="Inception", year=2010, director="Christopher Nolan", rating=8.8)
from pydantic import BaseModel, Field

class Movie(BaseModel):
    """A movie with details."""
    title: str = Field(..., description="The title of the movie")
    year: int = Field(..., description="The year the movie was released")
    director: str = Field(..., description="The director of the movie")
    rating: float = Field(..., description="The movie's rating out of 10")

model_with_structure = model.with_structured_output(Movie, include_raw=True)
response = model_with_structure.invoke("Provide details about the movie Inception")
response
# {
#     "raw": AIMessage(...),
#     "parsed": Movie(title=..., year=..., ...),
#     "parsing_error": None,
# }
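Because include_raw=True returns a dict instead of raising on parse failures, you can branch on the keys shown above; for example:

result = model_with_structure.invoke("Provide details about the movie Inception")

if result["parsing_error"] is None:
    movie = result["parsed"]  # Validated Movie instance
    print(movie.title, movie.rating)
else:
    # Fall back to the raw message for debugging
    print("Parsing failed:", result["parsing_error"])
    print(result["raw"].text)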
Example: nested structures
Schemas can be nested:
from pydantic import BaseModel, Field

class Actor(BaseModel):
    name: str
    role: str

class MovieDetails(BaseModel):
    title: str
    year: int
    cast: list[Actor]
    genres: list[str]
    budget: float | None = Field(None, description="Budget in millions USD")

model_with_structure = model.with_structured_output(MovieDetails)
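Invoking the nested schema works the same way; the prompt and output below are illustrative:

response = model_with_structure.invoke(
    "Provide details about the movie Inception, including its cast"
)
print(response)
# MovieDetails(
#     title="Inception",
#     year=2010,
#     cast=[Actor(name="Leonardo DiCaprio", role="Cobb"), ...],
#     genres=["Sci-Fi", "Thriller"],
#     budget=160.0,
# )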
response = model.invoke("Create a picture of a cat")print(response.content_blocks)# [# {"type": "text", "text": "Here's a picture of a cat"},# {"type": "image", "base64": "...", "mime_type": "image/jpeg"},# ]
for chunk in model.stream("Why do parrots have colorful feathers?"): reasoning_steps = [r for r in chunk.content_blocks if r["type"] == "reasoning"] print(reasoning_steps if reasoning_steps else chunk.text)
from langchain.chat_models import init_chat_model

model = init_chat_model("gpt-4.1-mini")

tool = {"type": "web_search"}
model_with_tools = model.bind_tools([tool])

response = model_with_tools.invoke("What was a positive news story from today?")
response.content_blocks
from langchain.chat_models import init_chat_model
from langchain_core.rate_limiters import InMemoryRateLimiter

rate_limiter = InMemoryRateLimiter(
    requests_per_second=0.1,    # 1 request every 10 seconds
    check_every_n_seconds=0.1,  # Check every 100ms whether a request is allowed
    max_bucket_size=10,         # Controls the maximum burst size
)

model = init_chat_model(
    model="gpt-5",
    model_provider="openai",
    rate_limiter=rate_limiter,
)
model = init_chat_model(
    model="gpt-4o",
    model_provider="openai",
).bind(logprobs=True)

response = model.invoke("Why do parrots talk?")
print(response.response_metadata["logprobs"])
response = model.invoke(
    "Tell me a joke",
    config={
        "run_name": "joke_generation",       # Custom name for this run
        "tags": ["humor", "demo"],           # Tags for categorization
        "metadata": {"user_id": "123"},      # Custom metadata
        "callbacks": [my_callback_handler],  # Callback handlers
    },
)
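Here my_callback_handler is assumed to be defined elsewhere; a minimal sketch of one, subclassing BaseCallbackHandler from langchain_core:

from langchain_core.callbacks import BaseCallbackHandler

class LoggingHandler(BaseCallbackHandler):
    """Print a line when the chat model starts and finishes."""

    def on_chat_model_start(self, serialized, messages, **kwargs):
        print("Chat model started")

    def on_llm_end(self, response, **kwargs):
        print("Chat model finished")

my_callback_handler = LoggingHandler()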
from langchain.chat_models import init_chat_model

configurable_model = init_chat_model(temperature=0)

configurable_model.invoke(
    "what's your name",
    config={"configurable": {"model": "gpt-5-nano"}},  # Run with GPT-5-Nano
)
configurable_model.invoke(
    "what's your name",
    config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},  # Run with Claude
)
Configurable model with default values
We can create a configurable model with default model values, specify which parameters are configurable, and add a prefix to the configurable parameters:
first_model = init_chat_model(
    model="gpt-4.1-mini",
    temperature=0,
    configurable_fields=("model", "model_provider", "temperature", "max_tokens"),
    config_prefix="first",  # Useful when you have a chain with multiple models
)
first_model.invoke("what's your name")
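With config_prefix="first", the configurable keys are prefixed with first_ (following the init_chat_model docstring), so overriding them at runtime looks like:

first_model.invoke(
    "what's your name",
    config={
        "configurable": {
            "first_model": "claude-sonnet-4-5-20250929",
            "first_temperature": 0.5,
            "first_max_tokens": 100,
        }
    },
)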
from pydantic import BaseModel, Field

class GetWeather(BaseModel):
    """Get the current weather in a given location"""
    location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

class GetPopulation(BaseModel):
    """Get the current population in a given location"""
    location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

model = init_chat_model(temperature=0)
model_with_tools = model.bind_tools([GetWeather, GetPopulation])

model_with_tools.invoke(
    "what's bigger in 2024 LA or NYC",
    config={"configurable": {"model": "gpt-4.1-mini"}},
).tool_calls