Chat models are language models that use a sequence of messages as inputs and return messages as outputs (as opposed to using plain text). These are generally newer models.
If you’d like to write your own chat model, see this how-to. If you’d like to contribute an integration, see Contributing integrations.

Install and use

pip install -qU "langchain[openai]"
import getpass
import os

# Prompt for the OpenAI API key only when it is not already set in the environment.
if not os.environ.get("OPENAI_API_KEY"):
  os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter API key for OpenAI: ")

from langchain.chat_models import init_chat_model

# Initialize a chat model backed by OpenAI's gpt-4o-mini.
model = init_chat_model("gpt-4o-mini", model_provider="openai")
pip install -qU "langchain[anthropic]"
import getpass
import os

# Prompt for the Anthropic API key only when it is not already set in the environment.
if not os.environ.get("ANTHROPIC_API_KEY"):
  os.environ["ANTHROPIC_API_KEY"] = getpass.getpass("Enter API key for Anthropic: ")

from langchain.chat_models import init_chat_model

# Initialize a chat model backed by Anthropic's Claude 3.5 Sonnet.
model = init_chat_model("claude-3-5-sonnet-latest", model_provider="anthropic")
pip install -qU "langchain[azure]"
import getpass
import os

# Prompt for the Azure OpenAI API key only when it is not already set in the environment.
if not os.environ.get("AZURE_OPENAI_API_KEY"):
  os.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass("Enter API key for Azure: ")

from langchain_openai import AzureChatOpenAI

# Endpoint, deployment name, and API version are read from the environment;
# these three variables must be set before running this example.
model = AzureChatOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
    openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
)
pip install -qU "langchain[google-genai]"
import getpass
import os

# Prompt for the Google API key only when it is not already set in the environment.
if not os.environ.get("GOOGLE_API_KEY"):
  os.environ["GOOGLE_API_KEY"] = getpass.getpass("Enter API key for Google Gemini: ")

from langchain.chat_models import init_chat_model

# Initialize a Gemini chat model via the Google Generative AI API.
model = init_chat_model("gemini-2.5-flash", model_provider="google_genai")
pip install -qU "langchain[google-vertexai]"
# Ensure your VertexAI credentials are configured
# (no API-key prompt here — Vertex AI authenticates via your Google Cloud credentials).

from langchain.chat_models import init_chat_model

# Initialize a Gemini chat model served through Google Cloud Vertex AI.
model = init_chat_model("gemini-2.5-flash", model_provider="google_vertexai")
pip install -qU "langchain[aws]"
# Ensure your AWS credentials are configured
# (no API-key prompt here — Bedrock authenticates via your AWS credentials).

from langchain.chat_models import init_chat_model

# Initialize a Claude model hosted on AWS Bedrock via the Converse API.
model = init_chat_model("anthropic.claude-3-5-sonnet-20240620-v1:0", model_provider="bedrock_converse")
pip install -qU "langchain[groq]"
import getpass
import os

# Prompt for the Groq API key only when it is not already set in the environment.
if not os.environ.get("GROQ_API_KEY"):
  os.environ["GROQ_API_KEY"] = getpass.getpass("Enter API key for Groq: ")

from langchain.chat_models import init_chat_model

# Initialize a Llama 3 8B chat model hosted on Groq.
model = init_chat_model("llama3-8b-8192", model_provider="groq")
pip install -qU "langchain[cohere]"
import getpass
import os

# Prompt for the Cohere API key only when it is not already set in the environment.
if not os.environ.get("COHERE_API_KEY"):
  os.environ["COHERE_API_KEY"] = getpass.getpass("Enter API key for Cohere: ")

from langchain.chat_models import init_chat_model

# Initialize Cohere's Command R+ chat model.
model = init_chat_model("command-r-plus", model_provider="cohere")
pip install -qU "langchain[langchain-nvidia-ai-endpoints]"
import getpass
import os

# Prompt for the NVIDIA API key only when it is not already set in the environment.
if not os.environ.get("NVIDIA_API_KEY"):
  os.environ["NVIDIA_API_KEY"] = getpass.getpass("Enter API key for NVIDIA: ")

from langchain.chat_models import init_chat_model

# Initialize a Llama 3 70B chat model served by NVIDIA AI Endpoints.
model = init_chat_model("meta/llama3-70b-instruct", model_provider="nvidia")
pip install -qU "langchain[fireworks]"
import getpass
import os

# Prompt for the Fireworks AI API key only when it is not already set in the environment.
if not os.environ.get("FIREWORKS_API_KEY"):
  os.environ["FIREWORKS_API_KEY"] = getpass.getpass("Enter API key for Fireworks AI: ")

from langchain.chat_models import init_chat_model

# Initialize a Llama 3.1 70B chat model hosted on Fireworks AI.
model = init_chat_model("accounts/fireworks/models/llama-v3p1-70b-instruct", model_provider="fireworks")
pip install -qU "langchain[mistralai]"
import getpass
import os

# Prompt for the Mistral AI API key only when it is not already set in the environment.
if not os.environ.get("MISTRAL_API_KEY"):
  os.environ["MISTRAL_API_KEY"] = getpass.getpass("Enter API key for Mistral AI: ")

from langchain.chat_models import init_chat_model

# Initialize Mistral AI's latest Large chat model.
model = init_chat_model("mistral-large-latest", model_provider="mistralai")
pip install -qU "langchain[together]"
import getpass
import os

# Prompt for the Together AI API key only when it is not already set in the environment.
if not os.environ.get("TOGETHER_API_KEY"):
  os.environ["TOGETHER_API_KEY"] = getpass.getpass("Enter API key for Together AI: ")

from langchain.chat_models import init_chat_model

# Initialize a Mixtral 8x7B chat model hosted on Together AI.
model = init_chat_model("mistralai/Mixtral-8x7B-Instruct-v0.1", model_provider="together")
pip install -qU "langchain[langchain-ibm]"
import getpass
import os

# Prompt for the IBM watsonx API key only when it is not already set in the environment.
if not os.environ.get("WATSONX_APIKEY"):
  os.environ["WATSONX_APIKEY"] = getpass.getpass("Enter API key for IBM watsonx: ")

from langchain_ibm import ChatWatsonx

# Replace the project_id placeholder with your own watsonx project ID.
# The url shown targets the us-south region.
model = ChatWatsonx(
    model_id="ibm/granite-34b-code-instruct",
    url="https://us-south.ml.cloud.ibm.com",
    project_id="<WATSONX PROJECT_ID>"
)
pip install -qU "langchain[databricks-langchain]"
import getpass
import os

# Prompt for the Databricks access token only when it is not already set in the environment.
if not os.environ.get("DATABRICKS_TOKEN"):
  os.environ["DATABRICKS_TOKEN"] = getpass.getpass("Enter API key for Databricks: ")

from databricks_langchain import ChatDatabricks

# Point DATABRICKS_HOST at your workspace's serving-endpoints URL (example value shown).
os.environ["DATABRICKS_HOST"] = "https://example.staging.cloud.databricks.com/serving-endpoints"

# Initialize a chat model backed by a Databricks model-serving endpoint.
model = ChatDatabricks(endpoint="databricks-meta-llama-3-1-70b-instruct")
pip install -qU "langchain[langchain-xai]"
import getpass
import os

# Prompt for the xAI API key only when it is not already set in the environment.
if not os.environ.get("XAI_API_KEY"):
  os.environ["XAI_API_KEY"] = getpass.getpass("Enter API key for xAI: ")

from langchain.chat_models import init_chat_model

# Initialize xAI's Grok 2 chat model.
model = init_chat_model("grok-2", model_provider="xai")
pip install -qU "langchain[langchain-perplexity]"
import getpass
import os

# Prompt for the Perplexity API key only when it is not already set in the environment.
if not os.environ.get("PPLX_API_KEY"):
  os.environ["PPLX_API_KEY"] = getpass.getpass("Enter API key for Perplexity: ")

from langchain.chat_models import init_chat_model

# Initialize a Perplexity-hosted Llama 3.1 Sonar chat model.
model = init_chat_model("llama-3.1-sonar-small-128k-online", model_provider="perplexity")
pip install -qU "langchain[langchain-deepseek]"
import getpass
import os

# Prompt for the DeepSeek API key only when it is not already set in the environment.
if not os.environ.get("DEEPSEEK_API_KEY"):
  os.environ["DEEPSEEK_API_KEY"] = getpass.getpass("Enter API key for DeepSeek: ")

from langchain.chat_models import init_chat_model

# Initialize DeepSeek's chat model.
model = init_chat_model("deepseek-chat", model_provider="deepseek")
While all these LangChain classes support the indicated advanced feature, you may have to open the provider-specific documentation to learn which hosted models or backends support the feature.
The original table also indicated, per provider, support for tool calling, structured output, JSON mode, local execution, and multimodal input; those per-feature indicators did not survive extraction — consult each provider's integration page for them. The providers and their packages are:

| Provider | Package |
| --- | --- |
| ChatAnthropic | langchain-anthropic |
| ChatAimlapi | langchain-aimlapi |
| ChatMistralAI | langchain-mistralai |
| ChatFireworks | langchain-fireworks |
| AzureChatOpenAI | langchain-openai |
| ChatOpenAI | langchain-openai |
| ChatTogether | langchain-together |
| ChatVertexAI | langchain-google-vertexai |
| ChatGoogleGenerativeAI | langchain-google-genai |
| ChatGroq | langchain-groq |
| ChatCohere | langchain-cohere |
| ChatBedrock | langchain-aws |
| ChatHuggingFace | langchain-huggingface |
| ChatNVIDIA | langchain-nvidia-ai-endpoints |
| ChatOllama | langchain-ollama |
| ChatLlamaCpp | langchain-community |
| ChatAI21 | langchain-ai21 |
| ChatUpstage | langchain-upstage |
| ChatDatabricks | databricks-langchain |
| ChatWatsonx | langchain-ibm |
| ChatXAI | langchain-xai |
| ChatPerplexity | langchain-perplexity |

All chat models

Abso

AI21 Labs

AI/ML API

Alibaba Cloud PAI EAS

Anthropic

Anyscale

AzureAIChatCompletionsModel

Azure OpenAI

Azure ML Endpoint

Baichuan Chat

Baidu Qianfan

AWS Bedrock

Cerebras

CloudflareWorkersAI

Cohere

ContextualAI

Coze Chat

Dappier AI

Databricks

DeepInfra

DeepSeek

Eden AI

EverlyAI

Featherless AI

Fireworks

ChatFriendli

Goodfire

Google Gemini

Google Cloud Vertex AI

GPTRouter

DigitalOcean Gradient

GreenNode

Groq

ChatHuggingFace

IBM watsonx.ai

JinaChat

Kinetica

Konko

LiteLLM

Llama 2 Chat

Llama API

LlamaEdge

Llama.cpp

maritalk

MiniMax

MistralAI

MLX

ModelScope

Moonshot

Naver

Nebius

Netmind

NVIDIA AI Endpoints

ChatOCIModelDeployment

OCIGenAI

ChatOctoAI

Ollama

OpenAI

Outlines

Perplexity

Pipeshift

ChatPredictionGuard

PremAI

PromptLayer ChatOpenAI

Qwen QwQ

Qwen

Reka

RunPod Chat Model

SambaNovaCloud

SambaStudio

ChatSeekrFlow

Snowflake Cortex

solar

SparkLLM Chat

Nebula (Symbl.ai)

Tencent Hunyuan

Together

Tongyi Qwen

Upstage

vectara

vLLM Chat

Volc Engine Maas

ChatWriter

xAI

Xinference

YandexGPT

ChatYI

Yuan2.0

ZHIPU AI