| Command | Purpose |
| --- | --- |
| pip install langchain | Install LangChain |
| pip install langchain-openai | OpenAI integration |
| pip install langchain-anthropic | Anthropic integration |
| pip install langchain-community | Community integrations |
| pip install langchain-chroma | Chroma vector store |
# --- Chat model setup: OpenAI and Anthropic ---
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4",
    temperature=0.7,
    api_key="your-api-key",  # prefer the OPENAI_API_KEY env var in real code
)
response = llm.invoke("Hello, how are you?")

from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(
    model="claude-3-sonnet-20240229",
    api_key="your-api-key",
)

from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
# Message objects: a system prompt followed by one user turn.
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="What is Python?"),
]
response = llm.invoke(messages)

from langchain_core.prompts import PromptTemplate
# String prompt template with {placeholders}; format() fills them in.
template = PromptTemplate.from_template(
    "Tell me a {adjective} joke about {topic}."
)
prompt = template.format(adjective="funny", topic="programming")

from langchain_core.prompts import ChatPromptTemplate
# Chat prompt template: one (role, content) tuple per message.
template = ChatPromptTemplate.from_messages([
    ("system", "You are a {role}."),
    ("human", "{input}"),
])
messages = template.format_messages(
    role="helpful assistant",
    input="What is AI?",
)

from langchain_core.prompts import FewShotPromptTemplate
# Few-shot prompting: each example dict is rendered with example_prompt,
# then the rendered examples are sandwiched between prefix and suffix.
examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]
example_prompt = PromptTemplate.from_template(
    "Input: {input}\nOutput: {output}"
)
few_shot = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    prefix="Give the antonym:",
    suffix="Input: {word}\nOutput:",
    input_variables=["word"],
)

from langchain_core.output_parsers import StrOutputParser
# LCEL pipe: prompt -> model -> plain-string output parser.
# NOTE(review): the invoke dict must supply every variable the prompt
# declares — confirm `template` in the surrounding context expects {topic}.
chain = template | llm | StrOutputParser()
result = chain.invoke({"topic": "Python"})

from langchain_core.runnables import RunnablePassthrough
# Minimal RAG chain: the dict maps each prompt variable to a runnable;
# RunnablePassthrough() forwards the raw input string as "question".
# Assumes `retriever`, `prompt`, and `llm` are defined by other snippets.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
result = rag_chain.invoke("What is RAG?")

from langchain_core.runnables import RunnableParallel
# Run independent chains concurrently over the same input; the result is
# a dict keyed by the keyword names ("summary", "translation").
chain = RunnableParallel(
    summary=summary_chain,
    translation=translation_chain,
)
result = chain.invoke({"text": "Hello world"})

from langchain_core.runnables import RunnableBranch
# Route to the first branch whose predicate returns True;
# the final positional argument is the fallback chain.
branch = RunnableBranch(
    (lambda x: "math" in x["topic"].lower(), math_chain),
    (lambda x: "science" in x["topic"].lower(), science_chain),
    general_chain,  # default
)

from langchain_community.document_loaders import TextLoader
# Document loaders: plain text, PDF, web page, and directory glob.
# Each load() returns a list of Document objects.
loader = TextLoader("document.txt")
docs = loader.load()

from langchain_community.document_loaders import PyPDFLoader

loader = PyPDFLoader("document.pdf")
docs = loader.load()

from langchain_community.document_loaders import WebBaseLoader

loader = WebBaseLoader("https://example.com")
docs = loader.load()

from langchain_community.document_loaders import DirectoryLoader

loader = DirectoryLoader("./docs", glob="**/*.txt")
docs = loader.load()

from langchain.text_splitter import RecursiveCharacterTextSplitter
# Split documents into overlapping chunks — first by characters,
# then by tokens; the overlap preserves context across boundaries.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
)
chunks = splitter.split_documents(docs)

from langchain.text_splitter import TokenTextSplitter

splitter = TokenTextSplitter(
    chunk_size=500,
    chunk_overlap=50,
)
chunks = splitter.split_documents(docs)

from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

# Embed the chunks and persist them in a local Chroma collection.
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(
    documents=chunks,
    embedding=embeddings,
    persist_directory="./chroma_db",
)
# Retriever
retriever = vectorstore.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 4},
)

from langchain_community.vectorstores import FAISS
# In-memory FAISS index built from the same chunks/embeddings.
vectorstore = FAISS.from_documents(chunks, embeddings)
# Save and load
vectorstore.save_local("faiss_index")
# NOTE(review): recent langchain-community versions also require
# allow_dangerous_deserialization=True here — verify against the
# installed version.
vectorstore = FAISS.load_local("faiss_index", embeddings)

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough


def format_docs(docs):
    """Join retrieved documents into a single context string."""
    return "\n\n".join(doc.page_content for doc in docs)


template = """Answer based on context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# `format_docs` must be defined before it is referenced below —
# the original placed the def after the chain, which raises NameError
# at chain-construction time.
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
result = rag_chain.invoke("What is machine learning?")

from langchain_core.tools import tool
# Tool definitions: the decorator turns each function into an agent tool,
# using the docstring as the tool description shown to the model.
@tool
def search(query: str) -> str:
    """Search the web for information."""
    return f"Results for: {query}"


@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression."""
    # SECURITY: eval() executes arbitrary Python, and the agent passes
    # model-generated (effectively untrusted) input here. Use
    # ast.literal_eval or a dedicated math parser in real code.
    return str(eval(expression))


from langchain.agents import create_tool_calling_agent, AgentExecutor
# Tool-calling agent: the "placeholder" message slot receives the agent
# scratchpad (intermediate tool calls and their results).
tools = [search, calculator]
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("human", "{input}"),
    ("placeholder", "{agent_scratchpad}"),
])
agent = create_tool_calling_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools)
result = executor.invoke({"input": "What is 25 * 4?"})

from langchain.agents import create_react_agent
from langchain import hub

# ReAct agent using the canonical prompt pulled from LangChain Hub.
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

from langchain.memory import ConversationBufferMemory
# Buffer memory: stores the full conversation verbatim;
# return_messages=True yields message objects instead of a single string.
memory = ConversationBufferMemory(return_messages=True)
memory.save_context(
    {"input": "Hi, I am John"},
    {"output": "Hello John!"},
)
history = memory.load_memory_variables({})

from langchain.memory import ConversationBufferWindowMemory
# Window memory: retains only the most recent k exchanges.
memory = ConversationBufferWindowMemory(
    k=5,  # Keep last 5 exchanges
    return_messages=True,
)

from langchain.memory import ConversationSummaryMemory
# Summary memory: uses the LLM itself to compress older turns
# into a running summary.
memory = ConversationSummaryMemory(
    llm=llm,
    return_messages=True,
)

from langchain.chains import ConversationChain
# Legacy ConversationChain wiring an LLM to a memory object;
# verbose=True prints the assembled prompt on each call.
chain = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True,
)
response = chain.invoke({"input": "Hi!"})