Minimal chatbot
The smallest working example:

import asyncio
from langchat import LangChat
from langchat.providers import OpenAI, Pinecone, Supabase

# Pull API keys and connection settings from the environment.
LangChat.load_env()

# Wire together the three required providers: LLM, vector store, and database.
lc = LangChat(
    llm=OpenAI("gpt-4o-mini"),
    vector_db=Pinecone("my-index"),
    db=Supabase(),
)


async def main():
    """Send one message and print the bot's reply."""
    reply = await lc.chat(query="Hello!", user_id="alice")
    print(reply)


asyncio.run(main())
Interactive terminal chat
import asyncio

from langchat import LangChat, ChatResponse
from langchat.providers import OpenAI, Pinecone, Supabase

# Load credentials from the environment before constructing providers.
LangChat.load_env()

lc = LangChat(
    llm=OpenAI("gpt-4o-mini"),
    vector_db=Pinecone("my-index"),
    db=Supabase(),
)


async def main():
    """Minimal REPL: read a line, send it to the bot, print the reply."""
    user_id = "alice"
    print("LangChat — type 'exit' to quit\n")
    while True:
        query = input("You: ").strip()
        if query.lower() in ("exit", "quit", "q"):
            break
        if not query:
            # Ignore blank lines instead of sending empty queries.
            continue
        response: ChatResponse = await lc.chat(query=query, user_id=user_id)
        if not response:
            # A falsy ChatResponse signals failure; surface the error.
            print(f"Bot: Error — {response.error}\n")
            continue
        print(f"Bot: {response.text}")
        print(f" [{response.response_time:.2f}s]\n")


asyncio.run(main())
Sync (no async)
Use chat_sync() when you don’t want to deal with asyncio:
from langchat import LangChat
from langchat.providers import OpenAI, Pinecone, Supabase

LangChat.load_env()  # read API keys from the environment

# Identical setup to the async examples — only the call style differs.
lc = LangChat(
    llm=OpenAI("gpt-4o-mini"),
    vector_db=Pinecone("my-index"),
    db=Supabase(),
)

# chat_sync() blocks until the reply arrives — no event loop required.
response = lc.chat_sync(query="What is LangChat?", user_id="alice")
print(response)
Multiple users
Each user_id gets separate conversation history:
async def handle_user(lc: LangChat, user_id: str, query: str):
    """Send *query* on behalf of *user_id* and return the reply text."""
    result = await lc.chat(query=query, user_id=user_id)
    return result.text
async def main():
    """Demonstrate that per-user conversation histories are independent."""
    # Two independent conversations — history is never shared between users.
    alice_reply = await handle_user(lc, "alice", "What's our refund policy?")
    bob_reply = await handle_user(lc, "bob", "How do I reset my password?")
    print(alice_reply)
    print(bob_reply)
Multiple platforms
Use platform to separate conversations from different surfaces:
# NOTE(review): doc fragment — these `await` calls must run inside an async
# function; they are not valid at module top level.

# Web app
web_response = await lc.chat(
    query="Show me my order history",
    user_id="alice",
    platform="web",
)

# Mobile app — same user, separate conversation
# (same user_id + different platform keeps histories apart)
mobile_response = await lc.chat(
    query="Show me my order history",
    user_id="alice",
    platform="mobile",
)
Checking the response
ChatResponse supports the bool and string protocols:
# NOTE(review): doc fragment — the `await` below assumes an enclosing
# async function.
response = await lc.chat(query="Hello", user_id="alice")

# Check success — ChatResponse is truthy on success, falsy on error.
if response:
    print("Success!")

# Print the text — str(response) yields the reply text.
print(response)  # same as print(response.text)
print(str(response))  # explicit cast

# Access all fields
print(response.text)
print(response.status)  # "success" or "error"
print(response.user_id)
print(response.platform)
print(response.response_time)  # seconds as float
print(response.timestamp)  # ISO 8601 UTC string
print(response.error)  # None on success
Different LLM providers
from langchat.providers import Anthropic, Gemini, Mistral, Cohere, Ollama

# Only the llm= argument changes between providers; the vector_db= and db=
# setup is unchanged from earlier examples (elided here as ...).

# Anthropic Claude
lc = LangChat(llm=Anthropic("claude-3-5-sonnet-20241022"), vector_db=..., db=...)

# Google Gemini
lc = LangChat(llm=Gemini("gemini-1.5-flash"), vector_db=..., db=...)

# Mistral
lc = LangChat(llm=Mistral("mistral-large-latest"), vector_db=..., db=...)

# Cohere
lc = LangChat(llm=Cohere("command-r-plus"), vector_db=..., db=...)

# Ollama (local, no API key)
lc = LangChat(llm=Ollama("llama3.2"), vector_db=..., db=...)
