# LangChain Integration
Use AI Foundation Services with LangChain for RAG, chains, and agent workflows. Since AIFS is OpenAI-compatible, you can use the langchain-openai package directly.
```bash
pip install langchain langchain-openai
```
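The snippets on this page read the AIFS endpoint and API key from the `OPENAI_BASE_URL` and `OPENAI_API_KEY` environment variables. A quick sanity check before wiring up LangChain (a minimal sketch; the variable names are taken from the code below):

```python
import os

# Both variables are used by the LLM and embeddings snippets below.
for var in ("OPENAI_API_KEY", "OPENAI_BASE_URL"):
    if not os.getenv(var):
        raise RuntimeError(f"{var} is not set")
```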
## Initialize LLM

```python
import os
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    openai_api_base=os.getenv("OPENAI_BASE_URL"),
    model_name="Llama-3.3-70B-Instruct",
    streaming=True,
)

# Test
for chunk in llm.stream("Write me a short poem about cloud computing."):
    if chunk.content:
        print(chunk.content, end="", flush=True)
```
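The `llm` object is a standard LangChain chat model, so it composes with prompts into chains. A minimal sketch (the prompt template and topic are illustrative, not part of the AIFS API):

```python
from langchain_core.prompts import ChatPromptTemplate

# Pipe a prompt template into the AIFS-backed model to form a runnable chain.
prompt = ChatPromptTemplate.from_template("Explain {topic} in two sentences.")
chain = prompt | llm

# invoke() returns an AIMessage; the generated text is in .content.
response = chain.invoke({"topic": "retrieval-augmented generation"})
print(response.content)
```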
## Initialize Embeddings

```python
from typing import List
import os

from langchain_openai import OpenAIEmbeddings

class AIFSEmbeddings(OpenAIEmbeddings):
    """Send texts directly to the AIFS embeddings endpoint instead of using
    OpenAIEmbeddings' default OpenAI-specific token batching."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        embeddings = self.client.create(input=texts, model=self.model)
        return [embed.embedding for embed in embeddings.data]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        embeddings = await self.async_client.create(input=texts, model=self.model)
        return [embed.embedding for embed in embeddings.data]

    def embed_query(self, text: str) -> List[float]:
        return self.embed_documents([text])[0]

    async def aembed_query(self, text: str) -> List[float]:
        embed = await self.aembed_documents([text])
        return embed[0]

embed_model = AIFSEmbeddings(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    openai_api_base=os.getenv("OPENAI_BASE_URL"),
    model="text-embedding-bge-m3",
)

# Test
embeddings = embed_model.embed_documents(["Hello!", "World!"])
print(f"Dimensions: {len(embeddings[0])}")
```
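With the embedding model in place, a vector store and retriever complete a basic RAG setup. A minimal sketch assuming a recent `langchain-core` that ships `InMemoryVectorStore`; the sample documents and question are placeholders, and `llm` and `embed_model` come from the snippets above:

```python
from langchain_core.vectorstores import InMemoryVectorStore

# Index a few sample documents with the AIFS embedding model.
vector_store = InMemoryVectorStore.from_texts(
    [
        "AIFS exposes an OpenAI-compatible API for chat and embedding models.",
        "LangChain chains combine prompts, models, and retrievers.",
    ],
    embedding=embed_model,
)
retriever = vector_store.as_retriever(search_kwargs={"k": 2})

# Answer a question from the retrieved context.
question = "What kind of API does AIFS expose?"
context = "\n".join(doc.page_content for doc in retriever.invoke(question))
answer = llm.invoke(f"Answer using only this context:\n{context}\n\nQuestion: {question}")
print(answer.content)
```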
## Next Steps

- Embeddings Guide — Learn more about embedding models
- LlamaIndex Integration — Alternative RAG framework