I am stuck at this error: "Error loading the LangChain pipeline ('fields_set')"

21 hours ago 5
ARTICLE AD BOX

I am stuck at this error: "Error loading the LangChain pipeline ('fields_set')".

How can I resolve this error?

Below I have included my code:

import os

from langchain_community.vectorstores import FAISS

from langchain_community.embeddings import HuggingFaceEmbeddings

from langchain_community.llms import Ollama

from langchain.chains.combine_documents import create_stuff_documents_chain

from langchain.chains.retrieval import create_retrieval_chain

from langchain.prompts import PromptTemplate

# Location of the pre-built FAISS index (created by vector_store.py).
DB_FAISS_PATH = 'vectorstore/db_faiss'

# Custom prompt template for the RAG system to enforce grounding.
#
# BUG FIX: create_retrieval_chain invokes the downstream documents chain with
# the user's query under the key "input" (not "question") and the retrieved
# documents under "context". The template therefore must use {input}; with
# {question} the chain fails when it is built/invoked because the prompt's
# declared variables don't match what the chain supplies.
custom_prompt_template = """Use the following pieces of information from the EU AI Act to answer the user's question.

If you don't know the answer based on the provided text, just say that you don't know, don't try to make up an answer.

Context: {context}

Question: {input}

Helpful Answer:"""


def set_custom_prompt():
    """Return the PromptTemplate used by the QA retrieval chain.

    Returns:
        PromptTemplate: template expecting the two variables that
        create_retrieval_chain supplies — "context" (the stuffed
        documents) and "input" (the user's question).
    """
    return PromptTemplate(template=custom_prompt_template,
                          input_variables=['context', 'input'])

def load_llm():
    """Create the local LLM client served by Ollama.

    Requires `ollama serve` to be running on the host machine with the
    "phi3" model pulled. A low temperature (0.1) keeps answers grounded
    and largely deterministic for this RAG use case.
    """
    model_name = "phi3"
    return Ollama(model=model_name, temperature=0.1)

def build_qa_chain(llm, prompt, retriever):
    """Wire the LLM, prompt, and retriever into a complete RAG chain.

    The "stuff" documents chain concatenates retrieved chunks into the
    prompt's {context} slot; create_retrieval_chain runs the retriever
    first and hands its documents to that chain.

    Args:
        llm: the language model to answer with.
        prompt: PromptTemplate consumed by the documents chain.
        retriever: vector-store retriever producing context documents.

    Returns:
        The assembled retrieval chain.
    """
    combine_docs_chain = create_stuff_documents_chain(llm, prompt)
    return create_retrieval_chain(retriever, combine_docs_chain)

def setup_rag_pipeline():
    """Assemble embeddings, the FAISS store, and the LLM into a RAG chain.

    Returns:
        The ready-to-invoke retrieval QA chain.

    Raises:
        FileNotFoundError: if the FAISS index has not been built yet.
    """
    print("Setting up embedding models...")
    embedder = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={'device': 'cpu'},
    )

    if not os.path.exists(DB_FAISS_PATH):
        raise FileNotFoundError(f"Missing FAISS DB at {DB_FAISS_PATH}. Please run vector_store.py first to build the knowledge base.")

    print("Loading FAISS database...")
    vector_db = FAISS.load_local(DB_FAISS_PATH, embedder,
                                 allow_dangerous_deserialization=True)
    # K=3 retrieves the top 3 most relevant chunks based on similarity
    doc_retriever = vector_db.as_retriever(search_kwargs={'k': 3})

    print("Initializing Ollama LLM...")
    local_llm = load_llm()
    qa_prompt = set_custom_prompt()
    return build_qa_chain(local_llm, qa_prompt, doc_retriever)

Streamlit app ↓ setup_rag_pipeline() ↓ LangChain chain creation ❌ ERROR HERE (fields_set) ↓ FAISS retriever ↓ LLM (Ollama → phi3 / llama3)

Read Entire Article