Combine RAG and multiple prompts in a Chat

Hey there,

I would like to combine the lessons learned in the course where the context of the documents get shared inside the prompt. And then have a multi prompt chain so that based on the customer question/input the chain decides which prompt it takes. But now, I can not find a chain where the RAG information is added. It always says that it doesn’t have the information.

I’m using the EmbeddingRouterChain like it is explained in the langchain documentation.

from utils import loaders, splitters, embeddings, vectorstores, chat_models
from utils.vectorstores import create_vector_db_from_documents
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import ChatPromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.chains.router import MultiPromptChain
from langchain.chains import ConversationChain
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains.router.embedding_router import EmbeddingRouterChain

# Load documents
docs = loaders.load_documents_from_files([

# Split documents
splits = splitters.split_documents(docs)

# Initialize embeddings
embedding = embeddings.initialize_openai_embeddings()

# Create vector DB
vectordb = vectorstores.create_vector_db_from_documents(splits, embedding)

# Initialize chat model
llm = chat_models.initialize_chat_model()

# Prompt Templates
#system_prompt_template = """"""

physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise and easy to understand manner. \
When you don't know the answer to a question you admit that you don't know.

Always answer with: `Here is your physics answer`

Here is a question:

math_template = """You are a very good mathematician. You are great at answering math questions. \
You are so good because you are able to break down hard problems into their component parts, \
answer the component parts, and then put them together to answer the broader question.

Always answer with: `Here is your math answer`

Here is a question:

template = """

User Input: {input}


prompt_infos = [
        "name": "general",
        "description": "General template if no specified template applies to the customer request",
        "prompt_template": template,
        "name": "physics",
        "description": "Good for answering questions about physics",
        "prompt_template": physics_template,
        "name": "math",
        "description": "Good for answering math questions",
        "prompt_template": math_template,

destination_chains = {}
for p_info in prompt_infos:
    name = p_info["name"]
    prompt_template = p_info["prompt_template"]
    #chat_prompt = ChatPromptTemplate(input_variables=["input", "context"], template=prompt_template)
    chat_prompt = PromptTemplate(input_variables=["input"], template=prompt_template)
    chain = LLMChain(llm=llm, prompt=chat_prompt)
    destination_chains[name] = chain
default_chain = ConversationChain(llm=llm, output_key="answer")

names_and_descriptions = [
    ("general", ["for general questions where no other chain applies"]),
    ("physics", ["for questions about physics"]),
    ("math", ["for questions about math"]),

# Create Memory for Conversational Retrieval
memory = ConversationBufferMemory(
    memory_key="chat_history",  # equals to a placeholder in the prompt
    return_messages=True  # true adds all messages to the prompt

# Setup Conversational Retrieval Chain
retriever = vectordb.as_retriever(search_type="mmr")
qa = ConversationalRetrievalChain.from_llm(

router_chain = EmbeddingRouterChain.from_names_and_descriptions(
    names_and_descriptions, vectordb, embedding, routing_keys=["context"], verbose=True

chain = MultiPromptChain(

def get_answer_from_chain(question):
    #result = qa({"question": question})
    #return str(result['answer'])
    result ={"input": question})
    return result

What am I doing wrong?

Thank you for your help