# app.py
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.document_loaders import TextLoader
from langchain_community.llms import HuggingFacePipeline
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import RetrievalQA
from transformers import pipeline
import gradio as gr

# Sample FAQ content
faq_text = """
Q: What is your return policy?
A: You can return any item within 30 days of purchase.

Q: How do I track my order?
A: Use the tracking link sent to your email after shipping.

Q: Do you ship internationally?
A: Yes, we ship to over 50 countries worldwide.

Q: What payment methods do you accept?
A: We accept credit cards, PayPal, and Apple Pay.
"""

# Save the FAQ to disk, then load it back as LangChain documents
with open("faq.txt", "w") as f:
    f.write(faq_text)

loader = TextLoader("faq.txt")
docs = loader.load()

# Chunking: split the FAQ into ~200-character pieces
splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=0)
chunks = splitter.split_documents(docs)

# Embedding + FAISS: embed each chunk and build a similarity-search index
embedding = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = FAISS.from_documents(chunks, embedding)

# RAG pipeline: a local Flan-T5 model wrapped as a LangChain LLM,
# fed retrieved chunks by the FAISS retriever via a RetrievalQA chain
hf_pipe = pipeline("text2text-generation", model="google/flan-t5-base", max_length=256)
llm = HuggingFacePipeline(pipeline=hf_pipe)
retriever = db.as_retriever()
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever)

# Gradio UI
def chatbot(query):
    # RetrievalQA returns a dict; "result" holds the generated answer
    return qa_chain.invoke({"query": query})["result"]

demo = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="FAQ Chatbot (RAG + FAISS)")
demo.launch()