# app.py
import gradio as gr
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA
from langchain.text_splitter import CharacterTextSplitter
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# 🔹 Sample logs (inline dataset)
sample_logs = """
[ERROR] Disk usage exceeded 90% on node-3
[WARNING] High memory usage detected on node-2
[INFO] Backup completed successfully on node-1
[ERROR] Service 'auth-api' failed to start due to missing env vars
[ERROR] Timeout while connecting to database from node-4
"""
# 🔹 Split logs into chunks (split on newlines so each log line becomes its own chunk)
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=200, chunk_overlap=20)
docs = text_splitter.create_documents([sample_logs])
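# Optional sanity check (illustrative, safe to delete): inspect how the
# splitter chunked the sample before building the index.
# print(f"Split logs into {len(docs)} chunks")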
# 🔹 Embed and index with FAISS
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
db = FAISS.from_documents(docs, embeddings)
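# Optional sanity check (illustrative): confirm the index surfaces relevant
# log lines before wiring in the LLM; the query string is just an example.
# for doc in db.similarity_search("disk usage error", k=2):
#     print(doc.page_content)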
# ✅ Use DeepSeek Coder (fast, public, code-tuned)
model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)
llm = HuggingFacePipeline(pipeline=llm_pipeline)
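# Assumption: the pipeline's generation defaults are fine for a demo. For
# longer or more varied answers you could pass e.g. do_sample=True and
# temperature=0.7 to pipeline() above; on a GPU box, from_pretrained() also
# accepts device_map="auto" (which requires the `accelerate` package).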
# 🔹 Retrieval QA chain
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=db.as_retriever())
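# Optional smoke test of the full RAG chain before launching the UI
# (the query below is just an illustration):
# print(qa_chain.run("Why did auth-api fail to start?"))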
# 🔹 Gradio chatbot UI
def chat(query, history):
    # ChatInterface calls fn(message, history); history is unused here.
    return qa_chain.run(query)
gr.ChatInterface(chat, title="Incident RCA Bot 🚨", description="Ask about logs, errors, and root causes").launch()