TOT7 committed on
Commit 400af5c · verified · 1 Parent(s): 9b2c47d

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +258 -213
src/streamlit_app.py CHANGED
@@ -1,214 +1,259 @@
- import streamlit as st
- from dotenv import load_dotenv
- # from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
- # from langchain.vectorstores import FAISS
- # from langchain.embeddings import HuggingFaceEmbeddings # General embeddings from HuggingFace models.
- from langchain.memory import ConversationBufferMemory
- from langchain.chains import ConversationalRetrievalChain
- from htmlTemplates import css, bot_template, user_template
- # from langchain.llms import LlamaCpp  # For loading transformer models.
- # from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
- # Text splitters
- from langchain_text_splitters import CharacterTextSplitter, RecursiveCharacterTextSplitter
-
- # Vector store / embeddings / LLM
- from langchain_community.vectorstores import FAISS
- from langchain_community.embeddings import HuggingFaceEmbeddings
-
- # Loaders (imported from submodules so pebblo/pwd is not pulled in)
- from langchain_community.document_loaders.pdf import PyPDFLoader
- from langchain_community.document_loaders.text import TextLoader
- from langchain_community.document_loaders.csv_loader import CSVLoader
- from langchain_community.document_loaders.json_loader import JSONLoader
- import tempfile  # Library for creating temporary files.
- import os
- import json
- from langchain.docstore.document import Document
- from langchain_groq import ChatGroq
-
- # Function that extracts text from a PDF document.
- def get_pdf_text(pdf_docs):
-     temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
-     temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
-     with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
-         f.write(pdf_docs.getvalue())  # Write the PDF contents to the temporary file.
-     pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
-     pdf_doc = pdf_loader.load()  # Extract the text.
-     return pdf_doc  # Return the extracted text.
-
-
- def get_text_file(docs):
-     #################### Part to be implemented
-     return text_doc
-
-
- def get_csv_file(docs):
-     #################### Part to be implemented
-     return csv_doc
-
- # def get_json_file(docs):
- #     temp_dir = tempfile.TemporaryDirectory()
- #     temp_filepath = os.path.join(temp_dir.name, docs.name)
- #     with open(temp_filepath, "wb") as f:
- #         f.write(docs.getvalue())
- #     json_loader = JSONLoader(temp_filepath,
- #                              jq_schema='.scans[].relationships',
- #                              text_content=False)
- #
- #     json_doc = json_loader.load()
- #     # print('json_doc = ', json_doc)
- #     return json_doc
-
- def get_json_file(file) -> list[Document]:
-     # Streamlit UploadedFile -> str
-     raw = file.getvalue().decode("utf-8", errors="ignore")
-     data = json.loads(raw)
-
-     docs = []
-
-     # If the old jq path was '.scans[].relationships', parse with the same meaning:
-     # if present, extract only those; otherwise turn the whole payload into a document.
-     def add_doc(x):
-         docs.append(Document(page_content=json.dumps(x, ensure_ascii=False)))
-
-     if isinstance(data, dict) and "scans" in data and isinstance(data["scans"], list):
-         for s in data["scans"]:
-             rels = s.get("relationships", [])
-             if isinstance(rels, list) and rels:
-                 for r in rels:
-                     add_doc(r)
-         if not docs:  # If nothing was extracted, fall back to the whole payload as one document.
-             add_doc(data)
-     elif isinstance(data, list):
-         for item in data:
-             add_doc(item)
-     else:
-         add_doc(data)
-
-     return docs
-
- # Function that processes documents and splits them into text chunks.
- def get_text_chunks(documents):
-     text_splitter = RecursiveCharacterTextSplitter(
-         chunk_size=1000,  # Size of each chunk.
-         chunk_overlap=200,  # Overlap between chunks.
-         length_function=len  # Function used to measure text length.
-     )
-
-     documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
-     return documents  # Return the chunks.
-
-
- # Function that builds a vector store from text chunks.
- def get_vectorstore(text_chunks):
-     # Load the desired embedding model.
-     embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
-                                        model_kwargs={'device': 'cpu'})  # Configure the embedding model.
-     vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Create the FAISS vector store.
-     return vectorstore  # Return the created vector store.
-
-
- def get_conversation_chain(vectorstore):
-     # Groq LLM
-     llm = ChatGroq(
-         groq_api_key=os.environ.get("GROQ_API_KEY"),
-         model_name="llama-3.1-8b-instant",
-         temperature=0.75,  # Tune as needed.
-         max_tokens=512  # Guard against exceeding the context window (adjust if needed).
-     )
-
-     memory = ConversationBufferMemory(
-         memory_key="chat_history",
-         return_messages=True
-     )
-     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
-
-     conversation_chain = ConversationalRetrievalChain.from_llm(
-         llm=llm,
-         retriever=retriever,
-         memory=memory,
-     )
-     return conversation_chain
-
- # Function that handles user input.
- def handle_userinput(user_question):
-     print('user_question => ', user_question)
-     # Use the conversation chain to generate a response to the user's question.
-     response = st.session_state.conversation({'question': user_question})
-     # Save the chat history.
-     st.session_state.chat_history = response['chat_history']
-
-     for i, message in enumerate(st.session_state.chat_history):
-         if i % 2 == 0:
-             st.write(user_template.replace(
-                 "{{MSG}}", message.content), unsafe_allow_html=True)
-         else:
-             st.write(bot_template.replace(
-                 "{{MSG}}", message.content), unsafe_allow_html=True)
-
-
- def main():
-     load_dotenv()
-     st.set_page_config(page_title="Basic_RAG_AI_Chatbot_with_Llama",
-                        page_icon=":books:")
-     st.write(css, unsafe_allow_html=True)
-
-     if "conversation" not in st.session_state:
-         st.session_state.conversation = None
-     if "chat_history" not in st.session_state:
-         st.session_state.chat_history = None
-
-     st.header("Basic_RAG_AI_Chatbot_with_Llama3 :books:")
-     user_question = st.text_input("Ask a question about your documents:")
-     if user_question:
-         handle_userinput(user_question)
-
-     with st.sidebar:
-         st.subheader("Your documents")
-         docs = st.file_uploader(
-             "Upload your Files here and click on 'Process'", accept_multiple_files=True)
-         if st.button("Process[PDF]"):
-             with st.spinner("Processing"):
-                 # get pdf text
-                 doc_list = []
-                 for file in docs:
-                     print('file - type : ', file.type)
-                     if file.type in ['application/octet-stream', 'application/pdf']:
-                         # file is .pdf
-                         doc_list.extend(get_pdf_text(file))
-                     else:
-                         st.error("Not a PDF file.")
-                 if not doc_list:
-                     st.error("No processable documents were found.")
-                     st.stop()
-
-                 text_chunks = get_text_chunks(doc_list)
-                 vectorstore = get_vectorstore(text_chunks)
-                 st.session_state.conversation = get_conversation_chain(vectorstore)
-
-         ################## TXT and CSV buttons to implement
-         # Hint for the TXT button: if file.type == 'text/plain':
-         # Hint for the CSV button: if file.type == 'text/csv':
-
-         if st.button("Process[JSON]"):
-             with st.spinner("Processing"):
-                 # get json text
-                 doc_list = []
-                 for file in docs:
-                     print('file - type : ', file.type)
-                     if file.type == 'application/json':
-                         # file is .json
-                         doc_list.extend(get_json_file(file))
-                     else:
-                         st.error("Not a JSON file.")
-                 if not doc_list:
-                     st.error("No processable documents were found.")
-                     st.stop()
-
-                 text_chunks = get_text_chunks(doc_list)
-                 vectorstore = get_vectorstore(text_chunks)
-                 st.session_state.conversation = get_conversation_chain(vectorstore)
-
-
- if __name__ == '__main__':
      main()
 
+ import streamlit as st
+ from dotenv import load_dotenv
+ # from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
+ # from langchain.vectorstores import FAISS
+ # from langchain.embeddings import HuggingFaceEmbeddings # General embeddings from HuggingFace models.
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from htmlTemplates import css, bot_template, user_template
+ # from langchain.llms import LlamaCpp  # For loading transformer models.
+ # from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
+ # Text splitters
+ from langchain_text_splitters import CharacterTextSplitter, RecursiveCharacterTextSplitter
+
+ # Vector store / embeddings / LLM
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+
+ # Loaders (imported from submodules so pebblo/pwd is not pulled in)
+ from langchain_community.document_loaders.pdf import PyPDFLoader
+ from langchain_community.document_loaders.text import TextLoader
+ from langchain_community.document_loaders.csv_loader import CSVLoader
+ from langchain_community.document_loaders.json_loader import JSONLoader
+ import tempfile  # Library for creating temporary files.
+ import os
+ import json
+ from langchain.docstore.document import Document
+ from langchain_groq import ChatGroq
+
+ # Function that extracts text from a PDF document.
+ def get_pdf_text(pdf_docs):
+     temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
+     temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
+     with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
+         f.write(pdf_docs.getvalue())  # Write the PDF contents to the temporary file.
+     pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
+     pdf_doc = pdf_loader.load()  # Extract the text.
+     return pdf_doc  # Return the extracted text.
+
+
+ def get_text_file(docs):
+     # Save the uploaded file to a temporary directory.
+     temp_dir = tempfile.TemporaryDirectory()
+     temp_filepath = os.path.join(temp_dir.name, docs.name)
+     with open(temp_filepath, "wb") as f:
+         f.write(docs.getvalue())
+     # Load it with TextLoader.
+     text_loader = TextLoader(temp_filepath, encoding="utf-8")
+     text_doc = text_loader.load()
+     return text_doc
+
+
+ def get_csv_file(docs):
+     # Save the uploaded file to a temporary directory.
+     temp_dir = tempfile.TemporaryDirectory()
+     temp_filepath = os.path.join(temp_dir.name, docs.name)
+     with open(temp_filepath, "wb") as f:
+         f.write(docs.getvalue())
+     # Load it with CSVLoader.
+     csv_loader = CSVLoader(temp_filepath, encoding="utf-8")
+     csv_doc = csv_loader.load()
+     return csv_doc
+
+ # def get_json_file(docs):
+ #     temp_dir = tempfile.TemporaryDirectory()
+ #     temp_filepath = os.path.join(temp_dir.name, docs.name)
+ #     with open(temp_filepath, "wb") as f:
+ #         f.write(docs.getvalue())
+ #     json_loader = JSONLoader(temp_filepath,
+ #                              jq_schema='.scans[].relationships',
+ #                              text_content=False)
+ #
+ #     json_doc = json_loader.load()
+ #     # print('json_doc = ', json_doc)
+ #     return json_doc
+
+ def get_json_file(file) -> list[Document]:
+     # Streamlit UploadedFile -> str
+     raw = file.getvalue().decode("utf-8", errors="ignore")
+     data = json.loads(raw)
+
+     docs = []
+
+     # If the old jq path was '.scans[].relationships', parse with the same meaning:
+     # if present, extract only those; otherwise turn the whole payload into a document.
+     def add_doc(x):
+         docs.append(Document(page_content=json.dumps(x, ensure_ascii=False)))
+
+     if isinstance(data, dict) and "scans" in data and isinstance(data["scans"], list):
+         for s in data["scans"]:
+             rels = s.get("relationships", [])
+             if isinstance(rels, list) and rels:
+                 for r in rels:
+                     add_doc(r)
+         if not docs:  # If nothing was extracted, fall back to the whole payload as one document.
+             add_doc(data)
+     elif isinstance(data, list):
+         for item in data:
+             add_doc(item)
+     else:
+         add_doc(data)
+
+     return docs
+
+ # Function that processes documents and splits them into text chunks.
+ def get_text_chunks(documents):
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=1000,  # Size of each chunk.
+         chunk_overlap=200,  # Overlap between chunks.
+         length_function=len  # Function used to measure text length.
+     )
+
+     documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
+     return documents  # Return the chunks.
+
+
+ # Function that builds a vector store from text chunks.
+ def get_vectorstore(text_chunks):
+     # Load the desired embedding model.
+     embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
+                                        model_kwargs={'device': 'cpu'})  # Configure the embedding model.
+     vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Create the FAISS vector store.
+     return vectorstore  # Return the created vector store.
+
+
+ def get_conversation_chain(vectorstore):
+     # Groq LLM
+     llm = ChatGroq(
+         groq_api_key=os.environ.get("GROQ_API_KEY"),
+         model_name="llama-3.1-8b-instant",
+         temperature=0.75,  # Tune as needed.
+         max_tokens=512  # Guard against exceeding the context window (adjust if needed).
+     )
+
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         return_messages=True
+     )
+     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
+
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=retriever,
+         memory=memory,
+     )
+     return conversation_chain
+
+ # Function that handles user input.
+ def handle_userinput(user_question):
+     print('user_question => ', user_question)
+     # Use the conversation chain to generate a response to the user's question.
+     response = st.session_state.conversation({'question': user_question})
+     # Save the chat history.
+     st.session_state.chat_history = response['chat_history']
+
+     for i, message in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             st.write(user_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+         else:
+             st.write(bot_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+
+
+ def main():
+     load_dotenv()
+     st.set_page_config(page_title="Basic_RAG_AI_Chatbot_with_Llama",
+                        page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Basic_RAG_AI_Chatbot_with_Llama3 :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         handle_userinput(user_question)
+
+     with st.sidebar:
+         st.subheader("Your documents")
+         docs = st.file_uploader(
+             "Upload your Files here and click on 'Process'", accept_multiple_files=True)
+         if st.button("Process[PDF]"):
+             with st.spinner("Processing"):
+                 # get pdf text
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type in ['application/octet-stream', 'application/pdf']:
+                         # file is .pdf
+                         doc_list.extend(get_pdf_text(file))
+                     else:
+                         st.error("Not a PDF file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+         ################## TXT and CSV buttons
+         if st.button("Process[TXT]"):
+             with st.spinner("Processing"):
+                 doc_list = []
+                 for file in docs:
+                     if file.type == 'text/plain':
+                         doc_list.extend(get_text_file(file))
+                     else:
+                         st.error("Not a TXT file.")
+
+                 if not doc_list:
+                     st.error("No processable TXT documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+         if st.button("Process[CSV]"):
+             with st.spinner("Processing"):
+                 doc_list = []
+                 for file in docs:
+                     if file.type == 'text/csv':
+                         doc_list.extend(get_csv_file(file))
+                     else:
+                         st.error("Not a CSV file.")
+
+                 if not doc_list:
+                     st.error("No processable CSV documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+         if st.button("Process[JSON]"):
+             with st.spinner("Processing"):
+                 # get json text
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type == 'application/json':
+                         # file is .json
+                         doc_list.extend(get_json_file(file))
+                     else:
+                         st.error("Not a JSON file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+
+ if __name__ == '__main__':
      main()
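
For reference, a minimal smoke-test sketch of the pipeline the new loader buttons wire together (text file -> TextLoader -> chunks -> FAISS -> ConversationalRetrievalChain). It is not part of the commit: it assumes it is run from the src/ directory so streamlit_app is importable, that GROQ_API_KEY is set in the environment or .env, and the _FakeUpload class is a hypothetical stand-in for Streamlit's UploadedFile that only provides the .name and .getvalue() interface the helpers use.

# Smoke-test sketch (assumptions noted above); run from src/ with GROQ_API_KEY set.
from streamlit_app import (
    get_text_file,
    get_text_chunks,
    get_vectorstore,
    get_conversation_chain,
)


class _FakeUpload:
    """Hypothetical stand-in for st.file_uploader's UploadedFile."""

    def __init__(self, name: str, data: bytes):
        self.name = name
        self._data = data

    def getvalue(self) -> bytes:
        return self._data


if __name__ == "__main__":
    upload = _FakeUpload("notes.txt", b"FAISS stores the embedded text chunks for retrieval.")
    docs = get_text_file(upload)                  # TextLoader -> list of Documents
    chunks = get_text_chunks(docs)                # RecursiveCharacterTextSplitter
    vectorstore = get_vectorstore(chunks)         # HuggingFace embeddings + FAISS
    chain = get_conversation_chain(vectorstore)   # ChatGroq + ConversationalRetrievalChain
    result = chain({"question": "What does FAISS store?"})
    print(result["answer"])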