shaanyy committed
Commit 11075d0 · verified · 1 Parent(s): 1562de4

Upload 2 files

Files changed (2)
  1. app.py +89 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,89 @@
+ # ==========================================
+ # Using a Hugging Face model - sentiment analysis with Gradio
+ # ==========================================
+
+ # !pip install -q gradio transformers peft
+
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ from peft import PeftModel
+
+ # Load the model
+ print("Loading model...")
+
+ BASE_MODEL = "klue/bert-base"
+ LORA_MODEL = "JINIIII/nsmc-sentiment-lora"
+
+ tokenizer = AutoTokenizer.from_pretrained(LORA_MODEL)
+ base_model = AutoModelForSequenceClassification.from_pretrained(
+     BASE_MODEL,
+     num_labels=2
+ )
+ model = PeftModel.from_pretrained(base_model, LORA_MODEL)
+ model.eval()
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model.to(device)
+
+ print(f"Done! (Device: {device})")
+
+ # Sentiment analysis function
+ def analyze_sentiment(text):
+     if not text.strip():
+         return "Please enter some text", {}
+
+     # Tokenize the input
+     inputs = tokenizer(
+         text,
+         return_tensors="pt",
+         truncation=True,
+         max_length=128,
+         padding=True
+     ).to(device)
+
+     # Predict
+     with torch.no_grad():
+         outputs = model(**inputs)
+         probs = torch.softmax(outputs.logits, dim=-1)[0]
+
+     # Result (label 1 = positive, label 0 = negative)
+     pred = torch.argmax(probs).item()
+     label = "😊 Positive" if pred == 1 else "😞 Negative"
+     confidence = probs[pred].item()
+
+     result = f"**{label}** (confidence: {confidence*100:.1f}%)"
+
+     prob_dict = {
+         "😞 Negative": float(probs[0]),
+         "😊 Positive": float(probs[1])
+     }
+
+     return result, prob_dict
+
+ # Gradio UI
+ demo = gr.Interface(
+     fn=analyze_sentiment,
+     inputs=gr.Textbox(
+         label="Movie review",
+         placeholder="Enter a review of the movie...",
+         lines=3
+     ),
+     outputs=[
+         gr.Markdown(label="Analysis result"),
+         gr.Label(label="Sentiment probabilities", num_top_classes=2)
+     ],
+     title="Movie Review Sentiment Analysis",
+     description="An NSMC sentiment analysis model fine-tuned with LoRA.",
+     examples=[
+         ["정말 재미있는 영화였어요! 강력 추천합니다."],  # "Really fun movie! Highly recommended."
+         ["시간 낭비였습니다. 별로였어요."],  # "A waste of time. Not good."
+         ["배우들의 연기가 훌륭했습니다."],  # "The actors' performances were excellent."
+         ["스토리가 지루하고 재미없었어요."],  # "The story was boring and dull."
+     ],
+     theme="soft",
+     allow_flagging="never"
+ )
+
+ # Run the app
+ demo.launch(share=True, debug=True)
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ transformers
+ peft
+ torch
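
For reference, a minimal sketch of loading the same checkpoint with the LoRA adapter merged into the base weights, which drops the PEFT wrapper at inference time. This is an optional variation and not what app.py above does; the model identifiers are the ones defined in the uploaded script.

```python
# Sketch: fold the LoRA deltas into klue/bert-base once, then run plain transformers inference.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel

BASE_MODEL = "klue/bert-base"
LORA_MODEL = "JINIIII/nsmc-sentiment-lora"

tokenizer = AutoTokenizer.from_pretrained(LORA_MODEL)
base = AutoModelForSequenceClassification.from_pretrained(BASE_MODEL, num_labels=2)
merged = PeftModel.from_pretrained(base, LORA_MODEL).merge_and_unload()  # merged weights, no adapter indirection
merged.eval()

inputs = tokenizer("배우들의 연기가 훌륭했습니다.", return_tensors="pt", truncation=True, max_length=128)
with torch.no_grad():
    probs = torch.softmax(merged(**inputs).logits, dim=-1)[0]
print({"negative": float(probs[0]), "positive": float(probs[1])})  # label 0 = negative, 1 = positive, as in app.py
```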