# Extracted from a Hugging Face Spaces file viewer (commit 11075d0, 2,398 bytes);
# page chrome (status/line-number gutter) removed.
# ==========================================
# Sentiment-analysis Gradio app using a Hugging Face model:
# KLUE BERT base with NSMC LoRA adapters.
# NOTE(review): Korean strings below were restored from a mis-encoded
# extraction (UTF-8 read as ISO-8859-7) — verify they match the deployed app.
# ==========================================
# !pip install -q gradio transformers peft
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel

# --- Model loading (runs once at startup) ---
print("모델 로드 중...")  # "Loading model..."

BASE_MODEL = "klue/bert-base"               # base encoder checkpoint
LORA_MODEL = "JINIIII/nsmc-sentiment-lora"  # LoRA adapter (+ tokenizer) repo

# Tokenizer is loaded from the adapter repo; the classifier head is sized
# for binary sentiment (0 = negative, 1 = positive).
tokenizer = AutoTokenizer.from_pretrained(LORA_MODEL)
base_model = AutoModelForSequenceClassification.from_pretrained(
    BASE_MODEL,
    num_labels=2
)
model = PeftModel.from_pretrained(base_model, LORA_MODEL)
model.eval()  # inference mode (disables dropout)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
print(f"완료! (Device: {device})")  # "Done!"
# --- Sentiment analysis ---
def analyze_sentiment(text):
    """Classify a Korean movie review as positive or negative.

    Args:
        text: Review text entered by the user (str).

    Returns:
        A ``(markdown_summary, label_probabilities)`` tuple feeding the
        ``gr.Markdown`` and ``gr.Label`` outputs. On empty/whitespace-only
        input, returns a "please enter text" prompt and an empty dict.
    """
    # Guard clause: nothing to classify.
    if not text.strip():
        return "텍스트를 입력해주세요", {}

    # Tokenize; truncate long reviews to 128 tokens.
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=128,
        padding=True
    ).to(device)

    # Predict without tracking gradients (inference only).
    with torch.no_grad():
        outputs = model(**inputs)
        probs = torch.softmax(outputs.logits, dim=-1)[0]

    # Index 1 = positive ("긍정"), index 0 = negative ("부정").
    # NOTE(review): the original emoji were lost in the extraction
    # (only the first UTF-8 byte survived); 😊/😞 are replacements — confirm.
    pred = torch.argmax(probs).item()
    label = "😊 긍정" if pred == 1 else "😞 부정"
    confidence = probs[pred].item()

    result = f"**{label}** (확신도: {confidence*100:.1f}%)"  # "confidence: …%"
    prob_dict = {
        "😞 부정": float(probs[0]),  # negative
        "😊 긍정": float(probs[1])   # positive
    }
    return result, prob_dict
# --- Gradio UI ---
# Korean UI strings restored from the mis-encoded extraction
# (English glosses in the trailing comments).
demo = gr.Interface(
    fn=analyze_sentiment,
    inputs=gr.Textbox(
        label="영화 리뷰",                             # "Movie review"
        placeholder="영화에 대한 리뷰를 입력하세요...",  # "Enter a review about a movie..."
        lines=3
    ),
    outputs=[
        gr.Markdown(label="분석 결과"),                 # "Analysis result"
        gr.Label(label="감정 확률", num_top_classes=2)  # "Sentiment probabilities"
    ],
    title="영화 리뷰 감정 분석",  # "Movie review sentiment analysis"
    description="LoRA로 파인튜닝된 NSMC 감정 분석 모델입니다.",  # "NSMC sentiment model fine-tuned with LoRA."
    examples=[
        ["정말 재미있는 영화였어요! 강력 추천합니다."],  # "Really fun movie! Highly recommend."
        ["시간 낭비였습니다. 별로였어요."],              # "Waste of time. Not good."
        ["배우들의 연기가 훌륭했습니다."],               # "The acting was excellent."
        ["스토리가 지루하고 재미없었어요."],             # "The story was boring and dull."
    ],
    theme="soft",
    allow_flagging="never"
)

# Launch the app (share=True creates a public link; debug=True surfaces errors).
# Removed stray " |" page-chrome residue that followed this call.
demo.launch(share=True, debug=True)