import gradio as gr
from transformers import pipeline

# Initialize the pipeline
pipe = pipeline("text-generation", model="mlabonne/Qwen3-8B-abliterated")

def chat(text):
    try:
        # Limit input text to prevent excessive processing
        if len(text) > 1000:
            text = text[:1000]
        
        # Generate response with limited tokens
        res = pipe(text, max_new_tokens=50)
        generated_text = res[0]['generated_text']
        
        # Limit response length to control Content-Length
        max_response_length = 2000
        if len(generated_text) > max_response_length:
            generated_text = generated_text[:max_response_length] + "..."
        
        return generated_text
    
    except Exception:
        # Return a user-friendly error message instead of failing outright
        return "Sorry, an error occurred while processing your request. Please try again with simpler text."

demo = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="Qwen3-8B Abliterated Chatbot"
)

demo.launch()