Initial commit with peft removed anyway.
app.py ADDED
@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
"""Fujisaki_CPU.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1Damnr0Ha4zZAlKFvne9cu76uuElLNYus

李萌萌's digital alter ego (电子骨灰盒)
----

This is a digital persona of 李萌萌 trained with the ChatGLM model. Type something into the question box, or leave it empty, to see what 李萌萌 would actually say.
A T4-class GPU is already more than enough for this task.

### Install dependencies
"""

from modeling_chatglm import ChatGLMForConditionalGeneration
import torch
import sys

from transformers import AutoTokenizer, GenerationConfig

model = ChatGLMForConditionalGeneration.from_pretrained("ljsabc/Fujisaki-int4").float()
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)

# We have to use full precision, as some tokens are >65535
model.eval()

torch.set_default_tensor_type(torch.FloatTensor)

# Single-turn generation helper (not wired into the Gradio UI below).
def evaluate(context, temperature, top_p, top_k):
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        #repetition_penalty=1.1,
        num_beams=1,
        do_sample=True,
    )
    with torch.no_grad():
        input_text = f"Context: {context}Answer: "
        ids = tokenizer.encode(input_text)
        input_ids = torch.LongTensor([ids]).to('cpu')
        out = model.generate(
            input_ids=input_ids,
            max_length=160,
            generation_config=generation_config
        )
        out_text = tokenizer.decode(out[0]).split("Answer: ")[1]
        return out_text

# Streaming chat used by the Gradio UI: keeps a short rolling history and yields partial responses.
def evaluate_stream(msg, history, temperature, top_p):
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        #repetition_penalty=1.1,
        num_beams=1,
        do_sample=True,
    )

    history.append([msg, None])

    context = ""
    if len(history) > 4:
        history.pop(0)

    for j in range(len(history)):
        history[j][0] = history[j][0].replace("<br>", "")

    # concatenate past turns into the context
    for h in history[:-1]:
        context += h[0] + "||" + h[1] + "||"

    context += history[-1][0]
    context = context.replace(r'<br>', '')

    # TODO: avoid overly long contexts.
    CUTOFF = 224
    while len(tokenizer.encode(context)) > CUTOFF:
        # trim the front of the context to leave room for the answer
        context = context[15:]

    h = []
    print("History:", history)
    print("Context:", context)
    for response, h in model.stream_chat(tokenizer, context, h, max_length=CUTOFF, top_p=top_p, temperature=temperature):
        history[-1][1] = response
        yield history, ""

    #return response

import gradio as gr

title = """<h1 align="center">李萌萌(Alter Ego)</h1>
<h3 align="center">这是一个通过ChatGLM模型训练的李萌萌的数字分身,你可以与她聊天,或者直接在文本框按下Enter,来观察李萌萌到底会说些什么。</h3>"""

footer = """<p align='center'>项目在<a href='https://github.com/ljsabc/Fujisaki' target='_blank'>GitHub</a>上托管,基于清华的<a href='https://huggingface.co/THUDM/chatglm-6b' target='_blank'>THUDM/chatglm-6b</a>项目。</p>
<p align='center'><em>"I'm... a boy." --Chihiro Fujisaki</em></p>"""

with gr.Blocks() as demo:
    gr.HTML(title)
    state = gr.State()
    with gr.Row():
        with gr.Column(scale=2):
            temp = gr.components.Slider(minimum=0, maximum=1.1, value=0.8, label="Temperature",
                                        info="温度参数,越高的温度生成的内容越丰富,但是有可能出现语法问题。小的温度也能帮助生成更相关的回答。")
            top_p = gr.components.Slider(minimum=0.5, maximum=1.0, value=0.975, label="Top-p",
                                         info="top-p参数,只输出前p>top-p的文字,越大生成的内容越丰富,但也可能出现语法问题。数字越小似乎上下文的衔接性越好。")
            #code = gr.Textbox(label="temp_output", info="解码器输出")
            #top_k = gr.components.Slider(minimum=1, maximum=200, step=1, value=25, label="Top k",
            #    info="top-k参数,下一个输出的文字会从top-k个文字中进行选择,越大生成的内容越丰富,但也可能出现语法问题。数字越小似乎上下文的衔接性越好。")

        with gr.Column(scale=3):
            chatbot = gr.Chatbot(label="聊天框", info="")
            msg = gr.Textbox(label="输入框", placeholder="最近过得怎么样?",
                             info="输入你的内容,按[Enter]发送。也可以什么都不填写生成随机数据。对话一般不能太长,否则就复读机了,建议清除数据。")
            clear = gr.Button("清除聊天")

    msg.submit(evaluate_stream, [msg, chatbot, temp, top_p], [chatbot, msg])
    clear.click(lambda: None, None, chatbot, queue=False)
    gr.HTML(footer)

demo.queue()
demo.launch(debug=False)
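
For reference, here is a minimal standalone sketch of the context-assembly step in evaluate_stream above: past turns are joined with "||" separators, only the most recent turns are kept, and the front of the string is trimmed until it fits the CUTOFF budget. The approx_token_len helper is a hypothetical stand-in for len(tokenizer.encode(...)) so the sketch runs without downloading the model or tokenizer.

# Standalone sketch of the context assembly in evaluate_stream (illustration only).
def approx_token_len(text):
    # Hypothetical stand-in for len(tokenizer.encode(text)): assume ~1 token per character.
    return len(text)

def build_context(history, msg, cutoff=224):
    turns = [list(t) for t in history] + [[msg, None]]
    if len(turns) > 4:                     # keep at most the 4 most recent turns
        turns.pop(0)
    for t in turns:
        t[0] = t[0].replace("<br>", "")    # strip HTML line breaks from user messages
    context = ""
    for user, bot in turns[:-1]:           # past turns become "question||answer||"
        context += user + "||" + bot + "||"
    context += turns[-1][0]                # current question, answer still pending
    while approx_token_len(context) > cutoff:
        context = context[15:]             # trim the front to leave room for the answer
    return context

print(build_context([["你好", "你好呀。"]], "最近过得怎么样?"))
# -> 你好||你好呀。||最近过得怎么样?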