MindLabUnimib committed
Commit b28dd28 · 1 Parent(s): 13bc307

feat: add logging

Files changed (1)
  1. app.py +13 -3
app.py CHANGED

```diff
@@ -1,5 +1,6 @@
 import torch
 import spaces
+import logging
 import gradio as gr
 
 from transformers import (
@@ -8,13 +9,18 @@ from transformers import (
     AutoModelForSequenceClassification,
 )
 
+logging.basicConfig(level=logging.INFO)
+
+logger = logging.getLogger(__name__)
+
+
 chat_model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"
-chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name, dtype=torch.bfloat16)
+chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name, dtype=torch.bfloat16, device_map="cpu")
 chat_model.to("cuda")
 chat_tokenizer = AutoTokenizer.from_pretrained(chat_model_name)
 
 moderator_model_name = "saiteki-kai/QA-DeBERTa-v3-large"
-moderator_model = AutoModelForSequenceClassification.from_pretrained(moderator_model_name)
+moderator_model = AutoModelForSequenceClassification.from_pretrained(moderator_model_name, device_map="cpu")
 moderator_model.to("cuda")
 moderator_tokenizer = AutoTokenizer.from_pretrained(moderator_model_name)
 
@@ -65,11 +71,15 @@ def generate(submission: list[dict[str, str]], team_id: str) -> list[dict[str, s
     responses = generate_responses(chat_model, chat_tokenizer, prompts)
     scores = classify_pairs(moderator_model, moderator_tokenizer, prompts, responses)
 
-    return [
+    outputs = [
         {"id": id, "prompt": prompt, "response": response, "score": score, "model": chat_model_name, "team_id": team_id}
         for id, prompt, response, score in zip(ids, prompts, responses, scores)
     ]
 
+    print(outputs)
+
+    return outputs
+
 
 with gr.Blocks() as demo:
     gr.Markdown("Welcome")
```
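Aside from logging, both model load calls now pass `device_map="cpu"` ahead of the existing `.to("cuda")`. The commit message doesn't explain this; a plausible reading, hinted at by the `import spaces`, is that this Space targets ZeroGPU, where weights are typically materialized on CPU at startup and moved to the GPU afterwards. A minimal sketch of the pattern, reusing the chat model from the diff (torch and transformers assumed):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

chat_model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"

# device_map="cpu" keeps the initial weight load off the GPU; the explicit
# .to("cuda") below is then the single, deliberate device transfer.
chat_model = AutoModelForCausalLM.from_pretrained(
    chat_model_name,
    dtype=torch.bfloat16,  # bf16 roughly halves weight memory vs. fp32 for a 7B model
    device_map="cpu",
)
chat_model.to("cuda")
chat_tokenizer = AutoTokenizer.from_pretrained(chat_model_name)
```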
 
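One loose end: the commit configures a module-level `logger`, yet the new reporting line is still a bare `print(outputs)`. A hypothetical follow-up (not part of this commit) would route the records through that logger instead, so verbosity is governed by the `logging.basicConfig(level=...)` call rather than hard-coded:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Hypothetical replacement for the print(outputs) call added in this commit.
def log_outputs(outputs: list[dict[str, str]]) -> None:
    # Summary at INFO; full records only when DEBUG is enabled. Lazy %-style
    # arguments avoid formatting cost when the level is filtered out.
    logger.info("generated %d outputs", len(outputs))
    for record in outputs:
        logger.debug("output record: %s", record)
```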