MuhammadQASIM111 committed
Commit e16e12e · verified · 1 Parent(s): 3e12f1c

Upload app.py

Files changed (1):
  app.py +118 -0
app.py ADDED
@@ -0,0 +1,118 @@
+ import gradio as gr
+ from gtts import gTTS
+ import tempfile
+ import os
+ import pandas as pd
+ import numpy as np
+ import faiss
+ from sentence_transformers import SentenceTransformer
+ from transformers import pipeline
+ import whisper
+ from groq import Groq
+
+ # Load the question bank and the embedding model, then build a FAISS index
+ # over the question embeddings for similarity search.
+ questions = pd.read_csv('non_technical_interview_questions.csv', encoding='unicode_escape')[['Question']]
+ model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+ question_embeddings = np.vstack([model.encode(q) for q in questions['Question']])
+ dimension = question_embeddings.shape[1]
+ index = faiss.IndexFlatL2(dimension)
+ index.add(question_embeddings)
+ # Text-generation pipeline (loaded here but not used by any handler below).
+ generation_model = pipeline("text2text-generation", model="google/flan-t5-large", num_beams=1)
+ # Read the Groq API key from the environment (e.g., a Space secret) instead of
+ # passing the placeholder string as the key itself.
+ client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+ # Helper functions
+ def get_questions(job_title, job_description, top_k=5):
+     """Return the top_k stored questions most similar to the job title and description."""
+     text = f'{job_title}: {job_description}'
+     text_embedding = model.encode(text).reshape(1, -1)
+     _, indices = index.search(text_embedding, top_k)
+     similar_questions = questions.iloc[indices[0]]
+     return similar_questions['Question'].tolist()
+
+ def text_to_speech(question_text):
+     """Synthesize the question to an MP3 file with gTTS and return the file path."""
+     tts = gTTS(text=question_text, lang='en')
+     temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
+     tts.save(temp_file.name)
+     return temp_file.name
+
+ def audio_to_text(audio_file):
+     """Transcribe an audio file with Whisper."""
+     whisper_model = whisper.load_model("base")
+     result = whisper_model.transcribe(audio_file)
+     return result['text']
+
+ def generate_feedback_from_llm(question, user_answer):
+     """Ask the Groq-hosted LLM for structured feedback on the candidate's answer."""
+     response = client.chat.completions.create(
+         messages=[
+             {
+                 "role": "system",
+                 "content": (
+                     "You are an experienced interview coach specializing in preparing candidates for real-world job interviews. "
+                     "Your goal is to provide concise, actionable feedback that helps the user improve their answers quickly. "
+                     "Please focus on the following points: \n"
+                     "1. Strengths: Highlight key strengths in the candidate's answer. Focus on one or two positive aspects (e.g., problem-solving, clear communication). \n"
+                     "2. Areas for Improvement: Mention one or two quick improvements, like being more concise, adding specific examples, or avoiding jargon. \n"
+                     "3. What a Best Answer Looks Like: Provide a brief example of an ideal answer that addresses the same question with clarity and impact. \n"
+                     "4. English Proficiency: Check for major grammar or sentence structure issues and provide quick tips for improvement. \n"
+                     "5. Interview Readiness: In one sentence, assess if the answer is ready for a real interview or if it needs a little more refinement. \n"
+                     "6. Quick Tips: Offer practical, quick tips on how to improve the candidate's overall interview performance. These could include advice on body language, confidence, tone, or other interview techniques.\n"
+                     "Keep your feedback clear and to the point, focusing on the most impactful changes the user can make to improve their interview performance. "
+                     "Your feedback should always be respectful, professional, and constructive, focused on preparing the candidate to perform confidently and concisely in real-world job interviews."
+                 )
+             },
+             {"role": "user", "content": f"Question: {question}\nAnswer: {user_answer}\n\nProvide feedback on the quality of the answer, noting strengths and suggestions for improvement."}
+         ],
+         model="llama3-8b-8192",
+     )
+     return response.choices[0].message.content
+
+ # Gradio app logic
+ def start_interview(job_title, job_description):
+     """Retrieve matching questions and return audio for the first one."""
+     question_queue = get_questions(job_title, job_description)
+     current_question_index = 0
+     question_audio = text_to_speech(question_queue[current_question_index])
+     return question_queue, current_question_index, question_audio
+
+ def next_question(question_queue, current_question_index):
+     """Advance to the next question; return no audio once the queue is exhausted."""
+     current_question_index += 1
+     if current_question_index < len(question_queue):
+         question_audio = text_to_speech(question_queue[current_question_index])
+     else:
+         question_audio = None
+     return current_question_index, question_audio
+
+ def transcribe_and_feedback(answer_audio, question_audio):
+     """Transcribe the question and answer recordings, then generate feedback."""
+     question_text = audio_to_text(question_audio)
+     user_answer = audio_to_text(answer_audio)
+     feedback = generate_feedback_from_llm(question_text, user_answer)
+     return user_answer, feedback
+
+ # Gradio UI components
+ with gr.Blocks() as demo:
+     gr.Markdown("### Job Interview Practice App")
+
+     with gr.Row():
+         job_title = gr.Textbox(label="Job Title", placeholder="e.g., Data Scientist")
+         job_description = gr.Textbox(label="Job Description", lines=5, placeholder="Describe the role requirements.")
+         start_button = gr.Button("Start Interview")
+
+     with gr.Row():
+         question_audio = gr.Audio(label="Question", type="filepath", interactive=False)
+         next_button = gr.Button("Next Question")
+
+     with gr.Row():
+         answer_audio = gr.Audio(label="Your Answer", type="filepath")
+
+     with gr.Row():
+         response_text = gr.Textbox(label="Transcription of Your Answer", interactive=False)
+         feedback_text = gr.Textbox(label="Feedback", interactive=False)
+
+     # Workflow state: the retrieved question list and the current position in it.
+     question_queue = gr.State()
+     current_question_index = gr.State()
+
+     start_button.click(start_interview, [job_title, job_description], [question_queue, current_question_index, question_audio])
+     next_button.click(next_question, [question_queue, current_question_index], [current_question_index, question_audio])
+
+     # Transcribe the answer and generate feedback whenever a new recording is provided.
+     answer_audio.change(transcribe_and_feedback, [answer_audio, question_audio], [response_text, feedback_text])
+
+ demo.launch()
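
For reviewers who want to sanity-check the retrieval step without launching the Gradio UI or supplying a Groq API key, here is a minimal standalone sketch of the same embedding-plus-FAISS lookup. It assumes the same CSV file and embedding model as app.py; the query string is only an example.

import pandas as pd
import numpy as np
import faiss
from sentence_transformers import SentenceTransformer

# Build the same index as app.py: one embedding per stored question, searched by L2 distance.
questions = pd.read_csv('non_technical_interview_questions.csv', encoding='unicode_escape')[['Question']]
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
embeddings = np.asarray(model.encode(questions['Question'].tolist()), dtype='float32')
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)

# Embed an example "job title: description" query and fetch the three closest questions.
query = np.asarray(model.encode("Data Scientist: build and explain ML models"), dtype='float32').reshape(1, -1)
_, idx = index.search(query, 3)
print(questions.iloc[idx[0]]['Question'].tolist())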