# Gradio app — Hugging Face Space source, commit 2b267d0
import os
import sys

# Force the project root onto the path so sibling packages
# (llm_functions, utils, guide_generation) are importable when this
# app is launched from its own directory.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

from dotenv import load_dotenv
import gradio as gr
import yaml
import json
import re
from llm_functions import get_interviewer_response, get_student_response, generate_cover_letter_response
from utils import parse_json_from_response
from guide_generation.llm_functions import generate_guide as create_guide_from_llm

# Load environment variables (e.g. API keys read by the LLM helpers) and initial data.
load_dotenv()

# Prompt templates; presumably consumed by the llm_functions helpers — TODO confirm.
with open("prompt.yaml", "r", encoding='utf-8') as f:
    prompts = yaml.safe_load(f)

with open("example_info.json", "r", encoding='utf-8') as f:
    # This now serves as the default values for the UI.
    default_info = json.load(f)
def user_submit(message, history):
    """Append the user's message to the chat history and clear the input box.

    Whitespace-only input is ignored; the history is returned unchanged.

    Args:
        message: Raw text from the message textbox.
        history: Chatbot history as [user, assistant] pairs (mutated in place).

    Returns:
        ("", history): empty string to reset the textbox, plus the history.
    """
    if message.strip():
        # Keep the original (unstripped) text; the assistant slot is filled later.
        history.append([message, None])
    return "", history
def bot_response(history, shared_info, progress=gr.Progress()):
    """Stream the interviewer's reply for the latest user turn.

    Args:
        history: Chatbot history as [user, assistant] pairs; a reply is only
            generated when the last pair's assistant slot is still None.
        shared_info: Interview context dict; a copy is extended with the
            serialized conversation before calling the LLM.
        progress: Gradio progress tracker for the completion percentage.

    Yields:
        (history, progress_display_update) tuples while streaming.
    """
    if not history or history[-1][1] is not None:
        # BUG FIX: a plain `return value` inside a generator is swallowed by
        # StopIteration and never reaches the Gradio UI — yield it instead.
        yield history, gr.update()
        return
    conversation_str = ""
    for user_msg, ai_msg in history:
        # BUG FIX: guard the student slot; the [None, "..."] system entry
        # appended when the interview ends would otherwise leak the literal
        # "ํ•™์ƒ: None" into the prompt (matches generate_all_cover_letters).
        if user_msg:
            conversation_str += f"ํ•™์ƒ: {user_msg}\n"
        if ai_msg:
            conversation_str += f"AI: {ai_msg}\n"
    format_info = shared_info.copy()
    format_info['conversation'] = conversation_str
    history[-1][1] = ""
    full_response = ""
    # Stream the raw LLM output into the last assistant slot as it arrives.
    for chunk in get_interviewer_response(format_info):
        full_response += chunk
        history[-1][1] = full_response
        yield history, gr.update()
    # Once complete, replace the raw stream with the parsed "answer" field.
    final_data = parse_json_from_response(full_response)
    final_progress_update = gr.update()
    if final_data:
        history[-1][1] = final_data.get("answer", "์‘๋‹ต์„ ์ฒ˜๋ฆฌํ•˜๋Š” ๋ฐ ์‹คํŒจํ–ˆ์Šต๋‹ˆ๋‹ค.")
        final_progress = final_data.get("progress", 0)
        if isinstance(final_progress, int) and 0 <= final_progress <= 100:
            progress(final_progress / 100)
            final_progress_update = f"์ž๊ธฐ์†Œ๊ฐœ์„œ ์™„์„ฑ๋„: {final_progress}%"
            if final_progress >= 100:
                # Interview finished: append a system-style message (no user slot).
                history.append([None, "๋ฉด์ ‘์ด ์ข…๋ฃŒ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ์ž๊ธฐ์†Œ๊ฐœ์„œ ์ƒ์„ฑ ํƒญ์œผ๋กœ ์ด๋™ํ•˜์„ธ์š”."])
    yield history, final_progress_update
def generate_ai_reply(history, shared_info, progress=gr.Progress()):
    """Generate a sample student answer, then chain into the interviewer's follow-up.

    Args:
        history: Chatbot history as [user, assistant] pairs; the last assistant
            turn must be non-empty (there is a question to answer).
        shared_info: Interview context dict; a copy is extended with the
            serialized conversation before calling the LLM.
        progress: Forwarded to bot_response for the completion bar.

    Yields:
        (history, progress_display_update) tuples while streaming.
    """
    if not history or not history[-1][1]:
        # BUG FIX: a plain `return value` inside a generator is swallowed by
        # StopIteration and never reaches the Gradio UI — yield it instead.
        yield history, gr.update()
        return
    conversation_str = ""
    for user_msg, ai_msg in history:
        # BUG FIX: guard the student slot; the [None, "..."] system entry
        # appended when the interview ends would otherwise leak the literal
        # "ํ•™์ƒ: None" into the prompt (matches generate_all_cover_letters).
        if user_msg:
            conversation_str += f"ํ•™์ƒ: {user_msg}\n"
        if ai_msg:
            conversation_str += f"AI: {ai_msg}\n"
    format_info = shared_info.copy()
    format_info['conversation'] = conversation_str
    student_answer_json = ""
    # New turn: the generated answer fills the student slot of this pair.
    history.append(["", None])
    for chunk in get_student_response(format_info):
        student_answer_json += chunk
        # Re-parse on every chunk so the clean "answer" field is shown as soon
        # as the JSON becomes parseable; fall back to the raw stream until then.
        parsed_data = parse_json_from_response(student_answer_json)
        if parsed_data:
            history[-1][0] = parsed_data.get("answer", "")
        else:
            history[-1][0] = student_answer_json
        yield history, gr.update()
    final_data = parse_json_from_response(student_answer_json)
    if final_data:
        history[-1][0] = final_data.get("answer", "์‘๋‹ต์„ ์ฒ˜๋ฆฌํ•˜๋Š” ๋ฐ ์‹คํŒจํ–ˆ์Šต๋‹ˆ๋‹ค.")
    yield history, gr.update()
    # Hand off to the interviewer for the follow-up question.
    yield from bot_response(history, shared_info, progress=progress)
def generate_all_cover_letters(history, shared_info, word_limit, progress=gr.Progress()):
    """Generate an answer for every cover-letter question from the interview transcript.

    Args:
        history: Chatbot history as [user, assistant] pairs.
        shared_info: Interview context dict ('questions', 'guide', ...).
        word_limit: Word-count constraint forwarded to the LLM.
        progress: Gradio progress tracker.

    Yields:
        One gr.update per answer textbox plus one for the progress display.
    """
    questions = shared_info.get('questions', [])
    if not history:
        yield [gr.update(value="๋ฉด์ ‘ ๋Œ€ํ™”๊ฐ€ ์—†์Šต๋‹ˆ๋‹ค.")] * len(questions) + [gr.update()]
        return
    # Serialize the transcript, skipping None/empty slots on either side.
    transcript_parts = []
    for student, interviewer in history:
        if student:
            transcript_parts.append(f"ํ•™์ƒ: {student}\n")
        if interviewer:
            transcript_parts.append(f"AI: {interviewer}\n")
    format_info = shared_info.copy()
    format_info['conversation'] = "".join(transcript_parts)
    total_questions = len(questions)
    outputs = [""] * total_questions
    for idx, question in enumerate(questions):
        guide_flow = shared_info.get('guide', '')
        streamed = ""
        for chunk in generate_cover_letter_response(question, [], format_info, guide_flow, word_limit):
            streamed += chunk
        parsed = parse_json_from_response(streamed)
        # Prefer the parsed "answer" field; otherwise fall back to the raw stream.
        outputs[idx] = parsed['answer'] if parsed and 'answer' in parsed else streamed
        fraction_done = (idx + 1) / total_questions
        progress(fraction_done)
        status_text = f"์ž๊ธฐ์†Œ๊ฐœ์„œ ์ƒ์„ฑ ์ง„ํ–‰๋ฅ : {int(fraction_done*100)}%"
        yield [gr.update(value=text) for text in outputs] + [gr.update(value=status_text, visible=True)]
    # Final pass: re-parse each output in case a raw fallback is parseable,
    # then hide the progress display.
    final_outputs = []
    for text in outputs:
        reparsed = parse_json_from_response(text)
        if reparsed and 'answer' in reparsed:
            final_outputs.append(gr.update(value=reparsed['answer']))
        else:
            final_outputs.append(gr.update(value=text))
    yield final_outputs + [gr.update(visible=False)]
def update_guide_and_info(company, position, jd, questions_str):
    """Generate a writing guide via the LLM and rebuild the shared-info state.

    Args:
        company: Company name from the UI.
        position: Position/role title from the UI.
        jd: Job-description text from the UI.
        questions_str: Cover-letter questions, one per line.

    Returns:
        (new_info, guide_text): the refreshed state dict and the guide text
        for the markdown display.
    """
    # Experience level is hardcoded to "์‹ ์ž…" (entry-level) for now.
    guide_json, _ = create_guide_from_llm(questions_str, jd, company, "์‹ ์ž…")
    guide_text = (
        guide_json["guide"]
        if guide_json and "guide" in guide_json
        else "๊ฐ€์ด๋“œ ์ƒ์„ฑ์— ์‹คํŒจํ–ˆ์Šต๋‹ˆ๋‹ค. ์ž…๋ ฅ๊ฐ’์„ ํ™•์ธํ•ด์ฃผ์„ธ์š”."
    )
    question_list = [line.strip() for line in questions_str.strip().split('\n') if line.strip()]
    # Start from the defaults, then overlay everything entered in the UI.
    new_info = {
        **default_info,
        "company_name": company,
        "position_title": position,
        "jd": jd,
        "questions": question_list,
        "guide": guide_text,
    }
    return new_info, guide_text
# --- Gradio UI ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # App-wide state shared across tabs (company info, questions, guide, ...).
    shared_info = gr.State(default_info)
    with gr.Tabs() as tabs:
        # Tab 1: enter company/JD/questions and generate the writing guide.
        with gr.TabItem("๊ฐ€์ด๋“œ ์ƒ์„ฑ", id=0):
            gr.Markdown("## ๐Ÿ“ ์ž๊ธฐ์†Œ๊ฐœ์„œ ์ •๋ณด ์ž…๋ ฅ")
            gr.Markdown("๋ฉด์ ‘ ์‹œ๋ฎฌ๋ ˆ์ด์…˜์— ํ•„์š”ํ•œ ์ •๋ณด๋ฅผ ์ž…๋ ฅํ•˜๊ณ  '๊ฐ€์ด๋“œ ์ƒ์„ฑ' ๋ฒ„ํŠผ์„ ๋ˆŒ๋Ÿฌ์ฃผ์„ธ์š”.")
            with gr.Row():
                company_name_input = gr.Textbox(label="ํšŒ์‚ฌ๋ช…", value=default_info.get("company_name"))
                position_title_input = gr.Textbox(label="์ง๋ฌด๋ช…", value=default_info.get("position_title"))
            jd_input = gr.Textbox(label="Job Description (JD)", lines=5, value=default_info.get("jd"))
            questions_input = gr.Textbox(label="์ž๊ธฐ์†Œ๊ฐœ์„œ ์งˆ๋ฌธ (ํ•œ ์ค„์— ํ•œ ๊ฐœ์”ฉ)", lines=3, value="\n".join(default_info.get("questions", [])))
            generate_guide_btn = gr.Button("๊ฐ€์ด๋“œ ์ƒ์„ฑ", variant="primary")
            guide_output = gr.Markdown(label="์ƒ์„ฑ๋œ ๊ฐ€์ด๋“œ", value=f"**๊ฐ€์ด๋“œ:**\n{default_info.get('guide')}")
        # Tab 2: interview chat (user-typed or AI-generated student answers).
        with gr.TabItem("๋ฉด์ ‘ ๋Œ€ํ™”", id=1):
            gr.Markdown("## ๐Ÿ’ฌ ๋ฉด์ ‘ ์‹œ๋ฎฌ๋ ˆ์ด์…˜")
            gr.Markdown("๋ฉด์ ‘๊ด€์˜ ์งˆ๋ฌธ์— ๋‹ต๋ณ€ํ•˜๊ฑฐ๋‚˜, 'AI ๋‹ต๋ณ€ ์ƒ์„ฑ' ๋ฒ„ํŠผ์„ ๋ˆŒ๋Ÿฌ๋ณด์„ธ์š”. ๋ฉด์ ‘๊ด€์ด ํŒ๋‹จํ•˜๋Š” ์ž๊ธฐ์†Œ๊ฐœ์„œ ์™„์„ฑ๋„๊ฐ€ 100%๊ฐ€ ๋˜๋ฉด ๋ฉด์ ‘์ด ์ข…๋ฃŒ๋ฉ๋‹ˆ๋‹ค.")
            progress_display = gr.Markdown("์ž๊ธฐ์†Œ๊ฐœ์„œ ์™„์„ฑ๋„: 0%")
            chatbot = gr.Chatbot(label="๋ฉด์ ‘ ๋Œ€ํ™”", bubble_full_width=False, avatar_images=("๐Ÿ‘ค", "๐Ÿ‘”"), height=500)
            msg = gr.Textbox(label="๋ฉ”์‹œ์ง€ ์ž…๋ ฅ", placeholder="๋ฉ”์‹œ์ง€๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”...", lines=2)
            with gr.Row():
                submit_btn = gr.Button("์ „์†ก", variant="primary")
                ai_reply_btn = gr.Button("AI ๋‹ต๋ณ€ ์ƒ์„ฑ", variant="secondary")
                clear_btn = gr.Button("์ดˆ๊ธฐํ™”")
        # Tab 3: generate one cover-letter answer per question from the transcript.
        with gr.TabItem("์ž๊ธฐ์†Œ๊ฐœ์„œ ์ƒ์„ฑ", id=2):
            gr.Markdown("## ๐Ÿ“ ์ž๊ธฐ์†Œ๊ฐœ์„œ ๋‹ต๋ณ€ ์ƒ์„ฑ")
            gr.Markdown("๋ฉด์ ‘์ด ์™„๋ฃŒ๋˜๋ฉด ๋Œ€ํ™” ๋‚ด์šฉ์„ ๋ฐ”ํƒ•์œผ๋กœ ์ž๊ธฐ์†Œ๊ฐœ์„œ ๋‹ต๋ณ€์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.")
            word_limit_input = gr.Number(label="๋‹จ์–ด ์ˆ˜ ์ œํ•œ", value=500)
            generate_btn = gr.Button("์ž๊ธฐ์†Œ๊ฐœ์„œ ์ƒ์„ฑ ์‹œ์ž‘", variant="primary", size="lg")
            cover_letter_progress_display = gr.Markdown("", visible=False)
            # One output textbox per default question; NOTE(review): the count is
            # fixed at launch from default_info, not from later guide updates.
            cover_letter_outputs = [gr.Textbox(label=f"๋‹ต๋ณ€ {i+1}", lines=8, interactive=False) for i, q in enumerate(default_info.get('questions',[]))]
    # Event Handlers
    generate_guide_btn.click(
        fn=update_guide_and_info,
        inputs=[company_name_input, position_title_input, jd_input, questions_input],
        outputs=[shared_info, guide_output]
    )
    # Sending a message (button or Enter) appends it, then streams the interviewer reply.
    submit_btn.click(user_submit, [msg, chatbot], [msg, chatbot]).then(bot_response, [chatbot, shared_info], [chatbot, progress_display])
    msg.submit(user_submit, [msg, chatbot], [msg, chatbot]).then(bot_response, [chatbot, shared_info], [chatbot, progress_display])
    ai_reply_btn.click(generate_ai_reply, [chatbot, shared_info], [chatbot, progress_display])
    # Reset the chat and the progress label without queueing.
    clear_btn.click(lambda: ([], "์ž๊ธฐ์†Œ๊ฐœ์„œ ์™„์„ฑ๋„: 0%"), None, [chatbot, progress_display], queue=False)
    generate_btn.click(generate_all_cover_letters, [chatbot, shared_info, word_limit_input], cover_letter_outputs + [cover_letter_progress_display])

if __name__ == "__main__":
    demo.launch(share=True)