|
import os |
|
import sys |
|
|
|
|
|
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) |
|
if project_root not in sys.path: |
|
sys.path.insert(0, project_root) |
|
|
|
from dotenv import load_dotenv |
|
import gradio as gr |
|
import yaml |
|
import json |
|
import re |
|
from llm_functions import get_interviewer_response, get_student_response, generate_cover_letter_response |
|
from utils import parse_json_from_response |
|
from guide_generation.llm_functions import generate_guide as create_guide_from_llm |
|
|
|
|
|
|
|
# Load environment variables (API keys for the LLM backends, etc.) from .env.
load_dotenv()

# Prompt templates; presumably consumed by the llm_functions helpers — TODO confirm,
# `prompts` is not referenced elsewhere in this file.
with open("prompt.yaml", "r", encoding='utf-8') as f:
    prompts = yaml.safe_load(f)

# Default application state (company info, questions, guide) shown at startup
# and used as the base dict for shared_info.
with open("example_info.json", "r", encoding='utf-8') as f:
    default_info = json.load(f)
|
|
|
def user_submit(message, history):
    """Append the user's message to the chat history.

    Whitespace-only messages are ignored. Always returns an empty string
    (to clear the input textbox) together with the updated history, where
    the new entry has a pending (None) bot reply.
    """
    if message.strip():
        history.append([message, None])
    return "", history
|
|
|
def bot_response(history, shared_info, progress=gr.Progress()):
    """Stream the interviewer's reply into the chat and update the progress label.

    Generator: yields (history, progress_display_update) pairs so Gradio can
    render partial output while the LLM streams. The model is expected to emit
    JSON containing "answer" (text) and "progress" (an int in 0..100).
    """
    # Guard: nothing to do without history, or if the last turn is already answered.
    # Bugfix: Gradio discards the `return` value of a generator callback, so the
    # original `return history, gr.update()` was silently dropped; yield the
    # unchanged outputs explicitly instead, then stop.
    if not history or history[-1][1] is not None:
        yield history, gr.update()
        return

    # Flatten the chat into a plain-text transcript for the prompt.
    conversation_str = ""
    for h in history:
        conversation_str += f"ํ์: {h[0]}\n"
        if h[1]:
            conversation_str += f"AI: {h[1]}\n"

    format_info = shared_info.copy()
    format_info['conversation'] = conversation_str

    # Stream the raw LLM output into the pending bot turn.
    history[-1][1] = ""
    full_response = ""
    for chunk in get_interviewer_response(format_info):
        full_response += chunk
        history[-1][1] = full_response
        yield history, gr.update()

    # The completed stream should be JSON; replace the raw text with the answer.
    final_data = parse_json_from_response(full_response)
    final_progress_update = gr.update()
    if final_data:
        history[-1][1] = final_data.get("answer", "์๋ต์ ์ฒ๋ฆฌํ๋ ๋ฐ ์คํจํ์ต๋๋ค.")
        final_progress = final_data.get("progress", 0)
        # Bugfix: only act on a validated int progress value. The original
        # compared `final_progress >= 100` outside this isinstance check, which
        # raised TypeError when the model returned a non-numeric value
        # (e.g. the string "100").
        if isinstance(final_progress, int) and 0 <= final_progress <= 100:
            progress(final_progress / 100)
            final_progress_update = f"์๊ธฐ์๊ฐ์ ์์ฑ๋: {final_progress}%"
            if final_progress >= 100:
                # Interview finished: tell the user to move to the cover-letter tab.
                history.append([None, "๋ฉด์ ์ด ์ข๋ฃ๋์์ต๋๋ค. ์๊ธฐ์๊ฐ์ ์์ฑ ํญ์ผ๋ก ์ด๋ํ์ธ์."])

    yield history, final_progress_update
|
|
|
|
|
def generate_ai_reply(history, shared_info, progress=gr.Progress()):
    """Generate the AI student's answer, then chain the interviewer's follow-up.

    Generator: streams the student's answer into a new user-side turn of
    `history`, yielding (history, progress_display_update) pairs, and finally
    delegates to bot_response() so the interviewer asks the next question.
    """
    # Guard: we need an existing interviewer message to reply to.
    # Bugfix: Gradio discards the `return` value of a generator callback, so
    # the original `return history, gr.update()` silently produced no output;
    # yield the unchanged outputs explicitly, then stop.
    if not history or not history[-1][1]:
        yield history, gr.update()
        return

    # Flatten the chat into a plain-text transcript for the prompt.
    conversation_str = ""
    for h in history:
        conversation_str += f"ํ์: {h[0]}\n"
        if h[1]:
            conversation_str += f"AI: {h[1]}\n"

    format_info = shared_info.copy()
    format_info['conversation'] = conversation_str

    # Stream the student's answer into a fresh turn (user side of the pair).
    student_answer_json = ""
    history.append(["", None])
    for chunk in get_student_response(format_info):
        student_answer_json += chunk
        # Re-parse on every chunk so the answer text appears as soon as the
        # partial JSON becomes parseable; fall back to the raw stream.
        parsed_data = parse_json_from_response(student_answer_json)
        if parsed_data:
            history[-1][0] = parsed_data.get("answer", "")
        else:
            history[-1][0] = student_answer_json
        yield history, gr.update()

    # Final parse of the complete response (also covers an empty stream,
    # where the loop above never ran).
    final_data = parse_json_from_response(student_answer_json)
    if final_data:
        history[-1][0] = final_data.get("answer", "์๋ต์ ์ฒ๋ฆฌํ๋ ๋ฐ ์คํจํ์ต๋๋ค.")
    yield history, gr.update()

    # Hand off to the interviewer for the follow-up question.
    yield from bot_response(history, shared_info, progress=progress)
|
|
|
def generate_all_cover_letters(history, shared_info, word_limit, progress=gr.Progress()):
    """Draft an answer for every cover-letter question from the interview transcript.

    Generator: each yield is a list of gr.update objects — one per question
    textbox, plus one trailing update for the progress Markdown — so partial
    answers stream into the UI as the LLM produces them.
    """
    if not history:
        # No interview took place: fill every answer box with a notice,
        # leave the progress display untouched.
        yield [gr.update(value="๋ฉด์ ๋ํ๊ฐ ์์ต๋๋ค.")] * len(shared_info.get('questions', [])) + [gr.update()]
        return

    # Flatten the chat into a plain-text transcript for the prompt
    # (skip None placeholders on either side of a turn).
    conversation_str = ""
    for h in history:
        if h[0]: conversation_str += f"ํ์: {h[0]}\n"
        if h[1]: conversation_str += f"AI: {h[1]}\n"

    total_questions = len(shared_info.get('questions', []))
    outputs = [""] * total_questions

    format_info = shared_info.copy()
    format_info['conversation'] = conversation_str

    # Generate answers question-by-question, streaming chunks as they arrive.
    for i, question in enumerate(shared_info.get('questions', [])):
        full_response = ""
        flow = shared_info.get('guide', '')
        for chunk in generate_cover_letter_response(question, [], format_info, flow, word_limit):
            full_response += chunk
            # Re-parse the accumulating JSON every chunk so the textbox shows
            # the "answer" field as soon as it becomes parseable.
            parsed_data = parse_json_from_response(full_response)
            if parsed_data and 'answer' in parsed_data:
                outputs[i] = parsed_data['answer']
            else:
                outputs[i] = full_response

            # Progress is per-question granularity, updated on every chunk.
            overall_progress_val = (i + 1) / total_questions
            progress(overall_progress_val)
            progress_text = f"์๊ธฐ์๊ฐ์ ์์ฑ ์งํ๋ฅ : {int(overall_progress_val*100)}%"
            yield [gr.update(value=o) for o in outputs] + [gr.update(value=progress_text, visible=True)]

    # Final pass: prefer the parsed "answer" field, fall back to the raw text,
    # and hide the progress display.
    final_outputs = []
    for o in outputs:
        final_data = parse_json_from_response(o)
        if final_data and 'answer' in final_data:
            final_outputs.append(gr.update(value=final_data['answer']))
        else:
            final_outputs.append(gr.update(value=o))

    yield final_outputs + [gr.update(visible=False)]
|
|
|
def update_guide_and_info(company, position, jd, questions_str):
    """Regenerate the cover-letter guide from the form inputs and rebuild shared state.

    Returns (new_info, guide_text): the dict for the shared_info State and the
    text for the guide Markdown output.
    """
    # NOTE(review): the last argument is a garbled literal — presumably the
    # applicant's experience level ("entry-level"); confirm against
    # guide_generation.llm_functions.generate_guide.
    guide_json, _ = create_guide_from_llm(questions_str, jd, company, "์ ์")

    if guide_json and "guide" in guide_json:
        guide_text = guide_json["guide"]
    else:
        # LLM failed or returned an unexpected shape — surface an error message.
        guide_text = "๊ฐ์ด๋ ์์ฑ์ ์คํจํ์ต๋๋ค. ์๋ ฅ๊ฐ์ ํ์ธํด์ฃผ์ธ์."

    # Start from the defaults so keys not covered by the form keep sensible values.
    new_info = default_info.copy()
    new_info.update({
        "company_name": company,
        "position_title": position,
        "jd": jd,
        # One question per line in the textbox; drop blank lines.
        "questions": [q.strip() for q in questions_str.strip().split('\n') if q.strip()],
        "guide": guide_text
    })

    return new_info, guide_text
|
|
|
|
|
# ---------------------------------------------------------------------------
# UI definition: three tabs (guide generation, interview chat, cover letter).
# ---------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Per-session application state shared across all tabs.
    shared_info = gr.State(default_info)

    with gr.Tabs() as tabs:
        # Tab 0: collect job info and generate the writing guide.
        with gr.TabItem("๊ฐ์ด๋ ์์ฑ", id=0):
            gr.Markdown("## ๐ ์๊ธฐ์๊ฐ์ ์ ๋ณด ์๋ ฅ")
            gr.Markdown("๋ฉด์ ์๋ฎฌ๋ ์ด์์ ํ์ํ ์ ๋ณด๋ฅผ ์๋ ฅํ๊ณ '๊ฐ์ด๋ ์์ฑ' ๋ฒํผ์ ๋๋ฌ์ฃผ์ธ์.")
            with gr.Row():
                company_name_input = gr.Textbox(label="ํ์ฌ๋ช", value=default_info.get("company_name"))
                position_title_input = gr.Textbox(label="์ง๋ฌด๋ช", value=default_info.get("position_title"))
            jd_input = gr.Textbox(label="Job Description (JD)", lines=5, value=default_info.get("jd"))
            questions_input = gr.Textbox(label="์๊ธฐ์๊ฐ์ ์ง๋ฌธ (ํ ์ค์ ํ ๊ฐ์ฉ)", lines=3, value="\n".join(default_info.get("questions", [])))

            generate_guide_btn = gr.Button("๊ฐ์ด๋ ์์ฑ", variant="primary")
            guide_output = gr.Markdown(label="์์ฑ๋ ๊ฐ์ด๋", value=f"**๊ฐ์ด๋:**\n{default_info.get('guide')}")

        # Tab 1: interactive interview simulation.
        with gr.TabItem("๋ฉด์ ๋ํ", id=1):
            gr.Markdown("## ๐ฌ ๋ฉด์ ์๋ฎฌ๋ ์ด์")
            gr.Markdown("๋ฉด์ ๊ด์ ์ง๋ฌธ์ ๋ต๋ณํ๊ฑฐ๋, 'AI ๋ต๋ณ ์์ฑ' ๋ฒํผ์ ๋๋ฌ๋ณด์ธ์. ๋ฉด์ ๊ด์ด ํ๋จํ๋ ์๊ธฐ์๊ฐ์ ์์ฑ๋๊ฐ 100%๊ฐ ๋๋ฉด ๋ฉด์ ์ด ์ข๋ฃ๋ฉ๋๋ค.")
            progress_display = gr.Markdown("์๊ธฐ์๊ฐ์ ์์ฑ๋: 0%")
            chatbot = gr.Chatbot(label="๋ฉด์ ๋ํ", bubble_full_width=False, avatar_images=("๐ค", "๐"), height=500)
            msg = gr.Textbox(label="๋ฉ์์ง ์๋ ฅ", placeholder="๋ฉ์์ง๋ฅผ ์๋ ฅํ์ธ์...", lines=2)
            with gr.Row():
                submit_btn = gr.Button("์ ์ก", variant="primary")
                ai_reply_btn = gr.Button("AI ๋ต๋ณ ์์ฑ", variant="secondary")
                clear_btn = gr.Button("์ด๊ธฐํ")

        # Tab 2: generate the final cover-letter answers from the transcript.
        with gr.TabItem("์๊ธฐ์๊ฐ์ ์์ฑ", id=2):
            gr.Markdown("## ๐ ์๊ธฐ์๊ฐ์ ๋ต๋ณ ์์ฑ")
            gr.Markdown("๋ฉด์ ์ด ์๋ฃ๋๋ฉด ๋ํ ๋ด์ฉ์ ๋ฐํ์ผ๋ก ์๊ธฐ์๊ฐ์ ๋ต๋ณ์ ์์ฑํฉ๋๋ค.")
            word_limit_input = gr.Number(label="๋จ์ด ์ ์ ํ", value=500)
            generate_btn = gr.Button("์๊ธฐ์๊ฐ์ ์์ฑ ์์", variant="primary", size="lg")
            cover_letter_progress_display = gr.Markdown("", visible=False)
            # NOTE(review): one output box per DEFAULT question — the count is
            # fixed at build time, so questions edited later on the guide tab
            # do not add/remove boxes; confirm this is intended.
            cover_letter_outputs = [gr.Textbox(label=f"๋ต๋ณ {i+1}", lines=8, interactive=False) for i, q in enumerate(default_info.get('questions',[]))]

    # ---- Event wiring ----

    # Regenerate the guide and refresh the shared state from the form inputs.
    generate_guide_btn.click(
        fn=update_guide_and_info,
        inputs=[company_name_input, position_title_input, jd_input, questions_input],
        outputs=[shared_info, guide_output]
    )

    # Send a message (button or Enter), then stream the interviewer's reply.
    submit_btn.click(user_submit, [msg, chatbot], [msg, chatbot]).then(bot_response, [chatbot, shared_info], [chatbot, progress_display])
    msg.submit(user_submit, [msg, chatbot], [msg, chatbot]).then(bot_response, [chatbot, shared_info], [chatbot, progress_display])
    # Let the AI answer on the student's behalf (chains into bot_response internally).
    ai_reply_btn.click(generate_ai_reply, [chatbot, shared_info], [chatbot, progress_display])
    # Reset the chat and the progress label immediately (no queue).
    clear_btn.click(lambda: ([], "์๊ธฐ์๊ฐ์ ์์ฑ๋: 0%"), None, [chatbot, progress_display], queue=False)
    # Kick off cover-letter generation for all questions.
    generate_btn.click(generate_all_cover_letters, [chatbot, shared_info, word_limit_input], cover_letter_outputs + [cover_letter_progress_display])
|
|
|
if __name__ == "__main__":
    # NOTE(review): share=True opens a public Gradio tunnel, making the app
    # reachable from the internet — confirm this is intended for deployment.
    demo.launch(share=True)