# Import required libraries
import os
import streamlit as st
from crewai import Agent, Task, Crew, LLM
from crewai_tools import (
    SerperDevTool,
    FileReadTool,
    MDXSearchTool,
    ScrapeWebsiteTool,
)
from embedchain import App
from embedchain.embedder import GeminiEmbedder

# Set up API keys (use your own keys; never commit real keys to source)
gemini_api_key = "<YOUR_GEMINI_API_KEY>"
serper_api_key = "<YOUR_SERPER_API_KEY>"

# Configure environment variables
os.environ["GEMINI_API_KEY"] = gemini_api_key
os.environ["SERPER_API_KEY"] = serper_api_key
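
# Configure an explicit Gemini model for the agents via CrewAI's LLM wrapper,
# so they do not fall back to the default OpenAI model (no OpenAI key is set here).
# A minimal sketch: the litellm-style "gemini/gemini-1.5-flash" model id is an
# assumption, not part of the original script; swap in your preferred Gemini model.
gemini_llm = LLM(model="gemini/gemini-1.5-flash", api_key=gemini_api_key)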

# Initialize Gemini embedding model
gemini_embedder = GeminiEmbedder(api_key=gemini_api_key)

# Initialize EmbedChain App with Gemini
embedchain_app = App(embedding_model=gemini_embedder)

# Initialize Tools
search_tool = SerperDevTool()
scrape_tool = ScrapeWebsiteTool()
resume_file_path = 'resume.md'  # Use a relative or dynamic file path
read_resume = FileReadTool(file_path=resume_file_path)
semantic_search_resume = MDXSearchTool(
    mdx=resume_file_path,
    embedding_model=gemini_embedder,
    app=embedchain_app,
)

# Agent 1: Researcher
researcher = Agent(
    role="Tech Job Researcher",
    goal="Analyze job postings and extract required qualifications.",
    tools=[scrape_tool, search_tool],
    llm=gemini_llm,  # use the Gemini model configured above
    verbose=True,
    backstory=(
        "An expert in analyzing job postings, you identify essential skills "
        "and qualifications required for job applications."
    )
)

# Agent 2: Profiler
profiler = Agent(
    role="Personal Profiler for Engineers",
    goal="Create a detailed profile for job applicants.",
    tools=[read_resume, semantic_search_resume],
    llm=gemini_llm,
    verbose=True,
    backstory=(
        "Specializing in building comprehensive profiles, you extract and "
        "synthesize information to create impactful resumes."
    )
)

# Agent 3: Resume Strategist
resume_strategist = Agent(
    role="Resume Strategist for Engineers",
    goal="Refine resumes to align with job requirements.",
    tools=[read_resume, semantic_search_resume],
    llm=gemini_llm,
    verbose=True,
    backstory=(
        "Your expertise lies in crafting resumes that highlight key skills "
        "and experiences to match job requirements."
    )
)

# Agent 4: Interview Preparer
interview_preparer = Agent(
    role="Interview Preparer",
    goal="Generate potential interview questions and talking points.",
    tools=[read_resume, semantic_search_resume],
    llm=gemini_llm,
    verbose=True,
    backstory=(
        "You prepare candidates for interviews by formulating relevant questions "
        "and talking points based on the job and their profile."
    )
)

# Define Tasks
research_task = Task(
    description=(
        "Analyze the job posting at {job_posting_url} to extract key skills, "
        "qualifications, and requirements."
    ),
    expected_output="A structured list of job requirements.",
    agent=researcher,
    async_execution=True
)

profile_task = Task(
    description=(
        "Create a detailed profile from the resume, the GitHub profile at "
        "{github_url}, and the personal write-up: {personal_writeup}"
    ),
    expected_output="A comprehensive profile document.",
    agent=profiler,
    async_execution=True
)

resume_strategy_task = Task(
    description="Tailor the resume based on job requirements and personal profile.",
    expected_output="An updated resume tailored to the job.",
    output_file="tailored_resume.md",
    context=[research_task, profile_task],
    agent=resume_strategist
)

interview_preparation_task = Task(
    description="Generate interview questions and talking points based on the tailored resume.",
    expected_output="A document with key interview questions and talking points.",
    output_file="interview_materials.md",
    context=[research_task, profile_task, resume_strategy_task],
    agent=interview_preparer
)

# Crew Setup
job_application_crew = Crew(
    agents=[researcher, profiler, resume_strategist, interview_preparer],
    tasks=[research_task, profile_task, resume_strategy_task, interview_preparation_task],
    verbose=True
)

# Streamlit Application
st.title("AI-Powered Job Application Assistant")

# User Inputs
st.header("Provide Job Details")
job_posting_url = st.text_input("Job Posting URL", "https://jobs.lever.co/AIFund/6c82e23e-d954-4dd8-a734-c0c2c5ee00f1")
github_url = st.text_input("GitHub Profile URL", "https://github.com/joaomdmoura")
personal_writeup = st.text_area(
    "Personal Writeup",
    """Noah is an accomplished Software Engineering Leader with 18 years of experience,
specializing in managing remote and in-office teams. He holds an MBA and has a strong
background in AI and data science. Noah has successfully led major tech initiatives
and startups, driving innovation and growth."""
)

# File Upload for Resume
st.header("Upload Resume")
uploaded_resume = st.file_uploader("Upload your resume (Markdown format)", type=["md"])

if uploaded_resume:
    os.makedirs("uploads", exist_ok=True)  # ensure the uploads directory exists
    resume_file_path = os.path.join("uploads", uploaded_resume.name)
    with open(resume_file_path, "wb") as f:
        f.write(uploaded_resume.getbuffer())
    st.success("Resume uploaded successfully!")
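
    # read_resume and semantic_search_resume above were built against the default
    # 'resume.md'. A minimal sketch (not part of the original script) that re-points
    # them at the uploaded file and hands the fresh tools back to the agents that
    # use them; it assumes the Agent objects allow reassigning their tools attribute.
    read_resume = FileReadTool(file_path=resume_file_path)
    semantic_search_resume = MDXSearchTool(
        mdx=resume_file_path,
        embedding_model=gemini_embedder,
        app=embedchain_app,
    )
    for agent in (profiler, resume_strategist, interview_preparer):
        agent.tools = [read_resume, semantic_search_resume]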

# Start Job Application Process
if st.button("Start Job Application Process"):
    job_application_inputs = {
        'job_posting_url': job_posting_url,
        'github_url': github_url,
        'personal_writeup': personal_writeup
    }

    # Run the Crew
    result = job_application_crew.kickoff(inputs=job_application_inputs)
    st.success("Job Application Process Completed!")

# Display Results
if os.path.exists("tailored_resume.md"):
    st.header("Generated Tailored Resume")
    with open("tailored_resume.md", "r") as f:
        st.markdown(f.read(), unsafe_allow_html=True)

if os.path.exists("interview_materials.md"):
    st.header("Generated Interview Materials")
    with open("interview_materials.md", "r") as f:
        st.markdown(f.read(), unsafe_allow_html=True)