Spaces:
Sleeping
Sleeping
File size: 6,756 Bytes
b82edc9 a6890b8 b82edc9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 |
import gradio as gr
from transformers import pipeline
import pandas as pd
import spacy
import re
from pathlib import Path
import PyPDF2
import docx
import json
# Load models (module-level: downloaded/initialized once at import time).
# spaCy pipeline used for sentence segmentation and NLP parsing; if the small
# English model is not installed yet, download it on first run and retry.
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    from spacy.cli import download
    download("en_core_web_sm")
    nlp = spacy.load("en_core_web_sm")

# Hugging Face pipelines: NER model used to count keyword-like entities,
# and a text classifier.
# NOTE(review): `classifier` is never used in the visible code — confirm
# whether it is needed before removing.
keyword_extractor = pipeline("token-classification", model="jean-baptiste/roberta-large-ner-english")
classifier = pipeline("text-classification", model="microsoft/MiniLM-L12-H384-uncased")
def extract_text_from_resume(file):
    """Extract raw text from an uploaded resume file.

    Args:
        file: An uploaded-file object exposing a ``.name`` attribute with the
            path on disk (as provided by ``gr.File``).

    Returns:
        str: The extracted text, stripped of surrounding whitespace. Returns
        an empty string for unsupported extensions.
    """
    file_path = file.name
    # Case-insensitive extension match so ".PDF"/".Docx" uploads also work.
    suffix = Path(file_path).suffix.lower()
    text = ""
    if suffix == '.pdf':
        with open(file_path, 'rb') as pdf_file:
            pdf_reader = PyPDF2.PdfReader(pdf_file)
            for page in pdf_reader.pages:
                # extract_text() may return None (e.g. image-only pages);
                # guard so concatenation never raises TypeError.
                text += page.extract_text() or ""
    elif suffix == '.docx':
        doc = docx.Document(file_path)
        for paragraph in doc.paragraphs:
            text += paragraph.text + '\n'
    elif suffix == '.txt':
        with open(file_path, 'r', encoding='utf-8') as txt_file:
            text = txt_file.read()
    return text.strip()
def extract_information(text):
    """Pull structured sections out of raw resume text.

    Args:
        text: Full resume text.

    Returns:
        dict: Keys ``skills``, ``education``, ``experience`` (lists of
        matched skills / sentences) and ``contact`` (emails then phones).
    """
    doc = nlp(text)
    entities = {
        "skills": [],
        "education": [],
        "experience": [],
        "contact": []
    }

    # Skills: substring match against a predefined list of common skills.
    common_skills = ["python", "java", "javascript", "sql", "machine learning", "data analysis"]
    text_lower = text.lower()
    entities["skills"] = [skill for skill in common_skills if skill in text_lower]

    # Education / experience: classify each sentence by keyword presence.
    # Single pass over doc.sents (the original iterated twice) — a sentence
    # may land in both buckets, matching the original behavior.
    education_keywords = ("university", "college", "bachelor", "master", "phd", "degree")
    experience_keywords = ("experience", "work", "job", "position", "role")
    for sent in doc.sents:
        sent_lower = sent.text.lower()
        if any(keyword in sent_lower for keyword in education_keywords):
            entities["education"].append(sent.text.strip())
        if any(keyword in sent_lower for keyword in experience_keywords):
            entities["experience"].append(sent.text.strip())

    # Contact info: emails first, then phone numbers, via regex.
    email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
    phone_pattern = r'\b\d{3}[-.]?\d{3}[-.]?\d{4}\b'
    emails = re.findall(email_pattern, text)
    phones = re.findall(phone_pattern, text)
    entities["contact"] = emails + phones
    return entities
def analyze_resume(text, entities):
    """Score a resume on four 0-100 axes.

    Args:
        text: Full resume text.
        entities: Output of ``extract_information``.

    Returns:
        dict: ``completeness``, ``skills_match``, ``formatting`` and
        ``keyword_optimization`` scores, each in [0, 100].
    """
    return {
        "completeness": _completeness_score(entities),
        "skills_match": _skills_match_score(entities),
        "formatting": _formatting_score(text),
        "keyword_optimization": _keyword_optimization_score(text),
    }


def _completeness_score(entities):
    # 25 points for each non-empty section.
    sections = ("skills", "education", "experience", "contact")
    present = sum(1 for section in sections if entities[section])
    return (present / 4) * 100


def _skills_match_score(entities):
    # Fraction of the desired skills found in the resume's skill list.
    desired_skills = ["python", "java", "javascript", "sql", "machine learning"]
    matched_skills = sum(1 for skill in entities["skills"] if skill in desired_skills)
    return (matched_skills / len(desired_skills)) * 100


def _formatting_score(text):
    # Five 20-point heuristics: multi-line layout, minimum length, any
    # capitalization, a 4-digit year, and sentence punctuation.
    formatting_score = 0
    if len(text.split('\n')) > 5: formatting_score += 20
    if len(text) > 200: formatting_score += 20
    if any(char.isupper() for char in text): formatting_score += 20
    if re.search(r'\b\d{4}\b', text): formatting_score += 20
    if len(re.findall(r'[.!?]', text)) > 3: formatting_score += 20
    return formatting_score


def _keyword_optimization_score(text):
    # 10 points per NER-detected token, capped at 100. Only the first 512
    # characters are analyzed (model input-length limit).
    keywords = keyword_extractor(text[:512])
    return min(len(keywords) * 10, 100)
def generate_recommendations(scores, entities):
    """Turn analysis scores into a newline-joined list of improvement tips.

    Args:
        scores: Output of ``analyze_resume``.
        entities: Output of ``extract_information``.

    Returns:
        str: Recommendation text; empty when every score clears its threshold.
    """
    tips = []

    if scores["completeness"] < 75:
        tips.append("π Add more sections to your resume to improve completeness.")
        # One targeted tip per missing section.
        section_tips = (
            ("skills", "- Add a skills section"),
            ("education", "- Add education details"),
            ("experience", "- Add work experience"),
            ("contact", "- Add contact information"),
        )
        tips.extend(tip for section, tip in section_tips if not entities[section])

    if scores["skills_match"] < 60:
        tips += [
            "\nπ‘ Consider adding more relevant skills:",
            "- Focus on technical skills like Python, Java, SQL",
            "- Include both hard and soft skills",
        ]

    if scores["formatting"] < 80:
        tips += [
            "\nπ Improve resume formatting:",
            "- Use clear section headings",
            "- Include dates for experiences",
            "- Use bullet points for better readability",
        ]

    if scores["keyword_optimization"] < 70:
        tips += [
            "\nπ Optimize keywords usage:",
            "- Use more industry-specific terms",
            "- Include action verbs",
            "- Mention specific technologies and tools",
        ]

    return "\n".join(tips)
def process_resume(file):
    """Run the full analysis pipeline on an uploaded resume.

    Args:
        file: Uploaded-file object from ``gr.File``.

    Returns:
        tuple: (scores dict, recommendations string) for the UI outputs.
    """
    resume_text = extract_text_from_resume(file)
    extracted = extract_information(resume_text)
    score_table = analyze_resume(resume_text, extracted)
    return score_table, generate_recommendations(score_table, extracted)
def create_interface():
    """Build the Gradio Blocks UI: file upload, analyze button, two outputs.

    Returns:
        gr.Blocks: The assembled (not yet launched) application.
    """
    with gr.Blocks() as demo:
        gr.Markdown("""
    # Resume Analyzer and Optimizer
    Upload your resume to get personalized analysis and recommendations.
    """)

        # Input row: the resume upload widget.
        with gr.Row():
            resume_file = gr.File(
                label="Upload Resume (PDF, DOCX, or TXT)",
                file_types=["pdf", "docx", "txt"]
            )

        # Action row: single primary button triggers the analysis.
        with gr.Row():
            run_button = gr.Button("Analyze Resume", variant="primary")

        # Output row: scores as JSON on the left, text advice on the right.
        with gr.Row():
            with gr.Column():
                scores_json = gr.JSON(label="Analysis Scores")
            with gr.Column():
                advice_box = gr.Textbox(
                    label="Recommendations",
                    lines=10
                )

        run_button.click(
            fn=process_resume,
            inputs=[resume_file],
            outputs=[scores_json, advice_box]
        )
    return demo
if __name__ == "__main__":
    # Build the UI and start the local Gradio server.
    create_interface().launch()