# quote_data/preprocessing.py
import nltk
from nltk import pos_tag
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from collections import Counter
# Download required NLTK data
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('stopwords')
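# Note (assumption, not in the original file): newer NLTK releases (3.9+)
# split these resources; if word_tokenize or pos_tag raises a LookupError,
# the variants below may be needed instead.
# nltk.download('punkt_tab')
# nltk.download('averaged_perceptron_tagger_eng')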
# Preprocessing
stop_words = set(stopwords.words('english'))
def preprocess(text):
    """Lowercase and tokenize text, dropping punctuation and stopwords."""
    tokens = word_tokenize(text.lower())
    # Keep alphanumeric tokens that are not English stopwords
    return [word for word in tokens if word.isalnum() and word not in stop_words]
def get_keywords(text, top_n=5):
    """Return the top_n most frequent content words in text."""
    processed_text = preprocess(text)
    pos_tags = pos_tag(processed_text)
    # Keep nouns (NN*), verbs (VB*), and adjectives (JJ*)
    keywords = [word for word, pos in pos_tags if pos.startswith(('NN', 'VB', 'JJ'))]
    # Get the top N keywords by counting word occurrences
    keyword_counts = Counter(keywords)
    return [word for word, _ in keyword_counts.most_common(top_n)]
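
# Minimal usage sketch (added for illustration; the sample quote is an
# assumption, not part of the original file):
if __name__ == "__main__":
    sample = "The only way to do great work is to love what you do."
    print(get_keywords(sample))
    # Expected: the most frequent nouns/verbs/adjectives, e.g. something
    # like ['way', 'great', 'work', 'love'] (exact order may vary).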