|
|
|
|
|
import nltk |
|
from nltk import pos_tag |
|
from nltk.tokenize import word_tokenize |
|
from nltk.corpus import stopwords |
|
from collections import Counter |
|
|
|
|
|
# Fetch the NLTK models/corpora this script relies on. These run at import
# time and hit the network on first use (no-ops once cached locally):
#   punkt                        -> sentence/word tokenizer used by word_tokenize
#   averaged_perceptron_tagger   -> POS model used by pos_tag
#   stopwords                    -> stopword lists used below
nltk.download('punkt')

nltk.download('averaged_perceptron_tagger')

nltk.download('stopwords')









# English stopwords as a set for O(1) membership tests during filtering.
stop_words = set(stopwords.words('english'))
|
|
|
def preprocess(text):
    """Lowercase and tokenize *text*, keeping only alphanumeric non-stopwords.

    Returns a list of cleaned tokens suitable for POS tagging / counting.
    """
    return [
        token
        for token in word_tokenize(text.lower())
        if token.isalnum() and token not in stop_words
    ]
|
|
|
def get_keywords(text, top_n=5):
    """Extract the *top_n* most frequent content-word keywords from *text*.

    Tokens are cleaned via preprocess(), POS-tagged, and only nouns (NN*),
    verbs (VB*), and adjectives (JJ*) are counted as keyword candidates.
    Returns a list of up to *top_n* words, most frequent first.
    """
    tagged = pos_tag(preprocess(text))

    # Count only content words: noun, verb, and adjective tag families.
    counts = Counter(
        token for token, tag in tagged if tag.startswith(('NN', 'VB', 'JJ'))
    )

    return [token for token, _ in counts.most_common(top_n)]