import nltk
# Download the resources this snippet relies on: the English stopword list
# and the punkt tokenizer models used by word_tokenize.
nltk.download('stopwords')
nltk.download('punkt')  # newer NLTK releases may ask for 'punkt_tab' instead

from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# Build the stopword set once; membership checks on a set are O(1).
stop_words = set(stopwords.words('english'))

example_sent = "This is my awesome sentence"
word_tokens = word_tokenize(example_sent)
# Keep only tokens that are not stopwords (case-insensitive comparison).
filtered_sentence = [w for w in word_tokens if w.lower() not in stop_words]
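# Quick check of the result. "This", "is", and "my" are all in NLTK's English
# stopword list, so (assuming the downloads above succeeded) only the content
# words should remain:
print(filtered_sentence)  # expected: ['awesome', 'sentence']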
# Download the spaCy model once from a shell before running the code below:
#   python -m spacy download en_core_web_lg
import spacy

# Load the large English pipeline downloaded above.
nlp = spacy.load("en_core_web_lg")
# spaCy keeps its stopword list on the language defaults.
stop_words = nlp.Defaults.stop_words

example_sent = "This is my awesome sentence"
doc = nlp(example_sent)
# Keep only tokens whose lowercased text is not a stopword.
filtered_sentence = [w.text for w in doc if w.text.lower() not in stop_words]
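# With spaCy's defaults, "This", "is", and "my" are stopwords, so the result
# should match the NLTK version: ['awesome', 'sentence'].
print(filtered_sentence)

# Sketch of an equivalent idiom: each spaCy token carries an is_stop flag,
# which in recent spaCy versions checks the lowercased form, so the set
# lookup above can usually be replaced with:
filtered_sentence = [w.text for w in doc if not w.is_stop]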