# Build a word-index vocabulary from a small sample corpus, then encode
# each phrase as a list of integer word IDs and print the result.
tokenizer = Tokenizer()
corpus = [
    'Machine Learning Knowledge',
    'Machine Learning',
    'Deep Learning',
    'Artificial Intelligence',
]
# fit_on_texts assigns an integer index to every distinct word (by frequency);
# texts_to_sequences then maps each phrase onto those indices.
tokenizer.fit_on_texts(corpus)
encoded = tokenizer.texts_to_sequences(corpus)
print("The sequences generated from text are : ", encoded)