import json

# Load the sarcasm-detection dataset: each record has a headline,
# an is_sarcastic label, and a link to the original article.
with open('sarcasm.json', 'r') as f:
    datastore = json.load(f)

sentences = []
labels = []
urls = []
for item in datastore:
    sentences.append(item['headline'])
    labels.append(item['is_sarcastic'])
    urls.append(item['article_link'])
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# "<OOV>" stands in for any word not seen when the tokenizer was fitted
tokenizer = Tokenizer(oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index

# Convert each headline to a sequence of word indices, then pad with
# trailing zeros so every row has the same length
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, padding='post')
print(sentences[2])
print(padded[2])
print(padded.shape)
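
# Illustrative follow-up (a sketch, not part of the original listing): the fitted
# tokenizer can also encode unseen sentences. Words missing from word_index map to
# the <OOV> index, and maxlen/padding keep the shape consistent with the training
# data. The test sentences below are made-up examples.
test_sentences = [
    'granny starting to fear spiders in the garden might be real',
    'game of thrones season finale showing this sunday night',
]
test_sequences = tokenizer.texts_to_sequences(test_sentences)
test_padded = pad_sequences(test_sequences, maxlen=padded.shape[1], padding='post')
print(test_padded.shape)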