Product-Name First-Level Category Classification: Testing CNN and RNN Methods

EPOCHS = 3  # number of training passes (defined but unused; the demos below make a single pass over the data)

#TEXT_CNN hyperparameters:
IN_CHANNELS = 100     # input feature dimension: the word-embedding size for text (the channel count for images)
OUT_CHANNELS = 256    # number of channels produced by the convolution, i.e. the number of filters
KERNEL_SIZE = 2       # kernel length along the word axis; the effective filter shape is
                      # (KERNEL_SIZE, embedding_size), i.e. (KERNEL_SIZE, IN_CHANNELS)
BATCH_SIZE = 1
WORD_MAX_LENGTH = 10  # fixed number of words per product name (longer names are truncated, shorter ones padded)
#TEXT_RNN hyperparameters:
HIDDEN_SIZE = 100     # for the text RNN this is also the input-vector dimension
OUT_SIZE = 17         # number of first-level categories
N_LAYERS = 2
BATCH_SIZE_RNN = 1
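As a quick sanity check of these hyperparameters, the following minimal sketch (not part of the original script; the variable names are illustrative) pushes dummy tensors through a Conv1d and a GRU and prints the resulting shapes:

import torch
import torch.nn as nn
import torch.nn.functional as F

# Conv1d expects (batch, in_channels, seq_len): the embedding dimension
# is treated as channels, with the words along the last axis.
x = torch.randn(BATCH_SIZE, IN_CHANNELS, WORD_MAX_LENGTH)   # (1, 100, 10)
conv = nn.Conv1d(IN_CHANNELS, OUT_CHANNELS, KERNEL_SIZE)
y = conv(x)
print(y.shape)                                              # (1, 256, 9)
print(F.max_pool1d(y, 2).shape)                             # (1, 256, 4)

# nn.GRU with batch_first=True expects (batch, seq_len, input_size).
gru = nn.GRU(HIDDEN_SIZE, HIDDEN_SIZE, N_LAYERS, batch_first=True)
h0 = torch.zeros(N_LAYERS, BATCH_SIZE_RNN, HIDDEN_SIZE)
out, hn = gru(torch.randn(BATCH_SIZE_RNN, WORD_MAX_LENGTH, HIDDEN_SIZE), h0)
print(out.shape, hn.shape)                                  # (1, 10, 100), (2, 1, 100)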


import torch
import torch.nn as nn
from torch import optim
import pandas as pd
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F

def read_data():
    # read the data
    data_corpus = pd.read_excel(r'D:\pro\pytorch\goods_name_classfiction\keyword_all.xlsx')  # read all keywords
    corpus_list = list(data_corpus['keyword_all'].values)  # convert to a list, length 22933

    data_goods = pd.read_excel(r'D:\pro\pytorch\goods_name_classfiction\分词后数据.xlsx')
    # print(data_goods)
    return corpus_list, data_goods
def preprocess(corpus_list, data_goods):
    # build the corpus dictionary (word -> index)
    corpus_dict = dict()
    for index, word in enumerate(corpus_list):
        corpus_dict[word] = index
    length_corpus_dict = len(corpus_dict)
    print("length_corpus_dict = ", length_corpus_dict)

    # extract the categories
    class_fiction_name = data_goods['一级分类']
    class_list = list(set(list(class_fiction_name.values)))
    print("length_class:", len(class_list))
    target_dict = {}
    for i, class_name in enumerate(class_list):
        target_dict[class_name] = i
    print(target_dict)


    embedding = nn.Embedding(length_corpus_dict + 1, 100)  # lookup table that maps each word index to a 100-dim vector


    goods_vector_target_dict = {}  # collects (word vectors, target) per product
    for i in range(data_goods.shape[0]):  # iterate over every product
        keywords = data_goods['sku_name_afterdivide'][i]  # segmented product name
        keyword_list = keywords.split('|')  # all keywords of one product
        # print(keyword_list)
        idx_list_onegoods = []  # indices of all words in one product name
        for w in keyword_list:  # iterate over every word
            if w != '':
                idx = corpus_dict.get(w, 0)  # use the corpus index if the word exists; otherwise fall back to the dummy index 0
                if idx != 0:
                    idx_list_onegoods.append(idx)
        # Inspecting the length distribution shows that 99% of segmented product names contain at most
        # 10 (WORD_MAX_LENGTH) words, so longer names are truncated to 10 and shorter ones are zero-padded.
        if len(idx_list_onegoods) > WORD_MAX_LENGTH:
            idx_list_onegoods = idx_list_onegoods[:WORD_MAX_LENGTH]
        elif len(idx_list_onegoods) < WORD_MAX_LENGTH:
            for j in range(WORD_MAX_LENGTH - len(idx_list_onegoods)):
                idx_list_onegoods.append(0)
        idx_list_tensor_onegoods = torch.LongTensor(idx_list_onegoods)  # word-index list -> LongTensor
        embedding_one_goods = embedding(idx_list_tensor_onegoods)  # indices -> vectors, shape (10, 100)

        # read the category this product belongs to
        class_name = data_goods['一级分类'][i]
        target = target_dict[class_name]
        # print("class_name :", class_name, "target : ", target)

        goods_vector_target_dict[str(i)] = [embedding_one_goods, target]

    # print(len(goods_vector_target_dict))
    return goods_vector_target_dict, target_dict
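Note that the nn.Embedding here is randomly initialized and applied once during preprocessing, so its weights never receive gradients: every word keeps a fixed random vector throughout training. If you wanted the embeddings to be learned, a common alternative (sketched below; hypothetical, not what this script does) is to store only the index tensors in the dataset and make the embedding the first layer of the model, with padding_idx=0 so the padding index stays at the zero vector.

import torch
import torch.nn as nn

# Hypothetical learnable-embedding variant: the embedding lives inside the
# model, so backprop updates the word vectors along with the other weights.
class EmbeddingFirst(nn.Module):
    def __init__(self, vocab_size, embed_dim=100):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)

    def forward(self, idx_batch):          # idx_batch: (batch, WORD_MAX_LENGTH) LongTensor
        return self.embedding(idx_batch)   # -> (batch, WORD_MAX_LENGTH, embed_dim)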
class goods_vector_target_Dataset(Dataset):  # subclass torch.utils.data.Dataset
    def __init__(self, goods_vector_target_dict):
        self.data = goods_vector_target_dict
    def __len__(self):
        # return the number of samples
        return len(self.data)
    def __getitem__(self, g_v_index):
        [x, target] = self.data[str(g_v_index)]
        label = torch.IntTensor([target])
        return x, label
def set_dataloader(goods_vector_target_dict):
    g_v_target_Dataset = goods_vector_target_Dataset(goods_vector_target_dict)

    DL = DataLoader(g_v_target_Dataset,
                    batch_size=BATCH_SIZE,
                    shuffle=True)
    return DL
class TEXT_CNN(nn.Module):
    def __init__(self):
        super(TEXT_CNN, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=IN_CHANNELS,    # feature dimension before the convolution
                               out_channels=OUT_CHANNELS,  # feature dimension after the convolution
                               kernel_size=KERNEL_SIZE,    # filter size
                               )
        self.word_length = WORD_MAX_LENGTH
        self.input_word_length_conv = int((self.word_length - KERNEL_SIZE) / 1 + 1)   # sequence length after the convolution
        self.input_word_length_pool = int((self.input_word_length_conv - 2) / 2 + 1)  # sequence length after pooling; pooling does not change the feature dimension

        self.fc1 = nn.Linear(OUT_CHANNELS * self.input_word_length_pool, 500)
        self.fc2 = nn.Linear(500, 17)

    def forward(self, x):
        x = self.conv1(x)
        # print("x.size() - after conv1 = ", x.size())
        x = F.relu(x)
        # print("x.size() - after relu = ", x.size())
        x = F.max_pool1d(x, 2)
        # print("x.size() --- after pool ", x.size())
        x = x.view(-1, self.input_word_length_pool * OUT_CHANNELS)  # the first dimension is the batch
        # print("x.size() -- after reshape = ", x.size())
        x = self.fc1(x)
        x = F.relu(x)
        # print("x.size() -- after linear-1 = ", x.size())
        x = self.fc2(x)
        x = F.log_softmax(x, dim=1)
        return x
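To make the length bookkeeping concrete: with WORD_MAX_LENGTH = 10 and KERNEL_SIZE = 2 the convolution yields (10 - 2)/1 + 1 = 9 positions, max_pool1d with kernel (and default stride) 2 then yields floor((9 - 2)/2 + 1) = 4, so the flattened feature entering fc1 has 256 * 4 = 1024 values. A minimal trace of the class above (variable names illustrative):

net = TEXT_CNN()
x = torch.randn(BATCH_SIZE, WORD_MAX_LENGTH, IN_CHANNELS)  # (1, 10, 100), as the DataLoader yields it
x = x.transpose(2, 1)                                      # (1, 100, 10): channels first for Conv1d
out = net(x)
print(out.shape)                                           # torch.Size([1, 17]) of log-probabilities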
def train_text_cnn(dataloader):
    print("Initializing the CNN, the loss function and the optimizer")
    net_cnn = TEXT_CNN()
    Loss = nn.MultiLabelSoftMarginLoss()
    optimizer = optim.Adam(net_cnn.parameters())

    print("Starting training")
    correct_number = 0
    for i, item in enumerate(dataloader):  # iterate over the loader
        # print("i = ", i)
        x, label = item  # unpack the data and the category
        x = x.transpose(2, 1)  # swap dims 1 and 2 so the word-vector dimension (in_channels) comes second and the word count third
        # MultiLabelSoftMarginLoss expects an (N, C) target, so expand the class index into a one-hot row
        label = F.one_hot(label.squeeze(1).long(), num_classes=17).float()
        # forward pass
        out = net_cnn(x)

        # feed prediction and target into the loss function
        loss = Loss(out, label)
        # clear the gradients
        optimizer.zero_grad()
        # backward pass
        loss.backward()
        # update the weights
        optimizer.step()

        # count correctly predicted samples
        correct = (torch.argmax(out, 1) == torch.argmax(label, 1)).sum().float()
        correct_number += correct.item()

        # report the loss and accuracy every 200 products
        if (i + 1) % 200 == 0:
            acc = correct_number / (200 * BATCH_SIZE)
            print("acc = %.2f " % (acc * 100) + '%', "   i = ", i, "   loss = ", loss)
            # reset correct_number
            correct_number = 0
class TEXT_GRU(nn.Module):
    # GRU is a kind of RNN: a simplified LSTM that merges the forget gate and the input gate into a single update gate
    def __init__(self, hidden_size, out_size, n_layers=1, batch_size=1):
        super(TEXT_GRU, self).__init__()
        self.batch_size = batch_size
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.out_size = out_size

        # GRU layer with batch_first=True
        self.gru = nn.GRU(self.hidden_size, self.hidden_size, self.n_layers, batch_first=True)
        # Note: batch_first only changes the expected shape of the input/output tensors
        # to (batch, time_seq, input); it does not change the shape of the hidden state
        # add a fully connected linear layer
        self.out = nn.Linear(self.hidden_size, self.out_size)

    def forward(self, word_inputs, hidden):
        # (batch, time_seq, input)
        inputs = word_inputs.view(self.batch_size, -1, self.hidden_size)

        # hidden is the context output, output is the per-step RNN output
        output, hidden = self.gru(inputs, hidden)

        output = self.out(output)

        # take the last vector along the time_seq dimension
        output = output[:, -1, :]

        return output, hidden

    def init_hidden(self):
        # initialize a hidden state of shape (n_layers, batch, hidden_size)
        hidden = torch.zeros(self.n_layers, 1, self.hidden_size)
        return hidden
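As a quick check of the "last time step" selection: for a single-layer GRU the final element of the output sequence equals the final hidden state of that layer, so output[:, -1, :] (before the linear projection) and hidden[-1] should match. A small sketch (names illustrative):

net = TEXT_GRU(HIDDEN_SIZE, OUT_SIZE)             # defaults: n_layers=1, batch_size=1
h0 = net.init_hidden()                            # (1, 1, 100)
x = torch.randn(1, WORD_MAX_LENGTH, HIDDEN_SIZE)
out, hn = net(x, h0)
print(out.shape)                                  # torch.Size([1, 17]): one score per category
seq_out, _ = net.gru(x, h0)
print(torch.allclose(seq_out[:, -1, :], hn[-1]))  # True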
def train_text_rnn(dataloader):
    print("Initializing the GRU, the optimizer and the loss function")
    net_rnn = TEXT_GRU(100, 17)
    print(net_rnn)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net_rnn.parameters(), lr=0.001, momentum=0.9)

    # training:
    correct_number = 0
    for i, item in enumerate(dataloader):
        encoder_hidden = net_rnn.init_hidden()  # initialize the hidden state
        input_data, label = item  # unpack the data and the category
        # forward pass
        encoder_outputs, encoder_hidden = net_rnn(input_data, encoder_hidden)
        # compute the loss
        label = label.type(torch.LongTensor)
        label = label.squeeze(dim=0)  # drop a dimension so the target matches what CrossEntropyLoss() expects

        # clear the gradients
        optimizer.zero_grad()
        loss_gru = criterion(encoder_outputs, label)
        # backward pass
        loss_gru.backward()
        # update the parameters
        optimizer.step()

        # track accuracy
        predict = torch.argmax(encoder_outputs)
        if label.item() == predict.item():
            correct_number += 1

        if (i + 1) % 200 == 0:
            print("loss_gru = ", loss_gru, "i+1 = ", i + 1)
            print("acc = %.2f" % (correct_number / 200.0))  # fraction of the last 200 samples predicted correctly
            # reset the counter
            correct_number = 0
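One caveat when reading the log below: the reported accuracy is computed on the same shuffled samples the model is being trained on during its single pass, so it is a running training accuracy rather than a held-out estimate. A sketch of how a validation split could be carved off with torch.utils.data.random_split (an assumption on my part, not part of the original script):

from torch.utils.data import random_split

def split_dataloaders(goods_vector_target_dict, val_fraction=0.1):
    # Reuses the Dataset class above; the 10% split size is illustrative.
    full = goods_vector_target_Dataset(goods_vector_target_dict)
    n_val = int(len(full) * val_fraction)
    train_set, val_set = random_split(full, [len(full) - n_val, n_val])
    return (DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True),
            DataLoader(val_set, batch_size=BATCH_SIZE))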
            

        
if __name__ == '__main__':
    print("Reading data........")
    corpus_list, data_goods = read_data()

    print("Preprocessing data............")
    goods_vector_target_dict, target_dict = preprocess(corpus_list, data_goods)
    length_sample = len(goods_vector_target_dict)
    print("length_sample", length_sample)

    print("Building the batch data loader..........")
    dataloader = set_dataloader(goods_vector_target_dict)

    # train_cnn: train the convolutional network
    # train_text_cnn(dataloader)  # the CNN only reached about 20% accuracy in testing, so its log is not shown

    # train_rnn: train the recurrent network
    train_text_rnn(dataloader)

Output:

Reading data........
Preprocessing data............
length_corpus_dict =  23840
length_class: 17
{'汽水饮料': 0, '香烟': 1, '生活百货': 2, '粮油调味': 3, '休闲零食': 4, '生鲜专区': 5, '日化用品': 6, '牛奶乳品': 7, '计生情趣': 8, '无酒不欢': 9, '糖巧饼干': 10, '冲调保健': 11, '冷冻食品': 12, '方便速食': 13, '应季鲜食': 14, '个人护理': 15, '母婴用品': 16}
length_sample 59716
Building the batch data loader..........
Initializing the GRU, the optimizer and the loss function
TEXT_GRU(
  (gru): GRU(100, 100, batch_first=True)
  (out): Linear(in_features=100, out_features=17, bias=True)
)
loss_gru =  tensor(3.0386, grad_fn=<NllLossBackward>) i+1 =  200
acc = 0.17
loss_gru =  tensor(2.7060, grad_fn=<NllLossBackward>) i+1 =  400
acc = 0.13
loss_gru =  tensor(2.9429, grad_fn=<NllLossBackward>) i+1 =  600
acc = 0.17
loss_gru =  tensor(2.5029, grad_fn=<NllLossBackward>) i+1 =  800
acc = 0.21
loss_gru =  tensor(1.6622, grad_fn=<NllLossBackward>) i+1 =  1000
acc = 0.24
loss_gru =  tensor(2.9567, grad_fn=<NllLossBackward>) i+1 =  1200
acc = 0.23
loss_gru =  tensor(2.5880, grad_fn=<NllLossBackward>) i+1 =  1400
acc = 0.28
loss_gru =  tensor(1.9529, grad_fn=<NllLossBackward>) i+1 =  1600
acc = 0.23
loss_gru =  tensor(3.6859, grad_fn=<NllLossBackward>) i+1 =  1800
acc = 0.28
loss_gru =  tensor(2.5353, grad_fn=<NllLossBackward>) i+1 =  2000
acc = 0.35
loss_gru =  tensor(1.6803, grad_fn=<NllLossBackward>) i+1 =  2200
acc = 0.30
loss_gru =  tensor(2.9684, grad_fn=<NllLossBackward>) i+1 =  2400
acc = 0.32
loss_gru =  tensor(2.1357, grad_fn=<NllLossBackward>) i+1 =  2600
acc = 0.36
loss_gru =  tensor(1.3955, grad_fn=<NllLossBackward>) i+1 =  2800
acc = 0.37
loss_gru =  tensor(0.6635, grad_fn=<NllLossBackward>) i+1 =  3000
acc = 0.35
loss_gru =  tensor(0.9471, grad_fn=<NllLossBackward>) i+1 =  3200
acc = 0.39
loss_gru =  tensor(1.9385, grad_fn=<NllLossBackward>) i+1 =  3400
acc = 0.39
loss_gru =  tensor(0.8706, grad_fn=<NllLossBackward>) i+1 =  3600
acc = 0.35
loss_gru =  tensor(3.0208, grad_fn=<NllLossBackward>) i+1 =  3800
acc = 0.36
loss_gru =  tensor(1.2624, grad_fn=<NllLossBackward>) i+1 =  4000
acc = 0.30
loss_gru =  tensor(1.1914, grad_fn=<NllLossBackward>) i+1 =  4200
acc = 0.32
loss_gru =  tensor(1.0794, grad_fn=<NllLossBackward>) i+1 =  4400
acc = 0.36
loss_gru =  tensor(0.4265, grad_fn=<NllLossBackward>) i+1 =  4600
acc = 0.41
loss_gru =  tensor(0.7499, grad_fn=<NllLossBackward>) i+1 =  4800
acc = 0.41
loss_gru =  tensor(1.6211, grad_fn=<NllLossBackward>) i+1 =  5000
acc = 0.40
loss_gru =  tensor(1.4165, grad_fn=<NllLossBackward>) i+1 =  5200
acc = 0.42
loss_gru =  tensor(1.4213, grad_fn=<NllLossBackward>) i+1 =  5400
acc = 0.35
loss_gru =  tensor(0.7890, grad_fn=<NllLossBackward>) i+1 =  5600
acc = 0.42
loss_gru =  tensor(1.8348, grad_fn=<NllLossBackward>) i+1 =  5800
acc = 0.39
loss_gru =  tensor(1.4075, grad_fn=<NllLossBackward>) i+1 =  6000
acc = 0.38
loss_gru =  tensor(0.6074, grad_fn=<NllLossBackward>) i+1 =  6200
acc = 0.47
loss_gru =  tensor(0.8261, grad_fn=<NllLossBackward>) i+1 =  6400
acc = 0.47
loss_gru =  tensor(2.4060, grad_fn=<NllLossBackward>) i+1 =  6600
acc = 0.43
loss_gru =  tensor(1.3542, grad_fn=<NllLossBackward>) i+1 =  6800
acc = 0.41
loss_gru =  tensor(3.4078, grad_fn=<NllLossBackward>) i+1 =  7000
acc = 0.42
loss_gru =  tensor(1.0472, grad_fn=<NllLossBackward>) i+1 =  7200
acc = 0.47
loss_gru =  tensor(0.1292, grad_fn=<NllLossBackward>) i+1 =  7400
acc = 0.44
loss_gru =  tensor(3.4075, grad_fn=<NllLossBackward>) i+1 =  7600
acc = 0.53
loss_gru =  tensor(0.0680, grad_fn=<NllLossBackward>) i+1 =  7800
acc = 0.47
loss_gru =  tensor(0.5741, grad_fn=<NllLossBackward>) i+1 =  8000
acc = 0.47
loss_gru =  tensor(0.2975, grad_fn=<NllLossBackward>) i+1 =  8200
acc = 0.44
loss_gru =  tensor(0.3285, grad_fn=<NllLossBackward>) i+1 =  8400
acc = 0.43
loss_gru =  tensor(1.9534, grad_fn=<NllLossBackward>) i+1 =  8600
acc = 0.48
loss_gru =  tensor(1.9777, grad_fn=<NllLossBackward>) i+1 =  8800
acc = 0.49
loss_gru =  tensor(1.4026, grad_fn=<NllLossBackward>) i+1 =  9000
acc = 0.47
loss_gru =  tensor(2.8992, grad_fn=<NllLossBackward>) i+1 =  9200
acc = 0.51
loss_gru =  tensor(0.0485, grad_fn=<NllLossBackward>) i+1 =  9400
acc = 0.51
loss_gru =  tensor(1.4136, grad_fn=<NllLossBackward>) i+1 =  9600
acc = 0.54
loss_gru =  tensor(4.0180, grad_fn=<NllLossBackward>) i+1 =  9800
acc = 0.51
loss_gru =  tensor(0.2641, grad_fn=<NllLossBackward>) i+1 =  10000
acc = 0.43
loss_gru =  tensor(0.9155, grad_fn=<NllLossBackward>) i+1 =  10200
acc = 0.52
loss_gru =  tensor(0.0242, grad_fn=<NllLossBackward>) i+1 =  10400
acc = 0.47
loss_gru =  tensor(1.4925, grad_fn=<NllLossBackward>) i+1 =  10600
acc = 0.53
loss_gru =  tensor(2.2237, grad_fn=<NllLossBackward>) i+1 =  10800
acc = 0.53
loss_gru =  tensor(0.7028, grad_fn=<NllLossBackward>) i+1 =  11000
acc = 0.56
loss_gru =  tensor(2.0236, grad_fn=<NllLossBackward>) i+1 =  11200
acc = 0.53
loss_gru =  tensor(2.0818, grad_fn=<NllLossBackward>) i+1 =  11400
acc = 0.56
loss_gru =  tensor(0.2314, grad_fn=<NllLossBackward>) i+1 =  11600
acc = 0.56
loss_gru =  tensor(1.1168, grad_fn=<NllLossBackward>) i+1 =  11800
acc = 0.51
loss_gru =  tensor(4.6615, grad_fn=<NllLossBackward>) i+1 =  12000
acc = 0.57
loss_gru =  tensor(2.7071, grad_fn=<NllLossBackward>) i+1 =  12200
acc = 0.53
loss_gru =  tensor(1.1002, grad_fn=<NllLossBackward>) i+1 =  12400
acc = 0.49
loss_gru =  tensor(0.8452, grad_fn=<NllLossBackward>) i+1 =  12600
acc = 0.52
loss_gru =  tensor(2.2987, grad_fn=<NllLossBackward>) i+1 =  12800
acc = 0.49
loss_gru =  tensor(1.7405, grad_fn=<NllLossBackward>) i+1 =  13000
acc = 0.58
loss_gru =  tensor(2.1415, grad_fn=<NllLossBackward>) i+1 =  13200
acc = 0.62
loss_gru =  tensor(2.7600, grad_fn=<NllLossBackward>) i+1 =  13400
acc = 0.55
loss_gru =  tensor(0.1250, grad_fn=<NllLossBackward>) i+1 =  13600
acc = 0.56
loss_gru =  tensor(0.3571, grad_fn=<NllLossBackward>) i+1 =  13800
acc = 0.57
loss_gru =  tensor(2.7511, grad_fn=<NllLossBackward>) i+1 =  14000
acc = 0.54
loss_gru =  tensor(2.6041, grad_fn=<NllLossBackward>) i+1 =  14200
acc = 0.61
loss_gru =  tensor(0.5438, grad_fn=<NllLossBackward>) i+1 =  14400
acc = 0.53
loss_gru =  tensor(2.0588, grad_fn=<NllLossBackward>) i+1 =  14600
acc = 0.59
loss_gru =  tensor(1.3306, grad_fn=<NllLossBackward>) i+1 =  14800
acc = 0.66
loss_gru =  tensor(0.3544, grad_fn=<NllLossBackward>) i+1 =  15000
acc = 0.60
loss_gru =  tensor(0.4446, grad_fn=<NllLossBackward>) i+1 =  15200
acc = 0.62
loss_gru =  tensor(0.3822, grad_fn=<NllLossBackward>) i+1 =  15400
acc = 0.56
loss_gru =  tensor(0.4503, grad_fn=<NllLossBackward>) i+1 =  15600
acc = 0.59
loss_gru =  tensor(0.4371, grad_fn=<NllLossBackward>) i+1 =  15800
acc = 0.62
loss_gru =  tensor(0.0546, grad_fn=<NllLossBackward>) i+1 =  16000
acc = 0.66
loss_gru =  tensor(0.3448, grad_fn=<NllLossBackward>) i+1 =  16200
acc = 0.61
loss_gru =  tensor(0.9325, grad_fn=<NllLossBackward>) i+1 =  16400
acc = 0.56
loss_gru =  tensor(1.1092, grad_fn=<NllLossBackward>) i+1 =  16600
acc = 0.61
loss_gru =  tensor(0.5308, grad_fn=<NllLossBackward>) i+1 =  16800
acc = 0.62
loss_gru =  tensor(1.6681, grad_fn=<NllLossBackward>) i+1 =  17000
acc = 0.59
loss_gru =  tensor(0.4959, grad_fn=<NllLossBackward>) i+1 =  17200
acc = 0.62
loss_gru =  tensor(0.8824, grad_fn=<NllLossBackward>) i+1 =  17400
acc = 0.64
loss_gru =  tensor(0.1917, grad_fn=<NllLossBackward>) i+1 =  17600
acc = 0.60
loss_gru =  tensor(0.0733, grad_fn=<NllLossBackward>) i+1 =  17800
acc = 0.63
loss_gru =  tensor(1.4889, grad_fn=<NllLossBackward>) i+1 =  18000
acc = 0.60
loss_gru =  tensor(3.2624, grad_fn=<NllLossBackward>) i+1 =  18200
acc = 0.68
loss_gru =  tensor(0.6349, grad_fn=<NllLossBackward>) i+1 =  18400
acc = 0.64
loss_gru =  tensor(1.8831, grad_fn=<NllLossBackward>) i+1 =  18600
acc = 0.57
loss_gru =  tensor(0.6848, grad_fn=<NllLossBackward>) i+1 =  18800
acc = 0.64
loss_gru =  tensor(0.4036, grad_fn=<NllLossBackward>) i+1 =  19000
acc = 0.61
loss_gru =  tensor(0.1304, grad_fn=<NllLossBackward>) i+1 =  19200
acc = 0.70
loss_gru =  tensor(0.3352, grad_fn=<NllLossBackward>) i+1 =  19400
acc = 0.65
loss_gru =  tensor(0.6869, grad_fn=<NllLossBackward>) i+1 =  19600
acc = 0.71
loss_gru =  tensor(2.2552, grad_fn=<NllLossBackward>) i+1 =  19800
acc = 0.67
loss_gru =  tensor(1.1483, grad_fn=<NllLossBackward>) i+1 =  20000
acc = 0.67
loss_gru =  tensor(0.0024, grad_fn=<NllLossBackward>) i+1 =  20200
acc = 0.61
loss_gru =  tensor(0.0117, grad_fn=<NllLossBackward>) i+1 =  20400
acc = 0.60
loss_gru =  tensor(4.1795, grad_fn=<NllLossBackward>) i+1 =  20600
acc = 0.73
loss_gru =  tensor(1.4006, grad_fn=<NllLossBackward>) i+1 =  20800
acc = 0.64
loss_gru =  tensor(0.4805, grad_fn=<NllLossBackward>) i+1 =  21000
acc = 0.70
loss_gru =  tensor(0.1000, grad_fn=<NllLossBackward>) i+1 =  21200
acc = 0.66
loss_gru =  tensor(0.3179, grad_fn=<NllLossBackward>) i+1 =  21400
acc = 0.60
loss_gru =  tensor(0.2431, grad_fn=<NllLossBackward>) i+1 =  21600
acc = 0.64
loss_gru =  tensor(1.1147, grad_fn=<NllLossBackward>) i+1 =  21800
acc = 0.77
loss_gru =  tensor(4.4950, grad_fn=<NllLossBackward>) i+1 =  22000
acc = 0.68
loss_gru =  tensor(0.0866, grad_fn=<NllLossBackward>) i+1 =  22200
acc = 0.70
loss_gru =  tensor(2.1087, grad_fn=<NllLossBackward>) i+1 =  22400
acc = 0.72
loss_gru =  tensor(1.5803, grad_fn=<NllLossBackward>) i+1 =  22600
acc = 0.67
loss_gru =  tensor(0.7615, grad_fn=<NllLossBackward>) i+1 =  22800
acc = 0.68
loss_gru =  tensor(1.0394, grad_fn=<NllLossBackward>) i+1 =  23000
acc = 0.72
loss_gru =  tensor(1.7408, grad_fn=<NllLossBackward>) i+1 =  23200
acc = 0.71
loss_gru =  tensor(0.1566, grad_fn=<NllLossBackward>) i+1 =  23400
acc = 0.69
loss_gru =  tensor(1.0396, grad_fn=<NllLossBackward>) i+1 =  23600
acc = 0.68
loss_gru =  tensor(1.3258, grad_fn=<NllLossBackward>) i+1 =  23800
acc = 0.70
loss_gru =  tensor(1.1567, grad_fn=<NllLossBackward>) i+1 =  24000
acc = 0.69
loss_gru =  tensor(0.9120, grad_fn=<NllLossBackward>) i+1 =  24200
acc = 0.69
loss_gru =  tensor(2.9511, grad_fn=<NllLossBackward>) i+1 =  24400
acc = 0.72
loss_gru =  tensor(0.0725, grad_fn=<NllLossBackward>) i+1 =  24600
acc = 0.71
loss_gru =  tensor(0.2883, grad_fn=<NllLossBackward>) i+1 =  24800
acc = 0.69
loss_gru =  tensor(0.1038, grad_fn=<NllLossBackward>) i+1 =  25000
acc = 0.64
loss_gru =  tensor(1.5492, grad_fn=<NllLossBackward>) i+1 =  25200
acc = 0.70
loss_gru =  tensor(1.0096, grad_fn=<NllLossBackward>) i+1 =  25400
acc = 0.74
loss_gru =  tensor(0.5120, grad_fn=<NllLossBackward>) i+1 =  25600
acc = 0.70
loss_gru =  tensor(0.4768, grad_fn=<NllLossBackward>) i+1 =  25800
acc = 0.71
loss_gru =  tensor(3.6087, grad_fn=<NllLossBackward>) i+1 =  26000
acc = 0.66
loss_gru =  tensor(2.7767, grad_fn=<NllLossBackward>) i+1 =  26200
acc = 0.73
loss_gru =  tensor(0.0097, grad_fn=<NllLossBackward>) i+1 =  26400
acc = 0.72
loss_gru =  tensor(4.3543, grad_fn=<NllLossBackward>) i+1 =  26600
acc = 0.71
loss_gru =  tensor(1.4313, grad_fn=<NllLossBackward>) i+1 =  26800
acc = 0.73
loss_gru =  tensor(0.7051, grad_fn=<NllLossBackward>) i+1 =  27000
acc = 0.68
loss_gru =  tensor(0.6362, grad_fn=<NllLossBackward>) i+1 =  27200
acc = 0.73
loss_gru =  tensor(0.0750, grad_fn=<NllLossBackward>) i+1 =  27400
acc = 0.69
loss_gru =  tensor(0.5342, grad_fn=<NllLossBackward>) i+1 =  27600
acc = 0.74
loss_gru =  tensor(1.4162, grad_fn=<NllLossBackward>) i+1 =  27800
acc = 0.70
loss_gru =  tensor(0.0062, grad_fn=<NllLossBackward>) i+1 =  28000
acc = 0.71
loss_gru =  tensor(0.7902, grad_fn=<NllLossBackward>) i+1 =  28200
acc = 0.62
loss_gru =  tensor(4.1682, grad_fn=<NllLossBackward>) i+1 =  28400
acc = 0.72
loss_gru =  tensor(0.2228, grad_fn=<NllLossBackward>) i+1 =  28600
acc = 0.76
loss_gru =  tensor(0.0164, grad_fn=<NllLossBackward>) i+1 =  28800
acc = 0.73
loss_gru =  tensor(6.3688, grad_fn=<NllLossBackward>) i+1 =  29000
acc = 0.65
loss_gru =  tensor(0.2564, grad_fn=<NllLossBackward>) i+1 =  29200
acc = 0.72
loss_gru =  tensor(4.3733, grad_fn=<NllLossBackward>) i+1 =  29400
acc = 0.69
loss_gru =  tensor(0.4473, grad_fn=<NllLossBackward>) i+1 =  29600
acc = 0.69
loss_gru =  tensor(0.1224, grad_fn=<NllLossBackward>) i+1 =  29800
acc = 0.78
loss_gru =  tensor(0.6393, grad_fn=<NllLossBackward>) i+1 =  30000
acc = 0.71
loss_gru =  tensor(0.0081, grad_fn=<NllLossBackward>) i+1 =  30200
acc = 0.78
loss_gru =  tensor(1.4730, grad_fn=<NllLossBackward>) i+1 =  30400
acc = 0.77
loss_gru =  tensor(0.0150, grad_fn=<NllLossBackward>) i+1 =  30600
acc = 0.72
loss_gru =  tensor(1.3727, grad_fn=<NllLossBackward>) i+1 =  30800
acc = 0.76
loss_gru =  tensor(0.1216, grad_fn=<NllLossBackward>) i+1 =  31000
acc = 0.73
loss_gru =  tensor(2.9719, grad_fn=<NllLossBackward>) i+1 =  31200
acc = 0.70
loss_gru =  tensor(0.8828, grad_fn=<NllLossBackward>) i+1 =  31400
acc = 0.78
loss_gru =  tensor(0.0536, grad_fn=<NllLossBackward>) i+1 =  31600
acc = 0.80
loss_gru =  tensor(0.0363, grad_fn=<NllLossBackward>) i+1 =  31800
acc = 0.73
loss_gru =  tensor(4.3157, grad_fn=<NllLossBackward>) i+1 =  32000
acc = 0.70
loss_gru =  tensor(2.8426, grad_fn=<NllLossBackward>) i+1 =  32200
acc = 0.71
loss_gru =  tensor(0.5529, grad_fn=<NllLossBackward>) i+1 =  32400
acc = 0.72
loss_gru =  tensor(0.1205, grad_fn=<NllLossBackward>) i+1 =  32600
acc = 0.77
loss_gru =  tensor(1.9493, grad_fn=<NllLossBackward>) i+1 =  32800
acc = 0.71
loss_gru =  tensor(1.5550, grad_fn=<NllLossBackward>) i+1 =  33000
acc = 0.77
loss_gru =  tensor(0.5392, grad_fn=<NllLossBackward>) i+1 =  33200
acc = 0.79
loss_gru =  tensor(1.3131, grad_fn=<NllLossBackward>) i+1 =  33400
acc = 0.70
loss_gru =  tensor(0.0395, grad_fn=<NllLossBackward>) i+1 =  33600
acc = 0.77
loss_gru =  tensor(0.0223, grad_fn=<NllLossBackward>) i+1 =  33800
acc = 0.80
loss_gru =  tensor(0.1720, grad_fn=<NllLossBackward>) i+1 =  34000
acc = 0.78
loss_gru =  tensor(2.4051, grad_fn=<NllLossBackward>) i+1 =  34200
acc = 0.73
loss_gru =  tensor(1.7388, grad_fn=<NllLossBackward>) i+1 =  34400
acc = 0.80
loss_gru =  tensor(0.6589, grad_fn=<NllLossBackward>) i+1 =  34600
acc = 0.79
loss_gru =  tensor(0.7629, grad_fn=<NllLossBackward>) i+1 =  34800
acc = 0.70
loss_gru =  tensor(0.0311, grad_fn=<NllLossBackward>) i+1 =  35000
acc = 0.79
loss_gru =  tensor(0.3465, grad_fn=<NllLossBackward>) i+1 =  35200
acc = 0.78
loss_gru =  tensor(4.0610, grad_fn=<NllLossBackward>) i+1 =  35400
acc = 0.82
loss_gru =  tensor(0.0788, grad_fn=<NllLossBackward>) i+1 =  35600
acc = 0.74
loss_gru =  tensor(0.0254, grad_fn=<NllLossBackward>) i+1 =  35800
acc = 0.69
loss_gru =  tensor(0.1517, grad_fn=<NllLossBackward>) i+1 =  36000
acc = 0.72
loss_gru =  tensor(0.0716, grad_fn=<NllLossBackward>) i+1 =  36200
acc = 0.79
loss_gru =  tensor(1.1799, grad_fn=<NllLossBackward>) i+1 =  36400
acc = 0.79
loss_gru =  tensor(0.0787, grad_fn=<NllLossBackward>) i+1 =  36600
acc = 0.74
loss_gru =  tensor(0.0297, grad_fn=<NllLossBackward>) i+1 =  36800
acc = 0.79
loss_gru =  tensor(0.4162, grad_fn=<NllLossBackward>) i+1 =  37000
acc = 0.76
loss_gru =  tensor(1.1754, grad_fn=<NllLossBackward>) i+1 =  37200
acc = 0.77
loss_gru =  tensor(0.5442, grad_fn=<NllLossBackward>) i+1 =  37400
acc = 0.74
loss_gru =  tensor(0.1710, grad_fn=<NllLossBackward>) i+1 =  37600
acc = 0.78
loss_gru =  tensor(0.2572, grad_fn=<NllLossBackward>) i+1 =  37800
acc = 0.73
loss_gru =  tensor(1.2389, grad_fn=<NllLossBackward>) i+1 =  38000
acc = 0.77
loss_gru =  tensor(0.8938, grad_fn=<NllLossBackward>) i+1 =  38200
acc = 0.78
loss_gru =  tensor(0.5183, grad_fn=<NllLossBackward>) i+1 =  38400
acc = 0.80
loss_gru =  tensor(0.0201, grad_fn=<NllLossBackward>) i+1 =  38600
acc = 0.77
loss_gru =  tensor(0.3638, grad_fn=<NllLossBackward>) i+1 =  38800
acc = 0.78
loss_gru =  tensor(1.9419, grad_fn=<NllLossBackward>) i+1 =  39000
acc = 0.73
loss_gru =  tensor(4.8649, grad_fn=<NllLossBackward>) i+1 =  39200
acc = 0.80
loss_gru =  tensor(0.2308, grad_fn=<NllLossBackward>) i+1 =  39400
acc = 0.81
loss_gru =  tensor(0.0016, grad_fn=<NllLossBackward>) i+1 =  39600
acc = 0.71
loss_gru =  tensor(0.5710, grad_fn=<NllLossBackward>) i+1 =  39800
acc = 0.79
loss_gru =  tensor(0.4222, grad_fn=<NllLossBackward>) i+1 =  40000
acc = 0.80
loss_gru =  tensor(0.1464, grad_fn=<NllLossBackward>) i+1 =  40200
acc = 0.79
loss_gru =  tensor(0.3998, grad_fn=<NllLossBackward>) i+1 =  40400
acc = 0.74
loss_gru =  tensor(0.9317, grad_fn=<NllLossBackward>) i+1 =  40600
acc = 0.80
loss_gru =  tensor(0.0051, grad_fn=<NllLossBackward>) i+1 =  40800
acc = 0.76
loss_gru =  tensor(0.0554, grad_fn=<NllLossBackward>) i+1 =  41000
acc = 0.71
loss_gru =  tensor(0.0016, grad_fn=<NllLossBackward>) i+1 =  41200
acc = 0.74
loss_gru =  tensor(3.7336, grad_fn=<NllLossBackward>) i+1 =  41400
acc = 0.83
loss_gru =  tensor(0.3915, grad_fn=<NllLossBackward>) i+1 =  41600
acc = 0.79
loss_gru =  tensor(0.0606, grad_fn=<NllLossBackward>) i+1 =  41800
acc = 0.78
loss_gru =  tensor(0.3752, grad_fn=<NllLossBackward>) i+1 =  42000
acc = 0.83
loss_gru =  tensor(0.4473, grad_fn=<NllLossBackward>) i+1 =  42200
acc = 0.79
loss_gru =  tensor(1.2779, grad_fn=<NllLossBackward>) i+1 =  42400
acc = 0.81
loss_gru =  tensor(0.0627, grad_fn=<NllLossBackward>) i+1 =  42600
acc = 0.78
loss_gru =  tensor(0.0123, grad_fn=<NllLossBackward>) i+1 =  42800
acc = 0.79
loss_gru =  tensor(1.0919, grad_fn=<NllLossBackward>) i+1 =  43000
acc = 0.76
loss_gru =  tensor(0.0726, grad_fn=<NllLossBackward>) i+1 =  43200
acc = 0.78
loss_gru =  tensor(0.0013, grad_fn=<NllLossBackward>) i+1 =  43400
acc = 0.82
loss_gru =  tensor(0.9275, grad_fn=<NllLossBackward>) i+1 =  43600
acc = 0.77
loss_gru =  tensor(0.0638, grad_fn=<NllLossBackward>) i+1 =  43800
acc = 0.79
loss_gru =  tensor(0.0168, grad_fn=<NllLossBackward>) i+1 =  44000
acc = 0.82
loss_gru =  tensor(0.0153, grad_fn=<NllLossBackward>) i+1 =  44200
acc = 0.83
loss_gru =  tensor(0.3572, grad_fn=<NllLossBackward>) i+1 =  44400
acc = 0.81
loss_gru =  tensor(10.2706, grad_fn=<NllLossBackward>) i+1 =  44600
acc = 0.77
loss_gru =  tensor(2.8233, grad_fn=<NllLossBackward>) i+1 =  44800
acc = 0.81
loss_gru =  tensor(0.4327, grad_fn=<NllLossBackward>) i+1 =  45000
acc = 0.77
loss_gru =  tensor(0.1983, grad_fn=<NllLossBackward>) i+1 =  45200
acc = 0.81
loss_gru =  tensor(0.0220, grad_fn=<NllLossBackward>) i+1 =  45400
acc = 0.79
loss_gru =  tensor(4.2947, grad_fn=<NllLossBackward>) i+1 =  45600
acc = 0.71
loss_gru =  tensor(0.0382, grad_fn=<NllLossBackward>) i+1 =  45800
acc = 0.77
loss_gru =  tensor(0.0285, grad_fn=<NllLossBackward>) i+1 =  46000
acc = 0.81
loss_gru =  tensor(0.0243, grad_fn=<NllLossBackward>) i+1 =  46200
acc = 0.73
loss_gru =  tensor(5.6964, grad_fn=<NllLossBackward>) i+1 =  46400
acc = 0.84
loss_gru =  tensor(0.3131, grad_fn=<NllLossBackward>) i+1 =  46600
acc = 0.73
loss_gru =  tensor(0.2723, grad_fn=<NllLossBackward>) i+1 =  46800
acc = 0.81
loss_gru =  tensor(0.0224, grad_fn=<NllLossBackward>) i+1 =  47000
acc = 0.81
loss_gru =  tensor(0.0020, grad_fn=<NllLossBackward>) i+1 =  47200
acc = 0.83
loss_gru =  tensor(0.7779, grad_fn=<NllLossBackward>) i+1 =  47400
acc = 0.78
loss_gru =  tensor(2.2047, grad_fn=<NllLossBackward>) i+1 =  47600
acc = 0.83
loss_gru =  tensor(0.1925, grad_fn=<NllLossBackward>) i+1 =  47800
acc = 0.79
loss_gru =  tensor(1.1697, grad_fn=<NllLossBackward>) i+1 =  48000
acc = 0.79
loss_gru =  tensor(0.2083, grad_fn=<NllLossBackward>) i+1 =  48200
acc = 0.79
loss_gru =  tensor(1.1640, grad_fn=<NllLossBackward>) i+1 =  48400
acc = 0.73
loss_gru =  tensor(0.3408, grad_fn=<NllLossBackward>) i+1 =  48600
acc = 0.81
loss_gru =  tensor(0.1376, grad_fn=<NllLossBackward>) i+1 =  48800
acc = 0.80
loss_gru =  tensor(0.2293, grad_fn=<NllLossBackward>) i+1 =  49000
acc = 0.79
loss_gru =  tensor(0.0477, grad_fn=<NllLossBackward>) i+1 =  49200
acc = 0.76
loss_gru =  tensor(0.7520, grad_fn=<NllLossBackward>) i+1 =  49400
acc = 0.77
loss_gru =  tensor(0.4068, grad_fn=<NllLossBackward>) i+1 =  49600
acc = 0.79
loss_gru =  tensor(0.3563, grad_fn=<NllLossBackward>) i+1 =  49800
acc = 0.83
loss_gru =  tensor(1.1095, grad_fn=<NllLossBackward>) i+1 =  50000
acc = 0.79
loss_gru =  tensor(0.4017, grad_fn=<NllLossBackward>) i+1 =  50200
acc = 0.82
loss_gru =  tensor(0.1864, grad_fn=<NllLossBackward>) i+1 =  50400
acc = 0.77
loss_gru =  tensor(0.0532, grad_fn=<NllLossBackward>) i+1 =  50600
acc = 0.80
loss_gru =  tensor(0.1764, grad_fn=<NllLossBackward>) i+1 =  50800
acc = 0.82
loss_gru =  tensor(0.0060, grad_fn=<NllLossBackward>) i+1 =  51000
acc = 0.75
loss_gru =  tensor(0.9909, grad_fn=<NllLossBackward>) i+1 =  51200
acc = 0.89
loss_gru =  tensor(0.3442, grad_fn=<NllLossBackward>) i+1 =  51400
acc = 0.84
loss_gru =  tensor(0.0042, grad_fn=<NllLossBackward>) i+1 =  51600
acc = 0.81
loss_gru =  tensor(1.9397, grad_fn=<NllLossBackward>) i+1 =  51800
acc = 0.81
loss_gru =  tensor(0.2149, grad_fn=<NllLossBackward>) i+1 =  52000
acc = 0.74
loss_gru =  tensor(0.6850, grad_fn=<NllLossBackward>) i+1 =  52200
acc = 0.81
loss_gru =  tensor(0.0030, grad_fn=<NllLossBackward>) i+1 =  52400
acc = 0.86
loss_gru =  tensor(0.3355, grad_fn=<NllLossBackward>) i+1 =  52600
acc = 0.74
loss_gru =  tensor(0.0089, grad_fn=<NllLossBackward>) i+1 =  52800
acc = 0.85
loss_gru =  tensor(0.0387, grad_fn=<NllLossBackward>) i+1 =  53000
acc = 0.80
loss_gru =  tensor(0.0057, grad_fn=<NllLossBackward>) i+1 =  53200
acc = 0.80
loss_gru =  tensor(0.2154, grad_fn=<NllLossBackward>) i+1 =  53400
acc = 0.84
loss_gru =  tensor(0.1375, grad_fn=<NllLossBackward>) i+1 =  53600
acc = 0.79
loss_gru =  tensor(0.0931, grad_fn=<NllLossBackward>) i+1 =  53800
acc = 0.75
loss_gru =  tensor(0.4137, grad_fn=<NllLossBackward>) i+1 =  54000
acc = 0.83
loss_gru =  tensor(0.9419, grad_fn=<NllLossBackward>) i+1 =  54200
acc = 0.79
loss_gru =  tensor(1.7267, grad_fn=<NllLossBackward>) i+1 =  54400
acc = 0.80
loss_gru =  tensor(0.8051, grad_fn=<NllLossBackward>) i+1 =  54600
acc = 0.85
loss_gru =  tensor(0.4758, grad_fn=<NllLossBackward>) i+1 =  54800
acc = 0.82
loss_gru =  tensor(0.0355, grad_fn=<NllLossBackward>) i+1 =  55000
acc = 0.81
loss_gru =  tensor(0.0054, grad_fn=<NllLossBackward>) i+1 =  55200
acc = 0.81
loss_gru =  tensor(0.5640, grad_fn=<NllLossBackward>) i+1 =  55400
acc = 0.86
loss_gru =  tensor(0.0608, grad_fn=<NllLossBackward>) i+1 =  55600
acc = 0.81
loss_gru =  tensor(0.0016, grad_fn=<NllLossBackward>) i+1 =  55800
acc = 0.81
loss_gru =  tensor(0.0082, grad_fn=<NllLossBackward>) i+1 =  56000
acc = 0.81
loss_gru =  tensor(0.0174, grad_fn=<NllLossBackward>) i+1 =  56200
acc = 0.75
loss_gru =  tensor(0.0252, grad_fn=<NllLossBackward>) i+1 =  56400
acc = 0.79
loss_gru =  tensor(0.0094, grad_fn=<NllLossBackward>) i+1 =  56600
acc = 0.79
loss_gru =  tensor(0.0016, grad_fn=<NllLossBackward>) i+1 =  56800
acc = 0.81
loss_gru =  tensor(0.0569, grad_fn=<NllLossBackward>) i+1 =  57000
acc = 0.81
loss_gru =  tensor(1.2155, grad_fn=<NllLossBackward>) i+1 =  57200
acc = 0.78
loss_gru =  tensor(0.3368, grad_fn=<NllLossBackward>) i+1 =  57400
acc = 0.81
loss_gru =  tensor(2.4498, grad_fn=<NllLossBackward>) i+1 =  57600
acc = 0.82
loss_gru =  tensor(0.0184, grad_fn=<NllLossBackward>) i+1 =  57800
acc = 0.86
loss_gru =  tensor(0.7247, grad_fn=<NllLossBackward>) i+1 =  58000
acc = 0.80
loss_gru =  tensor(0.3346, grad_fn=<NllLossBackward>) i+1 =  58200
acc = 0.82
loss_gru =  tensor(0.0144, grad_fn=<NllLossBackward>) i+1 =  58400
acc = 0.84
loss_gru =  tensor(0.0055, grad_fn=<NllLossBackward>) i+1 =  58600
acc = 0.77
loss_gru =  tensor(0.4129, grad_fn=<NllLossBackward>) i+1 =  58800
acc = 0.85
loss_gru =  tensor(0.3432, grad_fn=<NllLossBackward>) i+1 =  59000
acc = 0.81
loss_gru =  tensor(0.1580, grad_fn=<NllLossBackward>) i+1 =  59200
acc = 0.79
loss_gru =  tensor(0.2341, grad_fn=<NllLossBackward>) i+1 =  59400
acc = 0.81
loss_gru =  tensor(0.0006, grad_fn=<NllLossBackward>) i+1 =  59600
acc = 0.78

