[torch] nlp-lstm-cos -> sin

Submitted by 折月煮酒 on 2019-11-28 15:44:53

LSTM

I went through the official LSTM tutorial and the underlying theory, rewrote the example from my own understanding, then came across the cos-to-sin prediction problem online and modeled it with an LSTM.

Many papers extract image-text features with LSTMs, so this is also useful background for learning OCR.

Official LSTM example

Given a sentence, classify the part of speech of each word in it.

'''
@Description: This is Aoru Xue's demo, which is only for reference
@version: 
@Author: Aoru Xue
@Date: 2019-08-17 21:58:08
@LastEditors: Aoru Xue
@LastEditTime: 2019-08-26 13:34:22
'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

training_data = [
    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
words_set = list(set([word for data in training_data for word in data[0]]))
def word2idx(word):
    return words_set.index(word)
def target2idx(target):
    dic = {"NN":0,"DET":1,"V":2}
    return dic[target]
def get_training_idx(training_data):
    idxs = []
    for words, targets in training_data:
        idxs.append((torch.tensor([word2idx(word) for word in words], dtype=torch.long),
                     torch.tensor([target2idx(target) for target in targets], dtype=torch.long)))
    return idxs
class LSTMTagger(nn.Module):
    def __init__(self,hidden_dim,vocab_size,embedding_dim,tag_dim):
        super(LSTMTagger,self).__init__()
        self.embedding_dim = embedding_dim
        self.tag_dim = tag_dim
        self.words_embeddings = nn.Embedding(vocab_size,embedding_dim)
        self.lstm = nn.LSTM(embedding_dim,hidden_dim)
        self.hidden2tag = nn.Linear(hidden_dim,tag_dim)
    def forward(self, x):
        # x: (len(words),) -- a sequence of word indices
        x = self.words_embeddings(x)  # (len(words), embedding_dim)
        # nn.LSTM defaults to batch_first=False, so it expects input of shape
        # (seq_len, batch, input_size); here batch = 1.
        x, _ = self.lstm(x.view(-1, 1, self.embedding_dim))  # (len(words), 1, hidden_dim)
        x = self.hidden2tag(x)  # (len(words), 1, tag_dim)
        # NLLLoss expects log-probabilities, so apply log_softmax before returning.
        return F.log_softmax(x.view(-1, self.tag_dim), dim=1)
if __name__ == "__main__":
    train_data = get_training_idx(training_data)
    model = LSTMTagger(hidden_dim = 64, vocab_size = len(words_set), embedding_dim = 32, tag_dim = 3)
    loss_fn = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(),lr = 0.1)
    losses = []
    for epoch in range(300):
        for sentence,target in train_data:
            model.zero_grad()
            out = model(sentence)
            loss = loss_fn(out,target)
            losses.append(loss.item())
            loss.backward() 
            optimizer.step()

    with torch.no_grad():

        for sentence,target in train_data:        
            print(torch.argmax(model(sentence),dim = 1),target)






'''
[Running] set PYTHONIOENCODING=utf-8 && /home/xueaoru/.conda/envs/pytorch/bin/python -u "/home/xueaoru/文档/codes/LSTM.py"
tensor([1, 0, 2, 1, 0]) tensor([1, 0, 2, 1, 0])
tensor([0, 2, 1, 0]) tensor([0, 2, 1, 0])
'''
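A side note, not from the original post: since NLLLoss expects log-probabilities, an equivalent setup is to drop the log_softmax in forward() and use nn.CrossEntropyLoss, which applies log_softmax and NLLLoss in one step:

# Hypothetical variant (not in the original code): have forward() return the
# raw scores, i.e. `return x.view(-1, self.tag_dim)`, and swap the loss:
loss_fn = nn.CrossEntropyLoss()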

Predicting sin from cos

The mapping between cos and sin values is not one-to-one: a single cos value corresponds to two possible sin values, so an ordinary feed-forward network cannot model it directly; the temporal context of the inputs has to be taken into account.

That is, the sin value at a point is determined from the sequence of cos values fed in before it.
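A quick illustration of that ambiguity (my own addition, not from the original post): cos is an even function while sin is odd, so the same cos input can correspond to two different sin targets, and no pointwise function of cos alone can recover sin.

import numpy as np

# Same cos input, two different sin targets:
print(np.cos(np.pi / 3), np.cos(-np.pi / 3))  # 0.5000..., 0.5000...
print(np.sin(np.pi / 3), np.sin(-np.pi / 3))  # 0.8660..., -0.8660...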

Training feels quite slow: close to two minutes.

The modeling code is as follows:

'''
@Description: This is Aoru Xue's demo, which is only for reference
@version: 
@Author: Aoru Xue
@Date: 2019-08-26 16:22:36
@LastEditors: Aoru Xue
@LastEditTime: 2019-08-26 17:05:54
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

raw_inputs = torch.tensor([i*np.pi / 20 for i in range(1000)],dtype = torch.float)
cosx = torch.cos(raw_inputs)
sinx = torch.sin(raw_inputs)


class RNNModule(nn.Module):
    def __init__(self,hidden2): 
        super(RNNModule,self).__init__() 
        self.lstm = nn.LSTM(1,hidden2)
        self.flatten = nn.Linear(hidden2,1)
    def forward(self, x):
        # reshape to (seq_len, batch=1, input_size=1) for the batch_first=False LSTM
        x = x.view((-1, 1, 1))
        x, _ = self.lstm(x)     # (seq_len, 1, hidden2)
        x = self.flatten(x)     # (seq_len, 1, 1)
        return x.view((1, -1))  # (1, seq_len)
if __name__ == "__main__":
    model = RNNModule(16)
    xs = [x*np.pi / 20 for x in range(0,2000)]
    optimizer = optim.Adam(model.parameters())
    loss_fn = nn.MSELoss()  
    for epoch in range(100):
        for i in range(0,1000 - 20):
            model.zero_grad()
            cos_x = torch.cos(torch.tensor(xs[i:i+20],dtype = torch.float))
            out = model(cos_x)
            sin_x = torch.sin(torch.tensor(xs[i:i+20],dtype = torch.float))
            loss = loss_fn(out,sin_x.view(1,-1))
            loss.backward()
            optimizer.step()
    with torch.no_grad():
        x = cosx[0:20]
        output = model(x)
        print(output,sinx[0:20])
    
'''
tensor([[-0.0167,  0.0853,  0.2704,  0.4169,  0.5790,  0.7059,  0.8086,  0.9002,
         0.9675,  0.9988,  1.0050,  0.9896,  0.9524,  0.8948,  0.8171,  0.7172,
         0.5929,  0.4554,  0.3129,  0.1634]]) tensor([0.0000, 0.1564, 0.3090, 0.4540, 0.5878, 0.7071, 0.8090, 0.8910, 0.9511,
        0.9877, 1.0000, 0.9877, 0.9511, 0.8910, 0.8090, 0.7071, 0.5878, 0.4540,
        0.3090, 0.1564])
'''
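The check above only replays the first training window. As a rough follow-up (my own addition, not in the original post), one can feed the model a window from beyond the trained index range; note that because cos and sin here are periodic with a period of 40 samples at this step size, such a window repeats values seen in training, so this confirms the fit rather than true extrapolation:

# Hypothetical follow-up: evaluate on a window outside the trained index range.
with torch.no_grad():
    test_x = torch.cos(torch.tensor(xs[1200:1220], dtype=torch.float))
    test_y = torch.sin(torch.tensor(xs[1200:1220], dtype=torch.float))
    print(model(test_x))
    print(test_y.view(1, -1))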