PyTorch: RNN regression

For an explanation of the RNN model's parameters, see the earlier post RNN参数解释 (RNN parameter explanation); a short refresher sketch also follows the note below.
  ### For my own practice only; not intended for any other use.
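As a quick refresher on those parameters (a minimal sketch of the `nn.RNN` arguments used in this post, following the official PyTorch API; the random input is just for shape checking):

```python
import torch
from torch import nn

rnn = nn.RNN(
    input_size=1,      # number of features per time step
    hidden_size=32,    # number of features in the hidden state
    num_layers=1,      # number of stacked RNN layers
    batch_first=True,  # expect input shaped (batch, time_step, input_size)
)

x = torch.randn(1, 10, 1)       # (batch, time_step, input_size)
r_out, h_state = rnn(x, None)   # None -> zero-initialized hidden state
print(r_out.shape)              # torch.Size([1, 10, 32])
print(h_state.shape)            # torch.Size([1, 1, 32])
```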

```python
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

# Hyper Parameters
TIME_STEP = 10      # rnn time step
INPUT_SIZE = 1      # rnn input size
LR = 0.02           # learning rate

# show data
steps = np.linspace(0, np.pi*2, 100, dtype=np.float32)  # float32 for converting to torch FloatTensor
x_np = np.sin(steps)
y_np = np.cos(steps)
plt.plot(steps, y_np, 'r-', label='target (cos)')
plt.plot(steps, x_np, 'b-', label='input (sin)')
plt.legend(loc='best')
plt.show()


class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()

        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,     # rnn hidden unit
            num_layers=1,       # number of rnn layers
            batch_first=True,   # input & output have batch size as the 1st dimension, e.g. (batch, time_step, input_size)
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # x        (batch, time_step, input_size)
        # h_state  (n_layers, batch, hidden_size)
        # r_out    (batch, time_step, hidden_size)
        r_out, h_state = self.rnn(x, h_state)

        outs = []    # save all predictions
        for time_step in range(r_out.size(1)):    # calculate output for each time step
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state

        # instead, for simplicity, you can replace the code above with the following
        # r_out = r_out.view(-1, 32)
        # outs = self.out(r_out)
        # outs = outs.view(-1, TIME_STEP, 1)
        # return outs, h_state

        # or even simpler, since nn.Linear accepts input of any number of dimensions
        # and returns output of the same shape except for the last dimension
        # outs = self.out(r_out)
        # return outs, h_state


rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
loss_func = nn.MSELoss()

h_state = None      # for initial hidden state

plt.figure(1, figsize=(12, 5))
plt.ion()           # continuously plot

for step in range(100):
    start, end = step * np.pi, (step + 1) * np.pi   # time range
    # use sin to predict cos
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False)  # float32 for converting to torch FloatTensor
    x_np = np.sin(steps)
    y_np = np.cos(steps)

    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])   # shape (batch, time_step, input_size)
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    prediction, h_state = rnn(x, h_state)   # rnn output
    # !! next step is important !!
    h_state = h_state.detach()   # repack the hidden state, break the connection from the last iteration

    loss = loss_func(prediction, y)     # calculate loss
    optimizer.zero_grad()               # clear gradients for this training step
    loss.backward()                     # backpropagation, compute gradients
    optimizer.step()                    # apply gradients

    # plotting
    plt.plot(steps, y_np.flatten(), 'r-')
    plt.plot(steps, prediction.detach().numpy().flatten(), 'b-')
    plt.draw()
    plt.pause(0.05)

plt.ioff()
plt.show()
```
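A note on the line marked "!! next step is important !!": the `h_state` returned by `forward` is still attached to the current iteration's computation graph. If it were fed back into the next forward pass without detaching, the next `loss.backward()` would try to backpropagate through the previous iteration's graph, whose intermediate buffers have already been freed. A minimal sketch of the failure mode, assuming `rnn`, `loss_func`, `x`, and `y` from the script above:

```python
prediction, h_state = rnn(x, None)
loss = loss_func(prediction, y)
loss.backward()                          # frees the graph's intermediate buffers

prediction, h_state = rnn(x, h_state)    # h_state NOT detached here
loss = loss_func(prediction, y)
# This second backward() reaches back into the freed graph and raises an
# error along the lines of "RuntimeError: Trying to backward through the
# graph a second time ...". Detaching (h_state = h_state.detach()) cuts
# that link, so each iteration's backward pass stays local -- i.e.
# truncated backpropagation through time.
loss.backward()
```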
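After training, the same model can be checked on an unseen segment of the sine wave. A small usage sketch (the range 100π to 101π is an arbitrary choice; starting from a zero hidden state, the first few predictions may be slightly off):

```python
with torch.no_grad():
    steps = np.linspace(100 * np.pi, 101 * np.pi, TIME_STEP,
                        dtype=np.float32, endpoint=False)
    x = torch.from_numpy(np.sin(steps)[np.newaxis, :, np.newaxis])
    prediction, _ = rnn(x, None)     # start from a zero hidden state

plt.plot(steps, np.cos(steps), 'r-', label='target (cos)')
plt.plot(steps, prediction.numpy().flatten(), 'b-', label='prediction')
plt.legend(loc='best')
plt.show()
```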