python - TypeError: forward() missing 1 required positional argument: 'hidden' - LSTM Model

I'm a beginner with LSTMs and PyTorch. I'm trying to create a model for a bursty-traffic prediction scenario; it is deliberately something of an overfitting model. First, it learns to match each next value, using x_data as both inputs and targets (you can see the shape of x_data in the main block). Then it tries to predict the whole traffic shape from the first 100 seed values. But the model gives me an error at the line "outputs, hidden = model(inputs)". Please help me solve this error.

Here is my complete code:

import numpy as np
import torch
import torch.nn as nn
from torch import Tensor 
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from scipy.signal import savgol_filter


is_cuda = torch.cuda.is_available()

if is_cuda:
    device = torch.device("cuda")
    print("GPU is available")
else:
    device = torch.device("cpu")
    print("GPU not available, CPU used")

class FPredRNN(nn.Module):         
  def __init__(self, input_size, hidden_size, num_layers, dropout_val = 0.1):
    super(FPredRNN, self).__init__()
    self.input_size = input_size
    self.nh = hidden_size
    self.nl = num_layers

    self.lstm = nn.LSTM(self.input_size, self.nh, self.nl, dropout = dropout_val) 
    self.dropout = nn.Dropout(dropout_val)
    self.linear = nn.Linear(self.nh, 1)

  def forward(self, x, hidden, steps = 1000, eval = False): 
    predictions = []
    batch_size = x.size(0)
    if hidden[0].size(1) != batch_size:  # hidden is an (h, c) tuple; batch size is dim 1
      hidden = self.init_hidden(batch_size)  # assign the result, otherwise it is discarded
    
    l_out, hidden = self.lstm(x, hidden)
    l_out = l_out.contiguous().view(-1, self.nh)

    out = self.dropout(l_out)
    out = self.linear(out)
    #out = out.view(batch_size, -1)
    #out = out[:,-1]
    
    if(eval):
      eval_input = out[-1:]
      for i in range(steps):
        lstm_out, hidden = self.lstm(eval_input, hidden)
        linear_out = self.linear(lstm_out)
        predictions += [linear_out]
        eval_input = linear_out
      out = torch.stack(predictions).squeeze()
      
    return out, hidden
            
  def init_hidden(self, batch_size):
    weight = next(self.parameters()).data
    hidden = (weight.new(self.nl, batch_size, self.nh).zero_().to(device), weight.new(self.nl, batch_size, self.nh).zero_().to(device)) 
    #hidden = ((self.nl, batch_size, self.nh).zero_().to(device), (self.nl, batch_size, self.nh).zero_().to(device))
    return hidden

if __name__ == "__main__":
  x_data = np.empty((1, 2000))
  y_data = np.empty((1, 1))

  for n in [30000]:
    traffic_generator = GenerateTraffic()
    bursty_traffic, a_t = traffic_generator.create_bursty_traffic(n_d=n)
    detected, attempted = traffic_generator.simulate_bursty_traffic_arrivals(bursty_traffic, backoff_bool= True)
    smooth_x = savgol_filter(detected, 97, 2)
    x_data[(n//10000)-3] = smooth_x 

  inputs = x_data[:, :1999]
  targets = x_data[:, 1:2000]
  
  inputs = torch.from_numpy(inputs)
  targets = torch.from_numpy(targets)
 
  print(inputs.size(1))
  print(inputs.size(0))
  #print(hidden.size(0))
  
  model = FPredRNN(input_size = inputs.size(1), hidden_size = 1100, num_layers = 2, dropout_val = 0.1)
  model.to(device)   

  criterion = nn.MSELoss()
  optimizer = torch.optim.Adam(model.parameters(), lr = 0.0001)
  
  
  # Train Model
  n_epochs = 2
  for epoch in range(1, n_epochs + 1):
    optimizer.zero_grad()
    inputs.to(device)
    outputs, hidden = model(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()

# Test Model
  seed_length = 100
  seed = inputs[:seed_length]
  outt = model(seed, steps=1000, eval = True)
  test_out = torch.cat((seed.squeeze(), outt))

The error output:

TypeError                                 Traceback (most recent call last)
<ipython-input-2-0e63f8c64103> in <module>()
   101     optimizer.zero_grad()
   102     inputs.to(device)
--> 103     outputs, hidden = model(inputs)
   104     loss = criterion(outputs, targets)
   105     loss.backward()

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   725             result = self._slow_forward(*input, **kwargs)
   726         else:
--> 727             result = self.forward(*input, **kwargs)
   728         for hook in itertools.chain(
   729                 _global_forward_hooks.values(),

TypeError: forward() missing 1 required positional argument: 'hidden'
Question from: https://stackoverflow.com/questions/65859737/typeerror-forward-missing-1-required-positional-argument-hidden-lstm-mod


1 Answer


Your forward method takes a second positional argument, hidden, but the training loop calls model(inputs) without it. I think what you want to do is:

hidden = model.init_hidden(inputs.size(0))
outputs, hidden = model(inputs, hidden)

This way the first hidden input is just a tuple of zero tensors (from init_hidden, which needs the batch size), and each later call can reuse the hidden state returned by the previous step. A fuller sketch is below.
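For completeness, here is a minimal sketch of the training loop and the test call with the hidden state threaded through. It assumes the FPredRNN class, model, criterion, optimizer, inputs and targets from your question; the .float() conversion and the detach step are my additions, not part of your original code.

# Build the initial (h, c) tuple once, then pass it explicitly on every call.
inputs = inputs.float().to(device)    # torch.from_numpy yields float64; LSTM weights are float32
targets = targets.float().to(device)
hidden = model.init_hidden(inputs.size(0))

for epoch in range(1, n_epochs + 1):
    optimizer.zero_grad()
    # Detach (h, c) so backward() does not reach into the previous epoch's graph.
    hidden = tuple(h.detach() for h in hidden)
    outputs, hidden = model(inputs, hidden)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()

# The test call raises the same TypeError unless it also receives a hidden state.
seed = inputs[:seed_length]
hidden = model.init_hidden(seed.size(0))
outt, hidden = model(seed, hidden, steps=1000, eval=True)

Alternatively, you could give hidden a default of None and build it inside forward, so that calls like model(inputs) keep working. A sketch of that variant:

def forward(self, x, hidden=None, steps=1000, eval=False):
    batch_size = x.size(0)
    if hidden is None or hidden[0].size(1) != batch_size:
        hidden = self.init_hidden(batch_size)
    # ... rest of forward unchanged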

