Lines Matching full:lstm
223 lstm = torch.nn.LSTM(inputSize, hiddenSize, numLayers).to(device)
226 return x, lengths, (hx, cx), lstm.all_weights, lstm
228 # NB: lstm.all_weights format:
229 # wih, whh, bih, bhh = lstm.all_weights[layer]
230 return x, lengths, (hx, cx), lstm.all_weights, None
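The two returns above differ only in whether the cudnn-backed module is handed back alongside its flat weight list. A minimal sketch of the all_weights layout those comments describe; the sizes below are arbitrary illustration values, not the benchmark's defaults:

import torch

# Illustration only: sizes are arbitrary, not the benchmark defaults.
inputSize, hiddenSize, numLayers = 64, 128, 2
lstm = torch.nn.LSTM(inputSize, hiddenSize, numLayers)

# all_weights[layer] is the per-layer parameter list (w_ih, w_hh, b_ih, b_hh).
for layer, (wih, whh, bih, bhh) in enumerate(lstm.all_weights):
    print(layer, wih.shape, whh.shape, bih.shape, bhh.shape)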
318 # cudnn_layernorm_lstm: since cudnn does not have Layernorm LSTM, we cannot benchmark
320 # computation of a cudnn lstm + seq_len * 3 layernorm computation. This should serve
321 # as a perf lowerbound for the Layernorm LSTM forward pass (given that Layernorm itself
337 # Layernorm cudnn LSTM in the forward pass
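A rough sketch of that lower-bound idea: one cudnn LSTM forward plus seq_len * 3 LayerNorm applications. The tensor sizes, the normalized shapes, and where the LayerNorms are applied are all assumptions made for illustration, since the matched lines do not show the surrounding code:

import torch

seqLength, miniBatch, inputSize, hiddenSize = 100, 64, 512, 512  # assumed sizes
device = 'cuda'
lstm = torch.nn.LSTM(inputSize, hiddenSize).to(device)
ln_i = torch.nn.LayerNorm(4 * hiddenSize).to(device)  # gate pre-activations (assumed shape)
ln_h = torch.nn.LayerNorm(4 * hiddenSize).to(device)
ln_c = torch.nn.LayerNorm(hiddenSize).to(device)      # cell state

x = torch.randn(seqLength, miniBatch, inputSize, device=device)
gates = torch.randn(miniBatch, 4 * hiddenSize, device=device)

out, (hy, cy) = lstm(x)          # one cudnn LSTM forward
for _ in range(seqLength):       # plus seq_len * 3 LayerNorm computations
    ln_i(gates)
    ln_h(gates)
    cy = ln_c(cy)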
356 # input: lstm.all_weights format (wih, whh, bih, bhh = lstm.all_weights[layer])
377 # returns: x, (hx, cx), all_weights, lstm module with all_weights as params
394 lstm = torch.nn.LSTM(inputSize, hiddenSize, numLayers, dropout=dropout)
396 lstm = lstm.cuda()
399 return x, (hx, cx), lstm.all_weights, lstm
401 # NB: lstm.all_weights format:
402 # wih, whh, bih, bhh = lstm.all_weights[layer]
403 return x, (hx, cx), lstm.all_weights, None
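Putting lines 394-403 together, the creator appears to build an (optionally CUDA-resident) LSTM and return its input, hidden state, and weights, handing back either the module itself or None depending on which implementation will consume the weights. A self-contained sketch of that return contract; the function name, defaults, and tensor sizes are assumptions, not the benchmark's own creator:

import torch

def lstm_inputs_sketch(inputSize=512, hiddenSize=512, numLayers=1, dropout=0.0,
                       seqLength=100, miniBatch=64, device='cpu', return_module=True):
    lstm = torch.nn.LSTM(inputSize, hiddenSize, numLayers, dropout=dropout).to(device)
    x = torch.randn(seqLength, miniBatch, inputSize, device=device)
    hx = torch.randn(numLayers, miniBatch, hiddenSize, device=device)
    cx = torch.randn(numLayers, miniBatch, hiddenSize, device=device)
    # Per the all_weights format noted above: wih, whh, bih, bhh = lstm.all_weights[layer]
    if return_module:
        return x, (hx, cx), lstm.all_weights, lstm
    return x, (hx, cx), lstm.all_weights, None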