
Lines Matching +full:script +full:- +full:output

10 # list[list[T]] -> list[T]
20 (options) -> (inputs, params, forward, backward_setup, backward)
29 backward: Given `output = backward_setup(*forward(*inputs))`, performs
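The matched lines above describe the creator contract used throughout this file: a creator maps benchmark options to a tuple of inputs, params, forward, backward_setup, and backward, and the harness runs backward on whatever backward_setup(*forward(*inputs)) returns. Below is a minimal self-contained sketch of that contract; toy_creator and all of its names and shapes are invented for illustration and do not appear in this file.

    import torch

    def toy_creator(hidden_size=8, batch=4):
        # Hypothetical creator obeying (options) -> (inputs, params, forward,
        # backward_setup, backward); names and shapes are assumptions.
        w = torch.randn(hidden_size, hidden_size, requires_grad=True)
        x = torch.randn(batch, hidden_size)

        def forward(x):
            return (torch.tanh(x @ w),)  # forward returns a tuple

        def backward_setup(output, seed=None):
            if seed is not None:
                torch.manual_seed(seed)
            return output, torch.randn_like(output)

        def backward(output, grad_output):
            output.backward(grad_output)

        return [x], [w], forward, backward_setup, backward

    inputs, params, forward, backward_setup, backward = toy_creator()
    out = backward_setup(*forward(*inputs))  # -> (output, grad_output)
    backward(*out)                           # populates w.grad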
46 def simple_backward_setup(output, seed=None):
47 assert isinstance(output, torch.Tensor)
50 grad_output = torch.randn_like(output)
51 return output, grad_output
54 def simple_backward(output, grad_output, **kwargs):
55 return output.backward(grad_output, **kwargs)
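simple_backward_setup pairs a tensor output with a random gradient of the same shape, and simple_backward drives autograd with that gradient. A short usage sketch; the forward computation below is made up, only the two-helper pattern comes from the lines above.

    import torch

    x = torch.randn(32, 16, requires_grad=True)
    y = (x * 2.0).sigmoid()          # any differentiable computation

    # mirrors simple_backward_setup: output plus a same-shaped random gradient
    output, grad_output = y, torch.randn_like(y)

    # mirrors simple_backward: run autograd against that gradient
    output.backward(grad_output)
    print(x.grad.shape)              # torch.Size([32, 16])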
69 def lstm_creator(script=True, **kwargs):
75 forward=lstm_factory(lstm_cell, script),
81 def lnlstm_creator(script=True, decompose_layernorm=False, **kwargs):
82 assert script is True
110 def dropoutlstm_creator(script=True, **kwargs):
111 assert script is True
138 def lstm_premul_creator(script=True, **kwargs):
144 forward=lstm_factory_premul(premul_lstm_cell, script),
150 def lstm_premul_bias_creator(script=True, **kwargs):
156 forward=lstm_factory_premul_bias(premul_lstm_cell_no_bias, script),
162 def lstm_simple_creator(script=True, **kwargs):
168 forward=lstm_factory_simple(flat_lstm_cell, script),
174 def lstm_multilayer_creator(script=True, **kwargs):
180 forward=lstm_factory_multilayer(lstm_cell, script),
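Each of these creators follows the same composition pattern: pick a cell, hand it to the matching factory, and let the script flag decide whether the cell and the generated loop are compiled with torch.jit.script (lnlstm_creator and dropoutlstm_creator only implement the scripted path, hence their asserts). A sketch of that pattern with a made-up cell and factory standing in for the real ones:

    import torch

    def toy_cell(x, h):
        # stand-in for lstm_cell / flat_lstm_cell, invented for illustration
        return torch.tanh(x + h)

    def toy_factory(cell, script):
        def dynamic_rnn(x, h):
            # unroll the sequence dimension, applying the cell step by step
            for t in range(x.size(0)):
                h = cell(x[t], h)
            return h

        if script:
            cell = torch.jit.script(cell)
            dynamic_rnn = torch.jit.script(dynamic_rnn)
        return dynamic_rnn

    forward = toy_factory(toy_cell, script=True)
    out = forward(torch.randn(5, 4, 8), torch.zeros(4, 8))

Rebinding cell to its scripted version before scripting dynamic_rnn lets the compiled loop call the compiled cell, the same ordering visible in the factories further down.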
251 # XXX: It's more efficient to store the output in its padded form,
253 # Un-padding the output also makes the backward pass 2x slower...
266 def varlen_lstm_factory(cell, script):
274 ) -> Tuple[List[Tensor], Tuple[List[Tensor], List[Tensor]]]:
278 # List of: (output, hx, cx)
284 output = []
292 output += [hy]
293 outputs += [torch.stack(output)]
299 if script:
300 cell = torch.jit.script(cell)
301 dynamic_rnn = torch.jit.script(dynamic_rnn)
306 def varlen_lstm_creator(script=False, **kwargs):
312 forward=varlen_lstm_factory(lstm_cell, script),
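The variable-length variant avoids padding: for each batch element it unrolls only that sequence's own timesteps, collects the per-step hidden states in output, and stacks them into one tensor per sequence (hence the comment above about keeping the output padded and the slower backward when un-padding). A simplified eager sketch of that loop structure; the cell math and every name below are invented, and the real dynamic_rnn also threads the cell state and LSTM weights through the loop.

    import torch
    from typing import List, Tuple
    from torch import Tensor

    def toy_varlen_rnn(
        sequences: List[Tensor],   # one (seq_len_i, input_size) tensor per batch element
        h0: Tensor,                # (input_size,) initial state, reused for each sequence
    ) -> Tuple[List[Tensor], List[Tensor]]:
        outputs: List[Tensor] = []
        final_states: List[Tensor] = []
        for seq in sequences:
            h = h0
            output: List[Tensor] = []
            for t in range(seq.size(0)):
                h = torch.tanh(seq[t] + h)    # stand-in for the LSTM cell
                output += [h]
            outputs += [torch.stack(output)]  # (seq_len_i, input_size), no padding
            final_states += [h]
        return outputs, final_states

    seqs = [torch.randn(n, 8) for n in (3, 5, 2)]
    outs, finals = toy_varlen_rnn(seqs, torch.zeros(8))

With List[Tensor] annotations like these, the same function is a candidate for torch.jit.script, which is what the factory's script branch does with its own dynamic_rnn.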
357 # output: packed_weights with format
370 # XXX: script fns have problems indexing multidim lists, so we try to
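The workaround referred to here: scripted functions have trouble indexing nested (multi-dimensional) lists, so per-layer parameter lists are flattened into one flat list before being handed to a scripted function, which is what the list[list[T]] -> list[T] comment near the top of the file points at. A hedged sketch of that flattening; the helper name is my own, not the one used in this file.

    import torch
    from typing import List

    def flatten_params(nested: List[List[torch.Tensor]]) -> List[torch.Tensor]:
        # list[list[T]] -> list[T]: concatenate the per-layer parameter lists
        flat: List[torch.Tensor] = []
        for layer_params in nested:
            flat.extend(layer_params)
        return flat

    per_layer = [[torch.randn(4, 4), torch.randn(4)] for _ in range(3)]
    flat = flatten_params(per_layer)   # six tensors, indexable as flat[2 * layer + i]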
406 def lstm_factory(cell, script):
414 ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
424 if script:
425 cell = torch.jit.script(cell)
426 dynamic_rnn = torch.jit.script(dynamic_rnn)
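lstm_factory builds the single-layer timestep loop around a cell and, when script is true, compiles both with torch.jit.script. Below is a compact self-contained sketch of that idea: a hand-written LSTM cell plus an unrolled loop, both scripted. The weight layout follows PyTorch's usual convention (wih is (4*hidden, input), whh is (4*hidden, hidden)), but this is a sketch of the pattern, not the code from this file.

    import torch
    from typing import List, Tuple
    from torch import Tensor

    def lstm_cell_sketch(x: Tensor, hx: Tensor, cx: Tensor, wih: Tensor,
                         whh: Tensor, bih: Tensor, bhh: Tensor) -> Tuple[Tensor, Tensor]:
        # one LSTM step written with explicit matmuls and pointwise ops
        gates = torch.mm(x, wih.t()) + bih + torch.mm(hx, whh.t()) + bhh
        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
        cy = torch.sigmoid(forgetgate) * cx + torch.sigmoid(ingate) * torch.tanh(cellgate)
        hy = torch.sigmoid(outgate) * torch.tanh(cy)
        return hy, cy

    def dynamic_rnn_sketch(input: Tensor, hx: Tensor, cx: Tensor, wih: Tensor,
                           whh: Tensor, bih: Tensor, bhh: Tensor
                           ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        # unroll the time dimension; input is (seq_len, batch, input_size)
        inputs = input.unbind(0)
        outputs: List[Tensor] = []
        for t in range(len(inputs)):
            hx, cx = lstm_cell_sketch(inputs[t], hx, cx, wih, whh, bih, bhh)
            outputs += [hx]
        return torch.stack(outputs), (hx, cx)

    scripted = torch.jit.script(dynamic_rnn_sketch)  # mirrors the script=True branch

    seq, batch, inp, hid = 10, 4, 16, 32
    out, (h, c) = scripted(
        torch.randn(seq, batch, inp),
        torch.zeros(batch, hid), torch.zeros(batch, hid),
        torch.randn(4 * hid, inp), torch.randn(4 * hid, hid),
        torch.randn(4 * hid), torch.randn(4 * hid),
    )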
432 def lstm_factory_premul(premul_cell, script):
440 ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
450 if script:
451 premul_cell = torch.jit.script(premul_cell)
452 dynamic_rnn = torch.jit.script(dynamic_rnn)
458 def lstm_factory_premul_bias(premul_cell, script):
466 ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
470 …# add bias for all timesteps instead of going step-by-step, results in a single reduction kernel i…
474 inputs = torch.mm(input.view(-1, inpSize[2]), wih.t()) + bih
475 inputs = inputs.view(inpSize[0], inpSize[1], -1).unbind(0)
482 if script:
483 premul_cell = torch.jit.script(premul_cell)
484 dynamic_rnn = torch.jit.script(dynamic_rnn)
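The premul-bias variant hoists the input-to-hidden work out of the timestep loop: a single torch.mm over the flattened (seq_len * batch, input_size) input applies wih and bih to every timestep at once, so the backward pass sees one large reduction for the bias rather than many small ones, and the per-step cell only adds the recurrent term. A standalone numeric check of that transformation; the tensor names follow the lines above, and the step-by-step comparison is my own.

    import torch

    seq_len, batch, input_size, hidden4 = 10, 4, 16, 128
    input = torch.randn(seq_len, batch, input_size)
    wih = torch.randn(hidden4, input_size)
    bih = torch.randn(hidden4)

    # premultiplied form: one big mm plus one bias add covering every timestep
    inpSize = input.size()
    inputs = torch.mm(input.view(-1, inpSize[2]), wih.t()) + bih
    inputs = inputs.view(inpSize[0], inpSize[1], -1).unbind(0)

    # equivalent per-timestep form that the premul variant avoids
    step_by_step = [torch.mm(x_t, wih.t()) + bih for x_t in input.unbind(0)]
    print(all(torch.allclose(a, b, atol=1e-5) for a, b in zip(inputs, step_by_step)))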
491 def lstm_factory_simple(cell, script):
500 if script:
501 cell = torch.jit.script(cell)
502 dynamic_rnn = torch.jit.script(dynamic_rnn)
507 def lstm_factory_multilayer(cell, script):
510 ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
529 if script:
530 cell = torch.jit.script(cell)
531 dynamic_rnn = torch.jit.script(dynamic_rnn)
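The multilayer factory nests that same loop one level deeper: layer 0 consumes the input sequence, and each subsequent layer consumes the stacked outputs of the layer below, with its own weights and its own slice of the hidden and cell state. A skeletal eager sketch of the outer loop; run_layer is a deliberately simplified stand-in for the per-layer dynamic_rnn, and all names here are invented.

    import torch
    from typing import List
    from torch import Tensor

    def run_layer(x: Tensor, h: Tensor, w: Tensor) -> Tensor:
        # simplified stand-in for one layer's dynamic_rnn: returns stacked outputs
        outs: List[Tensor] = []
        for t in range(x.size(0)):
            h = torch.tanh(torch.mm(x[t], w) + h)
            outs += [h]
        return torch.stack(outs)

    def multilayer_sketch(x: Tensor, hiddens: List[Tensor], weights: List[Tensor]) -> Tensor:
        # each layer consumes the full output sequence of the layer below it
        out = x
        for layer in range(len(weights)):
            out = run_layer(out, hiddens[layer], weights[layer])
        return out

    seq, batch, hid, layers = 10, 4, 32, 3
    out = multilayer_sketch(
        torch.randn(seq, batch, hid),
        [torch.zeros(batch, hid) for _ in range(layers)],
        [torch.randn(hid, hid) for _ in range(layers)],
    )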