
Searched full:dropout (Results 1 – 25 of 447) sorted by relevance


/external/pytorch/torch/csrc/api/include/torch/nn/modules/
transformerlayer.h
7 #include <torch/nn/modules/dropout.h>
34 /// 8).dropout(0.1));
66 /// feedforward dropout layer
67 Dropout dropout = nullptr; variable
77 /// pre-feedforward dropout layer
78 Dropout dropout1 = nullptr;
79 /// post-feedforward dropout layer
80 Dropout dropout2 = nullptr;
109 /// 8).dropout(0.2));
146 /// Dropout, post self attention
[all …]
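For context, a minimal Python-frontend sketch of the pattern indexed above; the C++ builder `TransformerEncoderLayer(512, 8).dropout(0.1)` corresponds to keyword arguments on `nn.TransformerEncoderLayer` (tensor shapes below are illustrative):

```python
import torch
import torch.nn as nn

# Encoder layer with d_model=512, nhead=8, and dropout=0.1 applied to the
# attention and feedforward sublayers (matches the C++ example above).
layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, dropout=0.1)
src = torch.rand(10, 32, 512)  # (seq_len, batch, d_model); illustrative shape
out = layer(src)               # dropout is only active while layer.training
```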
dropout.h
4 #include <torch/nn/options/dropout.h>
31 "dropout probability has to be between 0 and 1, but got ", in reset()
41 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
43 /// Applies dropout over a 1-D input.
44 /// See https://pytorch.org/docs/main/nn.html#torch.nn.Dropout to learn
52 /// Dropout model(DropoutOptions().p(0.42).inplace(true));
60 /// Pretty prints the `Dropout` module into the given `stream`.
66 /// provides, and examples of how to use `Dropout` with
69 TORCH_MODULE(Dropout);
73 /// Applies dropout over a 2-D input.
[all …]
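A short usage sketch of the `Dropout` module documented above, written against the Python API (the `p=0.42` value mirrors the C++ options example; everything else is illustrative):

```python
import torch
import torch.nn as nn

m = nn.Dropout(p=0.42)  # p must lie in [0, 1], as the reset() check enforces
x = torch.randn(4, 8)

m.train()
y = m(x)        # surviving elements are rescaled by 1 / (1 - p)

m.eval()
assert torch.equal(m(x), x)  # identity in eval mode
```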
/external/pytorch/torch/csrc/api/src/nn/modules/
transformer.cpp
31 .dropout(options.dropout()))); in reset()
35 dropout = this->register_module("dropout", Dropout(options.dropout())); in reset()
44 dropout1 = this->register_module("dropout1", Dropout(options.dropout())); in reset()
45 dropout2 = this->register_module("dropout2", Dropout(options.dropout())); in reset()
53 // dropout->reset_parameters(); in reset_parameters()
75 src2 = linear2(dropout(F::gelu(linear1(ret)))); in forward()
77 src2 = linear2(dropout(F::relu(linear1(ret)))); in forward()
83 src2 = linear2(dropout(callable_activation(linear1(ret)))); in forward()
110 .dropout(options.dropout()))); in reset()
117 .dropout(options.dropout()))); in reset()
[all …]
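The `forward()` lines above implement the standard feedforward sublayer; a minimal Python sketch of the same dataflow (dimensions are illustrative):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

d_model, d_ff, p = 512, 2048, 0.1      # illustrative sizes
linear1 = nn.Linear(d_model, d_ff)
linear2 = nn.Linear(d_ff, d_model)
dropout = nn.Dropout(p)

def feedforward(src, activation=F.relu):
    # src2 = linear2(dropout(activation(linear1(src)))), as in transformer.cpp
    return linear2(dropout(activation(linear1(src))))

src2 = feedforward(torch.randn(10, 32, d_model))
```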
/external/tensorflow/tensorflow/python/keras/layers/legacy_rnn/
rnn_cell_wrapper_impl.py
35 """Operator adding dropout to inputs and outputs of the given cell."""
48 """Create a cell with added input, state, and/or output dropout.
51 then the same dropout mask is applied at every step, as described in:
52 [A Theoretically Grounded Application of Dropout in Recurrent
55 Otherwise a different dropout mask is applied at every time step.
65 probability; if it is constant and 1, no input dropout will be added.
67 probability; if it is constant and 1, no output dropout will be added.
69 probability; if it is constant and 1, no state dropout will be added.
70 State dropout is performed on the outgoing states of the cell. **Note**
71 the state components to which dropout is applied when `state_keep_prob`
[all …]
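A sketch of the legacy wrapper described in this docstring, assuming the TF1-compat API; with `variational_recurrent=True` the same mask is reused at every step, which requires `input_size` and `dtype` (the keep probabilities below are illustrative):

```python
import tensorflow as tf

cell = tf.compat.v1.nn.rnn_cell.LSTMCell(128)
cell = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
    cell,
    input_keep_prob=0.8,    # a constant 1.0 disables that dropout entirely
    output_keep_prob=0.8,
    state_keep_prob=0.8,    # applied to the cell's outgoing states
    variational_recurrent=True,  # one mask per sequence, per Gal & Ghahramani
    input_size=64,
    dtype=tf.float32,
)
```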
/external/tensorflow/tensorflow/python/keras/legacy_tf_layers/
core.py
16 """Contains the core layers: Dense, Dropout.
190 @keras_export(v1=['keras.__internal__.legacy.layers.Dropout'])
191 @tf_export(v1=['layers.Dropout'])
192 class Dropout(keras_layers.Dropout, base.Layer): class
193 """Applies Dropout to the input.
195 Dropout consists of randomly setting a fraction `rate` of input units to 0
201 rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
204 binary dropout mask that will be multiplied with the input.
206 `(batch_size, timesteps, features)`, and you want the dropout mask
220 super(Dropout, self).__init__(rate=rate,
[all …]
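The `noise_shape` behavior described above, sketched with the current Keras layer (rate and feature sizes are illustrative): a mask of shape `(batch, 1, features)` reuses one dropout pattern across all timesteps.

```python
import tensorflow as tf

layer = tf.keras.layers.Dropout(rate=0.1, noise_shape=(None, 1, 16))
x = tf.random.normal((32, 10, 16))  # (batch_size, timesteps, features)
y = layer(x, training=True)         # same mask at every timestep
```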
/external/pytorch/torch/csrc/api/include/torch/nn/options/
rnn.h
40 /// If non-zero, adds dropout with the given probability to the output of each
42 TORCH_ARG(double, dropout) = 0.0;
57 /// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh));
82 /// If non-zero, introduces a `Dropout` layer on the outputs of each
83 /// RNN layer except the last layer, with dropout probability equal to
84 /// `dropout`. Default: 0
85 TORCH_ARG(double, dropout) = 0.0;
115 /// If non-zero, introduces a `Dropout` layer on the outputs of each
116 /// LSTM layer except the last layer, with dropout probability equal to
117 /// `dropout`. Default: 0
[all …]
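Python counterpart of the option documented above (sizes illustrative): `dropout=0.2` inserts a Dropout layer between stacked layers, on all outputs except the last layer's.

```python
import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=32, hidden_size=64, num_layers=3, dropout=0.2)
out, (h, c) = lstm(torch.randn(5, 8, 32))  # (seq_len, batch, input_size)
```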
transformerlayer.h
20 /// auto options = TransformerEncoderLayer(512, 8).dropout(0.2);
34 /// the dropout value, default is 0.1
35 TORCH_ARG(double, dropout) = 0.1;
49 /// 8).dropout(0.2));
63 /// the dropout value. Default: 0.1
64 TORCH_ARG(double, dropout) = 0.1;
/external/pytorch/test/nn/
test_dropout.py
74 o_ref = torch.dropout(x_ref, p, train)
82 self.assertRaises(ValueError, lambda: nn.Dropout(-0.1))
83 self.assertRaises(ValueError, lambda: nn.Dropout(1.1))
90 self.assertRaises(ValueError, lambda: F.dropout(v, -0.1))
91 self.assertRaises(ValueError, lambda: F.dropout(v, 1.1))
129 … # In this test, we verify that dropout preserves the layout and data for different memory formats.
130 # We check whether we get the same values for the output of dropout when the probability
131 # of dropout is 0 or very close to 0.
174 self._test_dropout(nn.Dropout, device, input)
176 self._test_dropout_discontiguous(nn.Dropout, device)
[all …]
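The validation exercised by these tests: probabilities outside `[0, 1]` are rejected by both the module and the functional form. A minimal reproduction:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

for bad_p in (-0.1, 1.1):
    try:
        nn.Dropout(bad_p)
    except ValueError as e:
        print(e)  # dropout probability has to be between 0 and 1, ...
    try:
        F.dropout(torch.randn(3), bad_p)
    except ValueError as e:
        print(e)
```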
/external/pytorch/torch/nn/modules/
dropout.py
8 "Dropout",
26 f"dropout probability has to be between 0 and 1, but got {p}"
35 class Dropout(_DropoutNd): class
61 >>> m = nn.Dropout(p=0.2)
70 return F.dropout(input, self.p, self.training, self.inplace)
88 (as is normally the case in early convolution layers) then i.i.d. dropout
133 (as is normally the case in early convolution layers) then i.i.d. dropout
146 Due to historical reasons, this class will perform 1D channel-wise dropout
185 (as is normally the case in early convolution layers) then i.i.d. dropout
216 r"""Applies Alpha Dropout over the input.
[all …]
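The channel-wise variants mentioned above zero whole feature maps rather than independent elements; a short sketch with `Dropout2d` (shapes illustrative):

```python
import torch
import torch.nn as nn

m = nn.Dropout2d(p=0.2)          # zeroes entire channels together
x = torch.randn(8, 16, 28, 28)   # (N, C, H, W)
y = m(x)                         # each (H, W) plane survives or drops as a unit
```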
transformer.py
13 from .dropout import Dropout
76 dropout: the dropout value (default=0.1).
106 dropout: float = 0.1,
128 dropout,
150 dropout,
645 dropout: the dropout value (default=0.1).
701 dropout: float = 0.1,
715 dropout=dropout,
722 self.dropout = Dropout(dropout)
728 self.dropout1 = Dropout(dropout)
[all …]
/external/pytorch/aten/src/ATen/cudnn/
Descriptors.h
234 // Initialize a dropout descriptor's RNG state.
236 …void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& … in initialize_rng()
237 TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout"); in initialize_rng()
243 …AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size… in initialize_rng()
246 // Restore a dropout descriptor given a dropout probability and existing RNG state.
247 void set(cudnnHandle_t handle, float dropout, at::Tensor state_) { in set()
248 TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout"); in set()
253 …AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0… in set()
256 // Restore a dropout descriptor corresponding to no dropout
258 // NB: seed doesn't matter when dropout = 0, because no random number in set_no_dropout()
[all …]
/external/pytorch/test/onnx/model_defs/
lstm_flattening_result.py
12 def __init__(self, input_size, hidden_size, layers, bidirect, dropout, batch_first): argument
21 dropout=dropout,
31 def __init__(self, input_size, hidden_size, layers, bidirect, dropout, batch_first): argument
40 dropout=dropout,
word_language_model.py
21 dropout=0.5, argument
26 self.drop = nn.Dropout(dropout)
29 self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
39 ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout
/external/pytorch/torch/ao/quantization/pt2e/
export_utils.py
40 Switch dropout patterns in the model between train and eval modes.
42 Dropout has different behavior in train vs eval mode. For exported models,
44 the dropout behavior between the two modes, so here we need to rewrite the aten
45 dropout patterns manually to achieve the same effect.
59 return F.dropout(x, p=0.5, training=True, inplace=inplace)
62 return F.dropout(x, p=0.5, training=False, inplace=inplace)
178 This is equivalent to model.eval() but only for certain special ops like dropout, batchnorm.
190 This is equivalent to model.train() but only for certain special ops like dropout, batchnorm.
202 ops only, which are currently dropout and batchnorm.
207 is already specialized at export time. Additionally, other ops beyond dropout and batchnorm
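The two aten-level patterns this rewrite swaps between, as shown at lines 59 and 62 of the file (a sketch; the actual pass pattern-matches these subgraphs in the exported program):

```python
import torch.nn.functional as F

def dropout_train(x, inplace=False):
    return F.dropout(x, p=0.5, training=True, inplace=inplace)

def dropout_eval(x, inplace=False):
    # training=False makes this an identity, matching model.eval() behavior
    return F.dropout(x, p=0.5, training=False, inplace=inplace)
```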
/external/pytorch/torch/ao/nn/quantized/modules/
dropout.py
5 __all__ = ["Dropout"]
8 class Dropout(torch.nn.Dropout): class
9 r"""This is the quantized equivalent of :class:`~torch.nn.Dropout`.
11 had dropout to work with quantized tensors in train and eval mode.
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_CudnnRNNParamsSize.pbtxt
18 dropout: dropout probability. When set to 0., dropout is disabled.
19 seed: the 1st part of a seed to initialize dropout.
20 seed2: the 2nd part of a seed to initialize dropout.
api_def_CudnnRNNParamsToCanonicalV2.pbtxt
30 dropout: dropout probability. When set to 0., dropout is disabled.
31 seed: the 1st part of a seed to initialize dropout.
32 seed2: the 2nd part of a seed to initialize dropout.
api_def_CudnnRNNCanonicalToParamsV2.pbtxt
30 dropout: dropout probability. When set to 0., dropout is disabled.
31 seed: the 1st part of a seed to initialize dropout.
32 seed2: the 2nd part of a seed to initialize dropout.
api_def_CudnnRNNCanonicalToParams.pbtxt
31 dropout: dropout probability. When set to 0., dropout is disabled.
32 seed: the 1st part of a seed to initialize dropout.
33 seed2: the 2nd part of a seed to initialize dropout.
api_def_CudnnRNNParamsToCanonical.pbtxt
31 dropout: dropout probability. When set to 0., dropout is disabled.
32 seed: the 1st part of a seed to initialize dropout.
33 seed2: the 2nd part of a seed to initialize dropout.
api_def_CudnnRNN.pbtxt
15 dropout: Dropout probability. When set to 0., dropout is disabled.
16 seed: The 1st part of a seed to initialize dropout.
17 seed2: The 2nd part of a seed to initialize dropout.
api_def_CudnnRNNV2.pbtxt
16 dropout: Dropout probability. When set to 0., dropout is disabled.
17 seed: The 1st part of a seed to initialize dropout.
18 seed2: The 2nd part of a seed to initialize dropout.
/external/tensorflow/tensorflow/python/layers/
core.py
16 """Contains the core layers: Dense, Dropout.
25 Dropout = core.Dropout variable
26 dropout = core.dropout variable
/external/pytorch/torch/csrc/api/include/torch/nn/functional/
dropout.h
3 #include <torch/nn/options/dropout.h>
14 inline Tensor dropout(Tensor input, double p, bool training, bool inplace) { in dropout() function
17 "dropout probability has to be between 0 and 1, but got ", in dropout()
22 return torch::dropout(input, p, training); in dropout()
30 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.dropout
39 /// F::dropout(input, F::DropoutFuncOptions().p(0.5));
41 inline Tensor dropout(Tensor input, const DropoutFuncOptions& options = {}) {
42 return detail::dropout(
60 "dropout probability has to be between 0 and 1, but got ", in _dropoutNd_helper()
161 false, "dropout probability has to be between 0 and 1, but got ", p); in alpha_dropout()
[all …]
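Python counterpart of the functional form above: unlike the module, `F.dropout` takes the training flag explicitly rather than reading module state.

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, 8)
y = F.dropout(x, p=0.5, training=True)        # masks and rescales
y_eval = F.dropout(x, p=0.5, training=False)  # no-op
```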
/external/tensorflow/tensorflow/examples/speech_commands/
models.py
106 placeholder node that can be used to control the dropout amount.
119 TensorFlow node outputting logits results, and optionally a dropout
182 TensorFlow node outputting logits results, and optionally a dropout
239 During training, dropout nodes are introduced after each relu, controlled by a
248 TensorFlow node outputting logits results, and optionally a dropout
275 first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
302 second_dropout = tf.nn.dropout(second_relu, rate=dropout_rate)
363 During training, dropout nodes are introduced after the relu, controlled by a
372 TensorFlow node outputting logits results, and optionally a dropout
401 first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
[all …]
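The post-ReLU pattern from models.py above, sketched with the TF2 signature (note `rate` is the probability of dropping, not of keeping; sizes illustrative):

```python
import tensorflow as tf

x = tf.random.normal((32, 128))
first_relu = tf.nn.relu(x)
first_dropout = tf.nn.dropout(first_relu, rate=0.5)  # drops 50% of units
```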
