/external/tensorflow/tensorflow/python/training/

learning_rate_decay.py
    31   def exponential_decay(learning_rate,          argument
    183  def polynomial_decay(learning_rate,           argument
    284  def natural_exp_decay(learning_rate,          argument
    372  def inverse_time_decay(learning_rate,         argument
    455  def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):  argument
    518  def cosine_decay_restarts(learning_rate,      argument
    595  def linear_cosine_decay(learning_rate,        argument
    680  def noisy_linear_cosine_decay(learning_rate,  argument
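
All of these schedule functions return a learning-rate tensor that decays as
global_step advances. A minimal sketch of the most common one,
tf.train.exponential_decay, using the TF 1.x API (the toy loss below is
illustrative only):

    import tensorflow as tf

    w = tf.Variable(1.0)
    loss = tf.square(w)
    global_step = tf.Variable(0, trainable=False)

    # With staircase=True the exponent is floored, so the rate drops by a
    # factor of 0.96 once every 1000 steps: 0.1 * 0.96 ** (step // 1000).
    decayed_lr = tf.train.exponential_decay(learning_rate=0.1,
                                            global_step=global_step,
                                            decay_steps=1000,
                                            decay_rate=0.96,
                                            staircase=True)

    # Passing global_step to minimize() increments it on every update,
    # which is what advances the schedule.
    train_op = tf.train.GradientDescentOptimizer(decayed_lr).minimize(
        loss, global_step=global_step)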

adagrad_test.py
    53   learning_rate = lambda: 3.0  function
    326  learning_rate = lambda: 3.0  function
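
The lambdas in this test exercise callable hyperparameters: under eager
execution, the TF 1.x optimizers accept a zero-argument callable in place of
a float, and re-evaluate it each time gradients are applied. A hedged sketch
of that usage:

    import tensorflow as tf

    tf.enable_eager_execution()  # TF 1.x eager mode

    # The callable is invoked on every apply_gradients() call, so the rate
    # can change between steps without rebuilding the optimizer.
    learning_rate = lambda: 3.0
    opt = tf.train.AdagradOptimizer(learning_rate,
                                    initial_accumulator_value=0.1)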

gradient_descent.py
    34   def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):  argument

proximal_gradient_descent.py
    41   def __init__(self, learning_rate, l1_regularization_strength=0.0,  argument

momentum.py
    46   def __init__(self, learning_rate, momentum,  argument

adadelta.py
    38   def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-8,  argument

proximal_adagrad.py
    43   def __init__(self, learning_rate, initial_accumulator_value=0.1,  argument

adagrad_da.py
    49   learning_rate,  argument

rmsprop.py
    66   learning_rate,  argument

adagrad.py
    41   def __init__(self, learning_rate, initial_accumulator_value=0.1,  argument

ftrl.py
    43   learning_rate,  argument
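
Across all of these tf.train optimizers, learning_rate is the first
constructor argument and may be a float, a tensor (e.g. the output of one of
the decay schedules above), or a callable under eager execution. For example:

    import tensorflow as tf

    # Construction is uniform; only the extra hyperparameters differ.
    sgd      = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    momentum = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
    adagrad  = tf.train.AdagradOptimizer(learning_rate=0.01,
                                         initial_accumulator_value=0.1)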

/external/tensorflow/tensorflow/examples/tutorials/mnist/

mnist.py
    101  def training(loss, learning_rate):  argument
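
In the MNIST tutorial, training() wires a loss tensor to a gradient-descent
update op. The sketch below is a hedged reconstruction of that pattern, not
the verbatim tutorial source:

    import tensorflow as tf

    def training(loss, learning_rate):
        """Builds the op that performs one gradient-descent step on loss."""
        tf.summary.scalar('loss', loss)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        # minimize() computes and applies gradients, incrementing
        # global_step once per update.
        return optimizer.minimize(loss, global_step=global_step)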

/external/tensorflow/tensorflow/core/kernels/boosted_trees/

training_ops.cc
    79   const auto learning_rate = learning_rate_t->scalar<float>()();  in Compute()  local
    170  OpKernelContext* const context, const float learning_rate,      in FindBestSplitsPerNode()
    281  const auto learning_rate = learning_rate_t->scalar<float>()();  in Compute()  local
    390  OpKernelContext* const context, const float learning_rate,      in FindBestSplitsPerNode()
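
In these boosted-trees kernels the learning rate plays the role of the usual
gradient-boosting shrinkage factor: each new tree's leaf values are scaled by
it before being folded into the ensemble's predictions. Schematically, as a
hedged Python sketch of shrinkage rather than the kernel code itself:

    # Damp each tree's contribution so the ensemble grows in small,
    # regularized steps.
    def update_predictions(predictions, tree_leaf_values, learning_rate):
        return [p + learning_rate * v
                for p, v in zip(predictions, tree_leaf_values)]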

/external/tensorflow/tensorflow/python/tpu/

tpu_embedding.py
    64   learning_rate=None,  argument
    260  def __init__(self, learning_rate, use_gradient_accumulation,  argument
    289  learning_rate,  argument
    335  learning_rate,  argument
    405  learning_rate,  argument
    480  def __init__(self, learning_rate, clip_weight_min=None,  argument

/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/

loss_scale_optimizer.py
    305  def learning_rate(self):      member in LossScaleOptimizer
    309  def learning_rate(self, lr):  member in LossScaleOptimizer
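
The two definitions are a property getter and setter: LossScaleOptimizer
wraps an inner optimizer for mixed-precision training and forwards
learning_rate to it, so reading or assigning the rate on the wrapper behaves
like touching the wrapped optimizer directly. A hedged sketch of that
delegation pattern (simplified; the real class does considerably more):

    class LossScaleOptimizer(object):
        def __init__(self, optimizer, loss_scale):
            self._optimizer = optimizer
            self._loss_scale = loss_scale

        @property
        def learning_rate(self):
            # Reads pass through to the wrapped optimizer.
            return self._optimizer.learning_rate

        @learning_rate.setter
        def learning_rate(self, lr):
            # Writes pass through as well.
            self._optimizer.learning_rate = lr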

/external/tensorflow/tensorflow/python/keras/optimizer_v2/

adamax.py
    46   learning_rate=0.001,  argument

ftrl.py
    58   learning_rate=0.001,  argument

adadelta.py
    62   learning_rate=0.001,  argument

adam.py
    49   learning_rate=0.001,  argument

nadam.py
    65   learning_rate=0.001,  argument

gradient_descent.py
    64   learning_rate=0.01,  argument

rmsprop.py
    64   learning_rate=0.001,  argument

adagrad.py
    58   learning_rate=0.001,  argument
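
Unlike the tf.train classes above, the optimizer_v2 constructors give
learning_rate a default (0.001 everywhere here except SGD's 0.01), and in
TF 2.x they also accept a LearningRateSchedule object in place of a float:

    import tensorflow as tf

    adam = tf.keras.optimizers.Adam()  # learning_rate defaults to 0.001
    sgd  = tf.keras.optimizers.SGD()   # learning_rate defaults to 0.01

    # A schedule object can be passed instead of a constant.
    schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=0.001, decay_steps=1000, decay_rate=0.96)
    adam_decayed = tf.keras.optimizers.Adam(learning_rate=schedule)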

/external/webrtc/webrtc/base/

rollingaccumulator.h
    126  double ComputeWeightedMean(double learning_rate) const {  in ComputeWeightedMean()
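
Here learning_rate means something different: it is the per-sample decay of
an exponentially weighted mean over the accumulator's ring buffer, so weights
fall off geometrically from the newest sample to the oldest. A hedged Python
sketch of that computation (the actual WebRTC code is C++ and may differ in
detail):

    def compute_weighted_mean(samples_newest_first, learning_rate):
        """Newest sample gets weight learning_rate, the next
        learning_rate**2, and so on; the result is normalized."""
        current_weight = 1.0
        weight_sum = 0.0
        weighted_sum = 0.0
        for sample in samples_newest_first:
            current_weight *= learning_rate
            weight_sum += current_weight
            weighted_sum += current_weight * sample
        return weighted_sum / weight_sum if weight_sum > 0 else 0.0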

/external/tensorflow/tensorflow/core/grappler/optimizers/

auto_parallel_test.cc
    41   Output learning_rate = ops::Const(s.WithOpName("learning_rate"), 0.01f, {1});  in TEST_F()  local