/external/tensorflow/tensorflow/python/training/ |
D | learning_rate_decay.py |
   29  def exponential_decay(learning_rate,  argument
   94  decayed_lr = learning_rate_schedule.ExponentialDecay(learning_rate,
  159  def polynomial_decay(learning_rate,  argument
  244  learning_rate,
  259  def natural_exp_decay(learning_rate,  argument
  332  learning_rate, decay_steps, natural_exp_rate, staircase=staircase,
  343  def inverse_time_decay(learning_rate,  argument
  415  learning_rate,
  429  def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):  argument
  478  learning_rate, decay_steps, alpha=alpha, name=name)
  [all …]
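For context, a minimal usage sketch of the decay helpers indexed above; the values and variable names are illustrative, not taken from the file:

```python
import tensorflow.compat.v1 as tf

global_step = tf.train.get_or_create_global_step()
# Decayed value: 0.1 * 0.96 ** (global_step / 1000); staircase=True floors
# the exponent so the rate drops in discrete steps.
lr = tf.train.exponential_decay(learning_rate=0.1, global_step=global_step,
                                decay_steps=1000, decay_rate=0.96,
                                staircase=True)
opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
```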
|
D | momentum.py |
   46  def __init__(self, learning_rate, momentum,  argument
   76  self._learning_rate = learning_rate
   85  learning_rate = self._learning_rate
   86  if callable(learning_rate):
   87  learning_rate = learning_rate()
   88  self._learning_rate_tensor = ops.convert_to_tensor(learning_rate,
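A short sketch of what the callable branch above enables (illustrative values):

```python
import tensorflow.compat.v1 as tf

# learning_rate may be a float, a Tensor, or a zero-argument callable; the
# callable branch (lines 86-87 above) resolves it lazily when the optimizer
# prepares its update ops.
opt = tf.train.MomentumOptimizer(learning_rate=lambda: 0.01, momentum=0.9)
```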
|
D | rmsprop_test.py |
   95  for (dtype, learning_rate, decay, momentum,
  113  learning_rate=learning_rate,
  151  var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
  154  var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
  177  learning_rate=1.0,
  201  learning_rate=1.0,
  219  for (dtype, learning_rate, decay,
  239  learning_rate=learning_rate,
  277  learning_rate, decay, momentum, epsilon, centered)
  280  learning_rate, decay, momentum, epsilon, centered)
  [all …]
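The constructor these tests exercise, with illustrative hyperparameters:

```python
import tensorflow.compat.v1 as tf

# The v1 RMSProp constructor; decay here is the discounting factor for the
# moving average of squared gradients, not a learning-rate decay.
opt = tf.train.RMSPropOptimizer(learning_rate=1.0, decay=0.9, momentum=0.9,
                                epsilon=1e-10, centered=True)
```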
|
D | gradient_descent.py |
   34  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):  argument
   52  self._learning_rate = learning_rate
   80  learning_rate = self._call_if_callable(self._learning_rate)
   82  learning_rate, name="learning_rate")
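A minimal graph-mode usage sketch (names and values are illustrative):

```python
import tensorflow.compat.v1 as tf

x = tf.Variable(5.0)
loss = tf.square(x)
# The constructor stores learning_rate untouched (line 52 above) and converts
# it to a tensor later via _call_if_callable (lines 80-82).
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)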
|
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
D | optimizers_test.py |
   63  gradient_descent.GradientDescentOptimizer(learning_rate=0.1),
   64  lambda lr: gradient_descent.GradientDescentOptimizer(learning_rate=lr),
   72  loss, global_step, learning_rate=0.1, optimizer=optimizer)
   82  return gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
   88  loss, global_step, learning_rate=None, optimizer=optimizer_fn)
  103  loss, global_step, learning_rate=0.1, optimizer=optimizer)
  110  loss, global_step, learning_rate=0.1, optimizer="SGD",
  118  None, global_step, learning_rate=0.1, optimizer="SGD")
  121  [[1.0]], global_step, learning_rate=0.1, optimizer="SGD")
  134  learning_rate=0.1,
  [all …]
|
D | optimizers.py |
   43  …"Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9),  # pylint:…
   59  learning_rate,  argument
  172  if learning_rate is not None:
  173  if (isinstance(learning_rate, ops.Tensor) and
  174  learning_rate.get_shape().ndims == 0):
  175  lr = learning_rate
  176  elif isinstance(learning_rate, float):
  177  if learning_rate < 0.0:
  178  raise ValueError("Invalid learning_rate %s.", learning_rate)
  182  initializer=init_ops.constant_initializer(learning_rate))
  [all …]
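A hedged sketch of how optimize_loss is typically called (contrib is TF 1.x only; loss and global_step here are illustrative stand-ins):

```python
import tensorflow.compat.v1 as tf
from tensorflow.contrib.layers import optimize_loss  # TF 1.x only

global_step = tf.train.get_or_create_global_step()
loss = tf.reduce_mean(tf.square(tf.Variable([1.0, 2.0])))
# "Momentum" resolves through OPTIMIZER_CLS_NAMES (line 43 above) to
# MomentumOptimizer(lr, momentum=0.9); a negative float learning_rate is
# rejected with ValueError (lines 177-178).
train_op = optimize_loss(loss, global_step, learning_rate=0.1,
                         optimizer="Momentum")
```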
|
/external/tensorflow/tensorflow/contrib/opt/python/training/ |
D | addsign_test.py |
   63  learning_rate=0.1,  argument
   91  learning_rate=learning_rate,
  127  learning_rate,
  137  learning_rate,
  153  self._testDense(use_resource=False, learning_rate=0.01, alpha=0.1, beta=0.8)
  159  self._testDense(use_resource=True, learning_rate=0.01, alpha=0.1, beta=0.8)
  166  learning_rate=0.1,  argument
  199  learning_rate=learning_rate,
  227  learning_rate,
  237  learning_rate,
  [all …]
|
D | powersign_test.py |
   64  learning_rate=0.1,  argument
   92  learning_rate=learning_rate,
  129  learning_rate,
  139  learning_rate,
  156  learning_rate=0.1,
  164  self._testDense(use_resource=True, learning_rate=0.1, base=10.0, beta=0.8)
  171  learning_rate=0.1,  argument
  204  learning_rate=learning_rate,
  232  learning_rate,
  242  learning_rate,
  [all …]
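A sketch of the two contrib optimizers these tests (and the compiler-test copies further below) exercise, using the hyperparameter values from the test calls; assumes TF 1.x contrib:

```python
from tensorflow.contrib.opt import AddSignOptimizer, PowerSignOptimizer

# Both scale the update by a sign-agreement term between the gradient and its
# moving average: additive (alpha + ...) for AddSign, exponential
# (base ** ...) for PowerSign.
add_opt = AddSignOptimizer(learning_rate=0.01, alpha=0.1, beta=0.8)
pow_opt = PowerSignOptimizer(learning_rate=0.1, base=10.0, beta=0.8)
```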
|
D | weight_decay_optimizers.py |
  304  def __init__(self, weight_decay, learning_rate, momentum,  argument
  332  weight_decay, learning_rate=learning_rate, momentum=momentum,
  359  def __init__(self, weight_decay, learning_rate=0.001, beta1=0.9, beta2=0.999,  argument
  380  weight_decay, learning_rate=learning_rate, beta1=beta1, beta2=beta2,
  400  learning_rate=1.0,  argument
  445  learning_rate=learning_rate,
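A hedged usage sketch for the AdamW variant whose signature is indexed at line 359 (TF 1.x contrib; values illustrative):

```python
from tensorflow.contrib.opt import AdamWOptimizer

# weight_decay is the first positional argument, deliberately decoupled from
# the learning rate as in Loshchilov & Hutter's "Decoupled Weight Decay
# Regularization"; the remaining defaults match the indexed signature.
opt = AdamWOptimizer(weight_decay=1e-4, learning_rate=0.001,
                     beta1=0.9, beta2=0.999)
```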
|
D | lars_optimizer.py |
   49  learning_rate,  argument
   89  self._learning_rate = learning_rate
  168  learning_rate = self._learning_rate
  169  if callable(learning_rate):
  170  learning_rate = learning_rate()
  172  learning_rate, name="learning_rate")
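A minimal sketch, assuming the remaining constructor arguments have defaults (only the learning_rate handling is visible in the index):

```python
from tensorflow.contrib.opt import LARSOptimizer

# Like the Momentum and GradientDescent entries above, LARS accepts a
# zero-argument callable for learning_rate and resolves it lazily
# (lines 169-170).
opt = LARSOptimizer(learning_rate=lambda: 0.02)
```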
|
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | adagrad_test.py |
   80  learning_rate = lambda: 3.0  function
   82  learning_rate = learning_rate()
   84  ada_opt = adagrad.Adagrad(learning_rate)
  132  learning_rate = 3.0
  135  ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
  156  lr_np = learning_rate / (1 + decay * t)
  176  learning_rate = 3.0
  179  learning_rate, decay_steps=1.0, decay_rate=decay)
  202  lr_np = learning_rate / (1 + decay * t)
  246  learning_rate = constant_op.constant(3.0)
  [all …]
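A sketch of the schedule-based form these tests cover (values illustrative):

```python
import tensorflow as tf

# Adagrad in keras/optimizer_v2 takes a float, a zero-argument callable
# (line 80 above), or a LearningRateSchedule; with decay d the effective
# rate follows lr / (1 + d * t), the lr_np expression at lines 156/202.
schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=3.0, decay_steps=1.0, decay_rate=0.5)
opt = tf.keras.optimizers.Adagrad(learning_rate=schedule)
```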
|
D | rmsprop_test.py |
  103  for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
  116  learning_rate=learning_rate,
  160  var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,
  163  var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,
  189  learning_rate = 0.01
  196  learning_rate=learning_rate,
  232  lr = learning_rate / (1 + decay * t)
  260  learning_rate = 0.01
  267  learning_rate, decay_steps=1.0, decay_rate=decay)
  269  learning_rate=lr_schedule,
  [all …]
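The v2 constructor these parameters feed, with illustrative values:

```python
import tensorflow as tf

# The hyperparameters swept by _TESTPARAMS above; centered=True subtracts an
# estimate of the gradient mean before normalizing.
opt = tf.keras.optimizers.RMSprop(learning_rate=0.01, rho=0.9, momentum=0.9,
                                  epsilon=1e-7, centered=True)
```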
|
D | gradient_descent_test.py |
   94  learning_rate = 3.0
   96  sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay)
  102  learning_rate = learning_rate_schedule.InverseTimeDecay(
  104  sgd = gradient_descent.SGD(learning_rate=learning_rate)
  110  learning_rate = learning_rate_schedule.InverseTimeDecay(
  112  sgd = gradient_descent.SGD(learning_rate=learning_rate)
  279  opt_2 = gradient_descent.SGD(learning_rate=0.1, lr=1.0)
  280  opt_3 = gradient_descent.SGD(learning_rate=0.1)
  309  learning_rate = 2.0
  312  learning_rate=learning_rate, momentum=momentum)
  [all …]
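A sketch of the schedule form used at lines 102-104 (values illustrative; the legacy lr alias at line 279 is only exercised to test the conflicting-kwarg path):

```python
import tensorflow as tf

schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=3.0, decay_steps=1.0, decay_rate=0.5)
sgd = tf.keras.optimizers.SGD(learning_rate=schedule, momentum=0.9)
```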
|
D | optimizer_v2_test.py |
  106  sgd.learning_rate = 0.5
  117  sgd.learning_rate = learning_rate_schedule.InverseTimeDecay(
  275  opt = gradient_descent.SGD(learning_rate=1.0)
  324  opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0)
  335  opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0)
  344  gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
  349  gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
  354  opt1 = adam.Adam(learning_rate=1.0)
  404  opt = adam.Adam(learning_rate=1.0)
  438  isinstance(opt.learning_rate, resource_variable_ops.ResourceVariable))
  [all …]
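A short sketch of the hyperparameter behavior these tests check:

```python
import tensorflow as tf

opt = tf.keras.optimizers.SGD(learning_rate=1.0, clipnorm=1.0)
# learning_rate is a variable-backed hyperparameter (line 438 above), so it
# can be reassigned after construction, including to a schedule (line 117).
opt.learning_rate = 0.5
```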
|
/external/tensorflow/tensorflow/contrib/training/python/training/ |
D | sgdr_learning_rate_decay.py |
   28  def sgdr_decay(learning_rate, global_step, initial_period_steps,  argument
  131  [learning_rate, global_step,
  133  learning_rate = ops.convert_to_tensor(learning_rate,
  135  dtype = learning_rate.dtype
  184  m_fac = learning_rate * (m_mul ** i_restart)
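A NumPy sketch of the SGDR schedule this file implements (warm restarts, Loshchilov & Hutter): period i has length initial_period_steps * t_mul**i and peak learning_rate * m_mul**i, the m_fac at line 184. The helper below is illustrative, not the contrib implementation:

```python
import numpy as np

def sgdr_value(step, lr, initial_period_steps, t_mul=2.0, m_mul=1.0):
    period, start, i_restart = initial_period_steps, 0, 0
    while step >= start + period:          # find the restart containing step
        start += period
        period *= t_mul
        i_restart += 1
    x = (step - start) / period            # position within the period, [0, 1)
    return lr * (m_mul ** i_restart) * 0.5 * (1.0 + np.cos(np.pi * x))
```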
|
D | training_test.py |
  100  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  117  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  151  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  184  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  207  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  246  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  281  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  304  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  330  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  350  def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):  argument
  [all …]
|
/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/ |
D | loss_scale_optimizer.py |
  101  def learning_rate(self):  member in LossScaleOptimizer
  102  return self._optimizer.learning_rate
  104  @learning_rate.setter
  105  def learning_rate(self, lr):  member in LossScaleOptimizer
  106  self._optimizer.learning_rate = lr
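A sketch of the delegation this property/setter pair provides (values illustrative):

```python
import tensorflow as tf

inner = tf.keras.optimizers.SGD(learning_rate=0.1)
opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
    inner, loss_scale="dynamic")
# Reads and writes are forwarded to the wrapped optimizer by the indexed
# property/setter pair.
opt.learning_rate = 0.05
```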
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | addsign_test.py |
   60  learning_rate=0.1,  argument
   81  learning_rate=learning_rate,
  109  learning_rate,
  119  learning_rate,
  136  self._testDense(learning_rate=0.01, alpha=0.1, beta=0.8)
|
D | powersign_test.py |
   61  learning_rate=0.1,  argument
   82  learning_rate=learning_rate,
  110  learning_rate,
  120  learning_rate,
  136  self._testDense(learning_rate=0.1, base=10.0, beta=0.8)
|
/external/tensorflow/tensorflow/contrib/slim/python/slim/ |
D | learning_test.py |
  252  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  287  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  321  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  354  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  379  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  406  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  439  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  458  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  477  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  513  optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
  [all …]
|
/external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/ |
D | test_utils.py |
  104  learning_rate=0.1, ignore_params_fn=lambda _: (),  argument
  156  optimizer=adam.AdamOptimizer(learning_rate))
  172  learning_rate=0.1, rtol=0.2, atol=0.1, train_loss_tolerance_coeff=0.99,  argument
  214  train_iterations=train_iterations, seed=seed, learning_rate=learning_rate,
  257  learning_rate=0.1,  argument
  279  seed=seed, learning_rate=learning_rate,
|
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/ |
D | dynamic_rnn_estimator_test.py |
  260  learning_rate=0.1)
  361  learning_rate = 0.1
  399  learning_rate=learning_rate,
  418  learning_rate = 0.1
  456  learning_rate=learning_rate,
  515  learning_rate = 0.1
  550  learning_rate=learning_rate,
  577  learning_rate = 0.3
  612  learning_rate=learning_rate,
  654  learning_rate = 0.1
  [all …]
|
/external/tensorflow/tensorflow/contrib/optimizer_v2/ |
D | rmsprop_test.py |
   92  (learning_rate, decay, momentum, epsilon, centered, use_resource) = tuple(
  110  learning_rate=learning_rate,
  148  var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
  151  var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
  176  learning_rate=1.0,
  198  learning_rate=1.0,
  216  (learning_rate, decay, momentum, epsilon, centered, _) = tuple(
  236  learning_rate=learning_rate,
  274  learning_rate, decay, momentum, centered)
  277  learning_rate, decay, momentum, centered)
  [all …]
|
D | momentum_test.py |
   59  learning_rate = lambda: 2.0  function
   62  learning_rate = learning_rate()
   65  learning_rate=learning_rate, momentum=momentum)
  176  learning_rate=2.0, momentum=0.9, use_nesterov=True)
  212  learning_rate=2.0, momentum=0.9, use_nesterov=True)
  252  opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
  277  opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
  291  learning_rate=constant_op.constant(2.0),
  443  mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
  466  learning_rate=2.0, momentum=0.9)
  [all …]
|
/external/webrtc/webrtc/base/ |
D | rollingaccumulator.h |
  126  double ComputeWeightedMean(double learning_rate) const {  in ComputeWeightedMean(), argument
  127  if (count_ < 1 || learning_rate <= 0.0 || learning_rate >= 1.0) {  in ComputeWeightedMean()
  135  current_weight *= learning_rate;  in ComputeWeightedMean()
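A Python sketch of the idea behind the C++ fragments above: samples are weighted by successive powers of learning_rate, so it must lie in (0, 1) per the guard at line 127. Only fragments are indexed, so the exact accumulation and normalization here are an assumption:

```python
def compute_weighted_mean(samples_newest_first, learning_rate):
    # Mirrors the line-127 guard: empty buffer or out-of-range rate -> 0.0.
    if not samples_newest_first or not 0.0 < learning_rate < 1.0:
        return 0.0
    weight, weighted_sum, weight_sum = 1.0, 0.0, 0.0
    for sample in samples_newest_first:
        weight *= learning_rate          # line 135: older samples shrink
        weighted_sum += weight * sample
        weight_sum += weight
    return weighted_sum / weight_sum
```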
|