
Searched refs:learning_rate (Results 1 – 25 of 265) sorted by relevance


/external/tensorflow/tensorflow/python/training/
learning_rate_decay.py
29 def exponential_decay(learning_rate,
94 decayed_lr = learning_rate_schedule.ExponentialDecay(learning_rate,
159 def polynomial_decay(learning_rate,
244 learning_rate,
259 def natural_exp_decay(learning_rate,
332 learning_rate, decay_steps, natural_exp_rate, staircase=staircase,
343 def inverse_time_decay(learning_rate,
415 learning_rate,
429 def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):
478 learning_rate, decay_steps, alpha=alpha, name=name)
[all …]
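The exponential_decay hits above define the classic schedule in which the rate is multiplied by a fixed decay factor every decay_steps steps. A minimal sketch of the documented formula (the helper name is mine, and this is plain Python, not TensorFlow's graph implementation):

import math

def exponential_decay_value(learning_rate, global_step, decay_steps,
                            decay_rate, staircase=False):
    # decayed_lr = learning_rate * decay_rate ** (global_step / decay_steps)
    p = global_step / decay_steps
    if staircase:
        p = math.floor(p)  # decay in discrete jumps instead of smoothly
    return learning_rate * decay_rate ** p

For example, a 0.1 rate halving every 1000 steps gives exponential_decay_value(0.1, 2500, 1000, 0.5) ≈ 0.0177.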
momentum.py
46 def __init__(self, learning_rate, momentum,
76 self._learning_rate = learning_rate
85 learning_rate = self._learning_rate
86 if callable(learning_rate):
87 learning_rate = learning_rate()
88 self._learning_rate_tensor = ops.convert_to_tensor(learning_rate,
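The momentum.py hit shows a pattern repeated across these optimizers: learning_rate may be a plain float or a zero-argument callable, and it is only resolved (and converted to a tensor) when the optimizer prepares its update. The pattern in isolation, as a minimal sketch:

def resolve_learning_rate(learning_rate):
    # A callable lets the rate be recomputed each time the optimizer
    # runs, e.g. to implement a schedule.
    if callable(learning_rate):
        learning_rate = learning_rate()
    return learning_rate

resolve_learning_rate(0.1)          # -> 0.1
resolve_learning_rate(lambda: 0.1)  # -> 0.1, evaluated lazily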
rmsprop_test.py
95 for (dtype, learning_rate, decay, momentum,
113 learning_rate=learning_rate,
151 var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
154 var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
177 learning_rate=1.0,
201 learning_rate=1.0,
219 for (dtype, learning_rate, decay,
239 learning_rate=learning_rate,
277 learning_rate, decay, momentum, epsilon, centered)
280 learning_rate, decay, momentum, epsilon, centered)
[all …]
gradient_descent.py
34 def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
52 self._learning_rate = learning_rate
80 learning_rate = self._call_if_callable(self._learning_rate)
82 learning_rate, name="learning_rate")
/external/tensorflow/tensorflow/contrib/layers/python/layers/
optimizers_test.py
63 gradient_descent.GradientDescentOptimizer(learning_rate=0.1),
64 lambda lr: gradient_descent.GradientDescentOptimizer(learning_rate=lr),
72 loss, global_step, learning_rate=0.1, optimizer=optimizer)
82 return gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
88 loss, global_step, learning_rate=None, optimizer=optimizer_fn)
103 loss, global_step, learning_rate=0.1, optimizer=optimizer)
110 loss, global_step, learning_rate=0.1, optimizer="SGD",
118 None, global_step, learning_rate=0.1, optimizer="SGD")
121 [[1.0]], global_step, learning_rate=0.1, optimizer="SGD")
134 learning_rate=0.1,
[all …]
optimizers.py
43 …"Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint:…
59 learning_rate,
172 if learning_rate is not None:
173 if (isinstance(learning_rate, ops.Tensor) and
174 learning_rate.get_shape().ndims == 0):
175 lr = learning_rate
176 elif isinstance(learning_rate, float):
177 if learning_rate < 0.0:
178 raise ValueError("Invalid learning_rate %s.", learning_rate)
182 initializer=init_ops.constant_initializer(learning_rate))
[all …]
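The optimizers.py fragment validates learning_rate before building a train op: a 0-d Tensor passes through, a float must be non-negative and is wrapped in a variable, and other types are rejected (note the original passes learning_rate to ValueError as a second argument rather than formatting it into the message). A condensed plain-Python sketch of that branching, with the tensor and variable handling elided:

def check_learning_rate(learning_rate):
    # Hypothetical helper mirroring the branching above; the real code
    # also accepts a 0-d Tensor and wraps floats in a TF variable.
    if learning_rate is None:
        return None
    if isinstance(learning_rate, float):
        if learning_rate < 0.0:
            raise ValueError("Invalid learning_rate %s." % learning_rate)
        return learning_rate
    raise ValueError("learning_rate should be a float or a 0-d Tensor.")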
/external/tensorflow/tensorflow/contrib/opt/python/training/
addsign_test.py
63 learning_rate=0.1,
91 learning_rate=learning_rate,
127 learning_rate,
137 learning_rate,
153 self._testDense(use_resource=False, learning_rate=0.01, alpha=0.1, beta=0.8)
159 self._testDense(use_resource=True, learning_rate=0.01, alpha=0.1, beta=0.8)
166 learning_rate=0.1,
199 learning_rate=learning_rate,
227 learning_rate,
237 learning_rate,
[all …]
powersign_test.py
64 learning_rate=0.1,
92 learning_rate=learning_rate,
129 learning_rate,
139 learning_rate,
156 learning_rate=0.1,
164 self._testDense(use_resource=True, learning_rate=0.1, base=10.0, beta=0.8)
171 learning_rate=0.1,
204 learning_rate=learning_rate,
232 learning_rate,
242 learning_rate,
[all …]
weight_decay_optimizers.py
304 def __init__(self, weight_decay, learning_rate, momentum,
332 weight_decay, learning_rate=learning_rate, momentum=momentum,
359 def __init__(self, weight_decay, learning_rate=0.001, beta1=0.9, beta2=0.999,
380 weight_decay, learning_rate=learning_rate, beta1=beta1, beta2=beta2,
400 learning_rate=1.0,
445 learning_rate=learning_rate,
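weight_decay_optimizers.py extends Momentum and Adam with decoupled weight decay (the SGDW/AdamW scheme), in which the decay shrinks the weights directly instead of being folded into the gradient. A rough sketch of one decoupled momentum step, my paraphrase of the scheme rather than this file's code:

def sgdw_step(var, grad, buf, learning_rate, momentum, weight_decay):
    # Decoupled decay: subtract weight_decay * var alongside the
    # momentum update, rather than adding it to grad.
    buf = momentum * buf + grad
    var = var - learning_rate * buf - weight_decay * var
    return var, buf

This works elementwise on floats or NumPy arrays.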
lars_optimizer.py
49 learning_rate,
89 self._learning_rate = learning_rate
168 learning_rate = self._learning_rate
169 if callable(learning_rate):
170 learning_rate = learning_rate()
172 learning_rate, name="learning_rate")
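lars_optimizer.py stores the base rate the same way, but LARS (layer-wise adaptive rate scaling) then multiplies it per layer by a trust ratio derived from the weight and gradient norms. A hedged NumPy sketch of that ratio; the eeta default and epsilon guard are illustrative, not this file's exact values:

import numpy as np

def lars_scaled_lr(var, grad, learning_rate, eeta=0.001, weight_decay=0.0):
    w_norm = np.linalg.norm(var)
    g_norm = np.linalg.norm(grad)
    # Layers with large weights and small gradients take larger steps.
    trust_ratio = eeta * w_norm / (g_norm + weight_decay * w_norm + 1e-12)
    return learning_rate * trust_ratio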
/external/tensorflow/tensorflow/python/keras/optimizer_v2/
adagrad_test.py
80 learning_rate = lambda: 3.0
82 learning_rate = learning_rate()
84 ada_opt = adagrad.Adagrad(learning_rate)
132 learning_rate = 3.0
135 ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
156 lr_np = learning_rate / (1 + decay * t)
176 learning_rate = 3.0
179 learning_rate, decay_steps=1.0, decay_rate=decay)
202 lr_np = learning_rate / (1 + decay * t)
246 learning_rate = constant_op.constant(3.0)
[all …]
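The adagrad_test.py expectations recompute the decayed rate in NumPy as learning_rate / (1 + decay * t), which is inverse time decay with decay_steps = 1. The general form as a sketch (the helper name is mine):

def inverse_time_decay_value(learning_rate, step, decay_steps, decay_rate):
    # With decay_steps=1.0 this reduces to lr / (1 + decay * t),
    # matching the lr_np lines above.
    return learning_rate / (1.0 + decay_rate * step / decay_steps)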
rmsprop_test.py
103 for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
116 learning_rate=learning_rate,
160 var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,
163 var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,
189 learning_rate = 0.01
196 learning_rate=learning_rate,
232 lr = learning_rate / (1 + decay * t)
260 learning_rate = 0.01
267 learning_rate, decay_steps=1.0, decay_rate=decay)
269 learning_rate=lr_schedule,
[all …]
gradient_descent_test.py
94 learning_rate = 3.0
96 sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay)
102 learning_rate = learning_rate_schedule.InverseTimeDecay(
104 sgd = gradient_descent.SGD(learning_rate=learning_rate)
110 learning_rate = learning_rate_schedule.InverseTimeDecay(
112 sgd = gradient_descent.SGD(learning_rate=learning_rate)
279 opt_2 = gradient_descent.SGD(learning_rate=0.1, lr=1.0)
280 opt_3 = gradient_descent.SGD(learning_rate=0.1)
309 learning_rate = 2.0
312 learning_rate=learning_rate, momentum=momentum)
[all …]
optimizer_v2_test.py
106 sgd.learning_rate = 0.5
117 sgd.learning_rate = learning_rate_schedule.InverseTimeDecay(
275 opt = gradient_descent.SGD(learning_rate=1.0)
324 opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0)
335 opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0)
344 gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
349 gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
354 opt1 = adam.Adam(learning_rate=1.0)
404 opt = adam.Adam(learning_rate=1.0)
438 isinstance(opt.learning_rate, resource_variable_ops.ResourceVariable))
[all …]
/external/tensorflow/tensorflow/contrib/training/python/training/
sgdr_learning_rate_decay.py
28 def sgdr_decay(learning_rate, global_step, initial_period_steps,
131 [learning_rate, global_step,
133 learning_rate = ops.convert_to_tensor(learning_rate,
135 dtype = learning_rate.dtype
184 m_fac = learning_rate * (m_mul ** i_restart)
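sgdr_decay implements SGDR (stochastic gradient descent with warm restarts, Loshchilov & Hutter): within each restart period the rate follows a half-cosine from a starting value toward zero, and the snippet's m_fac = learning_rate * (m_mul ** i_restart) shrinks that starting value after every restart. A sketch of the schedule under those assumptions; the period-growth handling and names are illustrative, not this file's exact code:

import math

def sgdr_lr(learning_rate, step, initial_period_steps, t_mul=2.0, m_mul=1.0):
    # Find the current restart period, each t_mul times longer than
    # the previous one.
    start, period, i_restart = 0.0, float(initial_period_steps), 0
    while step >= start + period:
        start += period
        period *= t_mul
        i_restart += 1
    m_fac = learning_rate * (m_mul ** i_restart)  # as on line 184 above
    t_cur = (step - start) / period  # progress within this period
    return 0.5 * m_fac * (1.0 + math.cos(math.pi * t_cur))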
training_test.py
100 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
117 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
151 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
184 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
207 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
246 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
281 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
304 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
330 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
350 def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
[all …]
/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/
loss_scale_optimizer.py
101 def learning_rate(self):
102 return self._optimizer.learning_rate
104 @learning_rate.setter
105 def learning_rate(self, lr):
106 self._optimizer.learning_rate = lr
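loss_scale_optimizer.py exposes learning_rate as a property pair that simply forwards to the wrapped optimizer, so code that reads or assigns the rate works unchanged on the wrapper. The delegation pattern in isolation (the wrapper name is mine):

class OptimizerWrapper:
    def __init__(self, optimizer):
        self._optimizer = optimizer

    @property
    def learning_rate(self):
        return self._optimizer.learning_rate

    @learning_rate.setter
    def learning_rate(self, lr):
        self._optimizer.learning_rate = lr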
/external/tensorflow/tensorflow/compiler/tests/
addsign_test.py
60 learning_rate=0.1,
81 learning_rate=learning_rate,
109 learning_rate,
119 learning_rate,
136 self._testDense(learning_rate=0.01, alpha=0.1, beta=0.8)
powersign_test.py
61 learning_rate=0.1,
82 learning_rate=learning_rate,
110 learning_rate,
120 learning_rate,
136 self._testDense(learning_rate=0.1, base=10.0, beta=0.8)
/external/tensorflow/tensorflow/contrib/slim/python/slim/
learning_test.py
252 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
287 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
321 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
354 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
379 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
406 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
439 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
458 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
477 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
513 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
[all …]
/external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/
test_utils.py
104 learning_rate=0.1, ignore_params_fn=lambda _: (),
156 optimizer=adam.AdamOptimizer(learning_rate))
172 learning_rate=0.1, rtol=0.2, atol=0.1, train_loss_tolerance_coeff=0.99,
214 train_iterations=train_iterations, seed=seed, learning_rate=learning_rate,
257 learning_rate=0.1,
279 seed=seed, learning_rate=learning_rate,
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
dynamic_rnn_estimator_test.py
260 learning_rate=0.1)
361 learning_rate = 0.1
399 learning_rate=learning_rate,
418 learning_rate = 0.1
456 learning_rate=learning_rate,
515 learning_rate = 0.1
550 learning_rate=learning_rate,
577 learning_rate = 0.3
612 learning_rate=learning_rate,
654 learning_rate = 0.1
[all …]
/external/tensorflow/tensorflow/contrib/optimizer_v2/
rmsprop_test.py
92 (learning_rate, decay, momentum, epsilon, centered, use_resource) = tuple(
110 learning_rate=learning_rate,
148 var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
151 var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
176 learning_rate=1.0,
198 learning_rate=1.0,
216 (learning_rate, decay, momentum, epsilon, centered, _) = tuple(
236 learning_rate=learning_rate,
274 learning_rate, decay, momentum, centered)
277 learning_rate, decay, momentum, centered)
[all …]
momentum_test.py
59 learning_rate = lambda: 2.0
62 learning_rate = learning_rate()
65 learning_rate=learning_rate, momentum=momentum)
176 learning_rate=2.0, momentum=0.9, use_nesterov=True)
212 learning_rate=2.0, momentum=0.9, use_nesterov=True)
252 opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
277 opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
291 learning_rate=constant_op.constant(2.0),
443 mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
466 learning_rate=2.0, momentum=0.9)
[all …]
/external/webrtc/webrtc/base/
rollingaccumulator.h
126 double ComputeWeightedMean(double learning_rate) const {
127 if (count_ < 1 || learning_rate <= 0.0 || learning_rate >= 1.0) {
135 current_weight *= learning_rate;
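The WebRTC hit uses learning_rate in a different sense: rollingaccumulator.h computes an exponentially weighted mean over buffered samples, rejecting rates outside (0, 1) and multiplying the running weight by learning_rate once per step back in time, so older samples count geometrically less. A Python sketch of my reading of that loop; the fallback for invalid input is assumed, since the guard's body isn't shown:

def compute_weighted_mean(samples, learning_rate):
    # samples ordered newest-first; weight decays geometrically with age.
    if not samples:
        return 0.0
    if not 0.0 < learning_rate < 1.0:
        return sum(samples) / len(samples)  # plain-mean fallback (assumed)
    weighted_sum = weight_sum = 0.0
    current_weight = 1.0
    for sample in samples:
        current_weight *= learning_rate  # as on line 135 above
        weighted_sum += current_weight * sample
        weight_sum += current_weight
    return weighted_sum / weight_sum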
