
Searched refs:learning_rate (Results 1 – 25 of 175) sorted by relevance


/external/tensorflow/tensorflow/python/training/
learning_rate_decay.py
31 def exponential_decay(learning_rate, argument
98 learning_rate, decay_steps, decay_rate, staircase=staircase, name=name)
183 def polynomial_decay(learning_rate, argument
269 learning_rate,
284 def natural_exp_decay(learning_rate, argument
358 learning_rate,
372 def inverse_time_decay(learning_rate, argument
445 learning_rate, decay_steps, decay_rate, staircase=staircase, name=name)
455 def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None): argument
508 learning_rate, decay_steps, alpha=alpha, name=name)
[all …]
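Note: a minimal sketch of how these TF 1.x decay schedules are typically wired into an optimizer; the hyperparameter values are illustrative, not taken from the source above.

  import tensorflow.compat.v1 as tf

  global_step = tf.train.get_or_create_global_step()
  # Decayed rate: 0.1 * 0.96 ** floor(global_step / 1000) with staircase=True.
  lr = tf.train.exponential_decay(
      learning_rate=0.1, global_step=global_step,
      decay_steps=1000, decay_rate=0.96, staircase=True)
  opt = tf.train.GradientDescentOptimizer(lr)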
momentum.py
46 def __init__(self, learning_rate, momentum, argument
81 self._learning_rate = learning_rate
90 learning_rate = self._learning_rate
91 if callable(learning_rate):
92 learning_rate = learning_rate()
93 self._learning_rate_tensor = ops.convert_to_tensor(learning_rate,
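Lines 91–93 above show that MomentumOptimizer resolves a callable learning_rate once, when the update op is built. A hedged sketch of passing one (TF 1.x; the lambda value mirrors momentum_test.py):

  import tensorflow.compat.v1 as tf

  # learning_rate may be a float, a tensor, or a zero-argument callable.
  opt = tf.train.MomentumOptimizer(learning_rate=lambda: 2.0, momentum=0.9)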
gradient_descent.py
34 def __init__(self, learning_rate, use_locking=False, name="GradientDescent"): argument
52 self._learning_rate = learning_rate
80 learning_rate = self._call_if_callable(self._learning_rate)
82 learning_rate, name="learning_rate")
rmsprop_test.py
95 for (dtype, learning_rate, decay, momentum,
113 learning_rate=learning_rate,
151 var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
154 var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
177 learning_rate=1.0,
201 learning_rate=1.0,
219 for (dtype, learning_rate, decay,
239 learning_rate=learning_rate,
277 learning_rate, decay, momentum, epsilon, centered)
280 learning_rate, decay, momentum, epsilon, centered)
[all …]
adagrad.py
41 def __init__(self, learning_rate, initial_accumulator_value=0.1, argument
67 self._learning_rate = learning_rate
93 learning_rate = self._call_if_callable(self._learning_rate)
95 learning_rate, name="learning_rate")
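Both gradient_descent.py and adagrad.py route the learning rate through the shared _call_if_callable helper; a simplified sketch of that pattern (not the actual TF source):

  def _call_if_callable(param):
    # Return param() for callables, param itself otherwise.
    return param() if callable(param) else param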
momentum_test.py
59 learning_rate = lambda: 2.0 function
62 learning_rate = learning_rate()
65 learning_rate=learning_rate, momentum=momentum)
177 learning_rate=2.0, momentum=0.9, use_nesterov=True)
214 learning_rate=2.0, momentum=0.9, use_nesterov=True)
254 opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
279 opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
294 learning_rate=constant_op.constant(2.0),
453 mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
477 learning_rate=2.0, momentum=0.9)
[all …]
adagrad_test.py
53 learning_rate = lambda: 3.0 function
55 learning_rate = learning_rate()
58 learning_rate, initial_accumulator_value=0.1, use_locking=use_locking)
326 learning_rate = lambda: 3.0 function
329 learning_rate, initial_accumulator_value=0.1, use_locking=True)
/external/tensorflow/tensorflow/python/keras/optimizer_v2/
adagrad_test.py
84 learning_rate = lambda: 3.0 function
86 learning_rate = learning_rate()
88 ada_opt = adagrad.Adagrad(learning_rate)
135 learning_rate = 3.0
138 ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
159 lr_np = learning_rate / (1 + decay * t)
177 learning_rate = 3.0
179 ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.0)
217 learning_rate = 3.0
220 learning_rate, decay_steps=1.0, decay_rate=decay)
[all …]
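The lr_np line above encodes the legacy decay semantics: the effective rate at step t is learning_rate / (1 + decay * t). A hedged sketch with the test's learning rate (TF 2.x optimizer_v2 API; the decay value is illustrative):

  import tensorflow as tf

  opt = tf.keras.optimizers.Adagrad(learning_rate=3.0, decay=0.5)
  # Effective rate after t updates: 3.0 / (1 + 0.5 * t)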
rmsprop_test.py
106 for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
119 learning_rate=learning_rate,
163 var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,
166 var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,
192 learning_rate = 0.01
199 learning_rate=learning_rate,
235 lr = learning_rate / (1 + decay * t)
263 learning_rate = 0.01
270 learning_rate, decay_steps=1.0, decay_rate=decay)
272 learning_rate=lr_schedule,
[all …]
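The same test also drives RMSprop from a LearningRateSchedule, as lines 270–272 show. A hedged sketch of that wiring (tf.keras API; initial rate and decay_steps follow the snippet, decay_rate is illustrative):

  import tensorflow as tf

  lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
      initial_learning_rate=0.01, decay_steps=1.0, decay_rate=0.5)
  # Rate at step t: 0.01 / (1 + 0.5 * t)
  opt = tf.keras.optimizers.RMSprop(learning_rate=lr_schedule)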
gradient_descent_test.py
94 learning_rate = 3.0
96 sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay)
102 learning_rate = learning_rate_schedule.InverseTimeDecay(
104 sgd = gradient_descent.SGD(learning_rate=learning_rate)
110 learning_rate = learning_rate_schedule.InverseTimeDecay(
112 sgd = gradient_descent.SGD(learning_rate=learning_rate)
279 opt_2 = gradient_descent.SGD(learning_rate=0.1, lr=1.0)
280 opt_3 = gradient_descent.SGD(learning_rate=0.1)
309 learning_rate = 2.0
312 learning_rate=learning_rate, momentum=momentum)
[all …]
optimizer_v2_test.py
118 sgd.learning_rate = 0.5
129 sgd.learning_rate = learning_rate_schedule.InverseTimeDecay(
302 opt = gradient_descent.SGD(learning_rate=1.0)
355 opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0)
366 opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0)
375 gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
380 gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
385 opt1 = adam.Adam(learning_rate=1.0)
435 opt = adam.Adam(learning_rate=1.0)
469 isinstance(opt.learning_rate, resource_variable_ops.ResourceVariable))
[all …]
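This test exercises two OptimizerV2 conveniences visible above: learning_rate is assignable after construction (a float or a schedule), and clipvalue/clipnorm clip gradients before the update. A hedged sketch (tf.keras API, values as in the snippets):

  import tensorflow as tf

  opt = tf.keras.optimizers.SGD(learning_rate=1.0, clipnorm=1.0)
  opt.learning_rate = 0.5  # hyperparameters can be reassigned in place
  opt.learning_rate = tf.keras.optimizers.schedules.InverseTimeDecay(
      initial_learning_rate=0.5, decay_steps=1.0, decay_rate=0.5)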
adam_test.py
220 learning_rate = lambda: 0.001 function
225 learning_rate = learning_rate()
230 opt = adam.Adam(learning_rate=learning_rate)
374 learning_rate = 0.001
381 learning_rate=learning_rate,
392 lr_np = learning_rate / (1 + decay * t)
421 learning_rate = 0.001
424 learning_rate, decay_steps=1.0, decay_rate=decay)
430 learning_rate=lr_schedule,
441 lr_np = learning_rate / (1 + decay * t)
[all …]
nadam.py
65 learning_rate=0.001, argument
91 learning_rate = kwargs.get('lr', learning_rate)
92 if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule):
98 self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
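Per kwargs.get('lr', learning_rate) at line 91, the deprecated lr alias wins when both are supplied. A hedged sketch of the consequence (tf.keras Nadam, values illustrative):

  import tensorflow as tf

  # 'lr' is a legacy alias; here it overrides learning_rate, leaving 0.002.
  opt = tf.keras.optimizers.Nadam(learning_rate=0.001, lr=0.002)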
adadelta_test.py
68 learning_rate=lambda: lr, # pylint: disable=cell-var-from-loop
73 learning_rate=lr, rho=rho, epsilon=epsilon)
176 opt_2 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1., lr=1.0)
177 opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.)
/external/webrtc/webrtc/base/
rollingaccumulator.h
126 double ComputeWeightedMean(double learning_rate) const { in ComputeWeightedMean() argument
127 if (count_ < 1 || learning_rate <= 0.0 || learning_rate >= 1.0) { in ComputeWeightedMean()
135 current_weight *= learning_rate; in ComputeWeightedMean()
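In this WebRTC accumulator, learning_rate is not an optimizer step size but a geometric per-sample weight in (0, 1), applied from newest to oldest. A hedged Python sketch of the same computation (not the C++ source; assumes samples are ordered newest first):

  def compute_weighted_mean(samples, learning_rate):
    # Out-of-range rates fall back to the plain mean, as the guard above does.
    if not samples or not 0.0 < learning_rate < 1.0:
      return sum(samples) / len(samples) if samples else 0.0
    weighted_sum = weight_sum = 0.0
    weight = 1.0
    for sample in samples:  # newest to oldest
      weight *= learning_rate
      weighted_sum += weight * sample
      weight_sum += weight
    return weighted_sum / weight_sum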
/external/tensorflow/tensorflow/python/tpu/
tpu_embedding.py
64 learning_rate=None, argument
121 if learning_rate is not None and learning_rate_fn is not None:
124 .format(learning_rate, learning_rate_fn))
128 hot_id_replication, learning_rate, learning_rate_fn)
260 def __init__(self, learning_rate, use_gradient_accumulation, argument
262 self.learning_rate = learning_rate
289 learning_rate, argument
307 self).__init__(learning_rate, use_gradient_accumulation,
335 learning_rate, argument
365 self).__init__(learning_rate, use_gradient_accumulation,
[all …]
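tpu_embedding.py treats learning_rate and learning_rate_fn as mutually exclusive, per the check at line 121. A simplified sketch of that validation (hypothetical helper name, not the actual source):

  def _validate_lr_args(learning_rate=None, learning_rate_fn=None):
    # At most one way of specifying the rate may be used at a time.
    if learning_rate is not None and learning_rate_fn is not None:
      raise ValueError(
          'learning_rate and learning_rate_fn can not both be set: '
          '{}, {}'.format(learning_rate, learning_rate_fn))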
/external/tensorflow/tensorflow/core/kernels/boosted_trees/
training_ops.cc
79 const auto learning_rate = learning_rate_t->scalar<float>()(); in Compute() local
84 FindBestSplitsPerNode(context, learning_rate, node_ids_list, gains_list, in Compute()
170 OpKernelContext* const context, const float learning_rate, in FindBestSplitsPerNode() argument
200 learning_rate * left_node_contribs(candidate_idx, 0)); in FindBestSplitsPerNode()
202 learning_rate * right_node_contribs(candidate_idx, 0)); in FindBestSplitsPerNode()
281 const auto learning_rate = learning_rate_t->scalar<float>()(); in Compute() local
289 FindBestSplitsPerNode(context, learning_rate, node_ids_list, gains_list, in Compute()
390 OpKernelContext* const context, const float learning_rate, in FindBestSplitsPerNode() argument
427 learning_rate * left_node_contribs(candidate_idx, i)); in FindBestSplitsPerNode()
429 learning_rate * right_node_contribs(candidate_idx, i)); in FindBestSplitsPerNode()
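In these boosted-trees kernels, learning_rate is the shrinkage factor of gradient boosting: each candidate leaf's contribution is scaled before being committed to the ensemble. A toy Python sketch of the idea (illustrative only, not the kernel):

  def shrunken_contribs(left_contrib, right_contrib, learning_rate=0.1):
    # Scale per-leaf logit contributions, as lines 200/202 and 427/429 do.
    return learning_rate * left_contrib, learning_rate * right_contrib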
/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/
loss_scale_optimizer.py
305 def learning_rate(self): member in LossScaleOptimizer
306 return self._optimizer.learning_rate
308 @learning_rate.setter
309 def learning_rate(self, lr): member in LossScaleOptimizer
310 self._optimizer.learning_rate = lr
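LossScaleOptimizer exposes learning_rate as a pass-through property, so reads and writes land on the wrapped optimizer. A simplified sketch of the delegation pattern (not the actual class):

  class OptimizerWrapper(object):
    def __init__(self, optimizer):
      self._optimizer = optimizer

    @property
    def learning_rate(self):
      return self._optimizer.learning_rate

    @learning_rate.setter
    def learning_rate(self, lr):
      self._optimizer.learning_rate = lr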
loss_scale_optimizer_test.py
146 learning_rate = 2.
148 learning_rate / strategy.num_replicas_in_sync)
151 opt = gradient_descent.SGD(learning_rate)
169 2 * learning_rate / strategy.num_replicas_in_sync))
211 learning_rate = 2.
214 opt = gradient_descent.SGD(learning_rate)
323 opt = adam.Adam(learning_rate=1.0)
346 opt = adam.Adam(learning_rate=1.0)
380 opt = MyOptimizer(learning_rate=1.0)
/external/tensorflow/tensorflow/compiler/tests/
rmsprop_test.py
75 learning_rate = 3.0
76 rms_opt = rmsprop.RMSPropOptimizer(learning_rate, centered=centered)
108 learning_rate,
116 learning_rate,
/external/tensorflow/tensorflow/python/kernel_tests/boosted_trees/
training_ops_test.py
72 learning_rate=0.1,
179 learning_rate=0.1,
288 learning_rate=0.1,
401 learning_rate=0.1,
615 learning_rate=0.1,
813 learning_rate=0.1,
1016 learning_rate=0.1,
1231 learning_rate=0.1,
1459 learning_rate=0.1,
1611 learning_rate=0.1,
[all …]
/external/tensorflow/tensorflow/python/keras/distribute/
keras_premade_models_test.py
66 opt = gradient_descent.SGD(learning_rate=0.1)
81 linear_opt = gradient_descent.SGD(learning_rate=0.05)
82 dnn_opt = adagrad.Adagrad(learning_rate=0.1)
/external/tensorflow/tensorflow/python/distribute/model_collection/
simple_models.py
54 optimizer = gradient_descent.SGD(learning_rate=0.001)
83 optimizer = gradient_descent.SGD(learning_rate=0.001)
117 optimizer = gradient_descent.SGD(learning_rate=0.001)
/external/tensorflow/tensorflow/python/keras/engine/
training_eager_test.py
86 optimizer = rmsprop.RMSprop(learning_rate=0.001)
159 optimizer = rmsprop.RMSprop(learning_rate=0.001)
184 model.compile(optimizer=rmsprop.RMSprop(learning_rate=0.001),
211 optimizer = rmsprop.RMSprop(learning_rate=0.001)
250 optimizer=rmsprop.RMSprop(learning_rate=0.001),
271 optimizer=rmsprop.RMSprop(learning_rate=0.001),
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
BoostedTreesUpdateEnsembleV2.pbtxt
51 name: "learning_rate"
115 name: "learning_rate"
187 name: "learning_rate"
