
Searched refs:momentum (Results 1 – 25 of 109) sorted by relevance


/external/tensorflow/tensorflow/contrib/eager/python/examples/l2hmc/
l2hmc.py
111 momentum = tf.random_normal(tf.shape(position))
112 position_post, momentum_post = position, momentum
119 accept_prob = self._compute_accept_prob(position, momentum, position_post,
124 def _forward_lf(self, position, momentum, i): argument
131 momentum, logdet = self._update_momentum_forward(position, momentum, t)
134 position, logdet = self._update_position_forward(position, momentum, t,
138 position, logdet = self._update_position_forward(position, momentum, t,
142 momentum, logdet = self._update_momentum_forward(position, momentum, t)
145 return position, momentum, sumlogdet
147 def _backward_lf(self, position, momentum, i): argument
[all …]
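
The l2hmc.py hits above sample an auxiliary momentum with tf.random_normal and run a forward leapfrog pass (_forward_lf) that alternates momentum and position updates before computing an acceptance probability. For orientation, here is a minimal NumPy sketch of the classical leapfrog integrator that L2HMC generalizes; the potential gradient grad_U and step size eps are illustrative assumptions, and the learned scale/translation networks and log-determinant bookkeeping from the excerpt are omitted.

```python
import numpy as np

def leapfrog_step(position, momentum, grad_U, eps=0.1):
    """One classical leapfrog step: half momentum, full position, half momentum."""
    momentum = momentum - 0.5 * eps * grad_U(position)   # momentum half-step
    position = position + eps * momentum                  # position full-step
    momentum = momentum - 0.5 * eps * grad_U(position)   # momentum half-step
    return position, momentum

# Usage: sample momentum ~ N(0, I) as in the excerpt, then integrate.
position = np.zeros(3)
momentum = np.random.normal(size=position.shape)
position, momentum = leapfrog_step(position, momentum, grad_U=lambda x: x)  # quadratic potential
```
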
/external/tensorflow/tensorflow/python/keras/optimizer_v2/
rmsprop_test.py
60 def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum, argument
69 if momentum > 0.:
70 mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))
78 lr, rho, momentum, epsilon, centered): argument
92 if momentum > 0.:
93 mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t +
103 for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
118 momentum=momentum,
132 if momentum > 0.:
161 momentum, epsilon, centered)
[all …]
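
The test helper _rmsprop_update_numpy applies the momentum branch only when momentum > 0 (lines 69-70 above). A hedged NumPy sketch of what such a dense reference update computes; the RMS accumulator recurrence and the final variable update are reconstructed from the standard non-centered RMSProp scheme and are not shown in the excerpt.

```python
import numpy as np

def rmsprop_momentum_update(var, g, rms, mom, lr, rho, momentum, epsilon):
    """One dense RMSProp step with an optional momentum accumulator (sketch)."""
    rms_t = rho * rms + (1. - rho) * g * g   # running mean of squared gradients
    denom_t = rms_t                          # the centered variant would subtract mg_t**2
    if momentum > 0.:
        mom_t = momentum * mom + lr * g / np.sqrt(denom_t + epsilon)
        var_t = var - mom_t
    else:
        mom_t = mom
        var_t = var - lr * g / np.sqrt(denom_t + epsilon)
    return var_t, rms_t, mom_t
```
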
rmsprop.py
66 momentum=0.0, argument
114 if isinstance(momentum, ops.Tensor) or callable(momentum) or momentum > 0:
116 if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
118 self._set_hyper("momentum", momentum)
138 momentum = self._get_hyper("momentum", var_dtype)
151 momentum,
162 momentum,
183 momentum = self._get_hyper("momentum", var_dtype)
196 momentum,
208 momentum,
gradient_descent.py
64 momentum=0.0, argument
88 if isinstance(momentum, ops.Tensor) or callable(momentum) or momentum > 0:
90 if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
92 self._set_hyper("momentum", momentum)
gradient_descent_test.py
293 def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum): argument
294 accum = accum * momentum - g * lr
295 var += (accum * momentum - g * lr)
310 momentum = 0.9
312 learning_rate=learning_rate, momentum=momentum)
377 learning_rate=2.0, momentum=0.9, nesterov=True)
413 learning_rate=2.0, momentum=0.9, nesterov=True)
454 opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.0)
479 opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.0)
494 momentum=constant_op.constant(0.9))
[all …]
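
The _update_nesterov_momentum_numpy helper at lines 293-295 spells out the Keras-convention update used by optimizer_v2 SGD: the accumulator is accum * momentum - lr * g, and with Nesterov enabled the variable moves by the look-ahead accum * momentum - lr * g. A runnable sketch of one step; the plain-momentum branch is added here for contrast and is an assumption, not part of the excerpt.

```python
import numpy as np

def keras_sgd_step(var, accum, g, lr=2.0, momentum=0.9, nesterov=True):
    """One SGD-with-momentum step in the Keras sign convention (sketch)."""
    accum = accum * momentum - lr * g              # velocity accumulator
    if nesterov:
        var = var + (accum * momentum - lr * g)    # look-ahead update
    else:
        var = var + accum
    return var, accum
```
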
/external/tensorflow/tensorflow/contrib/opt/python/training/
lars_optimizer.py
50 momentum=0.9, argument
83 if momentum < 0.0:
84 raise ValueError("momentum should be positive: %s" % momentum)
90 self._momentum = momentum
173 momentum = self._momentum
174 if callable(momentum):
175 momentum = momentum()
176 self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
weight_decay_optimizers.py
27 from tensorflow.python.training import momentum as momentum_opt
304 def __init__(self, weight_decay, learning_rate, momentum, argument
332 weight_decay, learning_rate=learning_rate, momentum=momentum,
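
The constructor at line 304 forwards weight_decay, learning_rate, and momentum to a momentum optimizer with decoupled weight decay. A hedged TF 1.x usage sketch; the public symbol is assumed to be tf.contrib.opt.MomentumWOptimizer, inferred from the file name rather than shown in the excerpt.

```python
import tensorflow as tf  # TF 1.x, where tensorflow.contrib is available

w = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(w))

# Decoupled weight decay combined with the momentum scheme (SGDW-style):
# the decay is applied to the weights directly rather than folded into the loss.
opt = tf.contrib.opt.MomentumWOptimizer(
    weight_decay=1e-4, learning_rate=0.1, momentum=0.9)
train_op = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
```
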
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_ResourceApplyKerasMomentum.pbtxt
28 name: "momentum"
45 var + momentum * accum, so in the end, the var you get is actually
46 var + momentum * accum.
49 summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
51 want to use Nesterov momentum.
53 accum = accum * momentum - lr * grad
api_def_ResourceApplyMomentum.pbtxt
28 name: "momentum"
45 var - lr * momentum * accum, so in the end, the var you get is actually
46 var - lr * momentum * accum.
49 summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
51 want to use Nesterov momentum.
53 accum = accum * momentum + grad
api_def_ApplyMomentum.pbtxt
28 name: "momentum"
51 var - lr * momentum * accum, so in the end, the var you get is actually
52 var - lr * momentum * accum.
55 summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
57 want to use Nesterov momentum.
59 accum = accum * momentum + grad
api_def_ResourceSparseApplyKerasMomentum.pbtxt
34 name: "momentum"
51 var + momentum * accum, so in the end, the var you get is actually
52 var + momentum * accum.
55 summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
57 Set use_nesterov = True if you want to use Nesterov momentum.
61 accum = accum * momentum - lr * grad
api_def_SparseApplyMomentum.pbtxt
34 name: "momentum"
57 var - lr * momentum * accum, so in the end, the var you get is actually
58 var - lr * momentum * accum.
61 summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
63 Set use_nesterov = True if you want to use Nesterov momentum.
67 $$accum = accum * momentum + grad$$
api_def_ResourceSparseApplyMomentum.pbtxt
34 name: "momentum"
51 var - lr * momentum * accum, so in the end, the var you get is actually
52 var - lr * momentum * accum.
55 summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
57 Set use_nesterov = True if you want to use Nesterov momentum.
61 accum = accum * momentum + grad
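
Read together, these api_def excerpts describe two sign conventions for the same scheme: the Keras variants accumulate accum = accum * momentum - lr * grad and add the accumulator to var, while the classic ApplyMomentum variants accumulate accum = accum * momentum + grad and subtract lr * accum; with use_nesterov the value you read back is effectively var + momentum * accum (Keras) or var - lr * momentum * accum (classic). A NumPy sketch of both dense updates, written directly from the formulas quoted above.

```python
import numpy as np

def keras_momentum(var, accum, grad, lr, momentum, use_nesterov=False):
    """ResourceApplyKerasMomentum convention (sketch)."""
    accum = accum * momentum - lr * grad
    if use_nesterov:
        var = var + momentum * accum - lr * grad    # reads back as var + momentum * accum
    else:
        var = var + accum
    return var, accum

def classic_momentum(var, accum, grad, lr, momentum, use_nesterov=False):
    """ApplyMomentum / ResourceApplyMomentum convention (sketch)."""
    accum = accum * momentum + grad
    if use_nesterov:
        var = var - lr * (grad + momentum * accum)  # reads back as var - lr * momentum * accum
    else:
        var = var - lr * accum
    return var, accum
```
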
/external/tensorflow/tensorflow/python/training/
momentum.py
46 def __init__(self, learning_rate, momentum, argument
77 self._momentum = momentum
90 momentum = self._momentum
91 if callable(momentum):
92 momentum = momentum()
93 self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
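
Lines 90-93 show how MomentumOptimizer accepts either a number or a callable for momentum and resolves it to a tensor when preparing the op (lars_optimizer.py lines 173-176 above do the same). A small self-contained sketch of that pattern; the helper name is hypothetical.

```python
import tensorflow as tf

def resolve_momentum(momentum):
    """Resolve a float-or-callable momentum hyperparameter to a tensor (sketch)."""
    if callable(momentum):
        momentum = momentum()  # e.g. a schedule or lambda evaluated lazily
    return tf.convert_to_tensor(momentum, name="momentum")

m1 = resolve_momentum(0.9)
m2 = resolve_momentum(lambda: 0.9)
```
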
rmsprop_test.py
61 def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, decay, momentum, argument
70 mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
75 lr, decay, momentum, epsilon, centered): argument
88 mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t)
95 for (dtype, learning_rate, decay, momentum,
115 momentum=momentum,
152 decay, momentum, epsilon, centered)
155 decay, momentum, epsilon, centered)
179 momentum=0.0,
203 momentum=0.0,
[all …]
momentum_test.py
35 from tensorflow.python.training import momentum as momentum_lib
40 def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum): argument
41 var = var + accum * lr * momentum
42 accum = accum * momentum + g
44 var = var - accum * lr * momentum
60 momentum = lambda: 0.9 function
63 momentum = momentum()
65 learning_rate=learning_rate, momentum=momentum)
177 learning_rate=2.0, momentum=0.9, use_nesterov=True)
214 learning_rate=2.0, momentum=0.9, use_nesterov=True)
[all …]
/external/tensorflow/tensorflow/python/layers/
normalization_test.py
318 axis=1, epsilon=epsilon, momentum=0.9)
361 axis=2, epsilon=epsilon, momentum=0.9)
403 axis=1, epsilon=epsilon, momentum=0.9)
444 axis=2, epsilon=epsilon, momentum=0.9)
485 axis=3, epsilon=epsilon, momentum=0.9)
526 axis=3, epsilon=epsilon, momentum=0.9, fused=True)
568 axis=1, epsilon=epsilon, momentum=0.9, fused=True)
609 axis=-1, epsilon=epsilon, momentum=0.9)
651 axis=-1, epsilon=epsilon, momentum=0.9)
696 momentum=0.9,
[all …]
normalization.py
109 momentum=0.99, argument
132 momentum=momentum,
163 momentum=0.99, argument
290 momentum=momentum,
/external/tensorflow/tensorflow/contrib/optimizer_v2/
momentum_test.py
24 from tensorflow.contrib.optimizer_v2 import momentum as momentum_lib
40 def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum): argument
41 var = var + accum * lr * momentum
42 accum = accum * momentum + g
44 var = var - accum * lr * momentum
60 momentum = lambda: 0.9 function
63 momentum = momentum()
65 learning_rate=learning_rate, momentum=momentum)
176 learning_rate=2.0, momentum=0.9, use_nesterov=True)
212 learning_rate=2.0, momentum=0.9, use_nesterov=True)
[all …]
rmsprop_test.py
57 def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, decay, momentum, argument
66 mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
71 lr, decay, momentum, centered): argument
84 mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t)
92 (learning_rate, decay, momentum, epsilon, centered, use_resource) = tuple(
112 momentum=momentum,
149 decay, momentum, centered)
152 decay, momentum, centered)
178 momentum=0.0,
200 momentum=0.0,
[all …]
/external/tensorflow/tensorflow/compiler/tests/
momentum_test.py
30 from tensorflow.python.training import momentum as momentum_lib
35 def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum): argument
36 var += accum * lr * momentum
37 accum = accum * momentum + g
39 var -= accum * lr * momentum
50 learning_rate=2.0, momentum=0.9)
115 learning_rate=0.1, momentum=0.9, use_nesterov=True)
136 momentum=constant_op.constant(0.9))
/external/tensorflow/tensorflow/python/keras/layers/
normalization.py
138 momentum=0.99, argument
168 self.momentum = momentum
427 def _assign_moving_average(self, variable, value, momentum): argument
429 [variable, value, momentum]) as scope:
431 decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
475 momentum = tf_utils.smart_cond(training,
476 lambda: self.momentum,
479 momentum = ops.convert_to_tensor(self.momentum)
485 (mean, self.momentum))
488 (variance, self.momentum))
[all …]
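
In the normalization layers, momentum is the retention factor of the moving statistics: _assign_moving_average (lines 427-431 above) converts it to decay = 1.0 - momentum and moves the stored value a fraction decay of the way toward the new batch statistic. A NumPy sketch of that exponential moving average; the update form is reconstructed from the decay definition in the excerpt.

```python
import numpy as np

def assign_moving_average(moving, value, momentum=0.99):
    """Exponential moving average as used for batch-norm statistics (sketch)."""
    decay = 1.0 - momentum
    return moving - (moving - value) * decay   # == moving * momentum + value * (1 - momentum)

# Usage: after each training batch, fold the batch mean into the moving mean.
moving_mean = np.zeros(4)
batch_mean = np.array([0.5, -0.1, 0.2, 0.0])
moving_mean = assign_moving_average(moving_mean, batch_mean, momentum=0.9)
```
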
/external/tensorflow/tensorflow/python/framework/
auto_control_deps_test.py
37 from tensorflow.python.training import momentum
231 optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
244 optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
277 optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
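
These tests construct MomentumOptimizer straight from tensorflow.python.training.momentum. A hedged TF 1.x graph-mode usage sketch through the public alias tf.train.MomentumOptimizer (assumed here), minimizing a toy quadratic; the extreme momentum=1.0 from the test is replaced by a more typical 0.9.

```python
import tensorflow as tf  # TF 1.x graph mode

w = tf.Variable([3.0, -2.0])
loss = tf.reduce_sum(tf.square(w))

opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9, use_nesterov=False)
train_op = opt.minimize(loss)  # creates the 'Momentum' accumulator slot per variable

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
        sess.run(train_op)
```
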
/external/tensorflow/tensorflow/core/kernels/
training_ops_gpu.cu.cc
89 typename TTypes<T>::ConstScalar momentum, bool use_nesterov) { in operator ()()
93 accum.device(d) = accum * momentum.reshape(single).broadcast(bcast) + grad; in operator ()()
96 accum * momentum.reshape(single).broadcast(bcast) * in operator ()()
110 typename TTypes<T>::ConstScalar momentum, bool use_nesterov) { in operator ()()
114 accum.device(d) = (accum * momentum.reshape(single).broadcast(bcast) - in operator ()()
117 var.device(d) += (accum * momentum.reshape(single).broadcast(bcast) - in operator ()()
233 typename TTypes<T>::ConstScalar momentum, in operator ()()
244 mom * momentum.reshape(single).broadcast(bcast) + in operator ()()
258 typename TTypes<T>::ConstScalar momentum, in operator ()()
270 mom.device(d) = mom * momentum.reshape(single).broadcast(bcast) + in operator ()()
/external/tensorflow/tensorflow/contrib/distribute/python/
single_loss_example.py
91 momentum=0.9, argument
106 renorm=renorm, momentum=momentum, fused=False)
