Home
last modified time | relevance | path

Searched refs:grads_and_vars (Results 1 – 25 of 81) sorted by relevance

1234

/external/tensorflow/tensorflow/python/keras/optimizer_v2/
Doptimizer_v2.py297 grads_and_vars = self._compute_gradients(
300 return self.apply_gradients(grads_and_vars, name=name)
341 grads_and_vars = list(zip(grads, var_list))
343 v for g, v in grads_and_vars
347 return grads_and_vars
380 def apply_gradients(self, grads_and_vars, name=None): argument
399 grads_and_vars = _filter_grads(grads_and_vars)
400 var_list = [v for (_, v) in grads_and_vars]
411 self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name})
413 def _distributed_apply(self, distribution, grads_and_vars, name): argument
[all …]
/external/tensorflow/tensorflow/python/training/
Doptimizer.py398 grads_and_vars = self.compute_gradients(
404 vars_with_grad = [v for g, v in grads_and_vars if g is not None]
409 ([str(v) for _, v in grads_and_vars], loss))
411 return self.apply_gradients(grads_and_vars, global_step=global_step,
505 grads_and_vars = list(zip(grads, var_list))
507 [v for g, v in grads_and_vars
509 return grads_and_vars
511 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
547 grads_and_vars = get_filtered_grad_fn(lambda: grads_and_vars)()
549 self._distributed_apply, args=(grads_and_vars, global_step, name))
[all …]
Doptimizer_test.py192 grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
197 for j, gv in enumerate(grads_and_vars)
201 for j, gv in enumerate(grads_and_vars)
226 grads_and_vars = sgd_op.compute_gradients(f, [x])
227 self.assertEqual(1, len(grads_and_vars))
228 grad, x_as_var = grads_and_vars[0]
233 sgd_op.apply_gradients(grads_and_vars)
Dsync_replicas_optimizer.py226 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
249 if not grads_and_vars:
278 for grad, var in grads_and_vars:
/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/
Dloss_scale_optimizer.py74 grads_and_vars = self._optimizer._compute_gradients(loss, var_list, # pylint: disable=protected-a…
76 grads = [g for g, _ in grads_and_vars]
77 variables = [v for _, v in grads_and_vars]
97 def apply_gradients(self, grads_and_vars, name=None): argument
98 return self._optimizer.apply_gradients(grads_and_vars, name)
/external/tensorflow/tensorflow/contrib/mixed_precision/python/
Dloss_scale_optimizer.py135 grads_and_vars = self._opt.compute_gradients(
142 return self._down_scale(grads_and_vars, loss_scale)
144 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
146 grads = [g for (g, _) in grads_and_vars]
155 return self._opt.apply_gradients(grads_and_vars, global_step, name)
Dloss_scale_optimizer_test.py107 grads_and_vars = opt.compute_gradients(loss, var_list=[x])
109 self.assertEqual(len(grads_and_vars), 1)
112 g_v = self.evaluate(grads_and_vars[0][0])
114 self.assertIs(grads_and_vars[0][1], x)
123 grads_and_vars = opt.compute_gradients(loss, var_list=[x])
125 self.assertEqual(len(grads_and_vars), 1)
127 g_v = self.evaluate(grads_and_vars[0][0])
/external/tensorflow/tensorflow/contrib/optimizer_v2/
Doptimizer_v2.py707 grads_and_vars = self.compute_gradients(
716 vars_with_grad = [v for g, v in grads_and_vars if g is not None]
721 ([str(v) for _, v in grads_and_vars], loss))
724 grads_and_vars, global_step=global_step, name=name)
827 grads_and_vars = list(zip(grads, var_list))
829 v for g, v in grads_and_vars
832 return grads_and_vars
843 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
870 grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.
871 if not grads_and_vars:
[all …]
Doptimizer_v2_test.py171 grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
176 for j, gv in enumerate(grads_and_vars)
180 for j, gv in enumerate(grads_and_vars)
205 grads_and_vars = sgd_op.compute_gradients(f, [x])
206 self.assertEqual(1, len(grads_and_vars))
207 grad, x_as_var = grads_and_vars[0]
212 sgd_op.apply_gradients(grads_and_vars)
256 grads_and_vars = sgd_op.compute_gradients(cost, [var0, var1],
258 grad_dict = {var.op.name: grad for grad, var in grads_and_vars}
/external/tensorflow/tensorflow/contrib/layers/python/layers/
Doptimizers.py305 def _clip_gradients_by_norm(grads_and_vars, clip_gradients): argument
307 gradients, variables = zip(*grads_and_vars)
374 def gradient_clipping(grads_and_vars): argument
376 grads, variables = zip(*grads_and_vars)
412 def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale): argument
414 gradients, variables = zip(*grads_and_vars)
429 def _multiply_gradients(grads_and_vars, gradient_multipliers): argument
432 for grad, var in grads_and_vars:
Doptimizers_test.py476 grads_and_vars = [(grad, grad)]
477 grads_and_vars = optimizers_lib.adaptive_clipping_fn(
478 decay=0.5)(grads_and_vars)
493 [moving_mean, moving_sq_mean, grads_and_vars[0][0]])
508 grads_and_vars = [(grad, grad)]
509 grads_and_vars = optimizers_lib.adaptive_clipping_fn(
510 decay=0.9, global_step=step)(grads_and_vars)
515 return session.run(grads_and_vars[0][0],
/external/tensorflow/tensorflow/contrib/constrained_optimization/python/
Dexternal_regret_optimizer.py267 grads_and_vars = self.optimizer.compute_gradients(
274 grads_and_vars.append(
277 self.optimizer.apply_gradients(grads_and_vars, name="update"))
282 grads_and_vars = self.optimizer.compute_gradients(
294 gradient for gradient, _ in grads_and_vars + multiplier_grads_and_vars
299 self.optimizer.apply_gradients(grads_and_vars, name="update"))
Dswap_regret_optimizer.py382 grads_and_vars = self.optimizer.compute_gradients(
389 grads_and_vars.append(
392 self.optimizer.apply_gradients(grads_and_vars, name="update"))
397 grads_and_vars = self.optimizer.compute_gradients(
409 gradient for gradient, _ in grads_and_vars + matrix_grads_and_vars
414 self.optimizer.apply_gradients(grads_and_vars, name="update"))
/external/tensorflow/tensorflow/contrib/training/python/training/
Dtraining.py270 def add_gradients_summaries(grads_and_vars): argument
280 for grad, var in grads_and_vars:
326 def multiply_gradients(grads_and_vars, gradient_multipliers): argument
341 if not isinstance(grads_and_vars, list):
349 for grad, var in grads_and_vars:
/external/tensorflow/tensorflow/contrib/opt/python/training/
Ddrop_stale_gradient_optimizer.py78 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
90 grads_and_vars, global_step=global_step, name=name)]):
96 for grad_and_var in grads_and_vars:
Dmoving_average_optimizer.py93 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
95 grads_and_vars, global_step=global_step, name=name)
96 var_list = [x[1] for x in grads_and_vars if x[0] is not None]
Dvariable_clipping_optimizer.py92 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
95 grads_and_vars, global_step=global_step)
98 for grad, var in grads_and_vars:
Dmodel_average_optimizer.py184 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
210 if not grads_and_vars:
215 apply_updates = self._opt.apply_gradients(grads_and_vars)
222 local_vars = [v for g, v in grads_and_vars if g is not None]
Dagn_optimizer.py134 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
152 local_vars = [v for g, v in grads_and_vars if g is not None]
153 grads = [g for g, v in grads_and_vars if g is not None]
166 local_update_op = self._opt.apply_gradients(grads_and_vars)
Daddsign.py92 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
97 grads_and_vars, global_step=global_step, name=name)
Dpowersign.py94 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
99 grads_and_vars, global_step=global_step, name=name)
/external/tensorflow/tensorflow/contrib/slim/python/slim/
Dlearning.py302 def multiply_gradients(grads_and_vars, gradient_multipliers): argument
317 if not isinstance(grads_and_vars, list):
325 for grad, var in grads_and_vars:
344 def add_gradients_summaries(grads_and_vars): argument
354 for grad, var in grads_and_vars:
/external/tensorflow/tensorflow/contrib/distribute/python/
Dstep_fn.py102 grads_and_vars = self.distribution.extended.call_for_each_replica(
109 self.distribution, grads_and_vars)
/external/tensorflow/tensorflow/python/tpu/
Dtpu_embedding_gradient.py44 grads_and_vars = optimizer.compute_gradients(loss, activation_list)
45 grads = [grad for grad, _ in grads_and_vars]
Dtpu_optimizer.py142 def apply_gradients(self, grads_and_vars, global_step=None, name=None): argument
164 for (grad, var) in grads_and_vars:

1234