/external/tensorflow/tensorflow/contrib/kfac/python/ops/ |
D | optimizer.py |
    193  def apply_gradients(self, grads_and_vars, *args, **kwargs):  (argument)
    207    grads_and_vars = list(grads_and_vars)
    210    steps_and_vars = self._compute_update_steps(grads_and_vars)
    216  def _squared_fisher_norm(self, grads_and_vars, precon_grads_and_vars):  (argument)
    236    for (_, gvar), (_, pgvar) in zip(grads_and_vars, precon_grads_and_vars):
    242    for (grad, _), (pgrad, _) in zip(grads_and_vars, precon_grads_and_vars)
    246  def _update_clip_coeff(self, grads_and_vars, precon_grads_and_vars):  (argument)
    271    sq_norm_grad = self._squared_fisher_norm(grads_and_vars,
    277  def _clip_updates(self, grads_and_vars, precon_grads_and_vars):  (argument)
    296    coeff = self._update_clip_coeff(grads_and_vars, precon_grads_and_vars)
    [all …]
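The K-FAC snippets above pair each raw gradient with its preconditioned counterpart to form a squared Fisher norm, then turn that norm into a clipping coefficient for the update. A minimal numpy sketch of that pairing; the norm_constraint threshold and the min(1, sqrt(c / sq_norm)) rule are illustrative assumptions, not taken from the file:

    import numpy as np

    def squared_fisher_norm(grads_and_vars, precon_grads_and_vars):
        # Pair each gradient with its preconditioned gradient, as in the
        # zip() calls at lines 236/242 above, and sum the elementwise
        # inner products over all (grad, var) pairs.
        return sum(
            np.sum(grad * pgrad)
            for (grad, _), (pgrad, _) in zip(grads_and_vars,
                                             precon_grads_and_vars))

    def update_clip_coeff(grads_and_vars, precon_grads_and_vars,
                          norm_constraint=1e-2):
        # Hypothetical clipping rule: shrink the update when its squared
        # Fisher norm exceeds norm_constraint, leave it unscaled otherwise.
        sq_norm = squared_fisher_norm(grads_and_vars, precon_grads_and_vars)
        return min(1.0, np.sqrt(norm_constraint / max(sq_norm, 1e-12)))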
|
/external/tensorflow/tensorflow/contrib/estimator/python/estimator/ |
D | extenders.py |
    127  def clip_grads(grads_and_vars):  (argument)
    128    gradients, variables = zip(*grads_and_vars)
    130    grads_and_vars = list(zip(gradients, variables))
    131    return grads_and_vars
    286  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    306    grads_and_vars = self._transform_grads_fn(grads_and_vars)
    307    return self._optimizer.apply_gradients(grads_and_vars, global_step, name)
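The clip_grads helper above shows the standard unzip/clip/re-zip shape; the clipping call itself falls on the elided line 129. A self-contained sketch of the same transform, assuming tf.clip_by_global_norm and an illustrative clip_norm of 5.0:

    import tensorflow as tf

    def clip_grads(grads_and_vars, clip_norm=5.0):
        # Unzip the (grad, var) pairs, clip all gradients jointly by their
        # global norm, then re-zip into (grad, var) pairs again.
        gradients, variables = zip(*grads_and_vars)
        clipped, _ = tf.clip_by_global_norm(list(gradients), clip_norm)
        return list(zip(clipped, variables))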
|
D | replicate_model_fn.py |
    304  def apply_gradients(self, grads_and_vars, global_step=None, **kwargs):  (argument)
    312    return self._get_optimizer().apply_gradients(grads_and_vars, global_step,
    315    self._graph_state().collect_gradients(grads_and_vars)
    318    with ops_lib.control_dependencies(_extract_tensors(grads_and_vars)):
    391  def collect_gradients(self, grads_and_vars):  (argument)
    393        grads_and_vars)
    397    grads_and_vars = []
    401    grads_and_vars.extend(
    403    return grads_and_vars
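replicate_model_fn collects per-tower (grad, var) lists and later merges them. A sketch of the usual merge step under the assumption that every tower lists the same variables in the same order; this is the common pattern, not the file's exact code:

    import tensorflow as tf

    def average_tower_gradients(tower_grads_and_vars):
        # tower_grads_and_vars holds one [(grad, var), ...] list per tower.
        # For each variable, average the gradients across towers; a variable
        # with no gradient on any tower keeps None.
        averaged = []
        for pairs in zip(*tower_grads_and_vars):
            grads = [g for g, _ in pairs if g is not None]
            var = pairs[0][1]
            mean = tf.add_n(grads) / len(grads) if grads else None
            averaged.append((mean, var))
        return averaged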
|
/external/tensorflow/tensorflow/contrib/kfac/python/kernel_tests/ |
D | optimizer_test.py |
    80   grads_and_vars = [(array_ops.constant([[1., 2.], [3., 4.]]), None),
    85   sq_norm = opt._squared_fisher_norm(grads_and_vars, pgrads_and_vars)
    90   grads_and_vars = [(array_ops.constant([[1., 2.], [3., 4.]]), None),
    103  coeff = opt._update_clip_coeff(grads_and_vars, pgrads_and_vars)
    110  coeff = opt._update_clip_coeff(grads_and_vars, pgrads_and_vars)
    111  sq_norm_pgrad = opt._squared_fisher_norm(grads_and_vars, pgrads_and_vars)
    189  grads_and_vars = opt.compute_gradients(output, [weights, bias])
    190  all_vars = [grad_and_var[1] for grad_and_var in grads_and_vars]
    192  op = opt.apply_gradients(grads_and_vars)
|
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
D | optimizers.py |
    305  def _clip_gradients_by_norm(grads_and_vars, clip_gradients):  (argument)
    307    gradients, variables = zip(*grads_and_vars)
    374  def gradient_clipping(grads_and_vars):  (argument)
    376    grads, variables = zip(*grads_and_vars)
    412  def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):  (argument)
    414    gradients, variables = zip(*grads_and_vars)
    429  def _multiply_gradients(grads_and_vars, gradient_multipliers):  (argument)
    432    for grad, var in grads_and_vars:
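Among these helpers, _multiply_gradients is the simplest to reconstruct: it rescales selected gradients and passes the rest through. A simplified sketch, assuming gradient_multipliers is keyed by variable and ignoring the IndexedSlices handling the real helper also performs:

    import tensorflow as tf

    def multiply_gradients(grads_and_vars, gradient_multipliers):
        # Scale the gradient of any variable named in gradient_multipliers;
        # all other (grad, var) pairs pass through unchanged.
        multiplied = []
        for grad, var in grads_and_vars:
            if grad is not None and var in gradient_multipliers:
                grad = grad * gradient_multipliers[var]
            multiplied.append((grad, var))
        return multiplied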
|
D | optimizers_test.py |
    439  grads_and_vars = [(grad, grad)]
    440  grads_and_vars = optimizers_lib.adaptive_clipping_fn(
    441      decay=0.5)(grads_and_vars)
    456      [moving_mean, moving_sq_mean, grads_and_vars[0][0]])
    471  grads_and_vars = [(grad, grad)]
    472  grads_and_vars = optimizers_lib.adaptive_clipping_fn(
    473      decay=0.9, global_step=step)(grads_and_vars)
    478  return session.run(grads_and_vars[0][0],
|
/external/tensorflow/tensorflow/python/training/ |
D | optimizer.py |
    388  grads_and_vars = self.compute_gradients(
    394  vars_with_grad = [v for g, v in grads_and_vars if g is not None]
    399      ([str(v) for _, v in grads_and_vars], loss))
    401  return self.apply_gradients(grads_and_vars, global_step=global_step,
    488  grads_and_vars = list(zip(grads, var_list))
    490      [v for g, v in grads_and_vars
    492  return grads_and_vars
    494  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    520    grads_and_vars = tuple(grads_and_vars)  # Make sure repeat iteration works.
    521    if not grads_and_vars:
    [all …]
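These are the core Optimizer entry points: minimize() is compute_gradients() followed by apply_gradients(), and splitting the two calls lets callers inspect or rewrite the (grad, var) pairs in between. A runnable TF 1.x-style sketch via tf.compat.v1, with a toy loss invented for illustration:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    w = tf.get_variable("w", initializer=1.0)
    b = tf.get_variable("b", initializer=0.0)
    loss = tf.square(3.0 * w + b - 2.0)  # toy quadratic loss
    global_step = tf.train.get_or_create_global_step()

    opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    grads_and_vars = opt.compute_gradients(loss, var_list=[w, b])
    # Mirror the None-gradient check visible at line 394 above.
    vars_with_grad = [v for g, v in grads_and_vars if g is not None]
    assert vars_with_grad, "No gradients provided for any variable"
    train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)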
|
D | optimizer_test.py |
    190  grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
    195      for j, gv in enumerate(grads_and_vars)
    199      for j, gv in enumerate(grads_and_vars)
    224  grads_and_vars = sgd_op.compute_gradients(f, [x])
    225  self.assertEqual(1, len(grads_and_vars))
    226  grad, x_as_var = grads_and_vars[0]
    231  sgd_op.apply_gradients(grads_and_vars)
|
D | sync_replicas_optimizer.py |
    211  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    234    if not grads_and_vars:
    262    for grad, var in grads_and_vars:
|
D | gradient_descent_test.py |
    164  grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
    166  for grad, _ in grads_and_vars:
|
/external/tensorflow/tensorflow/contrib/training/python/training/ |
D | training.py |
    271  def add_gradients_summaries(grads_and_vars):  (argument)
    281    for grad, var in grads_and_vars:
    327  def multiply_gradients(grads_and_vars, gradient_multipliers):  (argument)
    342    if not isinstance(grads_and_vars, list):
    350    for grad, var in grads_and_vars:
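add_gradients_summaries iterates the pairs and emits one summary per gradient. A sketch of that loop, assuming TF 1.x summaries and a "/gradient" tag suffix chosen here for illustration:

    import tensorflow.compat.v1 as tf

    def add_gradients_summaries(grads_and_vars):
        # One histogram summary per gradient; sparse gradients arrive as
        # tf.IndexedSlices, so summarize their .values tensor instead.
        summaries = []
        for grad, var in grads_and_vars:
            if grad is None:
                continue
            values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
            summaries.append(
                tf.summary.histogram(var.op.name + "/gradient", values))
        return summaries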
|
/external/tensorflow/tensorflow/contrib/opt/python/training/ |
D | drop_stale_gradient_optimizer.py |
    78   def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    90       grads_and_vars, global_step=global_step, name=name)]):
    96   for grad_and_var in grads_and_vars:
|
D | moving_average_optimizer.py |
    92   def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    94       grads_and_vars, global_step=global_step, name=name)
    95   var_list = [x[1] for x in grads_and_vars if x[0] is not None]
|
D | variable_clipping_optimizer.py |
    92   def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    95       grads_and_vars, global_step=global_step)
    98   for grad, var in grads_and_vars:
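The contrib/opt wrappers in this directory all follow one delegation pattern: forward grads_and_vars to an inner optimizer, then run extra ops afterwards under a control dependency. A hypothetical wrapper in that style; the class name, clip rule, and post-processing step are illustrative, not the file's code:

    import tensorflow.compat.v1 as tf

    class NormClippingOptimizer(tf.train.Optimizer):
        """Applies the inner update, then clips each updated variable."""

        def __init__(self, opt, clip_norm, name="NormClippingOptimizer"):
            super(NormClippingOptimizer, self).__init__(
                use_locking=False, name=name)
            self._opt = opt
            self._clip_norm = clip_norm

        def apply_gradients(self, grads_and_vars, global_step=None, name=None):
            # Delegate the actual update, then post-process the variables
            # once the update has run.
            update_op = self._opt.apply_gradients(
                grads_and_vars, global_step=global_step, name=name)
            with tf.control_dependencies([update_op]):
                clip_ops = [var.assign(tf.clip_by_norm(var, self._clip_norm))
                            for _, var in grads_and_vars]
                return tf.group(*clip_ops)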
|
D | model_average_optimizer.py |
    178  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    204    if not grads_and_vars:
    209    apply_updates = self._opt.apply_gradients(grads_and_vars)
    216    local_vars = [v for g, v in grads_and_vars if g is not None]
|
D | elastic_average_optimizer.py |
    222  def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    244    apply_updates = self._opt.apply_gradients(grads_and_vars)
    251    local_vars = [v for g, v in grads_and_vars if g is not None]
|
D | addsign.py |
    92   def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    97       grads_and_vars, global_step=global_step, name=name)
|
D | powersign.py |
    94   def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    99       grads_and_vars, global_step=global_step, name=name)
|
/external/tensorflow/tensorflow/contrib/slim/python/slim/ |
D | learning.py |
    302  def multiply_gradients(grads_and_vars, gradient_multipliers):  (argument)
    317    if not isinstance(grads_and_vars, list):
    325    for grad, var in grads_and_vars:
    344  def add_gradients_summaries(grads_and_vars):  (argument)
    354    for grad, var in grads_and_vars:
|
/external/tensorflow/tensorflow/contrib/tpu/python/tpu/ |
D | tpu_optimizer.py |
    87   def apply_gradients(self, grads_and_vars, global_step=None, name=None):  (argument)
    109    for (grad, var) in grads_and_vars:
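CrossShardOptimizer's apply_gradients aggregates each gradient across TPU replicas before delegating. A sketch of that aggregation using the public tf.tpu.cross_replica_sum op; it is only valid inside a TPU computation, and dividing the sum by num_shards (rather than pre-scaling each shard's gradient) is a mathematically equivalent simplification:

    import tensorflow as tf

    def cross_shard_mean(grads_and_vars, num_shards):
        # Sum each gradient over all TPU replicas, then divide by the shard
        # count so every replica applies the mean gradient.
        result = []
        for grad, var in grads_and_vars:
            if grad is None:
                result.append((None, var))
            else:
                result.append(
                    (tf.tpu.cross_replica_sum(grad) / num_shards, var))
        return result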
|
/external/tensorflow/tensorflow/python/eager/ |
D | backprop_test.py |
    94   grads_and_vars = backprop.implicit_grad(fn)()
    95   self.assertAllEqual(grads_and_vars[0][0], 1.0)
    96   self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
    610  grads_and_vars = g()
    611  self.assertEqual(1, len(grads_and_vars))
    612  grad, var = grads_and_vars[0]
    637  loss, grads_and_vars = loss_grads_fn(x)
    639  for (grad, var) in grads_and_vars:
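backprop.implicit_grad is an internal eager helper that returns (gradient, variable) pairs directly. The public-API way to build the same pairs is tf.GradientTape; a runnable sketch, with the SGD step and learning rate chosen for illustration:

    import tensorflow as tf

    x = tf.Variable(1.0)

    with tf.GradientTape() as tape:
        loss = x * x
    # Zip gradients with their variables so the pairs can be fed straight
    # into an optimizer's apply_gradients().
    grads_and_vars = list(zip(tape.gradient(loss, [x]), [x]))

    opt = tf.keras.optimizers.SGD(learning_rate=0.1)
    opt.apply_gradients(grads_and_vars)  # x: 1.0 - 0.1 * 2.0 = 0.8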
|
D | graph_callable_test.py |
    244  grads_and_vars = list(zip(*grad_fn()))
    245  self.assertAllEqual(6., grads_and_vars[0][0])
|
/external/tensorflow/tensorflow/contrib/bayesflow/python/kernel_tests/ |
D | sgld_optimizer_test.py |
    144  grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
    146  for grad, _ in grads_and_vars:
|
D | variational_sgd_optimizer_test.py |
    194  grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
    196  for grad, _ in grads_and_vars:
|
/external/tensorflow/tensorflow/tools/api/golden/ |
D | tensorflow.train.-adagrad-optimizer.pbtxt |
    26   …argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=N…
|