Searched refs:_get_hyper (Results 1 – 14 of 14) sorted by relevance
/external/tensorflow/tensorflow/python/keras/optimizer_v2/

ftrl.py
    144: self._get_hyper('learning_rate_power', var_dtype)),
    146: self._get_hyper('l1_regularization_strength', var_dtype)),
    148: self._get_hyper('l2_regularization_strength', var_dtype)),
    149: beta=array_ops.identity(self._get_hyper('beta', var_dtype)),

nadam.py
    110: lr_t = array_ops.identity(self._get_hyper('learning_rate', var_dtype))
    111: beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
    112: beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))

adam.py
    138: beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
    139: beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
    390: beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
    391: beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))

adamax.py
    117: beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
    118: beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))

gradient_descent_test.py
    670: self.evaluate(opt._get_hyper("momentum")),
    671: self.evaluate(opt2._get_hyper("momentum")))
    673: self.evaluate(opt._get_hyper("decay")),
    674: self.evaluate(opt2._get_hyper("decay")))
    686: self.evaluate(opt._get_hyper("momentum")),
    687: self.evaluate(opt3._get_hyper("momentum")))
    689: self.evaluate(opt._get_hyper("decay")),
    690: self.evaluate(opt3._get_hyper("decay")))

nadam_test.py
    34: beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
    36: beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype)

rmsprop.py
    166: rho = array_ops.identity(self._get_hyper("rho", var_dtype))
    173: momentum=array_ops.identity(self._get_hyper("momentum", var_dtype)),

optimizer_v2_test.py
    292: lr = opt._get_hyper('learning_rate')
    293: lr2 = opt2._get_hyper('learning_rate')
    302: lr3 = opt3._get_hyper('learning_rate')
    323: opt._get_hyper('learning_rate')(step))
    326: opt2._get_hyper('learning_rate')(step))
    334: self.evaluate(opt._get_hyper('learning_rate')(step)),
    335: opt3._get_hyper('learning_rate')(step))

adadelta.py
    106: rho=array_ops.identity(self._get_hyper('rho', var_dtype))))

gradient_descent.py
    130: self._get_hyper("momentum", var_dtype))

optimizer_v2.py
    791: def _get_hyper(self, name, dtype=None):  (member in OptimizerV2)
    836: return self._get_hyper(name)
    1001: lr_t = self._get_hyper("learning_rate", var_dtype)

adamax_test.py
    74: beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)

adam_test.py
    104: beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
    106: beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype)
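Every optimizer hit above follows the same pattern: a value registered with _set_hyper() in the constructor is read back with _get_hyper(name, var_dtype), usually wrapped in array_ops.identity() so each step gets a fresh tensor in the variable's dtype, while the test files call the no-dtype form to inspect the stored hyperparameter directly. A minimal sketch of that pattern, using the internal import paths these files use; the MySGD class and its plain gradient-descent update are illustrative only and not part of the code base:

from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops


class MySGD(optimizer_v2.OptimizerV2):
  """Illustrative OptimizerV2 subclass; sparse updates and slots omitted."""

  def __init__(self, learning_rate=0.01, name='MySGD', **kwargs):
    super(MySGD, self).__init__(name, **kwargs)
    # Registering the value here is what lets _get_hyper() return it
    # (and cast it) later.
    self._set_hyper('learning_rate', learning_rate)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    var_dtype = var.dtype.base_dtype
    # The same call the optimizers above make: fetch the hyperparameter
    # cast to the variable's dtype, pinned with identity().
    lr_t = array_ops.identity(self._get_hyper('learning_rate', var_dtype))
    return var.assign_sub(lr_t * grad)

  def get_config(self):
    config = super(MySGD, self).get_config()
    config['learning_rate'] = self._serialize_hyperparameter('learning_rate')
    return config

Calling opt._get_hyper('learning_rate') without a dtype, as gradient_descent_test.py does with "momentum" and "decay", returns the stored hyperparameter uncast (the dtype argument at optimizer_v2.py:791 defaults to None), which is why the tests can evaluate and compare it across optimizer instances.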
/external/tensorflow/tensorflow/python/keras/mixed_precision/

loss_scale_optimizer.py
    894: return self._optimizer._get_hyper(name)
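The single mixed_precision hit is the other side of the API: LossScaleOptimizer wraps an inner optimizer and forwards hyperparameter lookups to it, so code asking the wrapper for a hyperparameter sees the wrapped optimizer's value. A rough sketch of that forwarding idea; the HyperForwardingWrapper name is invented for illustration and is not the real class:

import tensorflow as tf


class HyperForwardingWrapper(object):
  """Illustrative wrapper that defers hyperparameter lookups to an inner optimizer."""

  def __init__(self, optimizer):
    self._optimizer = optimizer

  def _get_hyper(self, name, dtype=None):
    # Mirrors the delegation at loss_scale_optimizer.py:894: the wrapper
    # keeps no hyperparameters of its own and forwards the lookup.
    return self._optimizer._get_hyper(name, dtype)


wrapper = HyperForwardingWrapper(tf.keras.optimizers.SGD(momentum=0.9))
print(wrapper._get_hyper('momentum'))  # reports the inner optimizer's value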