/external/tensorflow/tensorflow/python/keras/ |
D | optimizer_v1.py |
    481   beta_1=0.9, argument
    491   self.beta_1 = K.variable(beta_1, name='beta_1')
    525   (1. - math_ops.pow(self.beta_1, t)))
    529   m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
    552   'beta_1': float(K.get_value(self.beta_1)),
    577   beta_1=0.9, argument
    586   self.beta_1 = K.variable(beta_1, name='beta_1')
    617   lr_t = lr / (1. - math_ops.pow(self.beta_1, t))
    623   m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
    641   'beta_1': float(K.get_value(self.beta_1)),
    [all …]
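The matches above show how the v1 Keras Adam keeps beta_1 as a backend variable and uses it both in the bias-corrected step size and in the first-moment update. A minimal NumPy sketch of that arithmetic (the function name and the m/v/g variables are illustrative, not the module's own):

    import numpy as np

    def adam_step(param, g, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7):
        # Bias-corrected step size, mirroring lr * sqrt(1 - beta_2**t) / (1 - beta_1**t).
        lr_t = lr * np.sqrt(1. - beta_2**t) / (1. - beta_1**t)
        # First/second moment updates, mirroring m_t = beta_1 * m + (1 - beta_1) * g.
        m_t = beta_1 * m + (1. - beta_1) * g
        v_t = beta_2 * v + (1. - beta_2) * np.square(g)
        # Parameter update.
        param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
        return param_t, m_t, v_t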
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu_embedding_v2_utils.py |
    434   beta_1: float = 0.9,
    490   if beta_1 < 0. or beta_1 >= 1.:
    492   .format(beta_1))
    502   self.beta_1 = beta_1
    517   parameters.adam.beta1 = self.beta_1
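The TPU embedding Adam config validates that beta_1 lies in [0, 1) before storing it and forwarding it to the embedding parameters proto. A hedged sketch of that range check as a standalone helper (the exact error message in the library may differ):

    def _validate_beta_1(beta_1: float) -> float:
        # Mirror the check seen around line 490: beta_1 must be in [0, 1).
        if beta_1 < 0. or beta_1 >= 1.:
            raise ValueError(
                'beta_1 must be in the range [0, 1). Received: {}'.format(beta_1))
        return beta_1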
|
D | tpu_embedding_v2_correctness_test.py | 422 m = g * (1 - optimizer.beta_1)
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_ApplyAdam.pbtxt |
    85    $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
    86    $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
|
D | api_def_ResourceApplyAdamWithAmsgrad.pbtxt |
    79    $$\text{lr}_t := \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
    80    $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
|
D | api_def_ResourceApplyAdam.pbtxt |
    79    $$\text{lr}_t := \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
    80    $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
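The three api_def entries above document the same update rule. A single worked step of those equations in plain Python, with t = 1, g = 1.0 and the usual defaults (values in the comments are just the arithmetic, not library output):

    beta_1, beta_2, lr, epsilon = 0.9, 0.999, 0.001, 1e-8
    g, m, v, var = 1.0, 0.0, 0.0, 0.5

    lr_t = lr * (1 - beta_2**1) ** 0.5 / (1 - beta_1**1)  # 0.001 * sqrt(0.001) / 0.1 ≈ 0.000316
    m = beta_1 * m + (1 - beta_1) * g                     # 0.1
    v = beta_2 * v + (1 - beta_2) * g * g                 # 0.001
    var -= lr_t * m / (v ** 0.5 + epsilon)                # ≈ 0.5 - 0.001 = 0.499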
|
/external/tensorflow/tensorflow/python/keras/mixed_precision/ |
D | loss_scale_optimizer_test.py |
    398   opt = adam.Adam(learning_rate=1.0, beta_1=0.5, beta_2=0.9)
    404   self.assertEqual(self.evaluate(lso.beta_1), 0.5)
    405   self.assertIsInstance(lso.beta_1, variables.Variable)
    410   lso.beta_1 = 0.25
    411   self.assertEqual(self.evaluate(lso.beta_1), 0.25)
    412   self.assertEqual(self.evaluate(opt.beta_1), 0.25)
    413   self.assertIs(lso.beta_1, opt.beta_1)
    414   opt.beta_1 = 0.75
    415   self.assertEqual(self.evaluate(lso.beta_1), 0.75)
    416   self.assertEqual(self.evaluate(opt.beta_1), 0.75)
    [all …]
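The mixed-precision test above exercises hyperparameter delegation: reading or assigning beta_1 on the wrapper reaches the wrapped Adam's variable. A hedged eager-mode usage sketch; the public wrapper path tf.keras.mixed_precision.LossScaleOptimizer is assumed here, and the delegation behaviour is what the test asserts, not something added:

    import tensorflow as tf

    opt = tf.keras.optimizers.Adam(learning_rate=1.0, beta_1=0.5, beta_2=0.9)
    lso = tf.keras.mixed_precision.LossScaleOptimizer(opt)

    # Reads go through to the wrapped optimizer's hyperparameter variable.
    print(float(lso.beta_1))   # 0.5

    # Writes are delegated as well, so both views stay in sync.
    lso.beta_1 = 0.25
    print(float(opt.beta_1))   # 0.25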
|
/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | adam.py |
    109   beta_1=0.9, argument
    118   self._set_hyper('beta_1', beta_1)
    333   beta_1=0.9, argument
    370   self._set_hyper('beta_1', beta_1)
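In optimizer_v2/adam.py the constructor registers beta_1 through _set_hyper, which is what later exposes it as an attribute and serializes it in the config. A short sketch against the public API (values illustrative; exact attribute/config behaviour is my reading of the registered hyperparameter mechanism):

    import tensorflow as tf

    opt = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.8, beta_2=0.95)

    # The registered hyperparameter is exposed as an attribute...
    print(float(opt.beta_1))            # 0.8
    # ...and round-trips through the config used for saving/cloning.
    config = opt.get_config()
    print(config['beta_1'])             # 0.8
    restored = tf.keras.optimizers.Adam.from_config(config)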
|
D | adam_test.py |
    370   beta_1 = 0.9
    377   beta_1=beta_1,
    418   beta_1 = 0.9
    424   beta_1=beta_1,
    826   beta_1 = 0.9
    833   beta_1=beta_1,
    874   beta_1 = 0.9
    880   beta_1=beta_1,
|
D | adamax.py |
    94    beta_1=0.9, argument
    102   self._set_hyper('beta_1', beta_1)
|
D | nadam.py |
    69    beta_1=0.9, argument
    85    self._set_hyper('beta_1', beta_1)
|
/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.tpu.experimental.embedding.-adam.pbtxt | 8 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'lazy_adam\', \…
|
D | tensorflow.optimizers.-adamax.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'name\'], varar…
|
D | tensorflow.optimizers.-nadam.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'name\'], varar…
|
D | tensorflow.optimizers.-adam.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'amsgrad\', \'n…
|
D | tensorflow.keras.optimizers.-adam.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'amsgrad\', \'n…
|
D | tensorflow.keras.optimizers.-adamax.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'name\'], varar…
|
D | tensorflow.keras.optimizers.-nadam.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'name\'], varar…
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.tpu.experimental.embedding.-adam.pbtxt | 8 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'lazy_adam\', \…
|
D | tensorflow.keras.optimizers.-adam.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'amsgrad\', \'n…
|
D | tensorflow.keras.optimizers.-nadam.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'name\'], varar…
|
D | tensorflow.keras.optimizers.-adamax.pbtxt | 29 …argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'name\'], varar…
|
/external/tensorflow/tensorflow/python/keras/tests/ |
D | tracking_util_test.py |
    640   self.evaluate(optimizer.beta_1.assign(42.))
    671   optimizer = adam.Adam(0.001, beta_1=1.0)
    692   self.assertEqual(42., self.evaluate(optimizer.beta_1))
    786   self.evaluate(optimizer.beta_1.assign(3.))
    795   self.evaluate(root_trackable.optimizer.beta_1.assign(103.))
    804   self.evaluate(root_trackable.optimizer.beta_1))
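The tracking tests above assert that an assigned beta_1 value survives a checkpoint round trip. A hedged eager-mode sketch of the same pattern with tf.train.Checkpoint (the path is illustrative, and touching the hyperparameter before restore is my assumption about when its variable is created):

    import tensorflow as tf

    opt = tf.keras.optimizers.Adam(0.001)
    opt.beta_1.assign(42.)                       # mutate the hyperparameter variable
    path = tf.train.Checkpoint(optimizer=opt).save('/tmp/beta_1_ckpt')  # illustrative path

    restored_opt = tf.keras.optimizers.Adam(0.001)
    _ = restored_opt.beta_1                      # touch the hyper so its variable exists
    tf.train.Checkpoint(optimizer=restored_opt).restore(path)
    print(float(restored_opt.beta_1))            # 42.0 after the restore is matched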
|
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | keras_optimizer_v2_test.py | 59 optimizer = adam.Adam(learning_rate=0.01, beta_1=0.2, beta_2=0.2)
|
/external/speex/libspeexdsp/ |
D | preprocess.c |
    732   spx_word16_t beta, beta_1;  in speex_preprocess_run() local
    741   beta_1 = Q15_ONE-beta;  in speex_preprocess_run()
    778   …st->noise[i] = MAX32(EXTEND32(0),MULT16_32_Q15(beta_1,st->noise[i]) + MULT16_32_Q15(beta,SHL32(st-…  in speex_preprocess_run()
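In preprocess.c, beta_1 is simply Q15_ONE - beta, and the pair implements an exponential moving average of the per-bin noise estimate in Q15 fixed point. The same recursion in floating-point Python (a sketch only; the real code also clamps at zero with MAX32 and applies the SHL32/Q15 scaling):

    def update_noise_estimate(noise, power_spectrum, beta):
        # beta_1 = 1 - beta, as in `beta_1 = Q15_ONE - beta;`
        beta_1 = 1.0 - beta
        # noise[i] = max(0, beta_1 * noise[i] + beta * power_spectrum[i])
        return [max(0.0, beta_1 * n + beta * p)
                for n, p in zip(noise, power_spectrum)]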
|