/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/ |
D | decompose_resource_ops.td | 243 // alpha <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) 245 // v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t 251 $beta1, $beta2, $epsilon, $grad, BoolAttr:$_, 267 (TF_MulOp $beta2, (CreateTFReadVariableOp $src_op, $grad, $v_resource)), 269 (TF_SubOp $one, $beta2), 288 // alpha <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) 290 // v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t 297 $beta1, $beta2, $epsilon, $grad, BoolAttr:$_, 313 (TF_MulOp $beta2, (CreateTFReadVariableOp $src_op, $grad, $v_resource)), 315 (TF_SubOp $one, $beta2),
|
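Note: the decompose_resource_ops.td comments above spell out the Adam step that the pattern rewrites ResourceApplyAdam into. A minimal NumPy sketch of one such step, consistent with those comment lines and with the NumPy check in adam_test.py further down (adam_step and its default arguments are illustrative, not names from the pattern):

    import numpy as np

    def adam_step(var, m, v, g, t, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
        # alpha <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
        alpha = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
        # m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
        m = beta1 * m + (1 - beta1) * g
        # v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
        v = beta2 * v + (1 - beta2) * g * g
        # variable <- variable - alpha * m_t / (sqrt(v_t) + epsilon)
        var = var - alpha * m / (np.sqrt(v) + epsilon)
        return var, m, v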
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/ |
D | ApplyAdam.pbtxt | 35 name: "beta2" 115 name: "beta2" 202 name: "beta2" 291 name: "beta2" 381 name: "beta2"
|
D | ResourceApplyAdam.pbtxt | 32 name: "beta2" 105 name: "beta2" 185 name: "beta2" 267 name: "beta2" 350 name: "beta2"
|
D | ResourceApplyAdaMax.pbtxt | 28 name: "beta2"
|
D | ResourceApplyAdamWithAmsgrad.pbtxt | 36 name: "beta2"
|
D | ApplyAdaMax.pbtxt | 31 name: "beta2"
|
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/ |
D | ApplyAdam.pbtxt | 35 name: "beta2" 115 name: "beta2" 202 name: "beta2" 291 name: "beta2" 381 name: "beta2"
|
D | ResourceApplyAdam.pbtxt | 32 name: "beta2" 105 name: "beta2" 185 name: "beta2" 267 name: "beta2" 350 name: "beta2"
|
D | ResourceApplyAdaMax.pbtxt | 28 name: "beta2"
|
D | ApplyAdaMax.pbtxt | 31 name: "beta2"
|
D | ResourceApplyAdamWithAmsgrad.pbtxt | 36 name: "beta2"
|
/external/tensorflow/tensorflow/python/training/ |
D | adam.py | 109 beta2=0.999, argument 168 self._beta2 = beta2 205 beta2 = self._call_if_callable(self._beta2) 210 self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
|
D | adam_test.py | 42 beta2=0.999, argument 44 alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t) 47 v_t = beta2 * v + (1 - beta2) * g_t * g_t 182 beta2 = lambda: 0.999 function 187 beta2 = beta2()
|
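As the _call_if_callable lines in adam.py and the beta2 = lambda: 0.999 test above suggest, the v1 AdamOptimizer accepts beta2 either as a float or as a zero-argument callable that it resolves before convert_to_tensor. A small usage sketch, assuming the tf.compat.v1 import path:

    import tensorflow.compat.v1 as tf

    # adam.py runs beta1/beta2 through _call_if_callable before converting
    # them to tensors, so both forms below are accepted.
    opt_float = tf.train.AdamOptimizer(learning_rate=0.001, beta2=0.999)
    opt_callable = tf.train.AdamOptimizer(learning_rate=0.001, beta2=lambda: 0.999)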
D | training_ops_test.py | 422 beta2 = np.array(0.999, dtype=var.dtype) 424 beta2_power = beta2**t 428 beta2_t = constant_op.constant(beta2, self._toType(var.dtype), []) 437 beta2, epsilon) 445 def _adamUpdateNumpy(self, param, g_t, t, m, v, alpha, beta1, beta2, epsilon): argument 446 alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t) 449 v_t = beta2 * v + (1 - beta2) * g_t * g_t
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | adam_test.py | 37 beta2=0.999, argument 39 alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t) 42 v_t = beta2 * v + (1 - beta2) * g_t * g_t
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu_embedding.py | 490 beta2: float = 1, 542 self.beta2 = beta2 643 beta2: float = 0.999, 695 if beta2 < 0. or beta2 >= 1.: 696 raise ValueError('beta2 must be between 0. and 1; got {}.'.format(beta2)) 704 self.beta2 = beta2 844 beta2: float = 0.999, 899 if beta2 < 0. or beta2 >= 1.: 900 raise ValueError('beta2 must be between 0. and 1; got {}.'.format(beta2)) 911 self.beta2 = beta2 [all …]
|
D | tpu_embedding_v2_utils.py | 442 beta2: float = 1, 493 self.beta2 = beta2 509 parameters.adagrad_momentum.beta2 = self.beta2 817 parameters.adam.beta2 = self.beta_2
|
/external/speex/libspeexdsp/ |
D | scal.c | 156 float beta, beta2; in speex_decorrelate() local 186 beta2 = beta; in speex_decorrelate() 205 if (max_alpha > .98/(1.+beta2)) in speex_decorrelate() 206 max_alpha = .98/(1.+beta2); in speex_decorrelate()
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_ApplyAdaMax.pbtxt | 41 name: "beta2" 75 v_t <- max(beta2 * v_{t-1}, abs(g))
|
D | api_def_ResourceApplyAdaMax.pbtxt | 41 name: "beta2" 69 v_t <- max(beta2 * v_{t-1}, abs(g))
|
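The AdaMax op docs above differ from Adam only in the second moment: v_t keeps an exponentially weighted infinity norm (a running max) instead of a mean of squared gradients. A minimal NumPy sketch of the update those api_def files describe (adamax_step and its defaults are illustrative):

    import numpy as np

    def adamax_step(var, m, v, g, t, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
        # m_t <- beta1 * m_{t-1} + (1 - beta1) * g
        m = beta1 * m + (1 - beta1) * g
        # v_t <- max(beta2 * v_{t-1}, abs(g))
        v = np.maximum(beta2 * v, np.abs(g))
        # variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
        var = var - lr / (1 - beta1**t) * m / (v + epsilon)
        return var, m, v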
/external/tensorflow/tensorflow/core/protobuf/tpu/ |

D | optimization_parameters.proto | 91 // accum(new) = beta2 == 1.0 ? 93 // beta2 * accum(old) + (1 - beta2) * grad^2 112 float beta2 = 4; field 175 // learning rate to: user learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) 195 float beta2 = 4; field 284 // user learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) 299 float beta2 = 3; field 308 // user learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) 326 float beta2 = 4; field
|
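In optimization_parameters.proto, beta2 also serves as the accumulator decay for the Adagrad-with-momentum table optimizer: beta2 == 1.0 presumably keeps the classic Adagrad running sum (the elided branch of that comment), while any other value turns the accumulator into an exponential moving average, which is the branch tpu_embedding_base_test.py reproduces below. A short sketch of that rule (function name is illustrative):

    def update_accumulator(accum, grad, beta2):
        # beta2 == 1.0: plain Adagrad-style sum of squared gradients
        # (assumed from the elided line of the proto comment).
        if beta2 == 1.0:
            return accum + grad * grad
        # Otherwise an exponential moving average, as in the matched lines:
        # accum(new) = beta2 * accum(old) + (1 - beta2) * grad^2
        return beta2 * accum + (1 - beta2) * grad * grad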
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.tpu.experimental.-adam-parameters.pbtxt | 8 …argspec: "args=[\'self\', \'learning_rate\', \'beta1\', \'beta2\', \'epsilon\', \'lazy_adam\', \'s…
|
D | tensorflow.tpu.experimental.embedding.-adagrad-momentum.pbtxt | 8 …elf\', \'learning_rate\', \'momentum\', \'use_nesterov\', \'exponent\', \'beta2\', \'epsilon\', \'…
|
/external/tensorflow/tensorflow/tools/api/golden/v2/ |
D | tensorflow.tpu.experimental.embedding.-adagrad-momentum.pbtxt | 8 …elf\', \'learning_rate\', \'momentum\', \'use_nesterov\', \'exponent\', \'beta2\', \'epsilon\', \'…
|
/external/tensorflow/tensorflow/python/tpu/tests/ |
D | tpu_embedding_base_test.py | 187 beta2=0.9) 509 if optimizer.beta2 == 1.0: 512 accumulator = optimizer.beta2 * accumulator + ( 513 1 - optimizer.beta2) * gradients**2
|