/external/tensorflow/tensorflow/python/keras/optimizer_v2/ (lines referencing apply_state, by file):

adagrad.py
     86  def _prepare_local(self, var_device, var_dtype, apply_state):
     87    super(Adagrad, self)._prepare_local(var_device, var_dtype, apply_state)
     88    apply_state[(var_device, var_dtype)].update(
     92        neg_lr_t=-apply_state[(var_device, var_dtype)]['lr_t'],
    127  def _resource_apply_dense(self, grad, var, apply_state=None):
    129    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    141  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    143    coefficients = ((apply_state or {}).get((var_device, var_dtype))
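
All of the _resource_apply_dense/_resource_apply_sparse entries in this listing share the same lookup idiom: pull the coefficients that _prepare_local precomputed for this variable's (device, dtype) pair out of apply_state if the caller passed one in, otherwise recompute them on the spot. The second half of each truncated "coefficients = ..." line is not shown; assuming it falls back to the base class's _fallback_apply_state helper, as in upstream TensorFlow, the idiom reads roughly:

    def _resource_apply_dense(self, grad, var, apply_state=None):
      var_device, var_dtype = var.device, var.dtype.base_dtype
      # Reuse the coefficients computed once per (device, dtype) in _prepare_local();
      # only recompute when apply_state was not threaded through (assumed fallback).
      coefficients = ((apply_state or {}).get((var_device, var_dtype))
                      or self._fallback_apply_state(var_device, var_dtype))
      # ... per-optimizer update using coefficients['lr_t'], coefficients['neg_lr_t'], etc.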

adam.py
    134  def _prepare_local(self, var_device, var_dtype, apply_state):
    135    super(Adam, self)._prepare_local(var_device, var_dtype, apply_state)
    142    lr = (apply_state[(var_device, var_dtype)]['lr_t'] *
    144    apply_state[(var_device, var_dtype)].update(
    166  def _resource_apply_dense(self, grad, var, apply_state=None):
    168    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    203  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    205    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    386  def _prepare_local(self, var_device, var_dtype, apply_state):
    387    super(NonFusedAdam, self)._prepare_local(var_device, var_dtype, apply_state)
    [all …]

adamax.py
    113  def _prepare_local(self, var_device, var_dtype, apply_state):
    114    super(Adamax, self)._prepare_local(var_device, var_dtype, apply_state)
    120    lr_t = apply_state[(var_device, var_dtype)]['lr_t']
    122    apply_state[(var_device, var_dtype)].update(
    133  def _resource_apply_dense(self, grad, var, apply_state=None):
    135    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    152  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    154    coefficients = ((apply_state or {}).get((var_device, var_dtype))

adadelta.py
    100  def _prepare_local(self, var_device, var_dtype, apply_state):
    101    super(Adadelta, self)._prepare_local(var_device, var_dtype, apply_state)
    102    apply_state[(var_device, var_dtype)].update(
    117  def _resource_apply_dense(self, grad, var, apply_state=None):
    119    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    134  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    136    coefficients = ((apply_state or {}).get((var_device, var_dtype))

gradient_descent.py
    127  def _prepare_local(self, var_device, var_dtype, apply_state):
    128    super(SGD, self)._prepare_local(var_device, var_dtype, apply_state)
    129    apply_state[(var_device, var_dtype)]["momentum"] = array_ops.identity(
    132  def _resource_apply_dense(self, grad, var, apply_state=None):
    134    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    169  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    172    coefficients = ((apply_state or {}).get((var_device, var_dtype))
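
gradient_descent.py's _prepare_local (line 129 above) stashes the momentum hyperparameter into apply_state as a tensor so every subsequent apply call for that (device, dtype) pair reuses one snapshot of it. A sketch of what the truncated line likely continues into, reconstructed under the assumption that the value comes from the base class's _get_hyper accessor:

    def _prepare_local(self, var_device, var_dtype, apply_state):
      super(SGD, self)._prepare_local(var_device, var_dtype, apply_state)
      # Snapshot the momentum hyperparameter once per (device, dtype) pair;
      # array_ops.identity pins the current value for this apply step (assumed continuation).
      apply_state[(var_device, var_dtype)]["momentum"] = array_ops.identity(
          self._get_hyper("momentum", var_dtype))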

ftrl.py
    139  def _prepare_local(self, var_device, var_dtype, apply_state):
    140    super(Ftrl, self)._prepare_local(var_device, var_dtype, apply_state)
    141    apply_state[(var_device, var_dtype)].update(
    153  def _resource_apply_dense(self, grad, var, apply_state=None):
    155    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    191  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    193    coefficients = ((apply_state or {}).get((var_device, var_dtype))

rmsprop.py
    163  def _prepare_local(self, var_device, var_dtype, apply_state):
    164    super(RMSprop, self)._prepare_local(var_device, var_dtype, apply_state)
    167    apply_state[(var_device, var_dtype)].update(
    169        neg_lr_t=-apply_state[(var_device, var_dtype)]["lr_t"],
    176  def _resource_apply_dense(self, grad, var, apply_state=None):
    178    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    222  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    224    coefficients = ((apply_state or {}).get((var_device, var_dtype))

optimizer_v2.py
    663    apply_state = self._prepare(var_list)
    670        functools.partial(self._distributed_apply, apply_state=apply_state),
    676  def _distributed_apply(self, distribution, grads_and_vars, name, apply_state):
    690      apply_kwargs["apply_state"] = apply_state
    695      apply_kwargs["apply_state"] = apply_state
    937    apply_state = {}
    939      apply_state[(var_device, var_dtype)] = {}
    941      self._prepare_local(var_device, var_dtype, apply_state)
    943    return apply_state
    945  def _prepare_local(self, var_device, var_dtype, apply_state):
    [all …]
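
Lines 937-943 of optimizer_v2.py show the base-class side of the contract: _prepare builds an empty dict, adds one sub-dict per unique (var_device, var_dtype) pair, lets _prepare_local fill it, and returns it; apply_gradients (line 663) then threads it into every _resource_apply_* call via _distributed_apply (lines 676-695). A minimal sketch of that loop, with the iteration over var_list filled in as an assumption:

    def _prepare(self, var_list):
      keys = set()
      for var in var_list:
        # One coefficient sub-dict per unique (device, dtype) combination.
        keys.add((var.device, var.dtype.base_dtype))
      apply_state = {}
      for var_device, var_dtype in keys:
        apply_state[(var_device, var_dtype)] = {}
        self._prepare_local(var_device, var_dtype, apply_state)
      return apply_state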

nadam.py
    109  def _prepare_local(self, var_device, var_dtype, apply_state):
    129    apply_state[(var_device, var_dtype)] = dict(
    150  def _resource_apply_dense(self, grad, var, apply_state=None):
    152    coefficients = ((apply_state or {}).get((var_device, var_dtype))
    173  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    175    coefficients = ((apply_state or {}).get((var_device, var_dtype))
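
Taken together, the three hooks seen in every file above (_prepare_local, _resource_apply_dense, _resource_apply_sparse) are the whole apply_state contract a custom optimizer has to satisfy. A hedged end-to-end sketch against the legacy optimizer_v2 base class (tf.keras.optimizers.Optimizer before TF 2.11, tf.keras.optimizers.legacy.Optimizer afterwards); ScaledSGD and its scale hyperparameter are invented for illustration, and _set_hyper, _fallback_apply_state, _resource_scatter_add and _serialize_hyperparameter are assumed to behave as in upstream TensorFlow:

    import tensorflow as tf

    class ScaledSGD(tf.keras.optimizers.Optimizer):
      """Toy optimizer: plain gradient descent with an extra constant scale."""

      def __init__(self, learning_rate=0.01, scale=1.0, name="ScaledSGD", **kwargs):
        super(ScaledSGD, self).__init__(name, **kwargs)
        self._set_hyper("learning_rate", learning_rate)
        self.scale = scale

      def _prepare_local(self, var_device, var_dtype, apply_state):
        # Base class fills in lr_t; add one precomputed coefficient of our own.
        super(ScaledSGD, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["scaled_lr"] = (
            apply_state[(var_device, var_dtype)]["lr_t"] * self.scale)

      def _resource_apply_dense(self, grad, var, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        return var.assign_sub(coefficients["scaled_lr"] * grad)

      def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = ((apply_state or {}).get((var_device, var_dtype))
                        or self._fallback_apply_state(var_device, var_dtype))
        # Subtract the scaled gradient only at the rows named by indices.
        return self._resource_scatter_add(
            var, indices, -coefficients["scaled_lr"] * grad)

      def get_config(self):
        config = super(ScaledSGD, self).get_config()
        config.update({
            "learning_rate": self._serialize_hyperparameter("learning_rate"),
            "scale": self.scale,
        })
        return config

Usage would be the same as for any Keras optimizer, e.g. model.compile(optimizer=ScaledSGD(0.01, scale=0.5), loss="mse"); apply_state itself never appears in user code, it is built and threaded through internally as shown in the optimizer_v2.py entry above.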