
Searched refs: use_locking (Results 1 – 25 of 162), sorted by relevance


/external/tensorflow/tensorflow/python/ops/
state_ops.py
137 def assign_sub(ref, value, use_locking=None, name=None):
161 ref, value, use_locking=use_locking, name=name)
166 def assign_add(ref, value, use_locking=None, name=None):
190 ref, value, use_locking=use_locking, name=name)
195 def assign(ref, value, validate_shape=None, use_locking=None, name=None):
222 ref, value, use_locking=use_locking, name=name,
252 def scatter_update(ref, indices, updates, use_locking=True, name=None):
299 use_locking=use_locking, name=name)
306 def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
361 ref, indices, updates, use_locking, name)
[all …]
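
Note: these are the low-level TF1 state ops behind tf.compat.v1.assign and friends. A minimal sketch of passing use_locking through them (assumes TensorFlow 1.x via tf.compat.v1; the variable v and its values are illustrative):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    v = tf.get_variable("v", shape=[4], initializer=tf.zeros_initializer())
    # use_locking=True serializes concurrent updates to the same variable;
    # without it, simultaneous assigns may interleave.
    update = tf.assign_add(v, tf.ones([4]), use_locking=True)
    scatter = tf.scatter_update(v, indices=[0, 2], updates=[5.0, 7.0],
                                use_locking=True)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run([update, scatter])
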
variables.py
542 def assign(self, value, use_locking=False, name=None, read_value=True):
560 def assign_add(self, delta, use_locking=False, name=None, read_value=True):
578 def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
596 def scatter_sub(self, sparse_delta, use_locking=False, name=None):
613 def scatter_add(self, sparse_delta, use_locking=False, name=None):
630 def scatter_update(self, sparse_delta, use_locking=False, name=None):
647 def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
1770 def assign(self, value, use_locking=False, name=None, read_value=True):
1786 assign = state_ops.assign(self._variable, value, use_locking=use_locking,
1792 def assign_add(self, delta, use_locking=False, name=None, read_value=True):
[all …]
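
Note: tf.Variable exposes the same flag on its mutation methods and forwards it to the state ops above. A hedged usage sketch (TF 1.x graph mode assumed; values are illustrative):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    v = tf.Variable([1.0, 2.0, 3.0])
    a = v.assign([4.0, 5.0, 6.0], use_locking=True)
    b = v.assign_add([1.0, 1.0, 1.0], use_locking=True)
    # scatter_update takes an IndexedSlices describing which entries change.
    c = v.scatter_update(
        tf.IndexedSlices(values=tf.constant([9.0]), indices=tf.constant([0])),
        use_locking=True)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run([a, b, c])
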
/external/tensorflow/tensorflow/python/keras/mixed_precision/experimental/
autocast_variable.py
116 def assign(self, value, use_locking=None, name=None, read_value=True):
118 value, use_locking=use_locking, name=name, read_value=read_value)
120 def assign_add(self, delta, use_locking=None, name=None, read_value=True):
122 delta, use_locking=use_locking, name=name, read_value=read_value)
124 def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
126 delta, use_locking=use_locking, name=name, read_value=read_value)
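
Note: AutoCastVariable does not implement locking itself; each method forwards use_locking to the wrapped variable. A minimal sketch of that delegation pattern (illustrative class, not the actual implementation):

    class DelegatingVariable(object):
        """Forwards assignments, including use_locking, to a wrapped variable."""

        def __init__(self, variable):
            self._variable = variable  # the underlying tf.Variable

        def assign(self, value, use_locking=None, name=None, read_value=True):
            return self._variable.assign(
                value, use_locking=use_locking, name=name, read_value=read_value)

        def assign_add(self, delta, use_locking=None, name=None, read_value=True):
            return self._variable.assign_add(
                delta, use_locking=use_locking, name=name, read_value=read_value)
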
/external/tensorflow/tensorflow/contrib/opt/python/training/
reg_adagrad_optimizer.py
49 use_locking=False,
54 use_locking=use_locking,
74 use_locking=self._use_locking,
84 use_locking=self._use_locking,
95 use_locking=self._use_locking,
106 use_locking=self._use_locking,
weight_decay_optimizers.py
305 use_locking=False, name="MomentumW", use_nesterov=False):
333 use_locking=use_locking, name=name, use_nesterov=use_nesterov)
360 epsilon=1e-8, use_locking=False, name="AdamW"):
381 epsilon=epsilon, use_locking=use_locking, name=name)
406 use_locking=False,
451 use_locking=use_locking,
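
Note: the contrib weight-decay optimizers accept use_locking in their constructors and pass it along to the base optimizer. A hedged sketch (assumes a TF 1.x build with tf.contrib available; hyperparameter values are illustrative):

    import tensorflow as tf  # TF 1.x assumed

    opt = tf.contrib.opt.AdamWOptimizer(
        weight_decay=1e-4,
        learning_rate=0.001,
        use_locking=True,  # lock variables while applying updates
        name="AdamW")
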
adam_gs_optimizer.py
46 use_locking=False,
105 super(AdamGSOptimizer, self).__init__(use_locking, name)
160 use_locking=self._use_locking).op
177 use_locking=self._use_locking)
191 m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
197 v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
202 var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
214 use_locking=self._use_locking))
adamax.py
41 use_locking=False, name="AdaMax"):
87 epsilon, use_locking, name)
122 grad, use_locking=self._use_locking).op
135 grad, use_locking=self._use_locking)
168 x, i, v, use_locking=self._use_locking),
170 x, i, v, use_locking=self._use_locking))
189 beta1_power * self._beta1_t, use_locking=self._use_locking)
/external/tensorflow/tensorflow/contrib/optimizer_v2/
rmsprop.py
63 use_locking=False,
100 super(RMSPropOptimizer, self).__init__(use_locking, name)
135 use_locking=self._use_locking).op
146 use_locking=self._use_locking).op
163 use_locking=self._use_locking)
174 use_locking=self._use_locking)
192 use_locking=self._use_locking)
204 use_locking=self._use_locking)
222 use_locking=self._use_locking)
234 use_locking=self._use_locking)
adam.py
38 use_locking=False, name="Adam"):
90 super(AdamOptimizer, self).__init__(use_locking, name)
130 use_locking=self._use_locking).op
147 use_locking=self._use_locking)
161 m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
167 v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
172 var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
179 x, i, v, use_locking=self._use_locking),
195 beta1_power * state.get_hyper("beta1"), use_locking=self._use_locking)
197 beta2_power * state.get_hyper("beta2"), use_locking=self._use_locking)
momentum.py
43 use_locking=False, name="Momentum", use_nesterov=False):
71 super(MomentumOptimizer, self).__init__(use_locking, name)
88 use_locking=self._use_locking,
99 use_locking=self._use_locking,
111 use_locking=self._use_locking,
123 use_locking=self._use_locking,
adagrad.py
38 use_locking=False, name="Adagrad"):
60 super(AdagradOptimizer, self).__init__(use_locking, name)
90 use_locking=self._use_locking)
99 use_locking=self._use_locking)
109 use_locking=self._use_locking)
119 use_locking=self._use_locking)
adadelta.py
33 use_locking=False, name="Adadelta"):
51 super(AdadeltaOptimizer, self).__init__(use_locking, name)
72 use_locking=self._use_locking)
85 use_locking=self._use_locking)
99 use_locking=self._use_locking)
113 use_locking=self._use_locking)
gradient_descent.py
30 def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
44 super(GradientDescentOptimizer, self).__init__(use_locking, name)
52 use_locking=self._use_locking).op
57 handle.handle, lr, grad, use_locking=self._use_locking)
69 return var.scatter_sub(delta, use_locking=self._use_locking)
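
Note: every optimizer_v2 variant above follows the same pattern: use_locking is accepted in __init__, stored as self._use_locking, and threaded into each training op the optimizer emits. A hedged construction example (the contrib import path is an assumption based on the file locations above):

    from tensorflow.contrib.optimizer_v2 import momentum

    opt = momentum.MomentumOptimizer(
        learning_rate=0.01,
        momentum=0.9,
        use_locking=True,   # becomes self._use_locking
        use_nesterov=False)
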
/external/tensorflow/tensorflow/python/training/
proximal_gradient_descent.py
39 l2_regularization_strength=0.0, use_locking=False,
54 super(ProximalGradientDescentOptimizer, self).__init__(use_locking, name)
68 use_locking=self._use_locking).op
77 use_locking=self._use_locking)
87 use_locking=self._use_locking).op
97 use_locking=self._use_locking)
rmsprop.py
66 use_locking=False,
103 super(RMSPropOptimizer, self).__init__(use_locking, name)
155 use_locking=self._use_locking).op
166 use_locking=self._use_locking).op
183 use_locking=self._use_locking)
194 use_locking=self._use_locking)
212 use_locking=self._use_locking)
224 use_locking=self._use_locking)
242 use_locking=self._use_locking)
254 use_locking=self._use_locking)
ftrl.py
45 use_locking=False,
88 super(FtrlOptimizer, self).__init__(use_locking, name)
162 use_locking=self._use_locking)
177 use_locking=self._use_locking)
194 use_locking=self._use_locking)
209 use_locking=self._use_locking)
227 use_locking=self._use_locking)
243 use_locking=self._use_locking)
259 use_locking=self._use_locking)
273 use_locking=self._use_locking)
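
Note: FtrlOptimizer threads self._use_locking into all eight of its apply paths above. A hedged constructor sketch (TF 1.x assumed; regularization values are illustrative):

    import tensorflow.compat.v1 as tf

    opt = tf.train.FtrlOptimizer(
        learning_rate=0.05,
        l1_regularization_strength=0.001,
        use_locking=True)
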
adam.py
44 use_locking=False,
97 super(AdamOptimizer, self).__init__(use_locking, name)
160 use_locking=self._use_locking).op
177 use_locking=self._use_locking)
191 m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
197 v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
202 var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
214 use_locking=self._use_locking))
231 beta1_power * self._beta1_t, use_locking=self._use_locking)
233 beta2_power * self._beta2_t, use_locking=self._use_locking)
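
Note: the adam.py snippets above are the Adam moment updates, each applied via a locked (or unlocked) assign. Written out, with the bias correction folded into the step size as in the adam.py docstring:

    \begin{aligned}
      \mathrm{lr}_t &= \eta \cdot \sqrt{1 - \beta_2^t} \,/\, (1 - \beta_1^t) \\
      m_t &= \beta_1\, m_{t-1} + (1 - \beta_1)\, g_t \\
      v_t &= \beta_2\, v_{t-1} + (1 - \beta_2)\, g_t^2 \\
      \theta_t &= \theta_{t-1} - \mathrm{lr}_t \cdot m_t / (\sqrt{v_t} + \epsilon)
    \end{aligned}
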
proximal_adagrad.py
39 use_locking=False, name="ProximalAdagrad"):
60 super(ProximalAdagradOptimizer, self).__init__(use_locking, name)
94 grad, use_locking=self._use_locking)
102 grad, use_locking=self._use_locking)
111 use_locking=self._use_locking)
121 use_locking=self._use_locking)
gradient_descent.py
34 def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
51 super(GradientDescentOptimizer, self).__init__(use_locking, name)
60 use_locking=self._use_locking).op
66 grad, use_locking=self._use_locking)
77 return var.scatter_sub(delta, use_locking=self._use_locking)
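
Note: GradientDescentOptimizer is the simplest case: one ApplyGradientDescent op per variable, locked if use_locking is set. A minimal end-to-end sketch (TF 1.x graph mode; the toy loss is illustrative):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    v = tf.Variable(5.0)
    loss = tf.square(v)
    opt = tf.train.GradientDescentOptimizer(learning_rate=0.1, use_locking=True)
    train_op = opt.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)  # v <- v - 0.1 * dloss/dv, under lock
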
momentum.py
47 use_locking=False, name="Momentum", use_nesterov=False):
75 super(MomentumOptimizer, self).__init__(use_locking, name)
102 use_locking=self._use_locking,
112 use_locking=self._use_locking,
122 use_locking=self._use_locking,
132 use_locking=self._use_locking,
adagrad.py
41 use_locking=False, name="Adagrad"):
65 super(AdagradOptimizer, self).__init__(use_locking, name)
103 use_locking=self._use_locking)
112 use_locking=self._use_locking)
122 use_locking=self._use_locking)
132 use_locking=self._use_locking)
adadelta.py
37 use_locking=False, name="Adadelta"):
57 super(AdadeltaOptimizer, self).__init__(use_locking, name)
92 use_locking=self._use_locking)
105 use_locking=self._use_locking)
119 use_locking=self._use_locking)
133 use_locking=self._use_locking)
/external/tensorflow/tensorflow/python/keras/optimizer_v2/
rmsprop.py
154 use_locking=self._use_locking)
165 use_locking=self._use_locking)
168 rms_t = state_ops.assign(rms, rms_t, use_locking=self._use_locking)
173 mg_t = state_ops.assign(mg, mg_t, use_locking=self._use_locking)
176 return state_ops.assign(var, var_t, use_locking=self._use_locking).op
200 use_locking=self._use_locking)
212 use_locking=self._use_locking)
215 rms_t = state_ops.assign(rms, rms * rho, use_locking=self._use_locking)
223 mg_t = state_ops.assign(mg, mg * rho, use_locking=self._use_locking)
adam.py
187 use_locking=self._use_locking)
202 use_locking=self._use_locking)
218 m_t = state_ops.assign(m, m * beta_1_t, use_locking=self._use_locking)
225 v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking)
232 var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
239 v_hat, v_hat_t, use_locking=self._use_locking)
244 use_locking=self._use_locking)
/external/tensorflow/tensorflow/tools/api/golden/v1/
tensorflow.-variable.pbtxt
57 …argspec: "args=[\'self\', \'value\', \'use_locking\', \'name\', \'read_value\'], varargs=None, key…
61 …argspec: "args=[\'self\', \'delta\', \'use_locking\', \'name\', \'read_value\'], varargs=None, key…
65 …argspec: "args=[\'self\', \'delta\', \'use_locking\', \'name\', \'read_value\'], varargs=None, key…
69 …argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=Non…
101 …argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=Non…
117 …argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=Non…
121 …argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=Non…
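
Note: these golden files pin the public v1 argspecs, so use_locking is part of the stable tf.Variable API surface. A hedged way to reproduce such an entry locally (inspect may report a decorated wrapper on some builds):

    import inspect
    import tensorflow.compat.v1 as tf

    # Should match the golden entry:
    # args=['self', 'value', 'use_locking', 'name', 'read_value']
    print(inspect.getfullargspec(tf.Variable.assign).args)
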
