/external/tensorflow/tensorflow/python/kernel_tests/ |
D | scatter_nd_ops_test.py |
    54   def _NumpyScatterNd(ref, indices, updates, op): argument
    62   flat_updates = updates.reshape((num_updates, slice_size))
    71   def _NumpyUpdate(ref, indices, updates): argument
    72   return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
    75   def _NumpyAdd(ref, indices, updates): argument
    76   return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
    79   def _NumpySub(ref, indices, updates): argument
    80   return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
    83   def _NumpyMul(ref, indices, updates): argument
    84   return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
    [all …]
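The `_Numpy*` helpers above are the pure-NumPy reference that this test compares the TensorFlow scatter_nd ops against: the last axis of `indices` addresses a slice of `ref`, and the lambda decides how the matching slice of `updates` is folded in. A minimal sketch of that idea, with illustrative names rather than the file's exact code:

    import numpy as np

    def numpy_scatter_nd(ref, indices, updates, op):
        # Each row of `indices` (taken along its last axis) addresses one slice of
        # `ref`; `op(old, new)` decides how the matching `updates` slice is combined.
        out = ref.copy()
        ix_depth = indices.shape[-1]                    # leading dims of `ref` an index addresses
        num_updates = int(np.prod(indices.shape[:-1]))  # number of individual updates
        slice_shape = out.shape[ix_depth:]              # shape of each updated slice
        flat_indices = indices.reshape(num_updates, ix_depth)
        flat_updates = updates.reshape((num_updates,) + slice_shape)
        for i in range(num_updates):
            idx = tuple(flat_indices[i])
            out[idx] = op(out[idx], flat_updates[i])
        return out

    # The wrappers listed above then differ only in the combiner they pass in:
    numpy_update = lambda r, i, u: numpy_scatter_nd(r, i, u, lambda p, u: u)
    numpy_add    = lambda r, i, u: numpy_scatter_nd(r, i, u, lambda p, u: p + u)

Because this loop is sequential, the add/sub/mul variants simply accumulate when the same index appears more than once.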
|
D | scatter_ops_test.py |
    34   def _NumpyAdd(ref, indices, updates): argument
    38   ref[indx] += updates[i]
    41   def _NumpySub(ref, indices, updates): argument
    43   ref[indx] -= updates[i]
    46   def _NumpyMul(ref, indices, updates): argument
    48   ref[indx] *= updates[i]
    51   def _NumpyDiv(ref, indices, updates): argument
    53   ref[indx] /= updates[i]
    56   def _NumpyUpdate(ref, indices, updates): argument
    58   ref[indx] = updates[i]
    [all …]
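In this test the indices address whole rows of `ref` directly, so each reference helper is just a loop of the form `ref[indices[i]] op= updates[i]`. An equivalent NumPy sketch for the add case, using `np.add.at` as the accumulator (my choice for brevity, not the test's code):

    import numpy as np

    def numpy_scatter_add(ref, indices, updates):
        # ref[indices[i], ...] += updates[i, ...]; np.add.at accumulates correctly
        # even when the same index appears more than once.
        out = ref.copy()
        np.add.at(out, indices, updates)
        return out

    ref = np.zeros(6)
    print(numpy_scatter_add(ref, np.array([0, 2, 0]), np.array([1., 2., 3.])))
    # [4. 0. 2. 0. 0. 0.]  -- index 0 receives both 1. and 3.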
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | scatter_nd_op_test.py |
    49   def _NumpyScatterNd(ref, indices, updates, op): argument
    57   flat_updates = updates.reshape((num_updates, slice_size))
    66   def _NumpyUpdate(indices, updates, shape): argument
    67   ref = np.zeros(shape, dtype=updates.dtype)
    68   return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
    107  updates = _AsType(np.random.randn(*(updates_shape)), vtype)
    110  np_out = np_scatter(indices, updates, ref_shape)
    112  tf_out = tf_scatter(indices, updates, ref_shape)
    121  def _runScatterNd(self, indices, updates, shape): argument
    123  updates_placeholder = array_ops.placeholder(updates.dtype)
    [all …]
|
/external/tensorflow/tensorflow/contrib/tensor_forest/python/kernel_tests/ |
D | scatter_add_ndim_op_test.py |
    33   updates = [100., 200.]
    37   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
    46   updates = [100., 200.]
    50   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
    58   updates = []
    62   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
    69   updates = [100.]
    74   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
    81   updates = [[100., 200., 300.], [400., 500., 600.]]
    85   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | scatter_functor_gpu.cu.h |
    33   __global__ void ScatterOpCustomKernel(T* params, const T* updates, in ScatterOpCustomKernel() argument
    49   params[params_i] = ldg(updates + updates_i); in ScatterOpCustomKernel()
    53   CudaAtomicAdd(params + params_i, ldg(updates + updates_i)); in ScatterOpCustomKernel()
    57   CudaAtomicSub(params + params_i, ldg(updates + updates_i)); in ScatterOpCustomKernel()
    61   CudaAtomicMul(params + params_i, ldg(updates + updates_i)); in ScatterOpCustomKernel()
    65   CudaAtomicDiv(params + params_i, ldg(updates + updates_i)); in ScatterOpCustomKernel()
    78   typename TTypes<T>::ConstMatrix updates,
    85   const Index updates_size = updates.size();
    89   params.data(), updates.data(), indices.data(), first_dim_size,
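In this CUDA kernel each thread handles one element of the flattened `updates` matrix and computes the destination offset `params_i` in `params`; the `CudaAtomic*` variants exist because several threads may target the same destination when indices repeat. A hedged, sequential Python sketch of the index arithmetic (illustrative only, not the kernel's exact code):

    import numpy as np

    def scatter_add_flat(params, indices, updates):
        out = params.copy()                 # (first_dim, slice_size) destination matrix
        slice_size = out.shape[1]
        flat_out = out.reshape(-1)          # view onto `out` (it is contiguous after copy)
        flat_updates = np.asarray(updates).reshape(-1)
        for i in range(flat_updates.size):  # the kernel launches roughly one thread per element
            row = i // slice_size           # which update row this element belongs to
            offset = i % slice_size         # position inside that row's slice
            params_i = indices[row] * slice_size + offset
            flat_out[params_i] += flat_updates[i]   # CudaAtomicAdd in the GPU kernel
        return out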
|
D | scatter_op.cc |
    39   static bool ValidShapes(const Tensor& params, const Tensor& updates, in ValidShapes() argument
    41   if (updates.dims() != indices.dims() + params.dims() - 1) return false; in ValidShapes()
    43   if (updates.dim_size(d) != indices.dim_size(d)) { in ValidShapes()
    48   if (params.dim_size(d) != updates.dim_size(d - 1 + indices.dims())) { in ValidShapes()
    56   const Tensor& indices, const Tensor& updates) { in DoValidationChecking() argument
    63   c, ValidShapes(params, updates, indices), in DoValidationChecking()
    66   "updates.shape ", updates.shape().DebugString(), ", indices.shape ", in DoValidationChecking()
    99   const Tensor& updates = c->input(2); in DoCompute() local
    100  DoValidationChecking(c, params, indices, updates); in DoCompute()
    125  auto updates_flat = updates.shaped<T, 2>({N, updates.NumElements() / N}); in DoCompute()
    [all …]
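`ValidShapes` encodes the contract quoted in the op documentation further down, `updates.shape = indices.shape + ref.shape[1:]`: the rank of `updates` is the rank of `indices` plus the rank of `params` minus one, its leading dims match `indices` and its trailing dims match `params[1:]`. A direct Python rendering of the same check (illustrative names):

    def valid_scatter_shapes(params_shape, indices_shape, updates_shape):
        # updates.shape must equal indices.shape + params.shape[1:]
        if len(updates_shape) != len(indices_shape) + len(params_shape) - 1:
            return False
        for d, dim in enumerate(indices_shape):       # leading dims follow indices
            if updates_shape[d] != dim:
                return False
        for d in range(1, len(params_shape)):         # trailing dims follow params[1:]
            if updates_shape[d - 1 + len(indices_shape)] != params_shape[d]:
                return False
        return True

    print(valid_scatter_shapes((8, 3), (4,), (4, 3)))   # True
    print(valid_scatter_shapes((8, 3), (4,), (4, 2)))   # False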
|
D | scatter_nd_op.cc |
    62   const Tensor& updates = c->input(1); in Compute() local
    76   c, indices, updates, shape, &out, true /*allocate*/)); in Compute()
    128  const Tensor& updates = c->input(2); in DoCompute() local
    170  c, indices, updates, params_shape, &params, false /*allocate*/)); in DoCompute()
    295  const Tensor& indices, const Tensor& updates) { in ValidateUpdateShape() argument
    304  updates.shape().DebugString(), in ValidateUpdateShape()
    310  if (updates.dims() < batch_dim) return shape_err(); in ValidateUpdateShape()
    311  if (params_shape.dims() < slice_dim + (updates.dims() - batch_dim)) { in ValidateUpdateShape()
    314  if (updates.dims() != batch_dim + params_shape.dims() - slice_dim) { in ValidateUpdateShape()
    318  if (updates.dim_size(d) != indices.dim_size(d)) return shape_err(); in ValidateUpdateShape()
    [all …]
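For the nd variants the rule is rank-based: with `indices` of shape `[d_0, ..., d_{Q-2}, K]` (so `batch_dim = Q-1` and `slice_dim = K`) and `params` of rank `P`, `updates` must have rank `Q-1 + P - K`, with leading dims matching the `indices` batch dims and trailing dims matching the un-indexed dims of `params`. A rough Python version of what `ValidateUpdateShape` enforces (illustrative, without the kernel's error reporting):

    def validate_nd_update_shape(params_shape, indices_shape, updates_shape):
        slice_dim = indices_shape[-1]          # K: leading params dims each index selects
        batch_dim = len(indices_shape) - 1     # Q-1: number of independent updates
        # rank(updates) == batch_dim + rank(params) - slice_dim
        if len(updates_shape) != batch_dim + len(params_shape) - slice_dim:
            return False
        # leading updates dims mirror the indices batch dims
        if list(updates_shape[:batch_dim]) != list(indices_shape[:-1]):
            return False
        # trailing updates dims mirror the un-indexed params dims
        return list(updates_shape[batch_dim:]) == list(params_shape[slice_dim:])

    print(validate_nd_update_shape((4, 4, 4), (2, 1), (2, 4, 4)))  # True
    print(validate_nd_update_shape((8,),      (4, 1), (4,)))       # True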
|
D | scatter_functor.h |
    130  typename TTypes<T>::ConstMatrix updates,
    138  typename TTypes<T>::ConstMatrix updates,
    151  updates.template chip<0>(i));
    162  typename TTypes<T>::ConstMatrix updates,
    175  d, params.template chip<0>(index), updates.template chip<0>(i));
    186  typename TTypes<T>::ConstMatrix updates,
    199  updates.data() + i * updates.dimension(1),
    200  updates.dimension(1) * sizeof(T));
    211  params.template chip<0>(index), updates.template chip<0>(i));
    227  typename TTypes<T>::ConstMatrix updates,
    [all …]
|
/external/mesa3d/docs/relnotes/ |
D | 6.4 |
    36   Glide (3dfx Voodoo1/2) requires updates
    37   SVGA requires updates
    38   DJGPP requires updates
    39   GGI requires updates
    40   BeOS requires updates
    41   Allegro requires updates
    42   D3D requires updates
    44   The drivers which require updates mostly need to be updated to work
|
/external/tensorflow/tensorflow/python/keras/_impl/keras/ |
D | optimizers.py |
    91   self.updates = []
    181  self.updates = [K.update_add(self.iterations, 1)]
    194  self.updates.append(K.update(m, v))
    205  self.updates.append(K.update(p, new_p))
    206  return self.updates
    254  self.updates = [K.update_add(self.iterations, 1)]
    265  self.updates.append(K.update(a, new_a))
    272  self.updates.append(K.update(p, new_p))
    273  return self.updates
    316  self.updates = [K.update_add(self.iterations, 1)]
    [all …]
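In the Keras optimizers, `get_updates` returns a list of assignment ops: one `K.update_add` that bumps the iteration counter, plus `K.update(...)` ops for each slot variable and each parameter. A stripped-down sketch of how a plain-SGD-like optimizer would populate `self.updates` (illustrative; `TinySGD` is not a class in this tree, and the real optimizers also handle momentum, decay, and constraints):

    from tensorflow.python.keras._impl.keras import backend as K

    class TinySGD(object):
        def __init__(self, lr=0.01):
            self.lr = K.variable(lr, name='lr')
            self.iterations = K.variable(0, dtype='int64', name='iterations')

        def get_updates(self, loss, params):
            grads = K.gradients(loss, params)
            self.updates = [K.update_add(self.iterations, 1)]  # count optimizer steps
            for p, g in zip(params, grads):
                new_p = p - self.lr * g
                self.updates.append(K.update(p, new_p))        # one assign op per parameter
            return self.updates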
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_ScatterNdNonAliasingAdd.pbtxt |
    17   name: "updates"
    27   updated with `updates`.
    32   from `updates` according to indices `indices`. The updates are non-aliasing:
    35   respect to both `input` and `updates`.
    46   `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    57   updates = tf.constant([9, 10, 11, 12])
    58   output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
    66   See @{tf.scatter_nd} for more details about how to make updates to slices.
|
D | api_def_ScatterNd.pbtxt |
    10   name: "updates"
    24   A new tensor with the given shape and updates applied according
    28   summary: "Scatter `updates` into a new (initially zero) tensor according to `indices`."
    30   Creates a new tensor by applying sparse `updates` to individual
    35   **WARNING**: The order in which updates are applied is nondeterministic, so the
    46   `shape`. `updates` is a tensor with shape
    62   updates = tf.constant([9, 10, 11, 12])
    64   scatter = tf.scatter_nd(indices, updates, shape)
    85   updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
    90   scatter = tf.scatter_nd(indices, updates, shape)
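The excerpt's first example is easier to follow when completed: each row of `indices` names a position in an all-zeros tensor of the requested `shape`, and the matching element of `updates` is written there. Filling in the surrounding lines as they appear in the op's public documentation (TF 1.x graph style, matching this tree):

    import tensorflow as tf

    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    shape = tf.constant([8])
    scatter = tf.scatter_nd(indices, updates, shape)

    with tf.Session() as sess:
        print(sess.run(scatter))   # [ 0 11  0 10  9  0  0 12]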
|
D | api_def_ResourceScatterAdd.pbtxt |
    16   name: "updates"
    21   summary: "Adds sparse updates to the variable referenced by `resource`."
    26   ref[indices, ...] += updates[...]
    29   ref[indices[i], ...] += updates[i, ...]
    32   ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
    37   Requires `updates.shape = indices.shape + ref.shape[1:]`.
|
D | api_def_ScatterUpdate.pbtxt |
    16   name: "updates"
    35   summary: "Applies sparse updates to a variable reference."
    41   ref[indices, ...] = updates[...]
    44   ref[indices[i], ...] = updates[i, ...]
    47   ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
    54   duplicate entries in `indices`, the order at which the updates happen
    57   Requires `updates.shape = indices.shape + ref.shape[1:]`.
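The caveat about duplicate entries in `indices` is easiest to see with a tiny example: for add-style ops the contributions commute, but for plain update the survivor depends on which write lands last, which is why a parallel implementation cannot promise a particular result. Plain sequential Python for illustration:

    import numpy as np

    ref = np.zeros(3)
    indices = [1, 1]            # duplicate destination
    updates = [10., 20.]

    # Add: order does not matter, both contributions accumulate.
    add_result = ref.copy()
    for i, idx in enumerate(indices):
        add_result[idx] += updates[i]    # -> [0., 30., 0.] in either order

    # Update: last write wins, so the result depends on processing order.
    upd_result = ref.copy()
    for i, idx in enumerate(indices):
        upd_result[idx] = updates[i]     # -> [0., 20., 0.] here, [0., 10., 0.] if reversed

    print(add_result, upd_result)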
|
D | api_def_ResourceScatterUpdate.pbtxt |
    16   name: "updates"
    21   summary: "Assigns sparse updates to the variable referenced by `resource`."
    26   ref[indices, ...] = updates[...]
    29   ref[indices[i], ...] = updates[i, ...]
    32   ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
|
D | api_def_ScatterMul.pbtxt |
    16   name: "updates"
    35   summary: "Multiplies sparse updates into a variable reference."
    41   ref[indices, ...] *= updates[...]
    44   ref[indices[i], ...] *= updates[i, ...]
    47   ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
    56   Requires `updates.shape = indices.shape + ref.shape[1:]`.
|
D | api_def_ScatterDiv.pbtxt |
    16   name: "updates"
    35   summary: "Divides a variable reference by sparse updates."
    41   ref[indices, ...] /= updates[...]
    44   ref[indices[i], ...] /= updates[i, ...]
    47   ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
    56   Requires `updates.shape = indices.shape + ref.shape[1:]`.
|
D | api_def_ScatterAdd.pbtxt |
    16   name: "updates"
    35   summary: "Adds sparse updates to a variable reference."
    40   ref[indices, ...] += updates[...]
    43   ref[indices[i], ...] += updates[i, ...]
    46   ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
    54   Requires `updates.shape = indices.shape + ref.shape[1:]`.
|
D | api_def_ScatterSub.pbtxt |
    16   name: "updates"
    35   summary: "Subtracts sparse updates to a variable reference."
    39   ref[indices, ...] -= updates[...]
    42   ref[indices[i], ...] -= updates[i, ...]
    45   ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
    54   Requires `updates.shape = indices.shape + ref.shape[1:]`.
|
D | api_def_ResourceScatterNdUpdate.pbtxt |
    17   name: "updates"
    31   summary: "Applies sparse `updates` to individual values or slices within a given"
    44   `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    56   updates = tf.constant([9, 10, 11, 12])
    57   update = tf.scatter_nd_update(ref, indices, updates)
    66   See @{tf.scatter_nd} for more details about how to make updates to
|
D | api_def_ScatterNdSub.pbtxt |
    17   name: "updates"
    38   summary: "Applies sparse subtraction between `updates` and individual values or slices"
    51   `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    62   updates = tf.constant([9, 10, 11, 12])
    63   sub = tf.scatter_nd_sub(ref, indices, updates)
    71   See @{tf.scatter_nd} for more details about how to make updates to
|
D | api_def_ScatterNdAdd.pbtxt |
    17   name: "updates"
    38   summary: "Applies sparse addition between `updates` and individual values or slices"
    51   `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    62   updates = tf.constant([9, 10, 11, 12])
    63   add = tf.scatter_nd_add(ref, indices, updates)
    71   See @{tf.scatter_nd} for more details about how to make updates to
|
D | api_def_ScatterNdUpdate.pbtxt |
    17   name: "updates"
    38   summary: "Applies sparse `updates` to individual values or slices within a given"
    51   `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    63   updates = tf.constant([9, 10, 11, 12])
    64   update = tf.scatter_nd_update(ref, indices, updates)
    73   See @{tf.scatter_nd} for more details about how to make updates to
|
/external/tensorflow/tensorflow/python/ops/ |
D | state_ops.py |
    308  def scatter_update(ref, indices, updates, use_locking=True, name=None): argument
    354  return gen_state_ops.scatter_update(ref, indices, updates,
    357  ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
    362  def scatter_nd_update(ref, indices, updates, use_locking=True, name=None): argument
    418  ref, indices, updates, use_locking, name)
    420  ref.handle, indices, ops.convert_to_tensor(updates, dtype=ref.dtype),
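`state_ops.scatter_update` and `scatter_nd_update` are the Python wrappers behind `tf.scatter_update` / `tf.scatter_nd_update`; the two return paths visible above appear to correspond to ref-variable versus resource-variable handles (note the `ref.handle` argument on the second path). A short TF 1.x graph-mode usage sketch, matching this tree's era:

    import tensorflow as tf

    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([4, 3, 1, 7])
    updates = tf.constant([9, 10, 11, 12])
    scatter = tf.scatter_update(ref, indices, updates)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(scatter))   # [ 1 11  3 10  9  6  7 12]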
|
/external/tensorflow/tensorflow/compiler/tf2xla/lib/ |
D | scatter.cc |
    35   const xla::ComputationDataHandle& updates, in XlaScatter() argument
    44   builder->GetShape(updates)); in XlaScatter()
    106  auto flat_updates = builder->Reshape(updates, flat_updates_shape); in XlaScatter()
    118  auto updates = loop_vars[1]; in XlaScatter() local
    159  auto update = body_builder->DynamicSlice(updates, updates_offset, in XlaScatter()
    183  return std::vector<xla::ComputationDataHandle>{indices, updates, buffer}; in XlaScatter()
|