/external/tensorflow/tensorflow/compiler/xla/tests/

  scatter_test.cc
    31   Literal* scatter_indices, Literal* updates) { in RunTest() argument
    32   RunTest(hlo_text, {operand, scatter_indices, updates}); in RunTest()
    68   Literal updates = LiteralUtil::CreateR2<int32>({{10, 20, 30}, {70, 80, 90}}); in XLA_TEST_F() local
    69   RunTest(hlo_text, &operand, &scatter_indices, &updates); in XLA_TEST_F()
    99   Literal updates = LiteralUtil::CreateR2<int32>({{10, 20, 30}, {70, 80, 90}}); in XLA_TEST_F() local
    100  RunTest(hlo_text, &operand, &scatter_indices, &updates); in XLA_TEST_F()
    127  Literal updates = in XLA_TEST_F() local
    129  RunTest(hlo_text, &operand, &scatter_indices, &updates); in XLA_TEST_F()
    193  Literal updates = in XLA_TEST_F() local
    196  RunTest(hlo_text, &operand, &scatter_indices, &updates); in XLA_TEST_F()
    [all …]

/external/tensorflow/tensorflow/python/kernel_tests/

  scatter_nd_ops_test.py
    62   def _NumpyScatterNd(ref, indices, updates, op): argument
    70   flat_updates = updates.reshape((num_updates, slice_size))
    79   def _NumpyUpdate(ref, indices, updates): argument
    80   return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
    83   def _NumpyAdd(ref, indices, updates): argument
    84   return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
    87   def _NumpySub(ref, indices, updates): argument
    88   return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
    91   def _NumpyMul(ref, indices, updates): argument
    92   return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
    [all …]
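
The `_NumpyScatterNd` reference helper is truncated in the listing above; below is a minimal sketch of the same idea, with hypothetical names and a simplified reshape, not the exact test code.

    import numpy as np

    def numpy_scatter_nd(ref, indices, updates, op):
        """Reference scatter_nd: apply op(old, update) slice-by-slice (sketch)."""
        out = ref.copy()
        ixdim = indices.shape[-1]                       # length of each index tuple
        num_updates = int(np.prod(indices.shape[:-1]))  # one update per outer position
        slice_shape = out.shape[ixdim:]                 # shape of each updated slice
        flat_indices = indices.reshape(num_updates, ixdim)
        flat_updates = updates.reshape((num_updates,) + slice_shape)
        for ix, upd in zip(flat_indices, flat_updates):
            out[tuple(ix)] = op(out[tuple(ix)], upd)
        return out

    # Overwrite rows 0 and 2 of a (4, 2) array, mirroring _NumpyUpdate above.
    result = numpy_scatter_nd(np.zeros((4, 2)),
                              np.array([[0], [2]]),
                              np.array([[1., 1.], [2., 2.]]),
                              lambda p, u: u)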

  scatter_ops_test.py
    35   def _NumpyAdd(ref, indices, updates): argument
    39   ref[indx] += updates[i]
    47   def _NumpySub(ref, indices, updates): argument
    49   ref[indx] -= updates[i]
    57   def _NumpyMul(ref, indices, updates): argument
    59   ref[indx] *= updates[i]
    67   def _NumpyDiv(ref, indices, updates): argument
    69   ref[indx] /= updates[i]
    77   def _NumpyMin(ref, indices, updates): argument
    79   ref[indx] = np.minimum(ref[indx], updates[i])
    [all …]
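
These reference helpers loop over `indices` one entry at a time, so repeated indices are applied sequentially. That matters because vectorized NumPy fancy indexing does not accumulate duplicates; a small sketch with illustrative names:

    import numpy as np

    def numpy_scatter_add(ref, indices, updates):
        out = ref.copy()
        for i, idx in enumerate(indices):
            out[idx] += updates[i]        # each duplicate index is applied once
        return out

    ref = np.zeros(3)
    indices = np.array([1, 1, 2])
    updates = np.array([1.0, 2.0, 3.0])
    print(numpy_scatter_add(ref, indices, updates))  # [0. 3. 3.]

    vectorized = ref.copy()
    vectorized[indices] += updates        # buffered: one of the writes to index 1 is lost
    print(vectorized)                     # [0. 2. 3.]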

  batch_scatter_ops_test.py
    36   def _NumpyUpdate(ref, indices, updates): argument
    39   ref[indx] = updates[i]
    64   updates = _AsType(
    72   np_scatter(new, indices, updates)
    77   ref.batch_scatter_update(ops.IndexedSlices(indices, updates))
    79   tf_scatter(ref, indices, updates).eval()
    107  updates = np.array([-3, -4, -5]).astype(np.float32)
    114  state_ops.batch_scatter_update(ref, indices, updates).eval()
    120  state_ops.batch_scatter_update(ref, indices, updates).eval()
    125  state_ops.batch_scatter_update(ref, indices, updates).eval()
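
`batch_scatter_update` treats the leading dimensions of `ref`, `indices`, and `updates` as a shared batch and scatters within each batch entry. A NumPy sketch of that semantics, assuming scalar updates along the last axis (this is not the TensorFlow kernel):

    import numpy as np

    def numpy_batch_scatter_update(ref, indices, updates):
        out = ref.copy()
        for b in np.ndindex(indices.shape[:-1]):      # iterate over the batch dims
            for i, idx in enumerate(indices[b]):
                out[b + (idx,)] = updates[b + (i,)]   # scatter within batch entry b
        return out

    ref = np.zeros((2, 4))
    indices = np.array([[1, 3], [0, 2]])
    updates = np.array([[10., 30.], [40., 60.]])
    # Row 0 receives 10 at column 1 and 30 at column 3; row 1 receives 40 and 60.
    print(numpy_batch_scatter_update(ref, indices, updates))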

/external/tensorflow/tensorflow/compiler/tests/

  scatter_nd_op_test.py
    49   def _NumpyScatterNd(ref, indices, updates, op): argument
    57   flat_updates = updates.reshape((num_updates, slice_size))
    66   def _NumpyUpdate(indices, updates, shape): argument
    67   ref = np.zeros(shape, dtype=updates.dtype)
    68   return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
    107  updates = _AsType(np.random.randn(*(updates_shape)), vtype)
    110  np_out = np_scatter(indices, updates, ref_shape)
    112  tf_out = tf_scatter(indices, updates, ref_shape)
    121  def _runScatterNd(self, indices, updates, shape): argument
    123  updates_placeholder = array_ops.placeholder(updates.dtype)
    [all …]

/external/tensorflow/tensorflow/python/ops/

  state_ops.py
    252  def scatter_update(ref, indices, updates, use_locking=True, name=None): argument
    298  return gen_state_ops.scatter_update(ref, indices, updates,
    301  ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
    306  def scatter_nd_update(ref, indices, updates, use_locking=True, name=None): argument
    361  ref, indices, updates, use_locking, name)
    363  ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
    368  def scatter_add(ref, indices, updates, use_locking=False, name=None): argument
    412  return gen_state_ops.scatter_add(ref, indices, updates,
    415  ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
    420  def scatter_nd_add(ref, indices, updates, use_locking=False, name=None): argument
    [all …]
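
These wrappers dispatch to `gen_state_ops` for reference variables and to the resource-variable scatter ops (via `ref.handle`) otherwise. A minimal usage sketch, assuming eager execution with a resource `tf.Variable` and the `tf.compat.v1` endpoints of the same wrappers:

    import tensorflow as tf

    v = tf.Variable([1.0, 2.0, 3.0, 4.0])

    # scatter_update: v[indices[i]] = updates[i]
    tf.compat.v1.scatter_update(v, indices=[0, 2], updates=[10.0, 30.0])
    # v is now [10., 2., 30., 4.]

    # scatter_nd_update: each row of `indices` is an index tuple into v
    tf.compat.v1.scatter_nd_update(v, indices=[[1]], updates=[20.0])
    # v is now [10., 20., 30., 4.]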

/external/tensorflow/tensorflow/core/kernels/

  scatter_nd_op.cc
    76   const Tensor& updates = c->input(1); in Compute() local
    83   OP_REQUIRES(c, updates.shape().dims() >= 1, in Compute()
    86   updates.shape().DebugString())); in Compute()
    96   updates.shape().num_elements()), in Compute()
    103  OP_REQUIRES(c, indices.shape().dim_size(i) == updates.shape().dim_size(i), in Compute()
    108  ", updates shape:", updates.shape().DebugString())); in Compute()
    113  c, updates.shape().dims() - outer_dims == shape.dims() - ix, in Compute()
    117  " updates: ", updates.shape().DebugString())); in Compute()
    118  for (int i = 0; i + outer_dims < updates.shape().dims(); ++i) { in Compute()
    120  c, updates.shape().dim_size(i + outer_dims) == shape.dim_size(ix + i), in Compute()
    [all …]
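
The checks visible in this kernel enforce two constraints: the outer dimensions of `updates` must match the outer dimensions of `indices`, and the remaining dimensions of `updates` must match the trailing dimensions of the output shape past the index depth `ix`. A compressed Python rendering of those constraints (the function name is hypothetical):

    def check_scatter_nd_shapes(indices_shape, updates_shape, output_shape):
        ix = indices_shape[-1]               # depth of each index tuple
        outer = indices_shape[:-1]           # one update slice per outer position
        if tuple(updates_shape[:len(outer)]) != tuple(outer):
            raise ValueError("outer dims of updates must match indices")
        if tuple(updates_shape[len(outer):]) != tuple(output_shape[ix:]):
            raise ValueError("inner dims of updates must match output shape past ix")

    check_scatter_nd_shapes((4, 1), (4, 2), (8, 2))  # OK: four row updates into (8, 2)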

  scatter_op.cc
    39   static bool ValidShapes(const Tensor& params, const Tensor& updates, in ValidShapes() argument
    41   if (updates.dims() == 0) return true; in ValidShapes()
    42   if (updates.dims() != indices.dims() + params.dims() - 1) return false; in ValidShapes()
    44   if (updates.dim_size(d) != indices.dim_size(d)) { in ValidShapes()
    49   if (params.dim_size(d) != updates.dim_size(d - 1 + indices.dims())) { in ValidShapes()
    57   const Tensor& indices, const Tensor& updates) { in DoValidationChecking() argument
    64   c, ValidShapes(params, updates, indices), in DoValidationChecking()
    67   "updates.shape ", updates.shape().DebugString(), in DoValidationChecking()
    100  const Tensor& updates = c->input(2); in DoCompute() local
    101  DoValidationChecking(c, params, indices, updates); in DoCompute()
    [all …]
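
Apart from the scalar-`updates` shortcut, `ValidShapes` amounts to requiring `updates.shape == indices.shape + params.shape[1:]`. A short Python equivalent (hypothetical helper name):

    def valid_scatter_shapes(params_shape, indices_shape, updates_shape):
        # Scalar updates are always accepted; otherwise the shapes must compose exactly.
        return (len(updates_shape) == 0 or
                tuple(updates_shape) == tuple(indices_shape) + tuple(params_shape[1:]))

    assert valid_scatter_shapes((8, 3), (4,), (4, 3))
    assert not valid_scatter_shapes((8, 3), (4,), (4, 2))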

/external/tensorflow/tensorflow/contrib/tensor_forest/python/kernel_tests/

  scatter_add_ndim_op_test.py
    33   updates = [100., 200.]
    37   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
    46   updates = [100., 200.]
    50   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
    58   updates = []
    62   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
    69   updates = [100.]
    74   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
    81   updates = [[100., 200., 300.], [400., 500., 600.]]
    85   tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()

/external/mesa3d/docs/relnotes/

  6.4
    36   Glide (3dfx Voodoo1/2) requires updates
    37   SVGA requires updates
    38   DJGPP requires updates
    39   GGI requires updates
    40   BeOS requires updates
    41   Allegro requires updates
    42   D3D requires updates
    44   The drivers which require updates mostly need to be updated to work

/external/tensorflow/tensorflow/python/keras/

  optimizers.py
    72   self.updates = []
    187  self.updates = [state_ops.assign_add(self.iterations, 1)]
    200  self.updates.append(state_ops.assign(m, v))
    211  self.updates.append(state_ops.assign(p, new_p))
    212  return self.updates
    259  self.updates = [state_ops.assign_add(self.iterations, 1)]
    270  self.updates.append(state_ops.assign(a, new_a))
    277  self.updates.append(state_ops.assign(p, new_p))
    278  return self.updates
    327  self.updates = [state_ops.assign_add(self.iterations, 1)]
    [all …]
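
Each optimizer's `get_updates` builds a list of assign ops: one `assign_add` for the iteration counter, then one assign per slot variable and per parameter, and returns that list. A pure-Python stand-in for the pattern (not the Keras class; names are illustrative):

    class ToySGD:
        """Collects deferred (name, new_value) pairs the way get_updates
        collects assign ops, then hands the list back to the caller."""

        def __init__(self, lr=0.01):
            self.lr = lr
            self.iterations = 0
            self.updates = []

        def get_updates(self, params, grads):
            self.updates = [("iterations", self.iterations + 1)]
            for i, (p, g) in enumerate(zip(params, grads)):
                new_p = p - self.lr * g               # plays the role of assign(p, new_p)
                self.updates.append((f"param_{i}", new_p))
            return self.updates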

/external/tensorflow/tensorflow/core/api_def/base_api/

  api_def_ResourceScatterSub.pbtxt
    16   name: "updates"
    21   summary: "Subtracts sparse updates from the variable referenced by `resource`."
    26   ref[indices, ...] -= updates[...]
    29   ref[indices[i], ...] -= updates[i, ...]
    32   ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
    37   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  api_def_ResourceScatterDiv.pbtxt
    16   name: "updates"
    21   summary: "Divides sparse updates into the variable referenced by `resource`."
    26   ref[indices, ...] /= updates[...]
    29   ref[indices[i], ...] /= updates[i, ...]
    32   ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
    37   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  api_def_ResourceScatterAdd.pbtxt
    16   name: "updates"
    21   summary: "Adds sparse updates to the variable referenced by `resource`."
    26   ref[indices, ...] += updates[...]
    29   ref[indices[i], ...] += updates[i, ...]
    32   ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
    37   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
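
A NumPy sketch of the rule `ref[indices[i], ...] += updates[i, ...]` and of the shape requirement `updates.shape = indices.shape + ref.shape[1:]`; `np.add.at` also combines duplicate indices additively, which matches scatter-add's behaviour:

    import numpy as np

    ref = np.zeros((4, 2))
    indices = np.array([0, 2, 0])                        # duplicate index 0
    updates = np.array([[1., 1.], [2., 2.], [3., 3.]])   # indices.shape + ref.shape[1:]
    np.add.at(ref, indices, updates)                     # unbuffered +=, duplicates accumulate
    # ref is now [[4., 4.], [0., 0.], [2., 2.], [0., 0.]]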

  api_def_ResourceScatterMul.pbtxt
    16   name: "updates"
    21   summary: "Multiplies sparse updates into the variable referenced by `resource`."
    26   ref[indices, ...] *= updates[...]
    29   ref[indices[i], ...] *= updates[i, ...]
    32   ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
    37   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  api_def_TensorScatterUpdate.pbtxt
    16   name: "updates"
    24   A new tensor with the given shape and updates applied according
    28   summary: "Scatter `updates` into an existing tensor according to `indices`."
    30   This operation creates a new tensor by applying sparse `updates` to the passed
    32   This operation is very similar to `tf.scatter_nd`, except that the updates are
    36   If `indices` contains duplicates, then their updates are accumulated (summed).
    38   **WARNING**: The order in which updates are applied is nondeterministic, so the
    51   `shape`. `updates` is a tensor with shape
    67   updates = tf.constant([9, 10, 11, 12])
    69   updated = tf.tensor_scatter_update(tensor, indices, updates)
    [all …]
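
An out-of-place NumPy equivalent of the `tensor_scatter_update` call above; the `tensor` and `indices` values are elided from the snippet, so the ones here are illustrative assumptions based on the usual 8-element example:

    import numpy as np

    tensor = np.ones(8, dtype=np.int32)                  # assumed input
    indices = np.array([[4], [3], [1], [7]])             # assumed index tuples
    updates = np.array([9, 10, 11, 12], dtype=np.int32)

    updated = tensor.copy()                              # new tensor; the input is untouched
    updated[indices[:, 0]] = updates
    print(updated)                                       # [ 1 11  1 10  9  1  1 12]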

  api_def_ResourceScatterMin.pbtxt
    16   name: "updates"
    21   summary: "Reduces sparse updates into the variable referenced by `resource` using the `min` operation."
    26   ref[indices, ...] = min(ref[indices, ...], updates[...])
    29   ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
    32   ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
    37   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
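
A NumPy sketch of the `min` reduction; because `min` is commutative and associative, duplicate indices combine the same way regardless of order:

    import numpy as np

    ref = np.array([5., 5., 5., 5.])
    indices = np.array([1, 3, 1])          # index 1 appears twice
    updates = np.array([2., 7., 0.])
    np.minimum.at(ref, indices, updates)   # ref[i] = min(ref[i], update) per entry
    print(ref)                             # [5. 0. 5. 5.]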

  api_def_ResourceScatterMax.pbtxt
    16   name: "updates"
    21   summary: "Reduces sparse updates into the variable referenced by `resource` using the `max` operation."
    26   ref[indices, ...] = max(ref[indices, ...], updates[...])
    29   ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
    32   ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
    37   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  api_def_ScatterNdNonAliasingAdd.pbtxt
    17   name: "updates"
    27   updated with `updates`.
    32   from `updates` according to indices `indices`. The updates are non-aliasing:
    35   respect to both `input` and `updates`.
    46   `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    55   updates = tf.constant([9, 10, 11, 12])
    56   output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
    64   See `tf.scatter_nd` for more details about how to make updates to slices.

  api_def_TensorScatterAdd.pbtxt
    16   name: "updates"
    24   A new tensor copied from tensor and updates added according to the indices.
    27   summary: "Adds sparse `updates` to an existing tensor according to `indices`."
    29   This operation creates a new tensor by adding sparse `updates` to the passed
    31   This operation is very similar to `tf.scatter_nd_add`, except that the updates
    43   `shape`. `updates` is a tensor with shape
    55   updates = tf.constant([9, 10, 11, 12])
    57   updated = tf.tensor_scatter_add(tensor, indices, updates)
    74   updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
    79   updated = tf.tensor_scatter_add(tensor, indices, updates)

  api_def_TensorScatterSub.pbtxt
    16   name: "updates"
    24   A new tensor copied from tensor and updates subtracted according to the indices.
    27   summary: "Subtracts sparse `updates` from an existing tensor according to `indices`."
    29   This operation creates a new tensor by subtracting sparse `updates` from the
    31   This operation is very similar to `tf.scatter_nd_sub`, except that the updates
    43   `shape`. `updates` is a tensor with shape
    55   updates = tf.constant([9, 10, 11, 12])
    57   updated = tf.tensor_scatter_sub(tensor, indices, updates)
    74   updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
    79   updated = tf.tensor_scatter_sub(tensor, indices, updates)

  api_def_ScatterUpdate.pbtxt
    16   name: "updates"
    35   summary: "Applies sparse updates to a variable reference."
    41   ref[indices, ...] = updates[...]
    44   ref[indices[i], ...] = updates[i, ...]
    47   ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
    54   duplicate entries in `indices`, the order at which the updates happen
    57   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
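
Unlike the additive ops, plain assignment is not order-independent: when `indices` contains duplicates, only one of the conflicting writes survives, and the op gives no guarantee which. NumPy fancy assignment shows the same effect (the result shown is typical, not guaranteed):

    import numpy as np

    ref = np.zeros(4)
    indices = np.array([2, 2])    # duplicate entries
    updates = np.array([7., 9.])
    ref[indices] = updates        # conflicting writes; only one value survives
    print(ref)                    # e.g. [0. 0. 9. 0.]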

  api_def_ScatterMul.pbtxt
    16   name: "updates"
    35   summary: "Multiplies sparse updates into a variable reference."
    41   ref[indices, ...] *= updates[...]
    44   ref[indices[i], ...] *= updates[i, ...]
    47   ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
    56   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  api_def_ScatterDiv.pbtxt
    16   name: "updates"
    35   summary: "Divides a variable reference by sparse updates."
    41   ref[indices, ...] /= updates[...]
    44   ref[indices[i], ...] /= updates[i, ...]
    47   ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
    56   Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

/external/tensorflow/tensorflow/compiler/xla/service/

  scatter_expander.cc
    95   HloInstruction* updates, absl::Span<const int64> update_window_dims) { in PermuteScatterAndWindowDims() argument
    97   const int64 updates_rank = updates->shape().rank(); in PermuteScatterAndWindowDims()
    110  return MakeTransposeHlo(updates, permutation); in PermuteScatterAndWindowDims()
    115  const Shape& scatter_indices_shape, HloInstruction* updates, in AdjustScatterDims() argument
    125  return PrependDegenerateDims(updates, 1); in AdjustScatterDims()
    127  return CollapseFirstNDims(updates, num_scatter_dims); in AdjustScatterDims()
    227  HloInstruction* updates = loop_state[2]; in ScatterLoopBody() local
    267  /*zeros_to_append=*/updates->shape().dimensions_size() - 1)); in ScatterLoopBody()
    268  std::vector<int64> update_slice_bounds(updates->shape().dimensions().begin(), in ScatterLoopBody()
    269  updates->shape().dimensions().end()); in ScatterLoopBody()
    [all …]
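
The expander canonicalizes `updates` (transpose so the scatter dimensions lead, then collapse them) and lowers the scatter to a while loop whose body slices out one index and one update per iteration and applies the combiner via a dynamic-update-slice. A rough Python sketch of that loop for a rank-1 operand with scalar index tuples (names are illustrative, not the HLO the pass emits):

    import numpy as np

    def expanded_scatter(operand, scatter_indices, updates, combiner=np.add):
        result = operand.copy()
        for i in range(scatter_indices.shape[0]):   # loop induction variable
            index = scatter_indices[i]              # dynamic-slice of the indices
            update_slice = updates[i]               # dynamic-slice of the updates
            # combiner plays the role of the scatter's to_apply computation,
            # and the assignment the role of dynamic-update-slice.
            result[index] = combiner(result[index], update_slice)
        return result

    print(expanded_scatter(np.zeros(5), np.array([1, 3, 1]), np.array([1., 2., 3.])))
    # [0. 4. 0. 2. 0.]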