/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | scatter_test.cc |
      30  Literal* scatter_indices, Literal* updates) {  in RunTest() argument
      31  RunTest(hlo_text, {operand, scatter_indices, updates});  in RunTest()
      67  Literal updates = LiteralUtil::CreateR2<int32>({{10, 20, 30}, {70, 80, 90}});  in XLA_TEST_F() local
      68  RunTest(hlo_text, &operand, &scatter_indices, &updates);  in XLA_TEST_F()
      98  Literal updates = LiteralUtil::CreateR2<int32>({{10, 20, 30}, {70, 80, 90}});  in XLA_TEST_F() local
      99  RunTest(hlo_text, &operand, &scatter_indices, &updates);  in XLA_TEST_F()
     126  Literal updates =  in XLA_TEST_F() local
     128  RunTest(hlo_text, &operand, &scatter_indices, &updates);  in XLA_TEST_F()
     192  Literal updates =  in XLA_TEST_F() local
     195  RunTest(hlo_text, &operand, &scatter_indices, &updates);  in XLA_TEST_F()
  [all …]
|
/external/tensorflow/tensorflow/python/kernel_tests/array_ops/ |
D | scatter_nd_ops_test.py |
      63  def _NumpyScatterNd(ref, indices, updates, op):  argument
      71  flat_updates = updates.reshape((num_updates, slice_size))
      80  def _NumpyUpdate(ref, indices, updates):  argument
      81  return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
      84  def _NumpyAdd(ref, indices, updates):  argument
      85  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
      88  def _NumpySub(ref, indices, updates):  argument
      89  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
      92  def _NumpyMul(ref, indices, updates):  argument
      93  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
  [all …]
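The matches above only show the helpers' signatures and the lambdas they are built from. A minimal, hedged reconstruction of such a numpy reference scatter (the body here is an assumption, not copied from the file) looks roughly like this:

    import numpy as np

    def _NumpyScatterNd(ref, indices, updates, op):
        # Hypothetical body: apply `op` to one addressed slice per index row.
        out = np.array(ref, copy=True)
        ixdim = indices.shape[-1]                       # index depth into `ref`
        flat_indices = indices.reshape(-1, ixdim)
        slice_shape = out.shape[ixdim:]
        flat_updates = np.asarray(updates).reshape((flat_indices.shape[0],) + slice_shape)
        for idx, upd in zip(flat_indices, flat_updates):
            key = tuple(idx)
            out[key] = op(out[key], upd)                # e.g. lambda p, u: p + u for add
        return out

    def _NumpyAdd(ref, indices, updates):
        return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)

    # Rows 1 and 3 of a 4x3 zero matrix receive the two update rows.
    print(_NumpyAdd(np.zeros((4, 3)), np.array([[1], [3]]),
                    np.array([[10., 20., 30.], [40., 50., 60.]])))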
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | scatter_nd.cc |
      51  const RuntimeShape& updates,  in CheckShapes() argument
      55  (updates.DimensionsCount() >= 1) &&  in CheckShapes()
      60  TF_LITE_ENSURE_EQ(context, indices.Dims(i), updates.Dims(i));  in CheckShapes()
      64  TF_LITE_ENSURE_EQ(context, updates.DimensionsCount() - outer_dims,  in CheckShapes()
      66  for (int i = 0; i + outer_dims < updates.DimensionsCount(); ++i) {  in CheckShapes()
      67  TF_LITE_ENSURE_EQ(context, updates.Dims(i + outer_dims),  in CheckShapes()
      79  const TfLiteTensor* updates;  in Prepare() local
      80  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));  in Prepare()
      84  switch (updates->type) {  in Prepare()
      94  TfLiteTypeGetName(updates->type));  in Prepare()
  [all …]
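The checks visible in CheckShapes constrain updates against indices and the output shape. Restated in Python (a sketch of the rule as it appears in the snippet, with hypothetical names):

    def check_scatter_nd_shapes(indices_shape, updates_shape, output_shape):
        # Outer dims of `updates` must match the outer dims of `indices`;
        # the remaining dims must match the trailing dims of the output shape.
        outer_dims = len(indices_shape) - 1
        ix = indices_shape[-1]                  # index depth into the output
        assert len(updates_shape) >= 1
        assert list(updates_shape[:outer_dims]) == list(indices_shape[:outer_dims])
        assert len(updates_shape) - outer_dims == len(output_shape) - ix
        assert list(updates_shape[outer_dims:]) == list(output_shape[ix:])

    # Example: 2 indices of depth 1, each updating a length-4 row of an [8, 4] output.
    check_scatter_nd_shapes([2, 1], [2, 4], [8, 4])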
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | scatter_nd_op_test.py |
      50  def _NumpyScatterNd(ref, indices, updates, op):  argument
      58  flat_updates = updates.reshape((num_updates, slice_size))
      67  def _NumpyUpdate(indices, updates, shape):  argument
      68  ref = np.zeros(shape, dtype=updates.dtype)
      69  return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
     108  updates = _AsType(np.random.randn(*(updates_shape)), vtype)
     111  np_out = np_scatter(indices, updates, ref_shape)
     113  tf_out = tf_scatter(indices, updates, ref_shape)
     122  def _runScatterNd(self, indices, updates, shape):  argument
     124  updates_placeholder = array_ops.placeholder(updates.dtype)
  [all …]
|
/external/tensorflow/tensorflow/python/ops/ |
D | state_ops.py |
     256  def scatter_update(ref, indices, updates, use_locking=True, name=None):  argument
     302  return gen_state_ops.scatter_update(ref, indices, updates,
     305  ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
     310  def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):  argument
     365  ref, indices, updates, use_locking, name)
     367  ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
     372  def scatter_add(ref, indices, updates, use_locking=False, name=None):  argument
     416  return gen_state_ops.scatter_add(ref, indices, updates,
     419  ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
     424  def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):  argument
  [all …]
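These wrappers back the ref-based tf.compat.v1.scatter_* functions; with a tf.Variable in TF2, the same element-wise and nd updates are typically expressed through the variable's scatter methods. A small usage sketch (values are illustrative, not from the file):

    import tensorflow as tf

    v = tf.Variable([[1., 1.], [2., 2.], [3., 3.]])

    # Overwrite rows 0 and 2 (scatter_nd-style addressing: one index vector per update row).
    v.scatter_nd_update(indices=[[0], [2]], updates=[[10., 10.], [30., 30.]])

    # Accumulate into row 1 instead of overwriting.
    v.scatter_nd_add(indices=[[1]], updates=[[0.5, 0.5]])

    print(v.numpy())  # [[10. 10.] [ 2.5  2.5] [30. 30.]]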
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | scatter_ops_test.py |
      35  def _NumpyAdd(ref, indices, updates):  argument
      39  ref[indx] += updates[i]
      47  def _NumpySub(ref, indices, updates):  argument
      49  ref[indx] -= updates[i]
      57  def _NumpyMul(ref, indices, updates):  argument
      59  ref[indx] *= updates[i]
      67  def _NumpyDiv(ref, indices, updates):  argument
      69  ref[indx] /= updates[i]
      77  def _NumpyMin(ref, indices, updates):  argument
      79  ref[indx] = np.minimum(ref[indx], updates[i])
  [all …]
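The reference helpers above apply each update in a Python loop (ref[indx] op= updates[i]), which matches the unbuffered behavior of numpy's ufunc.at; the distinction matters when indices repeat. An illustrative example (mine, not from the test file):

    import numpy as np

    ref = np.array([10., 20., 30., 40.])
    indices = np.array([0, 2, 0])            # index 0 appears twice
    updates = np.array([1., 2., 3.])

    # Loop form, as in the test's helpers: every duplicate is applied.
    for i, indx in enumerate(indices):
        ref[indx] += updates[i]

    # Unbuffered vectorized form; plain `ref[indices] += updates` would instead
    # apply only one of the duplicate updates.
    ref2 = np.array([10., 20., 30., 40.])
    np.add.at(ref2, indices, updates)
    assert np.allclose(ref, ref2)            # both give [14., 20., 32., 40.]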
|
D | batch_scatter_ops_test.py |
      35  def _NumpyUpdate(ref, indices, updates):  argument
      38  ref[indx] = updates[i]
      63  updates = _AsType(
      71  np_scatter(new, indices, updates)
      77  ref.batch_scatter_update(ops.IndexedSlices(indices, updates))
      79  self.evaluate(tf_scatter(ref, indices, updates))
     103  updates = np.array([-3, -4, -5]).astype(np.float32)
     110  self.evaluate(state_ops.batch_scatter_update(ref, indices, updates))
     116  self.evaluate(state_ops.batch_scatter_update(ref, indices, updates))
     121  self.evaluate(state_ops.batch_scatter_update(ref, indices, updates))
|
/external/tensorflow/tensorflow/core/kernels/ |
D | scatter_op.cc |
      33  static bool ValidShapes(const Tensor& params, const Tensor& updates,  in ValidShapes() argument
      35  if (updates.dims() == 0) return true;  in ValidShapes()
      36  if (updates.dims() != indices.dims() + params.dims() - 1) return false;  in ValidShapes()
      38  if (updates.dim_size(d) != indices.dim_size(d)) {  in ValidShapes()
      43  if (params.dim_size(d) != updates.dim_size(d - 1 + indices.dims())) {  in ValidShapes()
      51  const Tensor& indices, const Tensor& updates) {  in DoValidationChecking() argument
      58  c, ValidShapes(params, updates, indices),  in DoValidationChecking()
      61  "updates.shape ", updates.shape().DebugString(),  in DoValidationChecking()
      94  const Tensor& updates = c->input(2);  in DoCompute() local
      95  DoValidationChecking(c, params, indices, updates);  in DoCompute()
  [all …]
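ValidShapes above encodes the documented requirement updates.shape = indices.shape + params.shape[1:], with scalar updates allowed. A hedged Python restatement of the same rule:

    def valid_scatter_shapes(params_shape, indices_shape, updates_shape):
        # Scalar updates (rank 0) are always accepted and broadcast.
        if len(updates_shape) == 0:
            return True
        return list(updates_shape) == list(indices_shape) + list(params_shape[1:])

    assert valid_scatter_shapes([8, 4], [3], [3, 4])        # three rows of width 4
    assert not valid_scatter_shapes([8, 4], [3], [3, 5])    # inner dimension mismatch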
|
D | scatter_nd_op.cc |
      71  const Tensor& updates = c->input(1);  in Compute() local
      78  OP_REQUIRES(c, updates.shape().dims() >= 1,  in Compute()
      81  updates.shape().DebugString()));  in Compute()
      91  updates.shape().num_elements()),  in Compute()
      99  c, indices.shape().dim_size(i) == updates.shape().dim_size(i),  in Compute()
     104  ") of updates[shape=", updates.shape().DebugString(), "]"));  in Compute()
     108  OP_REQUIRES(c, updates.shape().dims() - outer_dims == shape.dims() - ix,  in Compute()
     112  outer_dims, ",", updates.shape().dims(),  in Compute()
     113  ") of updates[shape=", updates.shape().DebugString(), "]"));  in Compute()
     115  for (int i = 0; i + outer_dims < updates.shape().dims(); ++i) {  in Compute()
  [all …]
|
/external/mesa3d/docs/relnotes/ |
D | 6.4 |
      36  Glide (3dfx Voodoo1/2) requires updates
      37  SVGA requires updates
      38  DJGPP requires updates
      39  GGI requires updates
      40  BeOS requires updates
      41  Allegro requires updates
      42  D3D requires updates
      44  The drivers which require updates mostly need to be updated to work
|
D | 6.4.2.rst |
      53  Glide (3dfx Voodoo1/2) requires updates
      54  SVGA requires updates
      55  DJGPP requires updates
      56  GGI requires updates
      57  BeOS requires updates
      58  Allegro requires updates
      59  D3D requires updates
|
/external/tensorflow/tensorflow/python/kernel_tests/v1_compat_tests/ |
D | scatter_nd_ops_test.py |
      49  def _NumpyScatterNd(ref, indices, updates, op):  argument
      57  flat_updates = updates.reshape((num_updates, slice_size))
      66  def _NumpyMin(ref, indices, updates):  argument
      67  return _NumpyScatterNd(ref, indices, updates, np.minimum)
      70  def _NumpyMax(ref, indices, updates):  argument
      71  return _NumpyScatterNd(ref, indices, updates, np.maximum)
     111  updates = _AsType(np.random.randn(*(updates_shape)), vtype)
     116  np_scatter(new, indices, updates)
     120  self.evaluate(tf_scatter(ref_var, indices, updates))
     141  updates = np.array([-3, -4, -5]).astype(np.float32)
  [all …]
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_ResourceScatterAdd.pbtxt |
      16  name: "updates"
      21  summary: "Adds sparse updates to the variable referenced by `resource`."
      26  ref[indices, ...] += updates[...]
      29  ref[indices[i], ...] += updates[i, ...]
      32  ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
      37  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
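At the Python level, one way to exercise the additive resource scatter described here is tf.Variable.scatter_add with an IndexedSlices delta; a short sketch with illustrative values (not taken from the api_def itself):

    import tensorflow as tf

    v = tf.Variable([[1., 1.], [2., 2.], [3., 3.]])
    delta = tf.IndexedSlices(values=tf.constant([[10., 10.], [20., 20.]]),
                             indices=tf.constant([0, 2]))

    v.scatter_add(delta)      # ref[indices[i], ...] += updates[i, ...]
    print(v.numpy())          # [[11. 11.] [ 2.  2.] [23. 23.]]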
|
D | api_def_ResourceScatterDiv.pbtxt |
      16  name: "updates"
      21  summary: "Divides sparse updates into the variable referenced by `resource`."
      26  ref[indices, ...] /= updates[...]
      29  ref[indices[i], ...] /= updates[i, ...]
      32  ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
      37  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
|
D | api_def_ResourceScatterMul.pbtxt |
      16  name: "updates"
      21  summary: "Multiplies sparse updates into the variable referenced by `resource`."
      26  ref[indices, ...] *= updates[...]
      29  ref[indices[i], ...] *= updates[i, ...]
      32  ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
      37  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
|
D | api_def_ResourceScatterSub.pbtxt |
      16  name: "updates"
      21  summary: "Subtracts sparse updates from the variable referenced by `resource`."
      26  ref[indices, ...] -= updates[...]
      29  ref[indices[i], ...] -= updates[i, ...]
      32  ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
      37  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
|
D | api_def_ResourceScatterMin.pbtxt |
      16  name: "updates"
      21  …summary: "Reduces sparse updates into the variable referenced by `resource` using the `min` operat…
      26  ref[indices, ...] = min(ref[indices, ...], updates[...])
      29  ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
      32  ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
      37  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
|
D | api_def_ResourceScatterMax.pbtxt |
      16  name: "updates"
      21  …summary: "Reduces sparse updates into the variable referenced by `resource` using the `max` operat…
      26  ref[indices, ...] = max(ref[indices, ...], updates[...])
      29  ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
      32  ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
      37  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
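The min and max variants reduce each addressed element against its update rather than overwriting it; numpy's ufunc.at gives a quick way to see those semantics, duplicates included (illustration only, not how the op is implemented):

    import numpy as np

    ref = np.array([5., 5., 5., 5.])
    indices = np.array([1, 3, 1])              # index 1 appears twice
    updates = np.array([7., 2., 3.])

    np.minimum.at(ref, indices, updates)       # ref[i] = min(ref[i], update)
    print(ref)                                 # [5. 3. 5. 2.]

    ref = np.array([5., 5., 5., 5.])
    np.maximum.at(ref, indices, updates)       # max-scatter counterpart
    print(ref)                                 # [5. 7. 5. 5.]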
|
D | api_def_TensorScatterUpdate.pbtxt |
      16  name: "updates"
      24  A new tensor with the given shape and updates applied according
      28  summary: "Scatter `updates` into an existing tensor according to `indices`."
      30  This operation creates a new tensor by applying sparse `updates` to the passed
      32  This operation is very similar to `tf.scatter_nd`, except that the updates are
      42  - The order in which updates are applied is nondeterministic, so the output
      52  if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements.
      53  if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input
      57  The overall shape of `updates` is:
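This api_def corresponds to tf.tensor_scatter_nd_update, which returns a new tensor rather than mutating a variable. A runnable sketch of the scalar-update case (indices.shape[-1] equals the tensor rank), with values of my choosing:

    import tensorflow as tf

    tensor = tf.zeros([8], dtype=tf.int32)
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])

    out = tf.tensor_scatter_nd_update(tensor, indices, updates)
    print(out.numpy())   # [ 0 11  0 10  9  0  0 12]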
|
D | api_def_ScatterNdNonAliasingAdd.pbtxt |
      17  name: "updates"
      27  updated with `updates`.
      32  from `updates` according to indices `indices`. The updates are non-aliasing:
      35  respect to both `input` and `updates`.
      46  `updates` is `Tensor` of rank `Q-1+P-K` with shape:
      55  updates = tf.constant([9, 10, 11, 12])
      56  output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
      64  See `tf.scatter_nd` for more details about how to make updates to slices.
|
D | api_def_TensorScatterAdd.pbtxt |
      16  name: "updates"
      24  A new tensor copied from tensor and updates added according to the indices.
      27  summary: "Adds sparse `updates` to an existing tensor according to `indices`."
      29  This operation creates a new tensor by adding sparse `updates` to the passed
      31  This operation is very similar to `tf.scatter_nd_add`, except that the updates
      44  `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape
      56  updates = tf.constant([9, 10, 11, 12])
      58  updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
      74  updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
      79  updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
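Filling out the element-wise example whose fragments appear above (the ones-tensor of shape [8] is my assumption about the elided setup lines):

    import tensorflow as tf

    tensor = tf.ones([8], dtype=tf.int32)        # assumed setup; not shown in the snippet
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])

    updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
    print(updated.numpy())   # [ 1 12  1 11 10  1  1 13]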
|
D | api_def_TensorScatterSub.pbtxt |
      16  name: "updates"
      24  A new tensor copied from tensor and updates subtracted according to the indices.
      27  summary: "Subtracts sparse `updates` from an existing tensor according to `indices`."
      29  This operation creates a new tensor by subtracting sparse `updates` from the
      31  This operation is very similar to `tf.scatter_nd_sub`, except that the updates
      43  `shape`. `updates` is a tensor with shape
      55  updates = tf.constant([9, 10, 11, 12])
      57  updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
      73  updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
      78  updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
|
D | api_def_ScatterUpdate.pbtxt |
      16  name: "updates"
      35  summary: "Applies sparse updates to a variable reference."
      41  ref[indices, ...] = updates[...]
      44  ref[indices[i], ...] = updates[i, ...]
      47  ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
      54  duplicate entries in `indices`, the order at which the updates happen
      57  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
|
/external/tensorflow/tensorflow/python/keras/ |
D | optimizer_v1.py |
      63  self.updates = []
     198  self.updates = [state_ops.assign_add(self.iterations, 1)]
     210  self.updates.append(state_ops.assign(m, v))
     221  self.updates.append(state_ops.assign(p, new_p))
     222  return self.updates
     269  self.updates = [state_ops.assign_add(self.iterations, 1)]
     281  self.updates.append(state_ops.assign(a, new_a))
     288  self.updates.append(state_ops.assign(p, new_p))
     289  return self.updates
     344  self.updates = [state_ops.assign_add(self.iterations, 1)]
  [all …]
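The matches above show the optimizer_v1 pattern: get_updates builds a list that starts with an assign_add on the iteration counter and then appends one assign per parameter. A minimal graph-mode sketch of that pattern (hypothetical class, not the real Keras optimizer):

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()   # the updates-list idiom assumes graph mode

    class MinimalSGD(object):
        def __init__(self, lr=0.01):
            self.lr = lr
            self.iterations = tf.Variable(0, dtype=tf.int64, name='iterations')
            self.updates = []

        def get_updates(self, loss, params):
            grads = tf.gradients(loss, params)
            # Mirrors: self.updates = [state_ops.assign_add(self.iterations, 1)]
            self.updates = [tf.assign_add(self.iterations, 1)]
            for p, g in zip(params, grads):
                new_p = p - self.lr * g
                # Mirrors: self.updates.append(state_ops.assign(p, new_p))
                self.updates.append(tf.assign(p, new_p))
            return self.updates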
|
/external/llvm-project/llvm/test/Transforms/Attributor/ |
D | depgraph.ll |
      71  ; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, i32* %0, align 4' at position {arg…
      72  ; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, i32* %0, align 4' at position {arg…
      73  ; GRAPH-NEXT: updates [AANoUnwind] for CtxI ' %6 = call i32* @checkAndAdvance(i32* %5)' at posit…
      74  ; GRAPH-NEXT: updates [AANoUnwind] for CtxI ' %6 = call i32* @checkAndAdvance(i32* %5)' at posit…
      75  ; GRAPH-NEXT: updates [AANoUnwind] for CtxI ' %6 = call i32* @checkAndAdvance(i32* %5)' at posit…
      76  ; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, i32* %0, align 4' at position {arg…
      77  ; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, i32* %0, align 4' at position {arg…
      78  ; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, i32* %0, align 4' at position {arg…
      79  ; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, i32* %0, align 4' at position {arg…
      80  ; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, i32* %0, align 4' at position {arg…
  [all …]
|