| /external/tensorflow/tensorflow/python/kernel_tests/array_ops/ |
| D | scatter_nd_ops_test.py |
|     59  def _NumpyScatterNd(ref, indices, updates, op):
|     67    flat_updates = updates.reshape((num_updates, slice_size))
|     76  def _NumpyUpdate(ref, indices, updates):
|     77    return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
|     80  def _NumpyAdd(ref, indices, updates):
|     81    return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
|     84  def _NumpySub(ref, indices, updates):
|     85    return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
|     88  def _NumpyMul(ref, indices, updates):
|     89    return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
|     [all …]
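The helpers above flatten `indices` and `updates` and combine each update slice with the addressed slice of `ref` via an element-wise op. A minimal self-contained sketch of that reference pattern (names and example values here are illustrative, not the test file's exact code):

    import numpy as np

    def numpy_scatter_nd(ref, indices, updates, op):
        # Each row of `indices` addresses one slice of `ref`; `op` combines the
        # existing slice with the corresponding update slice.
        out = ref.copy()
        index_depth = indices.shape[-1]
        flat_indices = indices.reshape(-1, index_depth)
        slice_shape = out.shape[index_depth:]
        flat_updates = updates.reshape((len(flat_indices),) + slice_shape)
        for idx, upd in zip(flat_indices, flat_updates):
            out[tuple(idx)] = op(out[tuple(idx)], upd)
        return out

    # "Update" semantics: the new slice simply replaces the old one.
    print(numpy_scatter_nd(np.zeros((3, 3), np.int32),
                           np.array([[0], [2]]),
                           np.array([[1, 2, 3], [7, 8, 9]]),
                           lambda p, u: u))
    # [[1 2 3]
    #  [0 0 0]
    #  [7 8 9]]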
|
| D | scatter_ops_test.py |
|     32  def _NumpyAdd(ref, indices, updates):
|     36    ref[indx] += updates[i]
|     44  def _NumpySub(ref, indices, updates):
|     46    ref[indx] -= updates[i]
|     54  def _NumpyMul(ref, indices, updates):
|     56    ref[indx] *= updates[i]
|     64  def _NumpyDiv(ref, indices, updates):
|     66    ref[indx] /= updates[i]
|     74  def _NumpyMin(ref, indices, updates):
|     76    ref[indx] = np.minimum(ref[indx], updates[i])
|     [all …]
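These reference helpers apply one update per index in a Python loop, so duplicate indices accumulate. A short sketch of that behaviour with illustrative values; `np.add.at` matches the loop, whereas a plain fancy-index `+=` would apply only one of the duplicate updates:

    import numpy as np

    ref = np.zeros(4, np.float32)
    indices = np.array([1, 1, 3])
    updates = np.array([10., 20., 5.], dtype=np.float32)

    for i, idx in enumerate(indices):      # loop form used by the test helpers
        ref[idx] += updates[i]
    print(ref)                             # [ 0. 30.  0.  5.]

    ref2 = np.zeros(4, np.float32)
    np.add.at(ref2, indices, updates)      # unbuffered add: duplicates accumulate
    print(ref2)                            # [ 0. 30.  0.  5.]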
|
| /external/tensorflow/tensorflow/lite/kernels/ |
| D | scatter_nd.cc |
|     51      const RuntimeShape& updates,  in CheckShapes()
|     55        (updates.DimensionsCount() >= 1) &&  in CheckShapes()
|     60      TF_LITE_ENSURE_EQ(context, indices.Dims(i), updates.Dims(i));  in CheckShapes()
|     64    TF_LITE_ENSURE_EQ(context, updates.DimensionsCount() - outer_dims,  in CheckShapes()
|     66    for (int i = 0; i + outer_dims < updates.DimensionsCount(); ++i) {  in CheckShapes()
|     67      TF_LITE_ENSURE_EQ(context, updates.Dims(i + outer_dims),  in CheckShapes()
|     79    const TfLiteTensor* updates;  in Prepare()
|     80    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));  in Prepare()
|     84    switch (updates->type) {  in Prepare()
|     94        context, "Updates of type '%s' are not supported by scatter_nd.",  in Prepare()
|     [all …]
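The checks excerpted above enforce the usual scatter_nd shape contract: the leading (outer) dims of `indices` and `updates` must match, and the trailing dims of `updates` must match the output shape past the index depth. A hedged Python restatement of that contract (a sketch only, not the kernel's code):

    def check_scatter_nd_shapes(indices_shape, updates_shape, output_shape):
        outer_dims = len(indices_shape) - 1      # indices = [d_0 .. d_{n-2}, index_depth]
        index_depth = indices_shape[-1]
        assert updates_shape[:outer_dims] == indices_shape[:outer_dims], \
            "outer dims of indices and updates must match"
        assert updates_shape[outer_dims:] == output_shape[index_depth:], \
            "inner dims of updates must match the scattered slice shape"

    check_scatter_nd_shapes((2, 1), (2, 3), (3, 3))   # two whole-row updates into a 3x3 output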
|
| /external/tensorflow/tensorflow/compiler/xla/tests/ |
| D | scatter_test.cc |
|     28      Literal* scatter_indices, Literal* updates) {  in RunTest()
|     29    RunTest(hlo_text, {operand, scatter_indices, updates});  in RunTest()
|     53    updates = s32[2,3] parameter(2)  in XLA_TEST_F()
|     54    ROOT scatter = s32[3,3] scatter(operand, indices, updates),  in XLA_TEST_F()
|     65    Literal updates =  in XLA_TEST_F()
|     67    RunTest(hlo_text, &operand, &scatter_indices, &updates);  in XLA_TEST_F()
|     85    updates = s32[2,3] add(p2, p2)  in XLA_TEST_F()
|     86    ROOT scatter = s32[3,3] scatter(operand, indices, updates),  in XLA_TEST_F()
|     97    Literal updates =  in XLA_TEST_F()
|     99    RunTest(hlo_text, &operand, &scatter_indices, &updates);  in XLA_TEST_F()
|     [all …]
|
| /external/tensorflow/tensorflow/python/ops/ |
| D | state_ops.py |
|     383  def scatter_update(ref, indices, updates, use_locking=True, name=None):
|     385    r"""Applies sparse updates to a variable reference.
|     391        ref[indices, ...] = updates[...]
|     394        ref[indices[i], ...] = updates[i, ...]
|     397        ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
|     404    duplicate entries in `indices`, the order at which the updates happen
|     407    Requires `updates.shape = indices.shape + ref.shape[1:]`.
|     417      updates: A `Tensor`. Must have the same type as `ref`.
|     429      return gen_state_ops.scatter_update(ref, indices, updates,
|     432          ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
|     [all …]
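The docstring above defines the semantics `ref[indices[i], ...] = updates[i, ...]` with `updates.shape = indices.shape + ref.shape[1:]`. A minimal NumPy restatement of those semantics (illustrative values; in TF 2.x the variable op itself is reachable as `tf.compat.v1.scatter_update`):

    import numpy as np

    def scatter_update_reference(ref, indices, updates):
        # ref[indices[i], ...] = updates[i, ...]
        out = ref.copy()
        indices = np.asarray(indices)
        for i, idx in np.ndenumerate(indices):
            out[idx] = updates[i]
        return out

    ref = np.arange(12).reshape(4, 3)
    print(scatter_update_reference(ref, [0, 2],
                                   np.array([[10, 10, 10], [20, 20, 20]])))
    # rows 0 and 2 replaced; rows 1 and 3 untouched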
|
| /external/tensorflow/tensorflow/compiler/tests/ |
| D | scatter_nd_op_test.py |
|     46  def _NumpyScatterNd(ref, indices, updates, op):
|     54    flat_updates = updates.reshape((num_updates, slice_size))
|     63  def _NumpyUpdate(indices, updates, shape):
|     64    ref = np.zeros(shape, dtype=updates.dtype)
|     65    return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
|     104    updates = _AsType(np.random.randn(*(updates_shape)), vtype)
|     107    np_out = np_scatter(indices, updates, ref_shape)
|     109    tf_out = tf_scatter(indices, updates, ref_shape)
|     118  def _runScatterNd(self, indices, updates, shape):
|     120    updates_placeholder = array_ops.placeholder(updates.dtype)
|     [all …]
|
| /external/swiftshader/third_party/llvm-16.0/llvm/include/llvm/Analysis/ |
| D | DomTreeUpdater.h |
|     74   /// as having no pending updates. This function does not check
|     79   /// Returns true if there are DominatorTree updates queued.
|     83   /// Returns true if there are PostDominatorTree updates queued.
|     90   /// These methods provide APIs for submitting updates to the DominatorTree and
|     95   /// 1. Eager UpdateStrategy: Updates are submitted and then flushed
|     97   /// 2. Lazy UpdateStrategy: Updates are submitted but only flushed when you
|     99   ///    when you submit a bunch of updates multiple times which can then
|     100  ///    add up to a large number of updates between two queries on the
|     101  ///    DominatorTree. The incremental updater can reschedule the updates or
|     103  ///    process depending on the number of updates.
|     [all …]
|
| /external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Analysis/ |
| D | DomTreeUpdater.h |
|     72   /// as having no pending updates. This function does not check
|     77   /// Returns true if there are DominatorTree updates queued.
|     81   /// Returns true if there are PostDominatorTree updates queued.
|     88   /// These methods provide APIs for submitting updates to the DominatorTree and
|     93   /// 1. Eager UpdateStrategy: Updates are submitted and then flushed
|     95   /// 2. Lazy UpdateStrategy: Updates are submitted but only flushed when you
|     97   ///    when you submit a bunch of updates multiple times which can then
|     98   ///    add up to a large number of updates between two queries on the
|     99   ///    DominatorTree. The incremental updater can reschedule the updates or
|     101  ///    process depending on the number of updates.
|     [all …]
|
| /external/tensorflow/tensorflow/core/kernels/ |
| D | scatter_nd_op.cc |
|     49  // If shape_input has 0 elements, then we need to have indices and updates with
|     51  // then updates should also have 0 elements, otherwise we should error.
|     72    const Tensor& updates = c->input(1);  in Compute()
|     79    OP_REQUIRES(c, updates.shape().dims() >= 1,  in Compute()
|     81        "Updates shape must have rank at least one. Found:",  in Compute()
|     82        updates.shape().DebugString()));  in Compute()
|     92        updates.shape().num_elements()),  in Compute()
|     94        "Indices and updates specified for empty output shape"));  in Compute()
|     100      c, indices.shape().dim_size(i) == updates.shape().dim_size(i),  in Compute()
|     105      ") of updates[shape=", updates.shape().DebugString(), "]"));  in Compute()
|     [all …]
|
| D | scatter_op.cc |
|     33  // Check whether updates.shape = indices.shape + params.shape[1:]
|     34  static bool ValidShapes(const Tensor& params, const Tensor& updates,  in ValidShapes()
|     36    if (updates.dims() == 0) return true;  in ValidShapes()
|     37    if (updates.dims() != indices.dims() + params.dims() - 1) return false;  in ValidShapes()
|     39      if (updates.dim_size(d) != indices.dim_size(d)) {  in ValidShapes()
|     44      if (params.dim_size(d) != updates.dim_size(d - 1 + indices.dims())) {  in ValidShapes()
|     52        const Tensor& indices, const Tensor& updates) {  in DoValidationChecking()
|     59        c, ValidShapes(params, updates, indices),  in DoValidationChecking()
|     60        errors::InvalidArgument("Must have updates.shape = indices.shape + "  in DoValidationChecking()
|     61                                "params.shape[1:] or updates.shape = [], got ",  in DoValidationChecking()
|     [all …]
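`ValidShapes` encodes the rule quoted in the error message: `updates.shape = indices.shape + params.shape[1:]`, with a scalar `updates` also accepted. A one-function sketch of that predicate (assumed equivalent behaviour, not the kernel source):

    def valid_scatter_shapes(params_shape, indices_shape, updates_shape):
        if len(updates_shape) == 0:                  # scalar updates are always allowed
            return True
        return tuple(updates_shape) == tuple(indices_shape) + tuple(params_shape[1:])

    print(valid_scatter_shapes((4, 3), (2,), (2, 3)))   # True: two row updates into 4x3 params
    print(valid_scatter_shapes((4, 3), (2,), (3, 2)))   # False: shapes do not line up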
|
| /external/tensorflow/tensorflow/compiler/tf2xla/lib/ |
| D | scatter.cc |
|     35      const xla::XlaOp& buffer, const xla::XlaOp& updates,  in XlaScatter()
|     41    TF_ASSIGN_OR_RETURN(xla::Shape updates_shape, builder->GetShape(updates));  in XlaScatter()
|     72    // succeed since it updates a slice of size 1.  in XlaScatter()
|     81    // Example of a 1-D scatter that updates two [3,1] tensors in a tensor of  in XlaScatter()
|     87    //  updates = s32[3,2] parameter(2)  in XlaScatter()
|     88    //  scatter = s32[3,3] scatter(operand, indices, updates),  in XlaScatter()
|     96    // Example of a 1-D scatter that updates two [1,3] tensors in a tensor of  in XlaScatter()
|     101   //  updates = s32[2,3] parameter(2)  in XlaScatter()
|     102   //  scatter = s32[3,3] scatter(operand, indices, updates),  in XlaScatter()
|     115   //  updates = s32[2,2] parameter(2)  in XlaScatter()
|     [all …]
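The commented HLO examples contrast scattering two [1,3] row slices (updates shaped s32[2,3]) with two [3,1] column slices (updates shaped s32[3,2]) into a 3x3 operand. The same contrast expressed in NumPy terms, with illustrative values only:

    import numpy as np

    operand = np.zeros((3, 3), np.int32)
    updates = np.array([[10, 20, 30],
                        [40, 50, 60]], np.int32)

    rows = operand.copy()
    rows[[0, 2], :] = updates        # two whole rows written: updates laid out as [2,3]

    cols = operand.copy()
    cols[:, [0, 2]] = updates.T      # two whole columns written: updates laid out as [3,2]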
|
| /external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tests/Dialect/mhlo/ |
| D | verifier_scatter_op.mlir |
|     5      %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) ->
|     7    %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     27     %scatter_indices: tensor<*xi32>, %updates: tensor<*xf32>) ->
|     29   %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     50     %scatter_indices: tensor<10x2xf32>, %updates: tensor<10x300xf32>) ->
|     53   %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     74     %scatter_indices: tensor<10x2xi32>, %updates: tensor<*xf32>) ->
|     77   %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     98     %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) ->
|     101  %0 = "mhlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     [all …]
|
| /external/tensorflow/tensorflow/compiler/xla/mlir_hlo/stablehlo/tests/ |
| D | verify_scatter.mlir |
|     5      %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) ->
|     7    %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     27     %scatter_indices: tensor<*xi32>, %updates: tensor<*xf32>) ->
|     29   %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     50     %scatter_indices: tensor<10x2xf32>, %updates: tensor<10x300xf32>) ->
|     53   %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     74     %scatter_indices: tensor<10x2xi32>, %updates: tensor<*xf32>) ->
|     77   %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     98     %scatter_indices: tensor<10x2xi32>, %updates: tensor<10x300xf32>) ->
|     101  %0 = "stablehlo.scatter" (%input_tensor, %scatter_indices, %updates) ({
|     [all …]
|
| /external/tensorflow/tensorflow/core/api_def/base_api/ |
| D | api_def_TensorScatterUpdate.pbtxt |
|     16    name: "updates"
|     18      Updates to scatter into output.
|     24      A new tensor with the given shape and updates applied according
|     28    summary: "Scatter `updates` into an existing tensor according to `indices`."
|     30  This operation creates a new tensor by applying sparse `updates` to the passed
|     32  This operation is very similar to `tf.scatter_nd`, except that the updates are
|     42  - The order in which updates are applied is nondeterministic, so the output
|     52  if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements.
|     53  if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input
|     57  The overall shape of `updates` is:
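A runnable sketch of the documented behaviour, assuming TensorFlow 2.x is available (`tf.tensor_scatter_nd_update` is the public wrapper for this op); with `indices.shape[-1] == 1` on a rank-1 tensor it writes scalar elements, as described above:

    import tensorflow as tf

    tensor = tf.zeros([8], dtype=tf.int32)
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    print(tf.tensor_scatter_nd_update(tensor, indices, updates).numpy())
    # [ 0 11  0 10  9  0  0 12]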
|
| D | api_def_ScatterNd.pbtxt |
|     13    name: "updates"
|     27      A new tensor with the given shape and updates applied according
|     31    summary: "Scatters `updates` into a tensor of shape `shape` according to `indices`."
|     33  Scatter sparse `updates` according to individual values at the specified
|     39  is zero-initialized. Calling `tf.scatter_nd(indices, updates, shape)`
|     41  `tf.tensor_scatter_nd_add(tf.zeros(shape, updates.dtype), indices, updates)`
|     43  If `indices` contains duplicates, the associated `updates` are accumulated
|     47  This is because the order in which the updates are applied is nondeterministic
|     63  `updates` is a tensor with shape:
|     79      updates = tf.constant([9, 10, 11, 12])
|     [all …]
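Completing the fragment above into a runnable example (TF 2.x assumed): scattering into a zero-initialized shape, plus a second call showing that duplicate indices are accumulated rather than overwritten:

    import tensorflow as tf

    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    print(tf.scatter_nd(indices, updates, shape=[8]).numpy())
    # [ 0 11  0 10  9  0  0 12]

    # Duplicate indices are summed, matching tensor_scatter_nd_add on zeros.
    print(tf.scatter_nd(tf.constant([[1], [1]]), tf.constant([5, 7]), shape=[4]).numpy())
    # [ 0 12  0  0]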
|
| D | api_def_ScatterNdNonAliasingAdd.pbtxt |
|     17    name: "updates"
|     27      updated with `updates`.
|     32  from `updates` according to indices `indices`. The updates are non-aliasing:
|     35  respect to both `input` and `updates`.
|     46  `updates` is `Tensor` of rank `Q-1+P-K` with shape:
|     55      updates = tf.constant([9, 10, 11, 12])
|     56      output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
|     64  See `tf.scatter_nd` for more details about how to make updates to slices.
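The api_def's example adds `updates` into `input` at `indices`. This sketch reproduces only the arithmetic of that example using `tf.tensor_scatter_nd_add` (TF 2.x assumed); the non-aliasing op documented here is exposed through `tf.raw_ops.ScatterNdNonAliasingAdd` and differs in its aliasing guarantees, not in the values produced for this case:

    import tensorflow as tf

    input_ = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    print(tf.tensor_scatter_nd_add(input_, indices, updates).numpy())
    # [ 1 13  3 14 14  6  7 20]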
|
| D | api_def_TensorScatterSub.pbtxt |
|     16    name: "updates"
|     18      Updates to scatter into output.
|     24      A new tensor copied from tensor and updates subtracted according to the indices.
|     27    summary: "Subtracts sparse `updates` from an existing tensor according to `indices`."
|     29  This operation creates a new tensor by subtracting sparse `updates` from the
|     31  This operation is very similar to `tf.scatter_nd_sub`, except that the updates
|     43  `shape`. `updates` is a tensor with shape
|     55      updates = tf.constant([9, 10, 11, 12])
|     57      updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
|     73      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
|     [all …]
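Filling in the scalar case sketched above as a runnable example (TF 2.x assumed); the starting tensor of ones is an illustrative choice, since the excerpt does not show it:

    import tensorflow as tf

    tensor = tf.ones([8], dtype=tf.int32)          # illustrative starting values
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    print(tf.tensor_scatter_nd_sub(tensor, indices, updates).numpy())
    # [  1 -10   1  -9  -8   1   1 -11]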
|
| D | api_def_ScatterUpdate.pbtxt |
|     16    name: "updates"
|     35    summary: "Applies sparse updates to a variable reference."
|     41      ref[indices, ...] = updates[...]
|     44      ref[indices[i], ...] = updates[i, ...]
|     47      ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
|     54  duplicate entries in `indices`, the order at which the updates happen
|     57  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
|
| /external/tensorflow/tensorflow/python/keras/ |
| D | optimizer_v1.py |
|     58    self.updates = []
|     193   self.updates = [state_ops.assign_add(self.iterations, 1)]
|     206   self.updates.append(state_ops.assign(m, v))
|     217   self.updates.append(state_ops.assign(p, new_p))
|     218   return self.updates
|     268   self.updates = [state_ops.assign_add(self.iterations, 1)]
|     281   self.updates.append(state_ops.assign(a, new_a))
|     288   self.updates.append(state_ops.assign(p, new_p))
|     289   return self.updates
|     307   updated during training. The more updates a parameter receives,
|     [all …]
|
| /external/flatbuffers/go/ |
| D | table.go |
|     297  // MutateBool updates a bool at the given offset.
|     303  // MutateByte updates a Byte at the given offset.
|     309  // MutateUint8 updates a Uint8 at the given offset.
|     315  // MutateUint16 updates a Uint16 at the given offset.
|     321  // MutateUint32 updates a Uint32 at the given offset.
|     327  // MutateUint64 updates a Uint64 at the given offset.
|     333  // MutateInt8 updates a Int8 at the given offset.
|     339  // MutateInt16 updates a Int16 at the given offset.
|     345  // MutateInt32 updates a Int32 at the given offset.
|     351  // MutateInt64 updates a Int64 at the given offset.
|     [all …]
|
| /external/google-cloud-java/java-asset/proto-google-cloud-asset-v1p2beta1/src/main/java/com/google/cloud/asset/v1p2beta1/ |
| D | FeedOrBuilder.java |
|     65   * A list of the full names of the assets to receive updates. You must specify
|     66   * either or both of asset_names and asset_types. Only asset updates matching
|     84   * A list of the full names of the assets to receive updates. You must specify
|     85   * either or both of asset_names and asset_types. Only asset updates matching
|     103  * A list of the full names of the assets to receive updates. You must specify
|     104  * either or both of asset_names and asset_types. Only asset updates matching
|     123  * A list of the full names of the assets to receive updates. You must specify
|     124  * either or both of asset_names and asset_types. Only asset updates matching
|     144  * A list of types of the assets to receive updates. You must specify either
|     145  * or both of asset_names and asset_types. Only asset updates matching
|     [all …]
|
| /external/mesa3d/docs/relnotes/ |
| D | 6.4 |
|     36  Glide (3dfx Voodoo1/2) requires updates
|     37  SVGA requires updates
|     38  DJGPP requires updates
|     39  GGI requires updates
|     40  BeOS requires updates
|     41  Allegro requires updates
|     42  D3D requires updates
|     44  The drivers which require updates mostly need to be updated to work
|
| /external/coreboot/Documentation/releases/ |
| D | coreboot-4.3-relnotes.md |
|     69   * intel/strago: GPIO, DDR, & SD config, FSP updates, Clock fixes
|     73   ### Continued updates for the Intel Skylake platform
|     75   * google/chell, glados, & lars: FSP & Memory updates, Add Fan & NHLT
|     77   * intel/kunimitsu: FSP & GPIO updates, Add Fan & NHLT (audio) support
|     88   * Updates to get the ADA compiler to work correctly for coreboot
|     98   * Toolchain updates: new versions of GMP & MPFR. Add ADA.
|     99   * Updates for building on NetBSD & OS X
|     105  * libpayload: updates for cbfs, XHCI and DesignWare HCD controllers
|     124  Areas with significant work on updates and fixes
|     133  * soc/intel/braswell: FSP & ACPI updates, GPIO & clock Fixes
|     [all …]
|
| /external/apache-commons-math/src/main/java/org/apache/commons/math3/ode/events/ |
| D | EventFilter.java |
|     57   /** Number of past transformers updates stored. */
|     70   private final double[] updates;
|     86   this.updates = new double[HISTORY_SIZE];  in EventFilter()
|     99   Arrays.fill(updates, extremeT);  in init()
|     124  System.arraycopy(updates, 1, updates, 0, last);  in g()
|     126  updates[last] = extremeT;  in g()
|     140  if (updates[i] <= t) {  in g()
|     163  System.arraycopy(updates, 0, updates, 1, updates.length - 1);  in g()
|     165  updates[0] = extremeT;  in g()
|     178  for (int i = 0; i < updates.length - 1; ++i) {  in g()
|     [all …]
|
| /external/grpc-grpc/test/core/security/ |
| D | grpc_tls_certificate_distributor_test.cc |
|     64   // if the status updates are correct.
|     78   // and check in each test if the status updates are correct.
|     159  // updates are correct.
|     283  // Push credential updates to kRootCert1Name and check if the status works as  in TEST_F()
|     287  // Check the updates are delivered to watcher 1.  in TEST_F()
|     290  // Push credential updates to kRootCert2Name.  in TEST_F()
|     293  // Check the updates are delivered to watcher 2.  in TEST_F()
|     296  // Push credential updates to kIdentityCert1Name and check if the status works  in TEST_F()
|     301  // Check the updates are delivered to watcher 1 and watcher 2.  in TEST_F()
|     337  // Push credential updates to kRootCert1Name and check if the status works as  in TEST_F()
|     [all …]
|