/third_party/mindspore/tests/st/ops/cpu/ |
D | test_scatter_arithmetic_op.py |
    27  def __init__(self, lock, inputx, indices, updates): argument
    32  self.updates = Parameter(updates, name="updates")
    35  out = self.scatter_add(self.inputx, self.indices, self.updates)
    39  def scatter_add_net(inputx, indices, updates): argument
    41  net = TestScatterAddNet(lock, inputx, indices, updates)
    45  def scatter_add_use_locking_false_net(inputx, indices, updates): argument
    47  net = TestScatterAddNet(lock, inputx, indices, updates)
    57  updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
    58  output = scatter_add_net(inputx, indices, updates)
    70  updates = Tensor(np.arange(12).reshape((2, 2, 3)).astype(np.float32))
    [all …]
|
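The scatter_add test above builds a TestScatterAddNet around a Parameter and accumulates a (2, 2, 3) updates tensor into it along the leading axis. As a reference for the semantics only, a NumPy equivalent is an unbuffered in-place add; the inputx and indices values below are assumed, since the listing truncates them:

    import numpy as np

    # Assumed shapes for illustration: inputx (4, 3), indices (2, 2);
    # the updates tensor matches the one shown in the test.
    inputx = np.zeros((4, 3), dtype=np.float32)
    indices = np.array([[0, 1], [2, 3]], dtype=np.int32)
    updates = np.arange(12).reshape((2, 2, 3)).astype(np.float32)

    # np.add.at scatters each updates slice into inputx[indices] in place,
    # accumulating on repeated indices, which is the behaviour ScatterAdd tests.
    np.add.at(inputx, indices, updates)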
/third_party/mindspore/tests/st/ops/gpu/ |
D | test_scatter_func_op.py |
    35  def __init__(self, func, lock, inputx, indices, updates): argument
    41  self.updates = Parameter(updates, name="updates")
    44  out = self.scatter_func(self.inputx, self.indices, self.updates)
    48  def scatter_func_net(func, inputx, indices, updates): argument
    50  net = TestScatterFuncNet(func, lock, inputx, indices, updates)
    54  def scatter_func_use_locking_false_net(func, inputx, indices, updates): argument
    56  net = TestScatterFuncNet(func, lock, inputx, indices, updates)
    61  def __init__(self, func, inputx, indices, updates): argument
    67  self.updates = Parameter(updates, name="updates")
    71  updates = self.test_dynamic(self.updates)
    [all …]
|
D | test_scatter_nd_func_op.py |
    35  def __init__(self, func, lock, inputx, indices, updates): argument
    41  self.updates = Parameter(updates, name="updates")
    44  out = self.scatter_func(self.inputx, self.indices, self.updates)
    48  def scatter_nd_func_net(func, inputx, indices, updates): argument
    50  net = TestScatterNdFuncNet(func, lock, inputx, indices, updates)
    54  def scatter_nd_func_use_locking_false_net(func, inputx, indices, updates): argument
    56  net = TestScatterNdFuncNet(func, lock, inputx, indices, updates)
    66  updates = Tensor(np.array([1.0, 2.2]), mstype.float32)
    69  output = scatter_nd_func_net("update", inputx, indices, updates)
    74  output = scatter_nd_func_net("add", inputx, indices, updates)
    [all …]
|
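The scatter_nd test above drives both the "update" and "add" functors with a two-element updates tensor; each row of indices addresses an element or slice of inputx. A minimal NumPy sketch of those two cases follows; inputx and indices here are assumed values, since only updates is visible in the listing:

    import numpy as np

    inputx = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=np.float32)
    indices = np.array([[4], [3]], dtype=np.int32)   # one coordinate row per update
    updates = np.array([1.0, 2.2], dtype=np.float32)

    def scatter_nd_func(func, x, idx, upd):
        # "update" overwrites the addressed element, "add" accumulates into it.
        out = x.copy()
        for row, u in zip(idx, upd):
            if func == "update":
                out[tuple(row)] = u
            elif func == "add":
                out[tuple(row)] += u
        return out

    out_update = scatter_nd_func("update", inputx, indices, updates)
    out_add = scatter_nd_func("add", inputx, indices, updates)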
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/ |
D | scatter_functor_impl.cu |
    22  const T *updates, T *input) { in ScatterUpdateKernel() argument
    27  input[current_pos] = updates[pos]; in ScatterUpdateKernel()
    32  …erAddKernel(const size_t inner_size, const size_t updates_size, const S *indices, const T *updates, in ScatterAddKernel() argument
    38  MsAtomicAdd(&input[current_pos], updates[pos]); in ScatterAddKernel()
    43  …erSubKernel(const size_t inner_size, const size_t updates_size, const S *indices, const T *updates, in ScatterSubKernel() argument
    49  MsAtomicAdd(&input[current_pos], -updates[pos]); in ScatterSubKernel()
    55  const S *indices, const T *updates, T *input, cudaStream_t cuda_stream) { in ScatterFunc() argument
    60  … indices, updates, input); in ScatterFunc()
    63  … indices, updates, input); in ScatterFunc()
    66  … indices, updates, input); in ScatterFunc()
    [all …]
|
D | scatter_nd_functor_impl.cu |
    22  … const S *out_strides, const S *indices, const T *updates, T *input) { in ScatterNdUpdate() argument
    41  input[write_index] = updates[read_index]; in ScatterNdUpdate()
    48  const S *out_strides, const S *indices, const T *updates, T *input) { in ScatterNdAdd() argument
    67  MsAtomicAdd(&input[write_index], updates[read_index]); in ScatterNdAdd()
    74  const S *out_strides, const S *indices, const T *updates, T *input) { in ScatterNdSub() argument
    93  MsAtomicAdd(&input[write_index], -updates[read_index]); in ScatterNdSub()
    100  … const size_t &index_depth, const S *out_strides, const S *indices, const T *updates, T *input, in CalScatterNdFunctor() argument
    106  unit_size, index_depth, updates_size, out_strides, indices, updates, input); in CalScatterNdFunctor()
    109  unit_size, index_depth, updates_size, out_strides, indices, updates, input); in CalScatterNdFunctor()
    112  unit_size, index_depth, updates_size, out_strides, indices, updates, input); in CalScatterNdFunctor()
    [all …]
|
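The CUDA kernels above appear only as fragments, but the visible pieces (inner_size, updates_size, unit_size, index_depth, out_strides, MsAtomicAdd) follow the usual flat-index scheme: each thread owns one element of updates, works out which index row it belongs to, and converts that row into a flat offset in input. A serial Python rendering of that arithmetic for the ScatterNd case, assuming the standard row-major layout (a sketch, not the kernel):

    import numpy as np

    def scatter_nd_add_reference(input_flat, indices, updates_flat, unit_size, out_strides):
        # unit_size: contiguous elements written per index row.
        # out_strides: stride of each index column, measured in whole slices.
        index_depth = indices.shape[-1]
        rows = indices.reshape(-1, index_depth)
        for read_index in range(updates_flat.size):
            i, offset = divmod(read_index, unit_size)           # which row, which element in its slice
            write_index = int(np.dot(rows[i], out_strides)) * unit_size + offset
            input_flat[write_index] += updates_flat[read_index]  # MsAtomicAdd in the real kernel
        return input_flat

    # Example: scatter two (1, 2) slices into a logical (4, 2) tensor at rows 1 and 3.
    x = np.zeros(8, dtype=np.float32)
    scatter_nd_add_reference(x, np.array([[1], [3]]), np.array([1., 2., 3., 4.]),
                             unit_size=2, out_strides=np.array([1]))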
/third_party/mindspore/tests/st/auto_monad/ |
D | test_effect_ops.py |
    86  def construct(self, indices, updates): argument
    87  self.scatter_add(self.input_x, indices, updates)
    98  updates = Tensor(np.ones([2, 2, 3]), mstype.float32)
    101  out = net(indices, updates)
    111  def construct(self, indices, updates): argument
    112  self.scatter_sub(self.input_x, indices, updates)
    123  updates = Tensor(np.array([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]]), mstype.float32)
    126  out = net(indices, updates)
    136  def construct(self, indices, updates): argument
    137  self.scatter_mul(self.input_x, indices, updates)
    [all …]
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ |
D | scatter_arithmetic_cpu_kernel.cc |
    79  auto *updates = reinterpret_cast<T *>(inputs[UPDATES_INDEX_]->addr); in Launch() local
    81  compute_func_(this, input, indices, updates); in Launch()
    91  void ScatterArithmeticCPUKernel<T>::ScatterAdd(T *input, const int *indices, const T *updates) cons… in ScatterAdd()
    96  input[base_index_input + j] += updates[base_index_updates + j]; in ScatterAdd()
    102  void ScatterArithmeticCPUKernel<T>::ScatterSub(T *input, const int *indices, const T *updates) cons… in ScatterSub()
    107  input[base_index_input + j] -= updates[base_index_updates + j]; in ScatterSub()
    113  void ScatterArithmeticCPUKernel<T>::ScatterMul(T *input, const int *indices, const T *updates) cons… in ScatterMul()
    118  input[base_index_input + j] *= updates[base_index_updates + j]; in ScatterMul()
    124  void ScatterArithmeticCPUKernel<T>::ScatterDiv(T *input, const int *indices, const T *updates) cons… in ScatterDiv()
    128  auto divisor = updates[i * inner_size_ + j]; in ScatterDiv()
    [all …]
|
D | scatter_arithmetic_cpu_kernel.h |
    41  void ScatterAdd(T *input, const int *indices, const T *updates) const;
    42  void ScatterSub(T *input, const int *indices, const T *updates) const;
    43  void ScatterMul(T *input, const int *indices, const T *updates) const;
    44  void ScatterDiv(T *input, const int *indices, const T *updates) const;
    45  void ScatterMax(T *input, const int *indices, const T *updates) const;
    46  void ScatterMin(T *input, const int *indices, const T *updates) const;
    47  void ScatterUpdate(T *input, const int *indices, const T *updates) const;
|
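The CPU kernel pairs an outer loop over indices with an inner loop over inner_size_ contiguous elements, and only the innermost operator differs between ScatterAdd, ScatterSub, ScatterMul, ScatterDiv, ScatterMax, ScatterMin and ScatterUpdate. A compact Python rendering of that shared loop structure (a sketch of the pattern, not the C++ kernel itself):

    def scatter_arithmetic(op, input_flat, indices, updates_flat, inner_size):
        # Mirrors the base_index_input / base_index_updates arithmetic in the kernel.
        ops = {
            "add": lambda a, b: a + b,
            "sub": lambda a, b: a - b,
            "mul": lambda a, b: a * b,
            "div": lambda a, b: a / b,
            "max": max,
            "min": min,
            "update": lambda a, b: b,
        }
        fn = ops[op]
        for i, idx in enumerate(indices):
            base_index_input = idx * inner_size
            base_index_updates = i * inner_size
            for j in range(inner_size):
                input_flat[base_index_input + j] = fn(input_flat[base_index_input + j],
                                                      updates_flat[base_index_updates + j])
        return input_flat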
/third_party/mesa3d/docs/relnotes/ |
D | 6.4 |
    36  Glide (3dfx Voodoo1/2) requires updates
    37  SVGA requires updates
    38  DJGPP requires updates
    39  GGI requires updates
    40  BeOS requires updates
    41  Allegro requires updates
    42  D3D requires updates
    44  The drivers which require updates mostly need to be updated to work
|
D | 6.4.2.rst |
    53  Glide (3dfx Voodoo1/2) requires updates
    54  SVGA requires updates
    55  DJGPP requires updates
    56  GGI requires updates
    57  BeOS requires updates
    58  Allegro requires updates
    59  D3D requires updates
|
D | 6.4.1.rst |
    48  Glide (3dfx Voodoo1/2) requires updates
    49  SVGA requires updates
    50  DJGPP requires updates
    51  GGI requires updates
    52  BeOS requires updates
    53  Allegro requires updates
    54  D3D requires updates
|
D | 6.4.rst |
    71  Glide (3dfx Voodoo1/2) requires updates
    72  SVGA requires updates
    73  DJGPP requires updates
    74  GGI requires updates
    75  BeOS requires updates
    76  Allegro requires updates
    77  D3D requires updates
|
/third_party/boost/boost/graph/distributed/detail/ |
D | remote_update_set.hpp |
    116  std::vector<updates_pair_type> updates(num_updates); in operator ()() local
    117  receive(self->process_group, source, msg_updates, &updates[0], in operator ()()
    123  derived->receive_update(source, updates[u].first, updates[u].second); in operator ()()
    140  updates(num_processes(pg)), owner(owner) { in remote_update_set()
    151  updates[get(owner, key)].push_back(std::make_pair(key, value)); in update()
    160  process_id_type num_processes = updates.size(); in synchronize()
    162  if (!updates[p].empty()) { in synchronize()
    163  send(process_group, p, msg_num_updates, updates[p].size()); in synchronize()
    165  &updates[p].front(), updates[p].size()); in synchronize()
    166  updates[p].clear(); in synchronize()
    [all …]
|
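remote_update_set.hpp buffers (key, value) pairs in one vector per owning process and only ships them during synchronize(), sending a count message followed by the packed pairs and then clearing the bucket. A small Python sketch of that buffer-and-flush pattern; the send callback stands in for the Boost process group and is not the Boost API:

    class RemoteUpdateSet:
        def __init__(self, num_processes, owner_of):
            # One bucket of pending (key, value) pairs per destination rank.
            self.updates = [[] for _ in range(num_processes)]
            self.owner_of = owner_of

        def update(self, key, value):
            # Buffer locally; nothing is communicated until synchronize().
            self.updates[self.owner_of(key)].append((key, value))

        def synchronize(self, send):
            # send(dest, tag, payload) is an assumed stand-in for the messaging layer.
            for p, bucket in enumerate(self.updates):
                if bucket:
                    send(p, "msg_num_updates", len(bucket))
                    send(p, "msg_updates", list(bucket))
                    bucket.clear()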
/third_party/mindspore/mindspore/ccsrc/transform/graph_ir/op_declare/ |
D | matrix_calculation_ops_declare.cc |
    21  …AP(TensorScatterUpdate) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
    27  INPUT_MAP(ScatterUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)…
    33  …_MAP(ScatterNdUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
    39  INPUT_MAP(ScatterMax) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
    45  INPUT_MAP(ScatterMin) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
    51  INPUT_MAP(ScatterAdd) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
    57  INPUT_MAP(ScatterSub) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
    63  INPUT_MAP(ScatterMul) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
    69  INPUT_MAP(ScatterDiv) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}};
    75  INPUT_MAP(ScatterNdAdd) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}…
    [all …]
|
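Every operator in the declaration table above wires the same three positional inputs, with TensorScatterUpdate taking x instead of var. As a plain-data restatement of that convention (illustrative only, not the actual INPUT_MAP/ADPT machinery):

    # Positional input layout shared by the scatter declarations listed above.
    SCATTER_INPUT_MAP = {
        op: {1: "var", 2: "indices", 3: "updates"}
        for op in ("ScatterUpdate", "ScatterNdUpdate", "ScatterMax", "ScatterMin",
                   "ScatterAdd", "ScatterSub", "ScatterMul", "ScatterDiv", "ScatterNdAdd")
    }
    SCATTER_INPUT_MAP["TensorScatterUpdate"] = {1: "x", 2: "indices", 3: "updates"}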
/third_party/mindspore/mindspore/ops/_grad_experimental/ |
D | grad_array_ops.py |
    62  def tensor_scatter_possible_replacement(x, indices, updates, out, dout): argument
    71  out_indicators = F.cast(equal(updates, possibly_updated), mstype.int32)
    77  return F.cast(dx, F.dtype(x)), zeros_like(indices), F.cast(dupdates, F.dtype(updates))
    83  def bprop(x, indices, updates, out, dout): argument
    84  return tensor_scatter_possible_replacement(x, indices, updates, out, dout)
    92  def bprop(x, indices, updates, out, dout): argument
    93  return tensor_scatter_possible_replacement(x, indices, updates, out, dout)
|
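The shared bprop above splits the incoming gradient between x and updates by checking which of the two values actually survived into the output (the equal(updates, possibly_updated) indicator). A loose NumPy illustration of that routing for a max/min-style tensor scatter, a sketch of the principle rather than the MindSpore gradient formula (tie handling is ignored):

    import numpy as np

    def tensor_scatter_grad_sketch(x, indices, updates, out, dout):
        # Gradient w.r.t. x flows wherever the output kept x's original value.
        dx = dout * (out == x)
        # Gradient w.r.t. updates flows wherever the scattered value is the one
        # that ended up at its target position (gather_nd of out at indices).
        possibly_updated = out[tuple(indices.T)]
        dupdates = dout[tuple(indices.T)] * (updates == possibly_updated)
        # indices are integer coordinates, so their gradient is simply zero.
        return dx, np.zeros_like(indices), dupdates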
/third_party/mindspore/tests/ut/cpp/python_input/gtest_input/pre_activate/ |
D | tensor_scatter_update_fission_test.py |
    41  def before(x, indices, updates): argument
    42  res = tensor_scatter_update(x, indices, updates)
    46  def after(x, indices, updates): argument
    48  res = scatter_nd_update(res, indices, updates)
|
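The fission test above rewrites the functional tensor_scatter_update into a scatter_nd_update applied to an intermediate result. In NumPy terms, the functional op amounts to updating a copy in place (illustrative only, with the copy step assumed):

    import numpy as np

    def tensor_scatter_update_reference(x, indices, updates):
        res = x.copy()                    # intermediate result the pass scatters into
        res[tuple(indices.T)] = updates   # in-place nd update on that copy
        return res                        # x itself stays untouched, as in the "before" form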
/third_party/flutter/flutter/packages/flutter/test/widgets/ |
D | syncing_test.dart |
    22  int updates = 0;
    37  updates += 1;
    61  expect(state.updates, equals(0));
    75  expect(state.updates, equals(1));
    95  expect(state.updates, equals(0));
    109  expect(state.updates, equals(0));
|
/third_party/icu/ohos_icu4j/src/main/tests/ohos/global/icu/dev/test/util/ |
D | ICUServiceTestSample.java |
    88  String[][] updates = { field in ICUServiceTestSample.HelloUpdateThread
    99  for (int i = 0; i < updates.length; ++i) { in run()
    105  HelloService.register(updates[i][0], new ULocale(updates[i][1])); in run()
|
/third_party/icu/icu4j/main/tests/core/src/com/ibm/icu/dev/test/util/ |
D | ICUServiceTestSample.java |
    85  String[][] updates = { field in ICUServiceTestSample.HelloUpdateThread
    96  for (int i = 0; i < updates.length; ++i) { in run()
    102  HelloService.register(updates[i][0], new ULocale(updates[i][1])); in run()
|
/third_party/cef/tests/ceftests/ |
D | osr_accessibility_unittest.cc |
    270  CefRefPtr<CefListValue> updates = GetUpdateList(value); in GetUpdateListSize() local
    271  if (updates) in GetUpdateListSize()
    272  return updates->GetSize(); in GetUpdateListSize()
    278  CefRefPtr<CefListValue> updates = GetUpdateList(value); in GetUpdateValue() local
    279  if (!updates) in GetUpdateValue()
    281  EXPECT_LT(index, updates->GetSize()); in GetUpdateValue()
    282  CefRefPtr<CefDictionaryValue> update = updates->GetDictionary(index); in GetUpdateValue()
|
/third_party/skia/third_party/externals/angle2/src/libANGLE/renderer/vulkan/doc/ |
D | DeferredClears.md |
    30  In scenario 1, the staged updates in the `vk::ImageHelper` are flushed. That includes the `Clear`
    31  updates which will be done with an out-of-render-pass `vkCmdClear*Image` call.
    33  In scenario 2, `FramebufferVk::syncState` is responsible for extracting the staged `Clear` updates,
    34  assuming there are no subsequent updates to that subresource of the image, and keep them as
|
/third_party/flutter/flutter/packages/flutter_tools/lib/src/build_system/ |
D | filecache.pb.dart |
    33  …FileHash copyWith(void Function(FileHash) updates) => super.copyWith(($core.dynamic message) => up…
    73  …leStorage copyWith(void Function(FileStorage) updates) => super.copyWith(($core.dynamic message) =…
|
/third_party/node/deps/npm/node_modules/mississippi/ |
D | changelog.md |
    6  …ning. (Use the individual modules to avoid potentially unnecessary major updates in your project).
    9  * Update to pump@2.0.1. (Use the individual modules to avoid potentially unnecessary major updates…
|
/third_party/skia/third_party/externals/angle2/scripts/ |
D | generate_stats.py |
    471  def batch_update(service, spreadsheet_id, updates): argument
    473  'requests': updates,
    483  updates = [{'addSheet': {'properties': {'title': sheet_name,}}} for sheet_name in sheet_names]
    484  batch_update(service, spreadsheet_id, updates)
    529  updates = []
    537  updates.append({
    558  if updates:
    560  batch_update(service, spreadsheet_id, updates)
|
/third_party/flutter/skia/third_party/externals/angle2/scripts/ |
D | generate_stats.py |
    467  def batch_update(service, spreadsheet_id, updates): argument
    469  'requests': updates,
    479  updates = [{'addSheet': {'properties': {'title': sheet_name,}}} for sheet_name in sheet_names]
    480  batch_update(service, spreadsheet_id, updates)
    525  updates = []
    533  updates.append({
    554  if updates:
    556  batch_update(service, spreadsheet_id, updates)
|
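Both copies of generate_stats.py queue request dictionaries (for example addSheet requests) and submit them in one spreadsheets().batchUpdate call. A trimmed sketch of that flow with the Google Sheets API client; the service object, spreadsheet id and sheet names are assumed to already exist:

    def batch_update(service, spreadsheet_id, updates):
        # One round trip for every queued request.
        body = {'requests': updates}
        service.spreadsheets().batchUpdate(spreadsheetId=spreadsheet_id, body=body).execute()

    def create_sheets(service, spreadsheet_id, sheet_names):
        updates = [{'addSheet': {'properties': {'title': name}}} for name in sheet_names]
        if updates:
            batch_update(service, spreadsheet_id, updates)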