| /third_party/python/Doc/library/ |
| D | copy.rst |
|   1  :mod:`copy` --- Shallow and deep copy operations
|   5  :synopsis: Shallow and deep copy operations.
|   9  --------------
|   13  mutable items, a copy is sometimes needed so one can change one copy without
|   14  changing the other. This module provides generic shallow and deep copy
|   27  Return a deep copy of *x*.
|   36  The difference between shallow and deep copying is only relevant for compound
|   42  * A *deep copy* constructs a new compound object and then, recursively, inserts
|   45  Two problems often exist with deep copy operations that don't exist with shallow
|   51  * Because deep copy copies everything it may copy too much, such as data
|   [all …]
|
| /third_party/python/Lib/ |
| D | copy.py |
|   1  """Generic (shallow and deep) copying operations.
|   8  x = copy.deepcopy(y) # make a deep copy of y
|   10  For module specific errors, copy.Error is raised.
|   12  The difference between shallow and deep copying is only relevant for
|   16  - A shallow copy constructs a new compound object and then (to the
|   20  - A deep copy constructs a new compound object and then, recursively,
|   23  Two problems often exist with deep copy operations that don't exist
|   29  b) because deep copy copies *everything* it may copy too much, e.g.
|   33  Python's deep copy operation avoids these problems by:
|   38  b) letting user-defined classes override the copying operation or the
|   [all …]
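Both copy.rst and copy.py above document the same distinction, so a single minimal illustration covers them: a shallow copy shares nested objects with the original, while a deep copy recreates them recursively (standard-library calls only, nothing assumed beyond the documented API).

    import copy

    original = [[1, 2], [3, 4]]
    shallow = copy.copy(original)    # new outer list, inner lists still shared
    deep = copy.deepcopy(original)   # new outer list and new inner lists

    original[0].append(99)
    print(shallow[0])  # [1, 2, 99] -- mutation is visible through the shallow copy
    print(deep[0])     # [1, 2]     -- the deep copy is fully independent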
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/ |
| D | adder_fp32.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   23  …const float *a, const float *b, float *dst, const float *bias, ActType act_type, int deep, int row, in Adder12x4() argument
|   31  for (int d = 0; d < deep; d++) { in Adder12x4()
|   32  size_t ai = r12div * deep * 12 + d * 12 + r12mod; in Adder12x4()
|   33  size_t bi = c4div * deep * 4 + d * 4 + c4mod; in Adder12x4()
|   34  value += fabsf(a[ai] - b[bi]); in Adder12x4()
|   36  value = -value; in Adder12x4()
|   45  …loat *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row, int col, in AdderOpt() argument
|   48  AdderFloatNeon64(a, b, c, bias, (int)act_type, deep, row, col, stride); in AdderOpt()
|   [all …]
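The matched lines show the core of the adder kernel: instead of a multiply-accumulate, each output element accumulates absolute differences between input and weight along the `deep` dimension and negates the sum. A rough, unpacked Python reference of that computation (the 12x4 tiling, bias handling, and activation flag below are simplifications for illustration, not the real NNACL interface):

    def adder_ref(a, b, bias=None, relu=False):
        """a: row x deep, b: deep x col; each output is -sum(|a - b|) over deep."""
        rows, deep, cols = len(a), len(a[0]), len(b[0])
        out = [[0.0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                value = -sum(abs(a[r][d] - b[d][c]) for d in range(deep))
                if bias is not None:
                    value += bias[c]
                out[r][c] = max(value, 0.0) if relu else value
        return out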
|
| D | conv_im2col_avx512_fp32.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   26  if (conv_param->thread_num_ == 0) { in ConvIm2ColAVX512Fp32()
|   29  int output_hw = conv_param->output_h_ * conv_param->output_w_; in ConvIm2ColAVX512Fp32()
|   30  int out_channel_align = UP_ROUND(conv_param->output_channel_, C16NUM); in ConvIm2ColAVX512Fp32()
|   32  int block_per_thread = UP_DIV(UP_DIV(output_hw, cal_num), conv_param->thread_num_); in ConvIm2ColAVX512Fp32()
|   40  int deep = conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_; in ConvIm2ColAVX512Fp32() local
|   41  packed_input += task_id * deep * cal_num; in ConvIm2ColAVX512Fp32()
|   42  size_t input_size = deep * cal_num * sizeof(float); in ConvIm2ColAVX512Fp32()
|   44  for (int b = 0; b < conv_param->input_batch_; b++) { in ConvIm2ColAVX512Fp32()
|   [all …]
|
| D | matmul_avx_fp32.h |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   32  size_t row_block, size_t col_block, size_t col_algin, size_t deep);
|   38  size_t row_block, size_t col_block, size_t col_algin, size_t deep);
|   40  size_t row_block, size_t col_block, size_t col_algin, size_t deep);
|   42  size_t row_block, size_t col_block, size_t col_algin, size_t deep);
|   44  size_t row_block, size_t col_block, size_t col_algin, size_t deep);
|   46  size_t row_block, size_t col_block, size_t col_algin, size_t deep);
|   48  size_t row_block, size_t col_block, size_t col_algin, size_t deep);
|   50  size_t row_block, size_t col_block, size_t col_algin, size_t deep);
|   [all …]
|
| D | lstm_fp32.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   25  static void PackLstmMatrix(const float *src_batch, float *dst_batch, int col, int deep) { in PackLstmMatrix() argument
|   27  RowMajor2Col16Major(src_batch, dst_batch, col, deep); in PackLstmMatrix()
|   29  RowMajor2Col4Major(src_batch, dst_batch, col, deep); in PackLstmMatrix()
|   31  RowMajor2Col8Major(src_batch, dst_batch, col, deep); in PackLstmMatrix()
|   35  static void PackLstmWeightBatch(float *dst, const float *src, int batch, int deep, int col, int col… in PackLstmWeightBatch() argument
|   38  const float *src_batch = src + i * col * deep; in PackLstmWeightBatch()
|   39  float *dst_batch = dst + ((order == NULL) ? i : order[i]) * col_align * deep; in PackLstmWeightBatch()
|   40  PackLstmMatrix(src_batch, dst_batch, col, deep); in PackLstmWeightBatch()
|   [all …]
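PackLstmWeightBatch walks `batch` gate matrices of shape col x deep, optionally permutes them through `order`, and packs each one into a slot padded to col_align columns before an architecture-specific column-major retiling. A plain-Python sketch of just the slot arithmetic (the real RowMajor2Col*Major calls also change the tile layout; the copy here stays row-major for clarity, and the function name is illustrative):

    def pack_lstm_weights(src, batch, deep, col, col_align, order=None):
        dst = [0.0] * (batch * col_align * deep)   # zero fill covers the alignment padding
        for i in range(batch):
            src_off = i * col * deep                                   # i-th gate matrix
            dst_off = (i if order is None else order[i]) * col_align * deep
            for c in range(col):
                for d in range(deep):
                    dst[dst_off + c * deep + d] = src[src_off + c * deep + d]
        return dst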
|
| D | conv_common_fp32.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   30  int kernel_h = conv_param->kernel_h_; in Im2ColPackUnitFp32()
|   31  int kernel_w = conv_param->kernel_w_; in Im2ColPackUnitFp32()
|   33  int dilation_h = conv_param->dilation_h_; in Im2ColPackUnitFp32()
|   34  int dilation_w = conv_param->dilation_w_; in Im2ColPackUnitFp32()
|   35  int out_w = conv_param->output_w_; in Im2ColPackUnitFp32()
|   39  int in_channel = conv_param->input_channel_; in Im2ColPackUnitFp32()
|   40  int in_w = conv_param->input_w_; in Im2ColPackUnitFp32()
|   43  int input_h = block_start / out_w * conv_param->stride_h_ - conv_param->pad_u_; in Im2ColPackUnitFp32()
|   [all …]
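The last matched line is the usual im2col bookkeeping: a flat output position is split into an (out_y, out_x) pair, and stride and padding map it to the top-left corner of its input window. A small sketch of only that index arithmetic (function and parameter names are illustrative, not the NNACL API; the fp16 variant further down uses pad_l_ for the horizontal side):

    def window_origin(block_start, out_w, stride_h, stride_w, pad_u, pad_l):
        out_y, out_x = divmod(block_start, out_w)   # flat output index -> row, col
        input_h = out_y * stride_h - pad_u          # negative while inside the top padding
        input_w = out_x * stride_w - pad_l
        return input_h, input_w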
|
| D | matmul_fp32.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   78  int res = col - col8; in MatVecMulFp32Block8()
|   129  int res = col - col4; in MatVecMulFp32Block4()
|   150  for (; ci < align_col - C8NUM + 1; ci += C8NUM) { in MatVecMulFp32Neon64()
|   162  for (; di < depth - C4NUM + 1; di += C4NUM) { in MatVecMulFp32Neon64()
|   195  if (ci + C4NUM - 1 >= col) { in MatVecMulFp32Neon64()
|   196  int c_remain = col - ci; in MatVecMulFp32Neon64()
|   214  if (ci + C8NUM - 1 >= col) { in MatVecMulFp32Neon64()
|   215  int c_remain = col - ci - C4NUM; in MatVecMulFp32Neon64()
|   [all …]
|
| D | attention_fp32.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   32  matrix->batch_ = batch; in InitMatrix()
|   33  matrix->row_ = row; in InitMatrix()
|   34  matrix->col_ = col; in InitMatrix()
|   35  matrix->is_transpose_ = is_trans; in InitMatrix()
|   36  matrix->data_ = NULL; in InitMatrix()
|   37  matrix->packed_data_ = NULL; in InitMatrix()
|   45  int real_row = matrix->is_transpose_ ? matrix->col_ : matrix->row_; in LeftMatrixPackElementSize()
|   46  int deep = matrix->is_transpose_ ? matrix->row_ : matrix->col_; in LeftMatrixPackElementSize() local
|   [all …]
|
| D | lstm_fp32.h |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   24  void PackLstmWeight(float *dst, const float *src, int batch, int deep, int col, int col_align, cons…
|   26  void PackLstmWeightWithStride(float *dst, const float *src, int batch, int deep, int col, int col_a…
|   35  void PackLstmInput(const float *src, float *dst, int row, int deep);
|   37  void LstmMatMul(float *c, const float *a, const float *b, const float *bias, int row, int deep, int…
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/tools/converter/micro/coder/wrapper/int8/ |
| D | matmul_int8_wrapper.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   19  …(int8_t *src_ptr, int32_t *input_sums, int8_t *dst_ptr, int batch, int row, int deep, int input_zp, in InitInt8MatrixA() argument
|   22  int8_t *cur_a_ptr = src_ptr + i * row * deep; in InitInt8MatrixA()
|   24  RowMajor2Col16x4MajorInt8(cur_a_ptr, dst_ptr, deep, row); in InitInt8MatrixA()
|   25  CalcInputSums(cur_a_ptr, row, deep, *weight_zp, input_sums, ColMajor); in InitInt8MatrixA()
|   27  RowMajor2Row16x4MajorInt8(cur_a_ptr, dst_ptr, row, deep); in InitInt8MatrixA()
|   28  CalcInputSums(cur_a_ptr, row, deep, *weight_zp, input_sums, RowMajor); in InitInt8MatrixA()
|   33  …MatrixB(int8_t *weight_ptr, int32_t *weight_bias_sums_batch_, int8_t *dst_ptr, int batch, int deep, in InitInt8MatrixB() argument
|   37  int8_t *cur_b = weight_ptr + i * deep * col; in InitInt8MatrixB()
|   [all …]
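InitInt8MatrixA packs matrix A and has CalcInputSums precompute per-row sums scaled by the weight zero point. The motivation is the standard zero-point expansion of an int8 dot product; a hedged Python sketch of that identity (assuming input_sums holds weight_zp * sum(row), which is the common convention rather than something these lines prove):

    def int8_dot(a_row, b_col, input_zp, weight_zp):
        deep = len(a_row)
        raw = sum(int(x) * int(y) for x, y in zip(a_row, b_col))
        # sum((a - input_zp) * (b - weight_zp)) expands to four terms:
        return (raw
                - weight_zp * sum(a_row)         # per-row "input sums", computed once per row of A
                - input_zp * sum(b_col)          # folded into the weight/bias sums of B
                + deep * input_zp * weight_zp)   # constant correction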
|
| /third_party/openhitls/crypto/bn/src/ |
| D | bn_optimizer.c |
|   2  * This file is part of the openHiTLS project.
|   4  * openHiTLS is licensed under the Mulan PSL v2.
|   10  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|   11  * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|   33  opt->curChunk = BSL_SAL_Calloc(1u, sizeof(Chunk)); in BN_OptimizerCreate()
|   34  if (opt->curChunk == NULL) { in BN_OptimizerCreate()
|   43  opt->libCtx = libCtx; in BN_OptimizerSetLibCtx()
|   48  return opt->libCtx; in BN_OptimizerGetLibCtx()
|   56  Chunk *curChunk = opt->curChunk; in BN_OptimizerDestroy()
|   57  Chunk *nextChunk = curChunk->next; in BN_OptimizerDestroy()
|   [all …]
|
| /third_party/PyYAML/lib/yaml/ |
| D | constructor.py |
|   36  object, to prevent user-controlled methods from being called during
|   50  if node is not None:
|   67  def construct_object(self, node, deep=False): argument
|   70  if deep:
|   83  if tag_prefix is not None and node.tag.startswith(tag_prefix):
|   99  if tag_suffix is None:
|   113  if deep:
|   124  def construct_sequence(self, node, deep=False): argument
|   129  return [self.construct_object(child, deep=deep)
|   132  def construct_mapping(self, node, deep=False): argument
|   [all …]
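The `deep` flag controls whether construct_object materializes child nodes immediately instead of filling them in lazily afterwards; custom constructors typically pass deep=True when they need the nested values right away. A short example against the public PyYAML API (the !point tag and Point class are made up for illustration; the Loader= keyword assumes PyYAML 5.1 or later):

    import yaml

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def construct_point(loader, node):
        # deep=True forces nested sequences/mappings to be fully constructed
        # before this constructor returns, rather than populated later.
        data = loader.construct_mapping(node, deep=True)
        return Point(**data)

    yaml.add_constructor('!point', construct_point, Loader=yaml.SafeLoader)
    print(yaml.safe_load('!point {x: 1, y: 2}').x)   # 1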
|
| /third_party/mindspore/mindspore-src/source/tests/ut/data/dataset/test_sentencepiece/ |
| D | vocab.txt |
|   2  MindSpore is a deep learning framework in all scenarios, aiming to achieve easy development, effici…
|   3  all-scenario coverage. Easy development features include API friendliness and low debugging difficu…
|   5  All-scenario coverage means that the framework supports cloud, edge, and device scenarios.
|   6  ME (MindExpression) provides user-level APIs for scientific computing, building and training neural…
|   12  the device, edge, and cloud, and promotes the prosperity of domains such as deep learning and scien…
|   15  Currently, there are two execution modes of a mainstream deep learning framework: a static graph mo…
|   16  graph mode. The static graph mode has a relatively high training performance, but is difficult to d…
|   17  contrary, the dynamic graph mode is easy to debug, but is difficult to execute efficiently. MindSpo…
|   23  A neural network model is usually trained based on gradient descent algorithm, but the manual deriv…
|   24  complex and the result is prone to errors. The automatic differentiation mechanism of MindSpore bas…
|   [all …]
|
| /third_party/rust/rust/src/tools/clippy/tests/ui/ |
| D | unnecessary_lazy_eval.fixed |
|   1  //@run-rustfix
|   2  //@aux-build: proc_macros.rs:proc-macro
|   15  struct Deep(Option<usize>);
|   23  fn return_some_field(&self) -> usize {
|   28  fn some_call<T: Default>() -> T {
|   51  fn deref(&self) -> &Self::Target {
|   68  // Should lint - Option
|   81  // Should lint - Builtin deref
|   86  // Should lint - Builtin deref through autoderef
|   90  // Cases when unwrap is not called on a simple variable
|   [all …]
|
| D | unnecessary_lazy_eval.rs |
|   1  //@run-rustfix
|   2  //@aux-build: proc_macros.rs:proc-macro
|   15  struct Deep(Option<usize>); struct
|   23  fn return_some_field(&self) -> usize { in return_some_field()
|   28  fn some_call<T: Default>() -> T { in some_call()
|   51  fn deref(&self) -> &Self::Target { in deref()
|   68  // Should lint - Option in main()
|   81  // Should lint - Builtin deref in main()
|   86  // Should lint - Builtin deref through autoderef in main()
|   90  // Cases when unwrap is not called on a simple variable in main()
|   [all …]
|
| /third_party/libwebsockets/lib/plat/freertos/esp32/ |
| D | esp_attr.h |
|   1  // Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|   7  // http://www.apache.org/licenses/LICENSE-2.0
|   10  // distributed under the License is distributed on an "AS IS" BASIS,
|   32  // Forces data to be placed to DMA-capable places
|   39  // Forces code into RTC fast memory. See "docs/deep-sleep-stub.rst"
|   42  // Forces data into RTC slow memory. See "docs/deep-sleep-stub.rst"
|   44  // during a deep sleep / wake cycle.
|   47  // Forces read-only data into RTC slow memory. See "docs/deep-sleep-stub.rst"
|   55  // after restart or during a deep sleep / wake cycle.
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp16/ |
| D | conv_fp16.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   25  int kernel_h = conv_param->kernel_h_; in Im2ColPackUnitFp16()
|   26  int kernel_w = conv_param->kernel_w_; in Im2ColPackUnitFp16()
|   28  int stride_h = conv_param->stride_h_; in Im2ColPackUnitFp16()
|   29  int stride_w = conv_param->stride_w_; in Im2ColPackUnitFp16()
|   30  int pad_h = conv_param->pad_u_; in Im2ColPackUnitFp16()
|   31  int pad_w = conv_param->pad_l_; in Im2ColPackUnitFp16()
|   32  int dilation_h = conv_param->dilation_h_; in Im2ColPackUnitFp16()
|   33  int dilation_w = conv_param->dilation_w_; in Im2ColPackUnitFp16()
|   [all …]
|
| D | lstm_fp16.c |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   26  void PackLstmWeightFp32ToFp16(float16_t *dst, const float *src, int batch, int deep, int col, int c… in PackLstmWeightFp32ToFp16() argument
|   29  const float *src_batch = src + i * col * deep; in PackLstmWeightFp32ToFp16()
|   30  float16_t *dst_batch = dst + (order == NULL ? i : order[i]) * col_align * deep; in PackLstmWeightFp32ToFp16()
|   32  RowMajor2ColNMajorFp16(src_batch, dst_batch, col, deep, true); in PackLstmWeightFp32ToFp16()
|   34  RowMajor2Col8MajorFp16(src_batch, dst_batch, col, deep, true); in PackLstmWeightFp32ToFp16()
|   39  void PackLstmWeightFp16(float16_t *dst, const float16_t *src, int batch, int deep, int col, int col… in PackLstmWeightFp16() argument
|   42  const float16_t *src_batch = src + i * col * deep; in PackLstmWeightFp16()
|   43  float16_t *dst_batch = dst + (order == NULL ? i : order[i]) * col_align * deep; in PackLstmWeightFp16()
|   [all …]
|
| /third_party/jsframework/runtime/main/reactivity/ |
| D | watcher.js |
|   1  /* eslint-disable */
|   14  * This is used for both the $watch() api and directives.
|   19  * - {Array} filters
|   20  * - {Boolean} twoWay
|   21  * - {Boolean} deep
|   22  * - {Boolean} user
|   23  * - {Boolean} sync
|   24  * - {Boolean} lazy
|   25  * - {Function} [preProcess]
|   26  * - {Function} [postProcess]
|   [all …]
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/tools/converter/micro/coder/opcoders/nnacl/fp16/ |
| D | matmul_dynamic_fp16_base_coder.cc |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   34  MS_CHECK_TRUE_MSG(ret == RET_OK, RET_ERROR, "init A-metrics' info failed"); in Prepare()
|   36  MS_CHECK_TRUE_MSG(ret == RET_OK, RET_ERROR, "init B-metrics' info failed"); in Prepare()
|   50  auto input_a_str = dynamic_mem_manager_->GetVarTensorAddr(input_tensor_); in DoCode()
|   52  auto output_str = dynamic_mem_manager_->GetVarTensorAddr(output_tensor_); in DoCode()
|   92  context->AppendCode(code.str()); in DoCode()
|   102  b_pack_ptr_ = allocator_->GetSharedWeightAddr(filter_tensor_); in InitMatrixB()
|   104  b_pack_ptr_ = allocator_->Malloc(data_type_, b_pack_ptr_size, kOnlinePackWeight, in InitMatrixB()
|   105  filter_tensor_->tensor_name() + "_online_pack"); in InitMatrixB()
|   [all …]
|
| /third_party/grpc/third_party/upb/upb/message/ |
| D | copy.h |
|   1  // Protocol Buffers - Google's data interchange format
|   4  // Use of this source code is governed by a BSD-style
|   6  // https://developers.google.com/open-source/licenses/bsd
|   24  // Deep clones a message using the provided target arena.
|   32  // Deep clones array contents.
|   36  // Deep clones map contents.
|   42  // Deep copies the message from src to dst.
|
| /third_party/protobuf/upb/message/ |
| D | copy.h |
|   1  // Protocol Buffers - Google's data interchange format
|   4  // Use of this source code is governed by a BSD-style
|   6  // https://developers.google.com/open-source/licenses/bsd
|   24  // Deep clones a message using the provided target arena.
|   32  // Deep clones array contents.
|   36  // Deep clones map contents.
|   42  // Deep copies the message from src to dst.
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/tools/converter/quantizer/ |
| D | gptq_quantizer.cc |
|   8  * http://www.apache.org/licenses/LICENSE-2.0
|   11  * distributed under the License is distributed on an "AS IS" BASIS,
|   34  model_->buf = nullptr; in ~GptqQuantizer()
|   43  for (auto &cnode : func_graph->GetOrderedCnodes()) { in FilterWeightNode()
|   48  MS_LOG(INFO) << cnode->fullname_with_scope() << " start gptq quantization."; in FilterWeightNode()
|   49  for (size_t i = 1; i < cnode->size(); i++) { in FilterWeightNode()
|   50  auto input_node = cnode->input(i); in FilterWeightNode()
|   52  …if (!input_node->isa<mindspore::Parameter>() || !input_node->cast<ParameterPtr>()->has_default()) { in FilterWeightNode()
|   55  auto weight_tensor_name = input_node->fullname_with_scope(); in FilterWeightNode()
|   58  weight_info->input_index = i - kPrimOffset; in FilterWeightNode()
|   [all …]
|
| /third_party/vk-gl-cts/framework/platform/ohos/ |
| D | app_main.cpp |
|   7  * http://www.apache.org/licenses/LICENSE-2.0
|   10  * distributed under the License is distributed on an "AS IS" BASIS,
|   45  return new es2cts::TestPackage(testCtx, "KHR-GLES2"); in createES2Package()
|   50  return new es32cts::ES32TestPackage(testCtx, "KHR-GLES32"); in createES32Package()
|   54  return new es3cts::ES30TestPackage(testCtx, "KHR-GLES3"); in createES30Package()
|   58  return new es31cts::ES31TestPackage(testCtx, "KHR-GLES31"); in createES31Package()
|   80  // registry->registerPackage("CTS-Configs", createConfigPackage); in RegistPackage()
|   84  KHR-GLES2 in RegistPackage()
|   85  KHR-GLES3 in RegistPackage()
|   86  KHR-GLES31 in RegistPackage()
|   [all …]
|