/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/ |
D | batchtospace_impl.cu |
    25   const size_t crop_lft, const size_t crop_rht, const size_t block_num,  in BatchToSpace() argument
    52   …idx_in = (((idx_oh + crop_up) % block_num) * block_num + ((idx_ow + crop_lft) % block_num)) * on +…  in BatchToSpace()
    55   input_pos = (input_pos + ((idx_oh + crop_up) - (idx_in / (on * block_num))) / block_num) * iw;  in BatchToSpace()
    56   input_pos = (input_pos + ((idx_ow + crop_lft) - ((idx_in / on) % block_num)) / block_num);  in BatchToSpace()
    67   const size_t crop_lft, const size_t crop_rht, const size_t block_num,  in CalBatchToSpace() argument
    70   …size, input, in, ih, iw, ic, on, oh, ow, oc, crop_up, crop_dn, crop_lft, crop_rht, block_num, outp…  in CalBatchToSpace()
    78   … const size_t crop_lft, const size_t crop_rht, const size_t block_num,
    84   … const size_t crop_lft, const size_t crop_rht, const size_t block_num,
    90   … const size_t crop_lft, const size_t crop_rht, const size_t block_num,
    96   … const size_t crop_lft, const size_t crop_rht, const size_t block_num,
    [all …]
|
D | spacetobatch_impl.cu |
    25   const size_t pad_lft, const size_t pad_rht, const size_t block_num,  in SpaceToBatch() argument
    52   …idx_on = (((idx_ih + pad_up) % block_num) * block_num + ((idx_iw + pad_lft) % block_num)) * in + i…  in SpaceToBatch()
    55   output_pos = (output_pos + ((idx_ih + pad_up) - (idx_on / (in * block_num))) / block_num) * ow;  in SpaceToBatch()
    56   output_pos = (output_pos + ((idx_iw + pad_lft) - ((idx_on / in) % block_num)) / block_num);  in SpaceToBatch()
    67   const size_t pad_lft, const size_t pad_rht, const size_t block_num,  in CalSpaceToBatch() argument
    71   … size, input, in, ih, iw, ic, on, oh, ow, oc, pad_up, pad_dn, pad_lft, pad_rht, block_num, output);  in CalSpaceToBatch()
    79   const size_t pad_lft, const size_t pad_rht, const size_t block_num,
    85   const size_t pad_lft, const size_t pad_rht, const size_t block_num,
    91   const size_t pad_lft, const size_t pad_rht, const size_t block_num,
    97   const size_t pad_lft, const size_t pad_rht, const size_t block_num,
    [all …]
|
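The two entries above implement the NCHW SpaceToBatch/BatchToSpace index mapping: the offset of a spatial position inside its block picks the output batch slot, and division by the block size gives the new row and column. The sketch below is a plain CPU reference of that mapping, not the MindSpore kernel; the tensor layout, parameter names, and the zero-fill of padded cells are assumptions read off the truncated lines.

    // CPU reference sketch of NCHW SpaceToBatch, assuming (ih + pad_up + pad_dn)
    // and (iw + pad_lft + pad_rht) are exact multiples of block_num.
    #include <cstddef>
    #include <vector>

    std::vector<float> SpaceToBatchRef(const std::vector<float> &input,
                                       size_t in_n, size_t ic, size_t ih, size_t iw,
                                       size_t block_num, size_t pad_up, size_t pad_dn,
                                       size_t pad_lft, size_t pad_rht) {
      const size_t oh = (ih + pad_up + pad_dn) / block_num;
      const size_t ow = (iw + pad_lft + pad_rht) / block_num;
      const size_t on = in_n * block_num * block_num;
      std::vector<float> output(on * ic * oh * ow, 0.0f);  // padded positions stay zero
      for (size_t n = 0; n < in_n; ++n)
        for (size_t c = 0; c < ic; ++c)
          for (size_t h = 0; h < ih; ++h)
            for (size_t w = 0; w < iw; ++w) {
              const size_t ph = h + pad_up, pw = w + pad_lft;
              // Offset inside the block selects the output batch slot (cf. line 52 above).
              const size_t idx_on = ((ph % block_num) * block_num + pw % block_num) * in_n + n;
              // Division by the block size gives the output row/column (cf. lines 55-56).
              const size_t idx_oh = ph / block_num, idx_ow = pw / block_num;
              output[((idx_on * ic + c) * oh + idx_oh) * ow + idx_ow] =
                  input[((n * ic + c) * ih + h) * iw + w];
            }
      return output;
    }
    // BatchToSpace with crops is the inverse walk: output[n, c, oh, ow] is read from
    // input[idx_on, c, (oh + crop_up) / block_num, (ow + crop_lft) / block_num].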
D | pad_impl.cu |
    28   int block_num = pos / padded_width / padded_height;  in Pad() local
    35   …output[pos] = input[(block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_lef…  in Pad()
    47   int block_num = pos / channels / padded_width / padded_height;  in PadNHWC() local
    54   …output[pos] = input[((block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_le…  in PadNHWC()
    82   int block_num = pos / channels / old_width / old_height;  in PadGradNHWC() local
    85   …dx[pos] = dy[((block_num * padded_height + padded_h) * padded_width + padded_w)*channels+pos%chann…  in PadGradNHWC()
    94   int block_num = pos / old_width / old_height;  in PadGrad() local
    97   dx[pos] = dy[(block_num * padded_height + padded_h) * padded_width + padded_w];  in PadGrad()
    113  const int block_num = pos / padded_dhw;  in Pad3d() local
    119  …int index = block_num * old_dhw + old_hw * (pos_d - pad_head) + old_width * (pos_h - pad_top) + po…  in Pad3d()
    [all …]
|
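Each Pad kernel above recovers coordinates from a flat output index the same way: block_num names the (batch × channel) plane, and the two remaining divisions give the row and column inside the padded plane. A host-side sketch of that decomposition, with names mirroring the snippet; the pad-region branch, where the real kernel writes the pad value instead of reading the input, is omitted here.

    #include <cstddef>

    // Maps a flat index into the padded NCHW output back to the matching input element.
    // Caller must ensure the position lies inside the original image, i.e.
    // pad_top <= padded_h < pad_top + old_height and pad_left <= padded_w < pad_left + old_width.
    float PadReadThrough(const float *input, size_t pos,
                         size_t old_height, size_t old_width,
                         size_t padded_height, size_t padded_width,
                         size_t pad_top, size_t pad_left) {
      const size_t block_num = pos / padded_width / padded_height;  // which N*C plane
      const size_t padded_h = pos / padded_width % padded_height;   // row in the padded plane
      const size_t padded_w = pos % padded_width;                   // column in the padded plane
      return input[(block_num * old_height + padded_h - pad_top) * old_width +
                   padded_w - pad_left];
    }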
D | bias_add_grad_impl.cu |
    138  int block_num = GET_BLOCKS(size);  in CalBiasAddGradNCHW() local
    141  int block_group_size = (block_num + bias_size - 1) / bias_size;  in CalBiasAddGradNCHW()
    142  block_num = block_group_size * bias_size;  in CalBiasAddGradNCHW()
    147  BiasAddGradNCHW<<<block_num, thread_num, 0, cuda_stream>>>(size, batch_size, bias_size, height,  in CalBiasAddGradNCHW()
|
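The CalBiasAddGradNCHW lines show the launch-geometry trick: the block count from GET_BLOCKS is rounded up to a multiple of bias_size, so every bias channel owns an equal group of thread blocks for its reduction. A minimal host-side sketch of that rounding (GET_BLOCKS itself is assumed to be the usual ceiling division by the threads-per-block constant):

    // Returns the smallest multiple of bias_size that is >= block_num.
    inline int RoundBlocksToBiasGroups(int block_num, int bias_size) {
      const int block_group_size = (block_num + bias_size - 1) / bias_size;  // ceil(block_num / bias_size)
      return block_group_size * bias_size;
    }
    // e.g. block_num = 100, bias_size = 64  ->  group size 2, launch 128 blocks.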
D | mirror_pad_impl.cu |
    61   int block_num = (pos / padded_width) / padded_height;  in MirrorPad() local
    65   const int padded_channel = block_num % channels_new;  in MirrorPad()
    66   const int padded_batch = block_num / channels_new;  in MirrorPad()
    133  int block_num = (pos / dy_width) / dy_height;  in MirrorPadGradBatchChannel() local
    137  const int interim_channel = block_num % dx_channels;  in MirrorPadGradBatchChannel()
    138  const int interim_batch = block_num / dx_channels;  in MirrorPadGradBatchChannel()
|
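MirrorPad takes the same plane index one step further: block_num runs over batch × channel, so modulo and division by the channel count recover the two separately. A tiny sketch with names taken from the snippet:

    #include <utility>

    // Splits a combined batch*channel plane index into (batch, channel).
    inline std::pair<int, int> SplitPlaneIndex(int block_num, int channels_new) {
      const int padded_channel = block_num % channels_new;  // current channel
      const int padded_batch = block_num / channels_new;    // current batch item
      return {padded_batch, padded_channel};
    }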
/third_party/ffmpeg/libavcodec/ |
D | vc1_loopfilter.c |
    36   int right_fieldtx, int block_num)  in vc1_h_overlap_filter() argument
    38   switch (block_num) {  in vc1_h_overlap_filter()
    73   v->vc1dsp.vc1_h_s_overlap(left_block[block_num], right_block[block_num], 8, 8, 1);  in vc1_h_overlap_filter()
    79   int16_t (*bottom_block)[64], int block_num)  in vc1_v_overlap_filter() argument
    81   switch (block_num) {  in vc1_v_overlap_filter()
    100  v->vc1dsp.vc1_v_s_overlap(top_block[block_num], bottom_block[block_num]);  in vc1_v_overlap_filter()
    207  uint32_t flags, int block_num)  in vc1_i_h_loop_filter() argument
    213  if (block_num & 2)  in vc1_i_h_loop_filter()
    216  if (!(flags & LEFT_EDGE) || (block_num & 5) == 1) {  in vc1_i_h_loop_filter()
    217  if (block_num > 3)  in vc1_i_h_loop_filter()
    [all …]
|
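In the VC-1 filters above, block_num appears to index the six 8x8 blocks of a macroblock in the usual MPEG-style order (0-3 = luma in a 2x2 raster, 4 = Cb, 5 = Cr), which makes the bit tests in vc1_i_h_loop_filter read as geometry checks. A sketch of that interpretation; the predicate names are ours, not FFmpeg's, and the block-ordering assumption is not confirmed by the truncated output.

    // Assuming block_num in 0..5 with 0..3 = luma (raster order), 4 = Cb, 5 = Cr.
    inline bool IsBottomLumaRow(int block_num)   { return block_num <= 3 && (block_num & 2); }  // blocks 2, 3
    inline bool IsRightLumaColumn(int block_num) { return (block_num & 5) == 1; }               // blocks 1, 3
    inline bool IsChroma(int block_num)          { return block_num > 3; }                      // blocks 4, 5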
/third_party/e2fsprogs/ext2ed/ |
D | file_com.c |
    36   file_info.block_num=0;  in init_file_info()
    79   if (file_info.block_num+block_offset >= file_info.blocks_count) {  in type_file___nextblock()
    84   file_info.block_num+=block_offset;  in type_file___nextblock()
    85   file_info.global_block_num=file_block_to_global_block (file_info.block_num,&file_info);  in type_file___nextblock()
    87   file_info.file_offset=file_info.block_num*file_system_info.block_size;  in type_file___nextblock()
    180  if (file_info.block_num-block_offset < 0) {  in type_file___prevblock()
    185  file_info.block_num-=block_offset;  in type_file___prevblock()
    186  file_info.global_block_num=file_block_to_global_block (file_info.block_num,&file_info);  in type_file___prevblock()
    188  file_info.file_offset=file_info.block_num*file_system_info.block_size;  in type_file___prevblock()
    216  file_info.block_num=block_offset;  in type_file___block()
    [all …]
|
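The file_com.c matches are ext2ed's block cursor: nextblock/prevblock check the requested move against blocks_count, update block_num, resolve it to a global block, and recompute the byte offset as block_num * block_size. A condensed sketch of that bookkeeping; the struct carries only the fields visible in the snippet, not the full ext2ed types.

    #include <cstdio>

    struct FileCursor {
      long block_num = 0;      // current block within the file
      long blocks_count = 0;   // total number of blocks in the file
      long file_offset = 0;    // byte offset of the current block
    };

    // Moves the cursor by block_offset blocks, mimicking type_file___nextblock/prevblock.
    bool MoveBlocks(FileCursor &f, long block_offset, long block_size) {
      const long target = f.block_num + block_offset;
      if (target < 0 || target >= f.blocks_count) {
        std::fprintf(stderr, "Error - Block out of range\n");  // ext2ed keeps the old cursor
        return false;
      }
      f.block_num = target;
      f.file_offset = f.block_num * block_size;                // cf. lines 87 / 188 above
      return true;
    }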
D | inode_com.c |
    29   long group_num,group_offset,entry_num,block_num,first_entry,last_entry;  in type_ext2_inode___prev() local
    40   block_num=device_offset/file_system_info.block_size;  in type_ext2_inode___prev()
    76   long group_num,group_offset,entry_num,block_num,first_entry,last_entry;  in type_ext2_inode___next() local
    88   block_num=device_offset/file_system_info.block_size;  in type_ext2_inode___next()
    127  long group_num,group_offset,entry_num,block_num,first_entry,last_entry,inode_num;  in type_ext2_inode___show() local
    130  block_num=device_offset/file_system_info.block_size;  in type_ext2_inode___show()
    288  long group_num,group_offset,entry_num,block_num,wanted_entry;  in type_ext2_inode___entry() local
    296  block_num=device_offset/file_system_info.block_size;  in type_ext2_inode___entry()
    376  long block_num,group_offset,group_num;  in inode_offset_to_group_num() local
    378  block_num=inode_offset/file_system_info.block_size;  in inode_offset_to_group_num()
    [all …]
|
D | blockbitmap_com.c |
    215  unsigned long block_num,entry_num;  in type_ext2_block_bitmap___show() local
    253  …block_num=block_bitmap_info.entry_num+block_bitmap_info.group_num*file_system_info.super_block.s_b…  in type_ext2_block_bitmap___show()
    254  block_num+=file_system_info.super_block.s_first_data_block;  in type_ext2_block_bitmap___show()
    256  wprintw (show_win,"Status of block %ld - ",block_num); /* and the allocation status */  in type_ext2_block_bitmap___show()
|
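The block-bitmap viewer turns a (group, entry) position back into an absolute block number: blocks are numbered consecutively within each group, and the whole numbering starts at the superblock's first data block (1 on 1 KiB-block filesystems, 0 otherwise). As a one-liner matching the two truncated lines above:

    // entry_num-th bit of group_num's bitmap  ->  absolute block number on the filesystem.
    inline unsigned long BitmapEntryToBlock(unsigned long entry_num, unsigned long group_num,
                                            unsigned long blocks_per_group,
                                            unsigned long first_data_block) {
      return entry_num + group_num * blocks_per_group + first_data_block;
    }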
D | ext2ed.h |
    135  long block_num,blocks_count;  member
    329  extern long return_indirect (long table_block,long block_num);
    330  extern long return_dindirect (long table_block,long block_num);
    331  extern long return_tindirect (long table_block,long block_num);
|
/third_party/mindspore/mindspore/ops/_op_impl/_custom_op/ |
D | batch_matmul_impl.py |
    122  with tik_instance.for_range(0, 30, block_num=30) as block_idx, \
    173  with tik_instance.for_range(0, 27, block_num=27) as block_idx:
    218  with tik_instance.for_range(0, 18, block_num=18) as block_idx, \
    231  with tik_instance.for_range(0, 18, block_num=18) as block_idx, \
    242  with tik_instance.for_range(0, 32, block_num=32) as block_idx, \
    260  block_num, thread_num = 32, 2
    263  block_process_ele_num = (batch * m * k) // block_num
    264  loop_time = (batch * m * k) // block_num // input1_unit_size
    265  with tik_instance.for_range(0, block_num, block_num=block_num) as block_idx, \
    269  input2_index = block_idx // (block_num // batch) * input2_unint_size
|
D | dsd_back_impl.py |
    60   block_num = input_w1_shape[2]
    71   (batch_size, head, block_num, head_size //
    76   (batch_size, head, block_num, global_size //
    107  (batch_size, head, block_num, head_size //
    112  (batch_size, head, block_num, global_size //
    124  with tik_inst.for_range(0, channel_num, block_num=channel_num) as channel_idx:
    142  with tik_inst.for_range(0, block_num, thread_num=2) as w_idx:
    193  with tik_inst.for_range(0, block_num, thread_num=2) as w_idx_1:
    236  with tik_inst.for_range(0, block_num, thread_num=2) as w_idx:
    289  with tik_inst.for_range(0, block_num) as w_idx:
    [all …]
|
D | transpose02314_impl.py |
    106  …with tik_instance.for_range(0, 32, block_num=32) as block_idx, tik_instance.for_range(0, 14) as cc…
    126  with tik_instance.for_range(0, 32, block_num=32) as block_idx:
    156  …with tik_instance.for_range(0, 32, block_num=32) as block_idx, tik_instance.for_range(0, 14) as cc…
    176  …with tik_instance.for_range(0, 32, block_num=32) as block_idx, tik_instance.for_range(0, 7) as cc1…
    196  …with tik_instance.for_range(0, 32, block_num=32) as block_idx, tik_instance.for_range(0, 2) as cc1…
    216  …with tik_instance.for_range(0, 32, block_num=32) as block_idx, tik_instance.for_range(0, 7) as cc1…
    234  with tik_instance.for_range(0, 32, block_num=32) as block_idx:
    263  with tik_instance.for_range(0, 32, block_num=32) as block_idx:
    279  …with tik_instance.for_range(0, 32, block_num=32) as block_idx, tik_instance.for_range(0, 7, thread…
    295  with tik_instance.for_range(0, 32, block_num=32) as block_idx:
    [all …]
|
D | dsd_impl.py |
    53   block_num = input_w1_shape[2]
    63   w1_gm = tik_inst.Tensor('float16', (batch_size, head, block_num, head_size //
    65   w2_gm = tik_inst.Tensor('float16', (batch_size, head, block_num, global_size //
    75   with tik_inst.for_range(0, channel_num, block_num=channel_num) as channel_idx:
    104  with tik_inst.for_range(0, block_num, thread_num=2) as w_idx:
|
D | fused_abs_max1_impl.py |
    83   with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
    106  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
    127  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
    153  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
    176  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
    212  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
    254  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index0:
    282  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index1:
    309  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
    338  with tik_instance.for_range(0, blocks, block_num=blocks) as block_index:
    [all …]
|
D | img2col_impl.py |
    42   with tik_instance.for_range(0, 32, block_num=32) as block_index:
    69   with tik_instance.for_range(0, 32, block_num=32) as block_index:
    97   with tik_instance.for_range(0, 32, block_num=32) as block_index:
    121  with tik_instance.for_range(0, 32, block_num=32) as block_index:
    149  with tik_instance.for_range(0, 32, block_num=32) as block_index, \
    178  with tik_instance.for_range(0, 32, block_num=32) as block_index:
    206  with tik_instance.for_range(0, 32, block_num=32) as block_index:
    245  with tik_instance.for_range(0, 32, block_num=32) as block_index:
    294  with tik_instance.for_range(0, 32, block_num=32) as block_index:
    322  with tik_instance.for_range(0, 32, block_num=32) as block_index:
    [all …]
|
D | matmul_dds_impl.py |
    74   block_num = seq_len // block_size  # block number only support 16 for now
    85   … mat_lm = tik_inst.Tensor("float32", (block_num * block_size // 16, bs * block_size // 16, 16, 16),
    91   …mat_lc = tik_inst.Tensor("float16", (bs, heads, block_num, block_size // 16, block_size // 16, 16,…
    94   …mat_gc = tik_inst.Tensor("float16", (bs, heads, block_num, global_size // 16, block_size // 16, 16…
    100  with tik_inst.for_range(0, channel_num, block_num=channel_num) as block_index:
    138  0, block_num, 16, 48, 0)
    140  with tik_inst.for_range(0, block_num) as block:
|
D | matmul_dds_grad_impl.py |
    74   block_num = seq_len // block_size
    84   …mat_lc = tik_inst.Tensor("float16", (bs, heads, block_num, block_size // 16, block_size // 16, 16,…
    87   …mat_gc = tik_inst.Tensor("float16", (bs, heads, block_num, global_size // 16, block_size // 16, 16…
    90   …mat_lc_grad = tik_inst.Tensor("float16", (bs, heads, block_num, block_size // 16, block_size // 16…
    93   …mat_gc_grad = tik_inst.Tensor("float16", (bs, heads, block_num, global_size // 16, block_size // 1…
    104  with tik_inst.for_range(0, channel_num, block_num=channel_num) as block_index:
    148  with tik_inst.for_range(0, block_num) as block:
|
/third_party/mindspore/mindspore/_extends/graph_kernel/model/ |
D | graph_parallel.py |
    40   self.block_num = 0
    72   self.block_num = self.MAX_BLOCK
    76   self.block_num = total_block
    77   self.block_weight = total_weight // self.block_num
    103  self.block_num = min(self.MAX_BLOCK, block_x)
    121  self.block_num = min(self.MAX_BLOCK, block)
    122  self.block_weight = self._cal_weight(self.ops) // self.block_num
    278  sum_block += s.block_num
    282  blocks.append(s.block_num)
|
/third_party/boost/boost/graph/parallel/ |
D | distribution.hpp |
    360  std::size_t block_num = get_block_num(i);  in local() local
    363  std::size_t local_block_num = block_num / p;  in local()
    382  SizeType block_num = local_block_num * p + id;  in global() local
    385  SizeType block_start = block_num * block_rows * block_columns;  in global()
    387  std::cerr << "Block " << block_num << " starts at index " << block_start  in global()
    391  SizeType block_row = block_num / (data_columns_per_row / block_columns);  in global()
    392  SizeType block_col = block_num % (data_columns_per_row / block_columns);  in global()
    420  std::size_t block_num = block_col * blocks_in_row + block_row;  in get_block_num() local
    421  return block_num;  in get_block_num()
    506  std::size_t block_num = get_block_num(i);  in local() local
    [all …]
|
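distribution.hpp spreads 2D blocks round-robin over the p processes: local() divides a global block number by p to get the local block slot, and global() rebuilds the global number as local_block_num * p + id before expanding it into block_row/block_col and a starting index. A minimal sketch of just the cyclic part, assuming the same p/id meaning as the snippet:

    #include <cassert>
    #include <cstddef>

    struct BlockCyclic {
      std::size_t p;  // number of processes

      std::size_t owner(std::size_t block_num) const { return block_num % p; }
      std::size_t local_block(std::size_t block_num) const { return block_num / p; }
      // Inverse used by global(): local block slot on process `id` -> global block number.
      std::size_t global_block(std::size_t local_block_num, std::size_t id) const {
        return local_block_num * p + id;
      }
    };

    int main() {
      const BlockCyclic d{4};
      for (std::size_t g = 0; g < 16; ++g)
        assert(d.global_block(d.local_block(g), d.owner(g)) == g);  // round trip holds
      return 0;
    }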
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ |
D | pad_cpu_kernel.cc |
    99   int block_num = (SizeToInt(pos) / padded_width) / padded_height;  in LaunchKernel() local
    104  int channel_num = block_num % channels_new;  // current channel  in LaunchKernel()
    105  …int batch_item = block_num / channels_new;  // current item in bat…  in LaunchKernel()
    114  …int equiv_block_num = block_num - (batch_item * (pad_channel_before + pad_channel_after)) - pad_ch…  in LaunchKernel()
|
/third_party/flutter/skia/third_party/externals/spirv-tools/source/util/ |
D | bit_stream.h |
    95   const uint64_t block_num = ((uval >> block_exponent) << 1) + (val >= 0 ? 0 : 1);  in EncodeZigZag() local
    97   return (block_num << block_exponent) + pos;  in EncodeZigZag()
    104  const uint64_t block_num = val >> block_exponent;  in DecodeZigZag() local
    106  if (block_num & 1) {  in DecodeZigZag()
    108  return -1LL - ((block_num >> 1) << block_exponent) - pos;  in DecodeZigZag()
    111  return ((block_num >> 1) << block_exponent) + pos;  in DecodeZigZag()
|
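bit_stream.h uses a block-wise zig-zag: the low block_exponent bits of the magnitude ride along unchanged, and only the remaining high bits are interleaved by sign. The pair below reconstructs both directions from the lines above; the function names are ours, and the magnitude computation (uval) is filled in as an assumption since the search output truncates it (for negative v, -v - 1 == ~v in two's complement, which also avoids overflow at INT64_MIN).

    #include <cassert>
    #include <cstdint>

    uint64_t EncodeZigZagBlock(int64_t val, uint64_t block_exponent) {
      const uint64_t uval = val >= 0 ? static_cast<uint64_t>(val)
                                     : ~static_cast<uint64_t>(val);       // magnitude, assumed
      const uint64_t pos = uval & ((uint64_t(1) << block_exponent) - 1);  // low bits, kept as-is
      const uint64_t block_num = ((uval >> block_exponent) << 1) + (val >= 0 ? 0 : 1);
      return (block_num << block_exponent) + pos;                         // cf. lines 95 / 97
    }

    int64_t DecodeZigZagBlock(uint64_t val, uint64_t block_exponent) {
      const uint64_t block_num = val >> block_exponent;
      const uint64_t pos = val & ((uint64_t(1) << block_exponent) - 1);
      if (block_num & 1)  // odd block -> the encoded value was negative (cf. lines 106 / 108)
        return -1LL - static_cast<int64_t>(((block_num >> 1) << block_exponent) + pos);
      return static_cast<int64_t>(((block_num >> 1) << block_exponent) + pos);
    }

    int main() {
      const int64_t vals[] = {0, 1, -1, 1000, -1000};
      for (int64_t v : vals)
        assert(DecodeZigZagBlock(EncodeZigZagBlock(v, 4), 4) == v);  // round trip
      return 0;
    }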
/third_party/flutter/skia/third_party/externals/libjpeg-turbo/ |
D | jdcoefct.c |
    265  JDIMENSION block_num;  in decompress_data() local
    307  for (block_num = cinfo->master->first_MCU_col[ci];  in decompress_data()
    308  block_num <= cinfo->master->last_MCU_col[ci]; block_num++) {  in decompress_data()
    412  JDIMENSION block_num, last_block_column;  in decompress_smooth_data() local
    511  for (block_num = cinfo->master->first_MCU_col[ci];  in decompress_smooth_data()
    512  block_num <= cinfo->master->last_MCU_col[ci]; block_num++) {  in decompress_smooth_data()
    516  if (block_num < last_block_column) {  in decompress_smooth_data()
|
/third_party/libjpeg-turbo/ |
D | jdcoefct.c |
    265  JDIMENSION block_num;  in decompress_data() local
    307  for (block_num = cinfo->master->first_MCU_col[ci];  in decompress_data()
    308  block_num <= cinfo->master->last_MCU_col[ci]; block_num++) {  in decompress_data()
    412  JDIMENSION block_num, last_block_column;  in decompress_smooth_data() local
    511  for (block_num = cinfo->master->first_MCU_col[ci];  in decompress_smooth_data()
    512  block_num <= cinfo->master->last_MCU_col[ci]; block_num++) {  in decompress_smooth_data()
    516  if (block_num < last_block_column) {  in decompress_smooth_data()
|
/third_party/skia/third_party/externals/libjpeg-turbo/ |
D | jdcoefct.c |
    269  JDIMENSION block_num;  in decompress_data() local
    311  for (block_num = cinfo->master->first_MCU_col[ci];  in decompress_data()
    312  block_num <= cinfo->master->last_MCU_col[ci]; block_num++) {  in decompress_data()
    432  JDIMENSION block_num, last_block_column;  in decompress_smooth_data() local
    579  for (block_num = cinfo->master->first_MCU_col[ci];  in decompress_smooth_data()
    580  block_num <= cinfo->master->last_MCU_col[ci]; block_num++) {  in decompress_smooth_data()
    584  if (block_num == cinfo->master->first_MCU_col[ci] &&  in decompress_smooth_data()
    585  block_num < last_block_column) {  in decompress_smooth_data()
    592  if (block_num + 1 < last_block_column) {  in decompress_smooth_data()
|