/external/python/cryptography/src/cryptography/hazmat/primitives/ |
D | padding.py |
    31  def _byte_padding_check(block_size):  argument
    32  if not (0 <= block_size <= 2040):
    35  if block_size % 8 != 0:
    39  def _byte_padding_update(buffer_, data, block_size):  argument
    47  finished_blocks = len(buffer_) // (block_size // 8)
    49  result = buffer_[:finished_blocks * (block_size // 8)]
    50  buffer_ = buffer_[finished_blocks * (block_size // 8):]
    55  def _byte_padding_pad(buffer_, block_size, paddingfn):  argument
    59  pad_size = block_size // 8 - len(buffer_)
    63  def _byte_unpadding_update(buffer_, data, block_size):  argument
    [all …]
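The padding.py fragments spell out the block-size contract in bits: 0 <= block_size <= 2040 and block_size % 8 == 0, with the pad length derived from block_size // 8. A minimal sketch of that arithmetic (simplified; the real module reports errors through shared helpers and carries buffered state across update() calls):

```python
def byte_padding_check(block_size):
    # Mirrors lines 31-35: block_size is given in bits, capped at 2040,
    # and must be a whole number of bytes.
    if not (0 <= block_size <= 2040):
        raise ValueError("block_size must be in range(0, 2041).")
    if block_size % 8 != 0:
        raise ValueError("block_size must be a multiple of 8.")


def pkcs7_pad(data: bytes, block_size: int) -> bytes:
    # Simplified padding step in the spirit of line 59: fill up to the next
    # block boundary, appending a full block when already aligned.
    byte_padding_check(block_size)
    block_bytes = block_size // 8
    pad_size = block_bytes - (len(data) % block_bytes)
    return data + bytes([pad_size]) * pad_size


assert pkcs7_pad(b"hello", 128).endswith(b"\x0b" * 11)
```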
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | spacetodepth_op_test.py |
    38  def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):  argument
    42  x_tf = array_ops.space_to_depth(input_nhwc, block_size)
    48  x_tf = array_ops.space_to_depth(input_nhwc, block_size)
    53  input_nchw, block_size, data_format="NCHW")
    59  block_size = 2
    62  self._testOne(x_np, block_size, x_out, dtype=dtype)
    70  block_size = 2
    73  self._testOne(x_np, block_size, x_out)
    80  block_size = 4
    82  self._testOne(x_np, block_size, x_out)
    [all …]
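These tests exercise the shape contract of space_to_depth: on an NHWC tensor, each block_size x block_size spatial block is folded into channels, so [N, H, W, C] becomes [N, H/bs, W/bs, C*bs*bs]. A NumPy sketch of that rearrangement, purely to illustrate the layout (not the kernel under test):

```python
import numpy as np

def space_to_depth_nhwc(x, block_size):
    # x: [N, H, W, C], with H and W divisible by block_size.
    n, h, w, c = x.shape
    x = x.reshape(n, h // block_size, block_size, w // block_size, block_size, c)
    # Move the within-block offsets (by, bx) next to the channel axis, so the
    # output channel index becomes (by * block_size + bx) * C + c.
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(n, h // block_size, w // block_size, block_size * block_size * c)

x = np.arange(16).reshape(1, 4, 4, 1)
print(space_to_depth_nhwc(x, 2).shape)  # (1, 2, 2, 4)
```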
|
D | depthtospace_op_test.py |
    40  def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):  argument
    44  x_tf = array_ops.depth_to_space(input_nhwc, block_size)
    51  input_nchw, block_size, data_format="NCHW")
    61  x_tf = array_ops.depth_to_space(input_nhwc, block_size)
    66  input_nchw, block_size, data_format="NCHW")
    73  block_size = 2
    75  self._testOne(x_np, block_size, x_out)
    80  block_size = 2
    82  self._testOne(x_np, block_size, x_out, dtype=dtypes.float16)
    92  block_size = 2
    [all …]
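depth_to_space is the inverse mapping: channels are split back into block_size x block_size spatial offsets, turning [N, H, W, C] into [N, H*bs, W*bs, C/(bs*bs)]. A matching NumPy sketch:

```python
import numpy as np

def depth_to_space_nhwc(x, block_size):
    # x: [N, H, W, C], with C divisible by block_size**2.
    n, h, w, c = x.shape
    c_out = c // (block_size * block_size)
    x = x.reshape(n, h, w, block_size, block_size, c_out)
    # Interleave the block offsets (by, bx) back into the H and W axes.
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(n, h * block_size, w * block_size, c_out)

x = np.arange(16).reshape(1, 2, 2, 4)
print(depth_to_space_nhwc(x, 2).shape)  # (1, 4, 4, 1)
```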
|
/external/ltp/testcases/kernel/syscalls/fallocate/ |
D | fallocate04.c |
    23  static size_t block_size;  variable
    38  block_size = file_stat.st_blksize;  in get_blocksize()
    39  buf_size = NUM_OF_BLOCKS * block_size;  in get_blocksize()
    59  memset(buf + i * block_size, 'a' + i, block_size);  in fill_tst_buf()
   112  block_size, block_size) == -1) {  in test02()
   123  if (ret != (ssize_t)block_size) {  in test02()
   144  if ((alloc_size0 - block_size) != alloc_size1)  in test02()
   150  memset(exp_buf + block_size, 0, block_size);  in test02()
   170  if (fallocate(fd, FALLOC_FL_ZERO_RANGE, block_size - 1,  in test03()
   171  block_size + 2) == -1) {  in test03()
    [all …]
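The fallocate04 fragments show the test's setup: block_size is taken from st_blksize, the working buffer spans NUM_OF_BLOCKS blocks, and block i is filled with the byte 'a' + i so that punched or zeroed ranges are easy to spot when the file is read back. A rough Python sketch of that fill step (the 4096-byte block size and the block count are assumptions for illustration only):

```python
NUM_OF_BLOCKS = 4      # assumed value; the real constant lives in the test
block_size = 4096      # the test reads this from fstat()'s st_blksize

def fill_tst_buf(buf_size):
    # Mirrors line 59: block i is filled with the repeated byte 'a' + i.
    buf = bytearray(buf_size)
    for i in range(buf_size // block_size):
        buf[i * block_size:(i + 1) * block_size] = bytes([ord("a") + i]) * block_size
    return bytes(buf)

buf = fill_tst_buf(NUM_OF_BLOCKS * block_size)
assert buf[:3] == b"aaa" and buf[block_size:block_size + 3] == b"bbb"
```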
|
/external/tensorflow/tensorflow/core/platform/cloud/ |
D | ram_file_block_cache_test.cc |
   141  for (size_t block_size = 2; block_size <= 4; block_size++) {  in TEST()  local
   144  RamFileBlockCache cache(block_size, block_size, 0, fetcher);  in TEST()
   146  for (size_t n = block_size - 2; n <= block_size + 2; n++) {  in TEST()
   152  EXPECT_EQ(got.size(), n) << "block size = " << block_size  in TEST()
   157  << "block size = " << block_size << ", offset = " << offset  in TEST()
   165  EXPECT_EQ(got, want) << "block size = " << block_size  in TEST()
   173  const size_t block_size = 16;  in TEST()  local
   175  auto fetcher = [&calls, block_size](const string& filename, size_t offset,  in TEST()
   178  EXPECT_EQ(n, block_size);  in TEST()
   179  EXPECT_EQ(offset % block_size, 0);  in TEST()
    [all …]
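Both copies of this test assert that every fetch the cache issues is block-aligned (offset % block_size == 0) and exactly block_size long. The blocking arithmetic behind that expectation, as a small sketch (only the alignment math, not the cache itself):

```python
def expected_fetches(offset, n, block_size):
    # A read of n bytes at `offset` touches these block-aligned ranges; each
    # range should arrive at the fetcher as (aligned offset, block_size).
    first = (offset // block_size) * block_size
    end = offset + n
    return [(o, block_size) for o in range(first, end, block_size)]

print(expected_fetches(offset=5, n=6, block_size=4))
# [(4, 4), (8, 4)] -- two aligned fetches cover bytes 5..10
```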
|
/external/tensorflow/tensorflow/c/experimental/filesystem/plugins/gcs/ |
D | ram_file_block_cache_test.cc |
   148  for (size_t block_size = 2; block_size <= 4; block_size++) {  in TEST()  local
   151  tf_gcs_filesystem::RamFileBlockCache cache(block_size, block_size, 0,  in TEST()
   154  for (size_t n = block_size - 2; n <= block_size + 2; n++) {  in TEST()
   160  EXPECT_EQ(got.size(), n) << "block size = " << block_size  in TEST()
   165  << "block size = " << block_size << ", offset = " << offset  in TEST()
   173  EXPECT_EQ(got, want) << "block size = " << block_size  in TEST()
   181  const size_t block_size = 16;  in TEST()  local
   183  auto fetcher = [&calls, block_size](const string& filename, size_t offset,  in TEST()
   186  EXPECT_EQ(n, block_size);  in TEST()
   187  EXPECT_EQ(offset % block_size, 0);  in TEST()
    [all …]
|
/external/tensorflow/tensorflow/lite/delegates/gpu/common/tasks/ |
D | conv_buffer_1x1.cc |
    35  std::string GetComputationPart(const int3& block_size, int element_size,  in GetComputationPart()  argument
    40  for (int z = 0; z < block_size.z; ++z) {  in GetComputationPart()
    43  for (int y = 0; y < block_size.y; ++y) {  in GetComputationPart()
    44  for (int x = 0; x < block_size.x; ++x) {  in GetComputationPart()
    45  std::string s_index = std::to_string(y * block_size.x + x);  in GetComputationPart()
    86  conv_params.block_size = int3(1, 1, 1);  in GetBestParams()
    98  conv_params.block_size.x = 2;  in GetBestParams()
   104  int block_size =  in GetBestParams()  local
   107  if (!can_use_flt8 && block_size > 4) {  in GetBestParams()
   108  block_size = 4;  in GetBestParams()
    [all …]
|
D | conv_metal.cc |
    59  int3 block_size;  member
    76  std::to_string(params.block_size.z) + ";\n";  in GlobalIdsGen()
    79  std::to_string(params.block_size.y) + ";\n";  in GlobalIdsGen()
    81  std::to_string(params.block_size.x) + ";\n";  in GlobalIdsGen()
    90  std::to_string(params.block_size.y) + ";\n";  in GlobalIdsGen()
    92  std::to_string(params.block_size.x) + ";\n";  in GlobalIdsGen()
    95  std::to_string(params.block_size.z) + ";\n";  in GlobalIdsGen()
    99  std::to_string(params.block_size.z) + ";\n";  in GlobalIdsGen()
   104  std::to_string(params.block_size.x) + ";\n";  in GlobalIdsGen()
   108  std::to_string(params.block_size.x) + ";\n";  in GlobalIdsGen()
    [all …]
|
D | conv_powervr.cc |
    73  std::string GenerateBlockCoords(const int4& block_size,  in GenerateBlockCoords()  argument
    85  std::to_string(block_size.w) + ";\n";  in GenerateBlockCoords()
    89  std::to_string(block_size.x) + ";\n";  in GenerateBlockCoords()
    92  std::to_string(block_size.y) + ";\n";  in GenerateBlockCoords()
    94  std::to_string(block_size.z) + ";\n";  in GenerateBlockCoords()
    97  std::to_string(block_size.y) + ";\n";  in GenerateBlockCoords()
    99  std::to_string(block_size.x) + ";\n";  in GenerateBlockCoords()
   110  std::to_string(block_size.x) + ";\n";  in GenerateBlockCoords()
   113  std::to_string(block_size.y) + ";\n";  in GenerateBlockCoords()
   115  std::to_string(block_size.z) + ";\n";  in GenerateBlockCoords()
    [all …]
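conv_buffer_1x1, conv_metal and conv_powervr all build kernel source as strings, unrolling nested loops over the components of block_size so that every element of the block gets its own generated statement (note the flattened index y * block_size.x + x at conv_buffer_1x1.cc line 45). A toy Python sketch of that codegen pattern; the emitted statement is a placeholder, not the real OpenCL/Metal line:

```python
def get_computation_part(block_size):
    # block_size plays the role of the int3 used by the generators.
    bx, by, bz = block_size
    lines = []
    for z in range(bz):
        for y in range(by):
            for x in range(bx):
                s_index = y * bx + x          # same flattening as line 45
                lines.append(f"  r{z}_{s_index} += w{z} * s{s_index};")
    return "\n".join(lines)

print(get_computation_part((2, 2, 1)))
```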
|
/external/libaom/libaom/aom_dsp/ |
D | noise_util.c |
    23  float aom_noise_psd_get_default_value(int block_size, float factor) {  in aom_noise_psd_get_default_value()  argument
    24  return (factor * factor / 10000) * block_size * block_size / 8;  in aom_noise_psd_get_default_value()
    33  int block_size;  member
    38  struct aom_noise_tx_t *aom_noise_tx_malloc(int block_size) {  in aom_noise_tx_malloc()  argument
    43  switch (block_size) {  in aom_noise_tx_malloc()
    66  fprintf(stderr, "Unsupported block size %d\n", block_size);  in aom_noise_tx_malloc()
    69  noise_tx->block_size = block_size;  in aom_noise_tx_malloc()
    71  32, 2 * sizeof(*noise_tx->tx_block) * block_size * block_size);  in aom_noise_tx_malloc()
    73  32, 2 * sizeof(*noise_tx->temp) * block_size * block_size);  in aom_noise_tx_malloc()
    81  2 * sizeof(*noise_tx->tx_block) * block_size * block_size);  in aom_noise_tx_malloc()
    [all …]
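The first two matches are a complete, self-contained formula: the default PSD value grows quadratically in both the strength factor and the block size. Transcribed directly for reference:

```python
def noise_psd_default_value(block_size, factor):
    # Straight transcription of noise_util.c line 24.
    return (factor * factor / 10000.0) * block_size * block_size / 8.0

print(noise_psd_default_value(block_size=32, factor=25))  # 8.0
```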
|
D | noise_model.c |
    33  int block_size) { \
    34  const int max_h = AOMMIN(h - y_o, block_size); \
    35  const int max_w = AOMMIN(w - x_o, block_size); \
    50  int block_size, int use_highbd) {  in get_block_mean()  argument
    53  block_size);  in get_block_mean()
    54  return get_block_mean_lowbd(data, w, h, stride, x_o, y_o, block_size);  in get_block_mean()
   424  int block_size, int bit_depth, int use_highbd) {  in aom_flat_block_finder_init()  argument
   425  const int n = block_size * block_size;  in aom_flat_block_finder_init()
   435  block_size);  in aom_flat_block_finder_init()
   444  block_size);  in aom_flat_block_finder_init()
    [all …]
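The AOMMIN lines show that blocks overlapping the right or bottom frame edge are clipped to max_w x max_h before any statistic is computed. A hedged sketch of a clipped block mean; only the clipping comes from the fragment, the normalization by the clipped area is an assumption:

```python
def clipped_block_mean(data, w, h, stride, x_o, y_o, block_size):
    # Clip as at lines 34-35 so the loop never reads past the frame.
    max_h = min(h - y_o, block_size)
    max_w = min(w - x_o, block_size)
    total = 0
    for yy in range(max_h):
        for xx in range(max_w):
            total += data[(y_o + yy) * stride + (x_o + xx)]
    return total / float(max_w * max_h)

frame = [1] * (8 * 8)
print(clipped_block_mean(frame, w=8, h=8, stride=8, x_o=6, y_o=6, block_size=4))  # 1.0
```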
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/ |
D | tpu_space_to_depth_pass.cc |
   158  LogicalResult HandlePad(TF::PadOp op, int32_t kernel_size, int32_t block_size) {  in HandlePad()  argument
   169  int32_t pad_beg = (pad_total / 2 + 1) / block_size;  in HandlePad()
   170  int32_t pad_end = (pad_total / 2) / block_size;  in HandlePad()
   180  pad_input_shape[0], pad_input_shape[1] / block_size,  in HandlePad()
   181  pad_input_shape[2] / block_size,  in HandlePad()
   182  pad_input_shape[3] * block_size * block_size};  in HandlePad()
   207  void HandleConv2DInput(TF::Conv2DOp conv2d, int64_t block_size) {  in HandleConv2DInput()  argument
   211  input_shape[0], input_shape[1] / block_size, input_shape[2] / block_size,  in HandleConv2DInput()
   212  input_shape[3] * block_size * block_size};  in HandleConv2DInput()
   262  void HandleConv2DFilter(TF::Conv2DOp conv2d, int64_t block_size) {  in HandleConv2DFilter()  argument
    [all …]
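The pass rewrites padding and the conv input shape together: pad amounts are divided by block_size (lines 169-170) and the NHWC shape becomes [N, H/b, W/b, C*b*b] (lines 180-182 and 210-212). The same arithmetic as a standalone sketch:

```python
def space_to_depth_rewrite(pad_total, input_shape, block_size):
    # Pad rescaling, as at lines 169-170.
    pad_beg = (pad_total // 2 + 1) // block_size
    pad_end = (pad_total // 2) // block_size
    # Shape rewrite, as at lines 180-182 / 210-212.
    n, h, w, c = input_shape
    new_shape = [n, h // block_size, w // block_size,
                 c * block_size * block_size]
    return (pad_beg, pad_end), new_shape

print(space_to_depth_rewrite(pad_total=6, input_shape=[8, 224, 224, 3], block_size=2))
# ((2, 1), [8, 112, 112, 12])
```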
|
/external/XNNPACK/test/ |
D | depthtospace-microkernel-tester.h |
    36  return this->output_channels() * this->block_size() * this->block_size();  in input_channels()
    50  return this->input_height() * this->block_size();  in output_height()
    64  return this->input_width() * this->block_size();  in output_width()
    67  inline DepthToSpaceMicrokernelTester& block_size(size_t block_size) {  in block_size()  argument
    68  assert(block_size != 0);  in block_size()
    69  this->block_size_ = block_size;  in block_size()
    73  inline size_t block_size() const {  in block_size()  function
   101  ASSERT_GE(block_size(), 2);  in Test()
   118  block_size(),  in Test()
   125  for (size_t by = 0; by < block_size(); by++) {  in Test()
    [all …]
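The tester states the depth-to-space shape contract directly: input_channels = output_channels * block_size^2, each spatial dimension grows by block_size, and block_size must be at least 2. Restated compactly:

```python
def depth_to_space_dims(input_height, input_width, output_channels, block_size):
    # Same relations as lines 36, 50 and 64; line 101 requires block_size >= 2.
    assert block_size >= 2
    input_channels = output_channels * block_size * block_size
    output_height = input_height * block_size
    output_width = input_width * block_size
    return input_channels, output_height, output_width

print(depth_to_space_dims(7, 5, output_channels=3, block_size=2))  # (12, 14, 10)
```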
|
D | depth-to-space-nhwc.cc |
    14  .block_size(3)  in TEST()
    23  .block_size(3)  in TEST()
    33  .block_size(3)  in TEST()
    44  .block_size(3)  in TEST()
    52  for (uint32_t block_size = 2; block_size <= 5; block_size++) {  in TEST()  local
    55  .block_size(block_size)  in TEST()
    65  .block_size(3)  in TEST()
    76  .block_size(3)  in TEST()
    86  .block_size(3)  in TEST()
    96  .block_size(3)  in TEST()
|
D | depth-to-space-nchw2nhwc.cc |
    14  .block_size(3)  in TEST()
    23  .block_size(3)  in TEST()
    33  .block_size(3)  in TEST()
    44  .block_size(3)  in TEST()
    52  for (uint32_t block_size = 2; block_size <= 5; block_size++) {  in TEST()  local
    55  .block_size(block_size)  in TEST()
    65  .block_size(3)  in TEST()
    76  .block_size(3)  in TEST()
    86  .block_size(3)  in TEST()
    96  .block_size(3)  in TEST()
|
D | depth-to-space-operator-tester.h |
    53  return input_height() * block_size();  in output_height()
    57  return input_width() * block_size();  in output_width()
    60  inline DepthToSpaceOperatorTester& block_size(size_t block_size) {  in block_size()  argument
    61  assert(block_size >= 2);  in block_size()
    62  this->block_size_ = block_size;  in block_size()
    66  inline size_t block_size() const {  in block_size()  function
    71  return output_channels() * block_size() * block_size();  in input_channels()
   153  block_size(), 0, &depth_to_space_op));  in TestNHWCxX32()
   171  for (size_t by = 0; by < block_size(); by++) {  in TestNHWCxX32()
   173  for (size_t bx = 0; bx < block_size(); bx++) {  in TestNHWCxX32()
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | depthtospace_op_gpu.cu.cc |
    35  const int block_size, const int batch_size,  in D2S_NHWC()  argument
    49  const int in_h = h / block_size;  in D2S_NHWC()
    50  const int offset_h = h % block_size;  in D2S_NHWC()
    51  const int in_w = w / block_size;  in D2S_NHWC()
    52  const int offset_w = w % block_size;  in D2S_NHWC()
    53  const int offset_d = (offset_h * block_size + offset_w) * output_depth;  in D2S_NHWC()
    66  const int block_size, const int input_width,  in D2S_NCHW()  argument
    83  const int n_bY = n_bY_bX / block_size;  in D2S_NCHW()
    84  const int bX = n_bY_bX - n_bY * block_size;  in D2S_NCHW()
    86  const int n = n_bY / block_size;  in D2S_NCHW()
    [all …]
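The NHWC kernel recovers the input element for each output coordinate with pure index arithmetic: the spatial coordinate splits into a block coordinate plus an in-block offset, and that offset selects a channel slice (lines 49-53). A sketch of the mapping; combining offset_d with the output channel into the input channel is the usual depth-to-space convention and is assumed here:

```python
def d2s_nhwc_source_index(h, w, d, block_size, output_depth):
    # Lines 49-53: split (h, w) into block position and in-block offset.
    in_h, offset_h = divmod(h, block_size)
    in_w, offset_w = divmod(w, block_size)
    offset_d = (offset_h * block_size + offset_w) * output_depth
    return in_h, in_w, d + offset_d   # input spatial position and channel

print(d2s_nhwc_source_index(h=5, w=3, d=0, block_size=2, output_depth=1))
# (2, 1, 3): output pixel (5, 3) reads input pixel (2, 1), channel 3
```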
|
D | spacetodepth_op_gpu.cu.cc |
    34  const int block_size, const int batch_size,  in S2D_NHWC()  argument
    48  const int out_h = h / block_size;  in S2D_NHWC()
    49  const int offset_h = h % block_size;  in S2D_NHWC()
    50  const int out_w = w / block_size;  in S2D_NHWC()
    51  const int offset_w = w % block_size;  in S2D_NHWC()
    52  const int offset_d = (offset_h * block_size + offset_w) * input_depth;  in S2D_NHWC()
    66  const int block_size, const int output_width,  in S2D_NCHW()  argument
    78  const int n_iC_oY_bY_oX = input_idx / block_size;  in S2D_NCHW()
    79  const int bX = input_idx - n_iC_oY_bY_oX * block_size;  in S2D_NCHW()
    84  const int n_iC_oY = n_iC_oY_bY / block_size;  in S2D_NCHW()
    [all …]
|
/external/XNNPACK/src/operators/ |
D | depth-to-space-nhwc.c |
    20  uint32_t block_size,  in xnn_create_depth_to_space_nhwc_x32()  argument
    50  if (block_size <= 1) {  in xnn_create_depth_to_space_nhwc_x32()
    53  block_size);  in xnn_create_depth_to_space_nhwc_x32()
    57  const size_t input_channels = output_channels * block_size * block_size;  in xnn_create_depth_to_space_nhwc_x32()
    63  input_channel_stride, block_size, block_size, input_channels);  in xnn_create_depth_to_space_nhwc_x32()
    80  depth_to_space_op->block_size = block_size;  in xnn_create_depth_to_space_nhwc_x32()
   129  const uint32_t block_size = depth_to_space_op->block_size;  in xnn_setup_depth_to_space_nhwc_x32()  local
   131  const size_t output_width = input_width * block_size;  in xnn_setup_depth_to_space_nhwc_x32()
   136  .block_size = (size_t) block_size,  in xnn_setup_depth_to_space_nhwc_x32()
   148  depth_to_space_op->context.depthtospace2d_hwc.elements *= block_size;  in xnn_setup_depth_to_space_nhwc_x32()
    [all …]
|
D | depth-to-space-nchw2nhwc.c |
    20  uint32_t block_size,  in xnn_create_depth_to_space_nchw2nhwc_x32()  argument
    50  if (block_size <= 1) {  in xnn_create_depth_to_space_nchw2nhwc_x32()
    53  block_size);  in xnn_create_depth_to_space_nchw2nhwc_x32()
    57  const size_t input_channels = output_channels * block_size * block_size;  in xnn_create_depth_to_space_nchw2nhwc_x32()
    63  input_channel_stride, block_size, block_size, input_channels);  in xnn_create_depth_to_space_nchw2nhwc_x32()
    80  depth_to_space_op->block_size = block_size;  in xnn_create_depth_to_space_nchw2nhwc_x32()
   129  const uint32_t block_size = depth_to_space_op->block_size;  in xnn_setup_depth_to_space_nchw2nhwc_x32()  local
   130  const size_t output_height = input_height * block_size;  in xnn_setup_depth_to_space_nchw2nhwc_x32()
   131  const size_t output_width = input_width * block_size;  in xnn_setup_depth_to_space_nchw2nhwc_x32()
   136  .block_size = block_size,  in xnn_setup_depth_to_space_nchw2nhwc_x32()
|
/external/squashfs-tools/squashfs-tools/ |
D | compressor.h |
    47  int block_size, int datablock)  in compressor_init()  argument
    51  return comp->init(stream, block_size, datablock);  in compressor_init()
    56  void *dest, void *src, int size, int block_size, int *error)  in compressor_compress()  argument
    58  return comp->compress(strm, dest, src, size, block_size, error);  in compressor_compress()
    63  void *src, int size, int block_size, int *error)  in compressor_uncompress()  argument
    65  return comp->uncompress(dest, src, size, block_size, error);  in compressor_uncompress()
    83  static inline int compressor_options_post(struct compressor *comp, int block_size)  in compressor_options_post()  argument
    87  return comp->options_post(block_size);  in compressor_options_post()
    92  int block_size, int *size)  in compressor_dump_options()  argument
    96  return comp->dump_options(block_size, size);  in compressor_dump_options()
    [all …]
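compressor.h is a set of thin dispatch wrappers: each one forwards to the selected back end's function pointer, passing block_size through, and the optional hooks (options_post, dump_options) are guarded for back ends that do not provide them. The same shape in a Python sketch (names mirror the wrappers; the return-0 default for a missing hook is an assumption):

```python
class Compressor:
    """Stand-in for `struct compressor`: a table of per-back-end callbacks."""
    def __init__(self, init, compress, uncompress, options_post=None):
        self.init = init
        self.compress = compress
        self.uncompress = uncompress
        self.options_post = options_post   # optional hook, may be None

def compressor_init(comp, stream, block_size, datablock):
    # Forwarding wrapper, like the static inline at line 47.
    return comp.init(stream, block_size, datablock)

def compressor_options_post(comp, block_size):
    # Guarded wrapper: a back end without the hook means "nothing to do".
    if comp.options_post is None:
        return 0
    return comp.options_post(block_size)
```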
|
/external/fsverity-utils/lib/ |
D | compute_digest.c |
    30  u32 block_size, const u8 *salt, u32 salt_size)  in hash_one_block()  argument
    35  memset(&cur->data[cur->filled], 0, block_size - cur->filled);  in hash_one_block()
    39  libfsverity_hash_update(hash, cur->data, block_size);  in hash_one_block()
    45  return next->filled + hash->alg->digest_size > block_size;  in hash_one_block()
    54  u32 block_size, const u8 *salt, u32 salt_size,  in compute_root_hash()  argument
    57  const u32 hashes_per_block = block_size / hash->alg->digest_size;  in compute_root_hash()
    58  const u32 padded_salt_size = roundup(salt_size, hash->alg->block_size);  in compute_root_hash()
    82  for (blocks = DIV_ROUND_UP(file_size, block_size); blocks > 1;  in compute_root_hash()
    97  buffers[level].data = libfsverity_zalloc(block_size);  in compute_root_hash()
   106  for (offset = 0; offset < file_size; offset += block_size) {  in compute_root_hash()
    [all …]
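compute_root_hash builds a Merkle tree: each block_size-byte block holds block_size / digest_size child hashes (line 57), and the loop at line 82 keeps dividing the block count until a single root block remains. The resulting number of hash levels can be sketched as:

```python
def ceil_div(a, b):
    return -(-a // b)

def num_hash_levels(file_size, block_size, digest_size):
    # hashes_per_block as at line 57; level loop as at line 82.
    hashes_per_block = block_size // digest_size
    blocks = ceil_div(file_size, block_size)
    levels = 0
    while blocks > 1:
        blocks = ceil_div(blocks, hashes_per_block)
        levels += 1
    return levels

# 1 GiB file, 4 KiB blocks, SHA-256 digests -> 3 levels of hash blocks.
print(num_hash_levels(file_size=1 << 30, block_size=4096, digest_size=32))  # 3
```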
|
/external/rust/crates/quiche/deps/boringssl/src/crypto/cmac/ |
D | cmac.c |
   178  size_t block_size = EVP_CIPHER_block_size(cipher);  in CMAC_Init()  local
   179  if ((block_size != AES_BLOCK_SIZE && block_size != 8 /* 3-DES */) ||  in CMAC_Init()
   182  !EVP_Cipher(&ctx->cipher_ctx, scratch, kZeroIV, block_size) ||  in CMAC_Init()
   188  if (block_size == AES_BLOCK_SIZE) {  in CMAC_Init()
   206  size_t block_size = EVP_CIPHER_CTX_block_size(&ctx->cipher_ctx);  in CMAC_Update()  local
   207  assert(block_size <= AES_BLOCK_SIZE);  in CMAC_Update()
   211  size_t todo = block_size - ctx->block_used;  in CMAC_Update()
   230  assert(ctx->block_used == block_size);  in CMAC_Update()
   232  if (!EVP_Cipher(&ctx->cipher_ctx, scratch, ctx->block, block_size)) {  in CMAC_Update()
   238  while (in_len > block_size) {  in CMAC_Update()
    [all …]
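CMAC_Init accepts only ciphers whose block size is 16 bytes (AES) or 8 bytes (3-DES), and CMAC_Update buffers input so that one full block is always held back for the final, subkey-tweaked encryption (note `while (in_len > block_size)`, strictly greater). A sketch of just that buffering logic, with the cipher call stubbed out; this is not the CMAC subkey or MAC math:

```python
AES_BLOCK_SIZE = 16

class CmacBuffering:
    def __init__(self, block_size):
        # CMAC_Init: only AES (16) or 3-DES (8) block sizes are accepted.
        if block_size not in (AES_BLOCK_SIZE, 8):
            raise ValueError("unsupported cipher block size")
        self.block_size = block_size
        self.block = b""

    def update(self, data):
        # As in CMAC_Update: process full blocks, but always keep the last
        # (possibly full) block buffered for finalization.
        data = self.block + data
        while len(data) > self.block_size:
            chunk, data = data[:self.block_size], data[self.block_size:]
            self._cipher_block(chunk)          # stand-in for EVP_Cipher()
        self.block = data

    def _cipher_block(self, chunk):
        pass  # placeholder; the real code CBC-encrypts into the running MAC
```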
|
/external/boringssl/src/crypto/cmac/ |
D | cmac.c |
   178  size_t block_size = EVP_CIPHER_block_size(cipher);  in CMAC_Init()  local
   179  if ((block_size != AES_BLOCK_SIZE && block_size != 8 /* 3-DES */) ||  in CMAC_Init()
   182  !EVP_Cipher(&ctx->cipher_ctx, scratch, kZeroIV, block_size) ||  in CMAC_Init()
   188  if (block_size == AES_BLOCK_SIZE) {  in CMAC_Init()
   206  size_t block_size = EVP_CIPHER_CTX_block_size(&ctx->cipher_ctx);  in CMAC_Update()  local
   207  assert(block_size <= AES_BLOCK_SIZE);  in CMAC_Update()
   211  size_t todo = block_size - ctx->block_used;  in CMAC_Update()
   230  assert(ctx->block_used == block_size);  in CMAC_Update()
   232  if (!EVP_Cipher(&ctx->cipher_ctx, scratch, ctx->block, block_size)) {  in CMAC_Update()
   238  while (in_len > block_size) {  in CMAC_Update()
    [all …]
|
/external/libaom/libaom/test/ |
D | horver_correlation_test.cc |
    53  for (int block_size = 0; block_size < BLOCK_SIZES_ALL; block_size++) {  in RunHorverTest()  local
    54  const int w = block_size_wide[block_size];  in RunHorverTest()
    55  const int h = block_size_high[block_size];  in RunHorverTest()
    83  for (int block_size = 0; block_size < BLOCK_SIZES_ALL; block_size++) {  in RunHorverSpeedTest()  local
    84  const int w = block_size_wide[block_size];  in RunHorverSpeedTest()
    85  const int h = block_size_high[block_size];  in RunHorverSpeedTest()
   116  for (int block_size = 0; block_size < BLOCK_SIZES_ALL; block_size++) {  in RunHorverTest_ExtremeValues()  local
   117  const int w = block_size_wide[block_size];  in RunHorverTest_ExtremeValues()
   118  const int h = block_size_high[block_size];  in RunHorverTest_ExtremeValues()
|