/external/XNNPACK/test/

x32-transpose.cc (all references inside TEST() bodies):
    24   .block_width(2)
    37   .block_width(j)
    50   .block_width(4)
    62   .block_width(i)
    75   .block_width(i)
    87   .block_width(2)
    99   .block_width(5)
    112  .block_width(4)
    126  .block_width(j)
    139  .block_width(2)
    [all …]

x64-transpose.cc (all references inside TEST() bodies):
    24   .block_width(2)
    37   .block_width(j)
    50   .block_width(4)
    62   .block_width(i)
    75   .block_width(i)
    87   .block_width(2)
    99   .block_width(5)
    112  .block_width(4)
    126  .block_width(j)
    139  .block_width(2)
    [all …]

x16-transpose.cc (all references inside TEST() bodies):
    24   .block_width(2)
    37   .block_width(j)
    50   .block_width(4)
    62   .block_width(i)
    75   .block_width(i)
    87   .block_width(2)
    99   .block_width(5)
    112  .block_width(4)
    126  .block_width(j)
    139  .block_width(2)
    [all …]

x8-transpose.cc (all references inside TEST() bodies):
    24   .block_width(2)
    37   .block_width(j)
    50   .block_width(4)
    62   .block_width(i)
    75   .block_width(i)
    87   .block_width(2)
    99   .block_width(5)
    112  .block_width(4)
    126  .block_width(j)
    139  .block_width(2)
    [all …]

x24-transpose.cc (all references inside TEST() bodies):
    24   .block_width(2)
    37   .block_width(j)
    50   .block_width(4)
    62   .block_width(i)
    75   .block_width(i)
    87   .block_width(2)
    99   .block_width(5)
    112  .block_width(4)
    126  .block_width(j)
    139  .block_width(2)
    [all …]

xx-transpose.cc (all references inside TEST() bodies):
    24   .block_width(1)
    37   .block_width(j)
    50   .block_width(2)
    62   .block_width(i)
    75   .block_width(i)
    87   .block_width(1)
    99   .block_width(4)
    112  .block_width(2)
    126  .block_width(j)
    139  .block_width(1)
    [all …]
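All six test files above exercise block_width through the same builder-style tester, both with fixed values (.block_width(2), .block_width(4), .block_width(5)) and with swept loop variables (.block_width(i), .block_width(j)). A hedged sketch of what such a TEST() body looks like: the kernel symbol is a placeholder rather than a real generated kernel name, the include path is abbreviated, and the setters other than block_width(), as well as the Test() overload taking the kernel pointer, are assumed from the tester header indexed below rather than confirmed by this index.

```cpp
#include <cstddef>
#include <cstdint>

#include <gtest/gtest.h>

#include "transpose-microkernel-tester.h"  // include path abbreviated

// Placeholder for a generated kernel (the real tests pass symbols such as the
// xnn_x32_transposec_ukernel__* functions); the signature is an assumption.
extern "C" void example_x32_transposec_ukernel(
    const uint32_t* input, uint32_t* output,
    size_t input_stride, size_t output_stride,
    size_t block_width, size_t block_height);

// Fixed block shape, mirroring the ".block_width(2)" references above.
TEST(X32_TRANSPOSEC_EXAMPLE, bw_2_bh_2) {
  TransposeMicrokernelTester()
    .input_stride(2)
    .output_stride(2)
    .block_width(2)
    .block_height(2)
    .Test(example_x32_transposec_ukernel);
}

// Swept block shapes, mirroring the ".block_width(i)" / ".block_width(j)" references.
TEST(X32_TRANSPOSEC_EXAMPLE, bw_1_to_4_bh_1_to_4) {
  for (size_t i = 1; i <= 4; i++) {
    for (size_t j = 1; j <= 4; j++) {
      TransposeMicrokernelTester()
        .input_stride(j)
        .output_stride(i)
        .block_width(j)
        .block_height(i)
        .Test(example_x32_transposec_ukernel);
    }
  }
}
```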
transpose-microkernel-tester.h:
  block_width() setter/getter:
    39   inline TransposeMicrokernelTester& block_width(size_t block_width) {   (block_width is the parameter)
    40   assert(block_width != 0);
    41   this->block_width_ = block_width;
    45   inline size_t block_width() const { return this->block_width_; }
  in Test():
    98   std::vector<uint8_t> output(output_stride() * block_width() * output_element_stride());
    110  block_width(),
    114  for (size_t c = 0; c < block_width(); c++) {
    120  << ", at column " << c << " / " << block_width();
    137  block_width(),
    141  for (size_t c = 0; c < block_width(); c++) {
    [all …]
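Only fragments of transpose-microkernel-tester.h appear in this index: a block_width() setter that rejects zero (line 40), a getter (line 45), and a Test() routine that sizes the output buffer from the getters (line 98) and then walks every output column (lines 114 and 141). As a self-contained illustration of that pattern, here is a minimal stand-in tester; it is not the real TransposeMicrokernelTester, and the byte-stride kernel contract it assumes comes from the asserts in the generated kernels indexed below.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <vector>

// Assumed kernel contract: strides are byte strides, matching the
// "input_stride >= block_width * sizeof(uint8_t)" asserts in the generated kernels.
typedef void (*x8_transpose_fn)(const uint8_t* input, uint8_t* output,
                                size_t input_stride, size_t output_stride,
                                size_t block_width, size_t block_height);

class MiniTransposeTester {
 public:
  MiniTransposeTester& block_width(size_t block_width) {
    assert(block_width != 0);  // same guard as line 40 of the real tester
    block_width_ = block_width;
    return *this;
  }
  size_t block_width() const { return block_width_; }

  MiniTransposeTester& block_height(size_t block_height) {
    assert(block_height != 0);
    block_height_ = block_height;
    return *this;
  }
  size_t block_height() const { return block_height_; }

  void Test(x8_transpose_fn transpose) const {
    // Tight packing: input is block_height rows of block_width bytes,
    // output is block_width rows of block_height bytes.
    std::vector<uint8_t> input(block_height() * block_width());
    std::vector<uint8_t> output(block_width() * block_height(), 0);
    std::iota(input.begin(), input.end(), 0);

    transpose(input.data(), output.data(),
              /*input_stride=*/block_width() * sizeof(uint8_t),
              /*output_stride=*/block_height() * sizeof(uint8_t),
              block_width(), block_height());

    // Mirrors the verification loops indexed at lines 114-120: input element
    // (r, c) must land at output position (c, r).
    for (size_t c = 0; c < block_width(); c++) {
      for (size_t r = 0; r < block_height(); r++) {
        assert(output[c * block_height() + r] == input[r * block_width() + c]);
      }
    }
  }

 private:
  size_t block_width_ = 1;
  size_t block_height_ = 1;
};
```

Sweeping block_width and block_height from 1 upward through this stand-in, with a kernel such as the scalar sketch after the x8-transposec listing below, reproduces the shape of the tests above.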
/external/XNNPACK/src/x8-transposec/gen/

16x16-reuse-dec-zip-neon.c (references in xnn_x8_transposec_ukernel__16x16_reuse_dec_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint8_t));
    41   const size_t rem = min(block_width - 1, 15);
    99   if XNN_UNPREDICTABLE(block_width > 15) {
    103  if XNN_UNPREDICTABLE(block_width >= 15) {
    107  if XNN_UNPREDICTABLE(block_width > 13) {
    111  if XNN_UNPREDICTABLE(block_width >= 13) {
    115  if XNN_UNPREDICTABLE(block_width > 11) {
    119  if XNN_UNPREDICTABLE(block_width >= 11) {
    123  if XNN_UNPREDICTABLE(block_width > 9) {
    [all …]

16x16-reuse-mov-zip-neon.c (references in xnn_x8_transposec_ukernel__16x16_reuse_mov_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint8_t));
    41   const size_t rem = min(block_width - 1, 15);
    100  if XNN_UNPREDICTABLE(block_width > 15) {
    105  if XNN_UNPREDICTABLE(block_width >= 15) {
    110  if XNN_UNPREDICTABLE(block_width > 13) {
    115  if XNN_UNPREDICTABLE(block_width >= 13) {
    120  if XNN_UNPREDICTABLE(block_width > 11) {
    125  if XNN_UNPREDICTABLE(block_width >= 11) {
    130  if XNN_UNPREDICTABLE(block_width > 9) {
    [all …]

16x16-reuse-mov-wasmsimd.c (references in xnn_x8_transposec_ukernel__16x16_reuse_mov_wasmsimd):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint8_t));
    41   const size_t rem = min(block_width - 1, 15);
    147  if XNN_UNPREDICTABLE(block_width > 15) {
    152  if XNN_UNPREDICTABLE(block_width >= 15) {
    157  if XNN_UNPREDICTABLE(block_width > 13) {
    162  if XNN_UNPREDICTABLE(block_width >= 13) {
    167  if XNN_UNPREDICTABLE(block_width > 11) {
    172  if XNN_UNPREDICTABLE(block_width >= 11) {
    177  if XNN_UNPREDICTABLE(block_width > 9) {
    [all …]

8x8-reuse-dec-zip-neon.c (references in xnn_x8_transposec_ukernel__8x8_reuse_dec_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint8_t));
    41   const size_t rem = min(block_width - 1, 7);
    71   if XNN_UNPREDICTABLE(block_width > 7) {
    75   if XNN_UNPREDICTABLE(block_width >= 7) {
    79   if XNN_UNPREDICTABLE(block_width > 5) {
    83   if XNN_UNPREDICTABLE(block_width >= 5) {
    87   if XNN_UNPREDICTABLE(block_width > 3) {
    91   if XNN_UNPREDICTABLE(block_width >= 3) {
    95   if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

16x16-reuse-mov-sse2.c (references in xnn_x8_transposec_ukernel__16x16_reuse_mov_sse2):
    25   size_t block_width,   (function argument)
    29   assert(input_stride >= block_width * sizeof(uint8_t));
    43   const size_t rem = min(block_width - 1, 15);
    152  if XNN_UNPREDICTABLE(block_width > 15) {
    157  if XNN_UNPREDICTABLE(block_width >= 15) {
    162  if XNN_UNPREDICTABLE(block_width > 13) {
    167  if XNN_UNPREDICTABLE(block_width >= 13) {
    172  if XNN_UNPREDICTABLE(block_width > 11) {
    177  if XNN_UNPREDICTABLE(block_width >= 11) {
    182  if XNN_UNPREDICTABLE(block_width > 9) {
    [all …]

8x8-multi-dec-zip-neon.c (references in xnn_x8_transposec_ukernel__8x8_multi_dec_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint8_t));
    49   const size_t rem = min(block_width - 1, 7);
    79   if XNN_UNPREDICTABLE(block_width > 7) {
    83   if XNN_UNPREDICTABLE(block_width >= 7) {
    87   if XNN_UNPREDICTABLE(block_width > 5) {
    91   if XNN_UNPREDICTABLE(block_width >= 5) {
    95   if XNN_UNPREDICTABLE(block_width > 3) {
    99   if XNN_UNPREDICTABLE(block_width >= 3) {
    103  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-reuse-mov-zip-neon.c (references in xnn_x8_transposec_ukernel__8x8_reuse_mov_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint8_t));
    41   const size_t rem = min(block_width - 1, 7);
    72   if XNN_UNPREDICTABLE(block_width > 7) {
    77   if XNN_UNPREDICTABLE(block_width >= 7) {
    82   if XNN_UNPREDICTABLE(block_width > 5) {
    87   if XNN_UNPREDICTABLE(block_width >= 5) {
    92   if XNN_UNPREDICTABLE(block_width > 3) {
    97   if XNN_UNPREDICTABLE(block_width >= 3) {
    102  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-multi-mov-zip-neon.c (references in xnn_x8_transposec_ukernel__8x8_multi_mov_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint8_t));
    49   const size_t rem = min(block_width - 1, 7);
    80   if XNN_UNPREDICTABLE(block_width > 7) {
    85   if XNN_UNPREDICTABLE(block_width >= 7) {
    90   if XNN_UNPREDICTABLE(block_width > 5) {
    95   if XNN_UNPREDICTABLE(block_width >= 5) {
    100  if XNN_UNPREDICTABLE(block_width > 3) {
    105  if XNN_UNPREDICTABLE(block_width >= 3) {
    110  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]
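Every kernel indexed above takes block_width as a function argument (line 23, or 25 in the SSE2 file) and asserts the same byte-stride precondition right after it. A portable scalar model of that contract, with the argument order and the exact meaning of output_stride assumed rather than read from the source:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Scalar reference with the parameter set the indexed kernels expose. Not
// XNNPACK code: the argument order and the assumption that output_stride is
// the byte distance between output rows are this sketch's, not the library's.
static void x8_transposec_scalar_reference(
    const uint8_t* input,    // block_height rows of block_width elements
    uint8_t* output,         // block_width rows of block_height elements
    size_t input_stride,     // bytes between consecutive input rows
    size_t output_stride,    // bytes between consecutive output rows (assumed)
    size_t block_width,
    size_t block_height) {
  assert(input_stride >= block_width * sizeof(uint8_t));  // as in the generated kernels
  for (size_t r = 0; r < block_height; r++) {
    const uint8_t* in_row = (const uint8_t*) ((uintptr_t) input + r * input_stride);
    for (size_t c = 0; c < block_width; c++) {
      uint8_t* out_row = (uint8_t*) ((uintptr_t) output + c * output_stride);
      out_row[r] = in_row[c];  // input element (r, c) lands at output (c, r)
    }
  }
}
```

The reuse/multi, dec/mov and zip parts of the generated file names appear to select different input/output pointer-advance and shuffle strategies for this same contract; those differences are not visible in the index.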
/external/XNNPACK/src/x16-transposec/gen/

8x8-reuse-dec-zip-neon.c (references in xnn_x16_transposec_ukernel__8x8_reuse_dec_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint16_t));
    41   const size_t rem = min(block_width - 1, 7);
    71   if XNN_UNPREDICTABLE(block_width > 7) {
    75   if XNN_UNPREDICTABLE(block_width >= 7) {
    79   if XNN_UNPREDICTABLE(block_width > 5) {
    83   if XNN_UNPREDICTABLE(block_width >= 5) {
    87   if XNN_UNPREDICTABLE(block_width > 3) {
    91   if XNN_UNPREDICTABLE(block_width >= 3) {
    95   if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-multi-dec-zip-neon.c (references in xnn_x16_transposec_ukernel__8x8_multi_dec_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint16_t));
    49   const size_t rem = min(block_width - 1, 7);
    79   if XNN_UNPREDICTABLE(block_width > 7) {
    83   if XNN_UNPREDICTABLE(block_width >= 7) {
    87   if XNN_UNPREDICTABLE(block_width > 5) {
    91   if XNN_UNPREDICTABLE(block_width >= 5) {
    95   if XNN_UNPREDICTABLE(block_width > 3) {
    99   if XNN_UNPREDICTABLE(block_width >= 3) {
    103  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-reuse-mov-zip-neon.c (references in xnn_x16_transposec_ukernel__8x8_reuse_mov_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint16_t));
    41   const size_t rem = min(block_width - 1, 7);
    72   if XNN_UNPREDICTABLE(block_width > 7) {
    77   if XNN_UNPREDICTABLE(block_width >= 7) {
    82   if XNN_UNPREDICTABLE(block_width > 5) {
    87   if XNN_UNPREDICTABLE(block_width >= 5) {
    92   if XNN_UNPREDICTABLE(block_width > 3) {
    97   if XNN_UNPREDICTABLE(block_width >= 3) {
    102  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-reuse-mov-wasmsimd.c (references in xnn_x16_transposec_ukernel__8x8_reuse_mov_wasmsimd):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint16_t));
    41   const size_t rem = min(block_width - 1, 7);
    91   if XNN_UNPREDICTABLE(block_width > 7) {
    96   if XNN_UNPREDICTABLE(block_width >= 7) {
    101  if XNN_UNPREDICTABLE(block_width > 5) {
    106  if XNN_UNPREDICTABLE(block_width >= 5) {
    111  if XNN_UNPREDICTABLE(block_width > 3) {
    116  if XNN_UNPREDICTABLE(block_width >= 3) {
    121  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-multi-mov-wasmsimd.c (references in xnn_x16_transposec_ukernel__8x8_multi_mov_wasmsimd):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint16_t));
    49   const size_t rem = min(block_width - 1, 7);
    99   if XNN_UNPREDICTABLE(block_width > 7) {
    104  if XNN_UNPREDICTABLE(block_width >= 7) {
    109  if XNN_UNPREDICTABLE(block_width > 5) {
    114  if XNN_UNPREDICTABLE(block_width >= 5) {
    119  if XNN_UNPREDICTABLE(block_width > 3) {
    124  if XNN_UNPREDICTABLE(block_width >= 3) {
    129  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-multi-mov-zip-neon.c (references in xnn_x16_transposec_ukernel__8x8_multi_mov_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint16_t));
    49   const size_t rem = min(block_width - 1, 7);
    80   if XNN_UNPREDICTABLE(block_width > 7) {
    85   if XNN_UNPREDICTABLE(block_width >= 7) {
    90   if XNN_UNPREDICTABLE(block_width > 5) {
    95   if XNN_UNPREDICTABLE(block_width >= 5) {
    100  if XNN_UNPREDICTABLE(block_width > 3) {
    105  if XNN_UNPREDICTABLE(block_width >= 3) {
    110  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-multi-mov-sse2.c (references in xnn_x16_transposec_ukernel__8x8_multi_mov_sse2):
    25   size_t block_width,   (function argument)
    29   assert(input_stride >= block_width * sizeof(uint16_t));
    51   const size_t rem = min(block_width - 1, 7);
    104  if XNN_UNPREDICTABLE(block_width > 7) {
    109  if XNN_UNPREDICTABLE(block_width >= 7) {
    114  if XNN_UNPREDICTABLE(block_width > 5) {
    119  if XNN_UNPREDICTABLE(block_width >= 5) {
    124  if XNN_UNPREDICTABLE(block_width > 3) {
    129  if XNN_UNPREDICTABLE(block_width >= 3) {
    134  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

8x8-reuse-mov-sse2.c (references in xnn_x16_transposec_ukernel__8x8_reuse_mov_sse2):
    25   size_t block_width,   (function argument)
    29   assert(input_stride >= block_width * sizeof(uint16_t));
    43   const size_t rem = min(block_width - 1, 7);
    96   if XNN_UNPREDICTABLE(block_width > 7) {
    101  if XNN_UNPREDICTABLE(block_width >= 7) {
    106  if XNN_UNPREDICTABLE(block_width > 5) {
    111  if XNN_UNPREDICTABLE(block_width >= 5) {
    116  if XNN_UNPREDICTABLE(block_width > 3) {
    121  if XNN_UNPREDICTABLE(block_width >= 3) {
    126  if XNN_UNPREDICTABLE(block_width > 1) {
    [all …]

4x4-reuse-dec-zip-neon.c (references in xnn_x16_transposec_ukernel__4x4_reuse_dec_zip_neon):
    23   size_t block_width,   (function argument)
    27   assert(input_stride >= block_width * sizeof(uint16_t));
    41   const size_t rem = min(block_width - 1, 3);
    59   if XNN_UNPREDICTABLE(block_width > 3) {
    63   if XNN_UNPREDICTABLE(block_width >= 3) {
    67   if XNN_UNPREDICTABLE(block_width > 1) {
    102  if XNN_UNPREDICTABLE(block_width > 3) {
    106  if XNN_UNPREDICTABLE(block_width >= 3) {
    110  if XNN_UNPREDICTABLE(block_width > 1) {
    122  if XNN_UNPREDICTABLE(block_width > 3) {
    [all …]
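The ladders of XNN_UNPREDICTABLE(block_width > N) and (block_width >= N) checks indexed in both the x8 and x16 kernels handle the final, possibly partial, column block: each condition asks whether transposed output row c of the tile is still inside the block. What the guarded branch bodies then do (a store, or an output-pointer adjustment) is not shown by the index. A scalar model of just the predicates, as an illustration rather than XNNPACK code:

```cpp
#include <algorithm>
#include <cstddef>

// For a tile kernel that is tile_width columns wide, transposed output row c
// corresponds to input column c, so in the last partial block it is meaningful
// only when c < block_width. The generated 8x8 kernels unroll this as
//   if XNN_UNPREDICTABLE(block_width > 7)  { ... }   // row 7
//   if XNN_UNPREDICTABLE(block_width >= 7) { ... }   // row 6 (>= 7 is > 6)
//   if XNN_UNPREDICTABLE(block_width > 5)  { ... }   // row 5
//   ...
//   if XNN_UNPREDICTABLE(block_width > 1)  { ... }   // row 1
// with row 0 needing no check because block_width is nonzero (the tester
// asserts block_width != 0); the 16x16 and 4x4 kernels use analogous thresholds.
static inline bool output_row_is_valid(size_t block_width, size_t c) {
  return block_width > c;  // equivalent to c < block_width
}

// Mirrors the "rem = min(block_width - 1, 15|7|3)" lines: block_width - 1
// clamped to the last row index of the 16x16, 8x8 or 4x4 tile.
static inline size_t clamp_to_tile(size_t block_width, size_t tile_width) {
  return std::min(block_width - 1, tile_width - 1);
}
```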
/external/pdfium/third_party/libopenjpeg/

sparse_array.c:
    38   OPJ_UINT32 block_width;   (struct member)
    47   OPJ_UINT32 block_width,   (argument of opj_sparse_array_int32_create)
    52   if (width == 0 || height == 0 || block_width == 0 || block_height == 0) {   (in opj_sparse_array_int32_create)
    55   if (block_width > ((OPJ_UINT32)~0U) / block_height / sizeof(OPJ_INT32)) {   (in opj_sparse_array_int32_create)
    63   sa->block_width = block_width;   (in opj_sparse_array_int32_create)
    65   sa->block_count_hor = opj_uint_ceildiv(width, block_width);   (in opj_sparse_array_int32_create)
    119  const OPJ_UINT32 block_width = sa->block_width;   (local in opj_sparse_array_int32_read_or_write)
    134  block_x = x0 / block_width;   (in opj_sparse_array_int32_read_or_write)
    139  x_incr = (x == x0) ? block_width - (x0 % block_width) : block_width;   (in opj_sparse_array_int32_read_or_write)
    140  block_x_offset = block_width - x_incr;   (in opj_sparse_array_int32_read_or_write)
    [all …]
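The sparse_array.c references outline a blocked two-dimensional OPJ_INT32 array: opj_sparse_array_int32_create() rejects zero dimensions (line 52) and block sizes whose block_width * block_height * sizeof(OPJ_INT32) would overflow an OPJ_UINT32 (line 55), stores block_width (line 63), and sizes the horizontal block grid with a ceiling division (line 65); opj_sparse_array_int32_read_or_write() then converts a column range into a block index, a per-block advance, and an in-block offset (lines 134, 139, 140). A standalone sketch of that horizontal walk, with uint32_t standing in for OPJ_UINT32 (an illustration, not the libopenjpeg code):

```cpp
#include <cstdint>
#include <cstdio>

// Mirrors opj_uint_ceildiv(), used at line 65 to size the horizontal block grid.
static inline uint32_t uint_ceildiv(uint32_t a, uint32_t b) {
  return (a + b - 1) / b;
}

// Walk the blocks covering the column range [x0, x1) of an array whose blocks
// are block_width columns wide, computing the same quantities as the indexed
// lines: block_x (line 134), x_incr (line 139) and block_x_offset (line 140).
static void walk_horizontal_blocks(uint32_t width, uint32_t block_width,
                                   uint32_t x0, uint32_t x1) {
  const uint32_t block_count_hor = uint_ceildiv(width, block_width);
  uint32_t block_x = x0 / block_width;  // index of the first block touched
  uint32_t x = x0;
  while (x < x1) {
    // The first block may be entered mid-way; later blocks start at offset 0.
    const uint32_t x_incr =
        (x == x0) ? block_width - (x0 % block_width) : block_width;
    const uint32_t block_x_offset = block_width - x_incr;
    const uint32_t x_end = (x + x_incr < x1) ? (x + x_incr) : x1;
    std::printf("block %u of %u: columns [%u, %u), in-block offset %u\n",
                (unsigned) block_x, (unsigned) block_count_hor,
                (unsigned) x, (unsigned) x_end, (unsigned) block_x_offset);
    x = x_end;
    block_x++;
  }
}

int main() {
  // Example: 100-column array with 16-column blocks, touching columns [20, 70).
  walk_horizontal_blocks(100, 16, 20, 70);
  return 0;
}
```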