/external/libjpeg-turbo/
  jpeg_nbits_table.c
    2056  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2057  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2058  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2059  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2060  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2061  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2062  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2063  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2064  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    2065  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    [all …]
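The matched rows appear to be the tail of a bit-length lookup table: entry i holds the number of bits needed to represent i, and every index from 32768 up needs 16 bits, hence the solid runs of 16 above. A minimal sketch of the function such a table memoizes (name and signature are illustrative, not libjpeg-turbo's API):

#include <stdint.h>

/* Sketch: what each table entry encodes - the bit length of its index.
 * nbits(0) == 0, nbits(32768..65535) == 16, matching the rows above. */
static int nbits(uint16_t x)
{
    int n = 0;
    while (x) {
        n++;
        x >>= 1;
    }
    return n;
}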
/external/openscreen/third_party/boringssl/win-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
    40  stp x29,x30,[sp,#-16]!
    59  eor v0.16b,v0.16b,v0.16b
    60  ld1 {v3.16b},[x0],#16
    70  tbl v6.16b,{v3.16b},v2.16b
    71  ext v5.16b,v0.16b,v3.16b,#12
    72  st1 {v3.4s},[x2],#16
    73  aese v6.16b,v0.16b
    76  eor v3.16b,v3.16b,v5.16b
    77  ext v5.16b,v0.16b,v5.16b,#12
    78  eor v3.16b,v3.16b,v5.16b
    [all …]
  vpaes-armv8.S
    125  movi v17.16b, #0x0f
    154  adrp x11, Lk_mc_forward+16
    155  add x11, x11, :lo12:Lk_mc_forward+16
    157  ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    158  and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    159  ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    160  tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    161  // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    162  tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    163  eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    [all …]
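The aesv8-armx64.S matches above (repeated below for several target variants) come from the hardware-assisted AES-128 key-expansion loop: aese against the zeroed v0 reduces to the byte substitution step, tbl rotates the last word, and the ext/eor pairs ripple each new word across the round key. A minimal scalar sketch of that ripple, with illustrative names (not BoringSSL's code):

#include <stdint.h>

/* Scalar model of the ext/eor cascade in the key-expansion loop:
 * given t = SubWord(RotWord(k[3])) ^ rcon (produced by aese + tbl
 * in the assembly), each 32-bit word of the next round key is the
 * running xor of the previous words. Illustrative only. */
static void key_expand_ripple(uint32_t k[4], uint32_t t)
{
    k[0] ^= t;
    k[1] ^= k[0];
    k[2] ^= k[1];
    k[3] ^= k[2];
}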
/external/openscreen/third_party/boringssl/ios-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
    37  stp x29,x30,[sp,#-16]!
    56  eor v0.16b,v0.16b,v0.16b
    57  ld1 {v3.16b},[x0],#16
    67  tbl v6.16b,{v3.16b},v2.16b
    68  ext v5.16b,v0.16b,v3.16b,#12
    69  st1 {v3.4s},[x2],#16
    70  aese v6.16b,v0.16b
    73  eor v3.16b,v3.16b,v5.16b
    74  ext v5.16b,v0.16b,v5.16b,#12
    75  eor v3.16b,v3.16b,v5.16b
    [all …]
/external/boringssl/win-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
    40  stp x29,x30,[sp,#-16]!
    59  eor v0.16b,v0.16b,v0.16b
    60  ld1 {v3.16b},[x0],#16
    70  tbl v6.16b,{v3.16b},v2.16b
    71  ext v5.16b,v0.16b,v3.16b,#12
    72  st1 {v3.4s},[x2],#16
    73  aese v6.16b,v0.16b
    76  eor v3.16b,v3.16b,v5.16b
    77  ext v5.16b,v0.16b,v5.16b,#12
    78  eor v3.16b,v3.16b,v5.16b
    [all …]
/external/boringssl/ios-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
    37  stp x29,x30,[sp,#-16]!
    56  eor v0.16b,v0.16b,v0.16b
    57  ld1 {v3.16b},[x0],#16
    67  tbl v6.16b,{v3.16b},v2.16b
    68  ext v5.16b,v0.16b,v3.16b,#12
    69  st1 {v3.4s},[x2],#16
    70  aese v6.16b,v0.16b
    73  eor v3.16b,v3.16b,v5.16b
    74  ext v5.16b,v0.16b,v5.16b,#12
    75  eor v3.16b,v3.16b,v5.16b
    [all …]
  vpaes-armv8.S
    122  movi v17.16b, #0x0f
    149  adrp x11, Lk_mc_forward@PAGE+16
    150  add x11, x11, Lk_mc_forward@PAGEOFF+16
    152  ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    153  and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    154  ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    155  tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    156  // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    157  tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    158  eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    [all …]
/external/rust/crates/quiche/deps/boringssl/linux-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
    36  stp x29,x30,[sp,#-16]!
    55  eor v0.16b,v0.16b,v0.16b
    56  ld1 {v3.16b},[x0],#16
    66  tbl v6.16b,{v3.16b},v2.16b
    67  ext v5.16b,v0.16b,v3.16b,#12
    68  st1 {v3.4s},[x2],#16
    69  aese v6.16b,v0.16b
    72  eor v3.16b,v3.16b,v5.16b
    73  ext v5.16b,v0.16b,v5.16b,#12
    74  eor v3.16b,v3.16b,v5.16b
    [all …]
  vpaes-armv8.S
    121  movi v17.16b, #0x0f
    148  adrp x11, .Lk_mc_forward+16
    149  add x11, x11, :lo12:.Lk_mc_forward+16
    151  ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    152  and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    153  ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    154  tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    155  // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    156  tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    157  eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    [all …]
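The vpaes-armv8.S matches show the constant-time S-box technique the file is named for: mask the low nibble (and with v17 = 0x0f), shift out the high nibble (ushr #4), run each through a 16-entry tbl lookup, and eor the halves together; the trailing comments map each NEON step back to the original SSSE3 vpshufb version. A scalar sketch of the idea (illustrative names, not the library's code):

#include <stdint.h>

/* One nibble-split table lookup, as in the and/ushr/tbl/eor sequence:
 * a 256-entry table is replaced by two 16-entry tables indexed by the
 * low and high nibbles, which vectorizes as byte shuffles and avoids
 * data-dependent memory accesses. */
static uint8_t nibble_lookup(uint8_t x,
                             const uint8_t lo[16], const uint8_t hi[16])
{
    return lo[x & 0x0f] ^ hi[x >> 4];
}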
/external/rust/crates/quiche/deps/boringssl/ios-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
    35  stp x29,x30,[sp,#-16]!
    54  eor v0.16b,v0.16b,v0.16b
    55  ld1 {v3.16b},[x0],#16
    65  tbl v6.16b,{v3.16b},v2.16b
    66  ext v5.16b,v0.16b,v3.16b,#12
    67  st1 {v3.4s},[x2],#16
    68  aese v6.16b,v0.16b
    71  eor v3.16b,v3.16b,v5.16b
    72  ext v5.16b,v0.16b,v5.16b,#12
    73  eor v3.16b,v3.16b,v5.16b
    [all …]
  vpaes-armv8.S
    120  movi v17.16b, #0x0f
    147  adrp x11, Lk_mc_forward@PAGE+16
    148  add x11, x11, Lk_mc_forward@PAGEOFF+16
    150  ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    151  and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    152  ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    153  tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    154  // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    155  tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    156  eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    [all …]
/external/llvm/test/CodeGen/Hexagon/
  reg-scavenger-valid-slot.ll
    20  define void @foo(<16 x i32>* nocapture readnone %p) #0 {
    39  …, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 16
    52  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    53  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    54  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    55  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    56  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    57  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    58  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    59  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    [all …]
/external/boringssl/linux-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
    38  stp x29,x30,[sp,#-16]!
    57  eor v0.16b,v0.16b,v0.16b
    58  ld1 {v3.16b},[x0],#16
    68  tbl v6.16b,{v3.16b},v2.16b
    69  ext v5.16b,v0.16b,v3.16b,#12
    70  st1 {v3.4s},[x2],#16
    71  aese v6.16b,v0.16b
    74  eor v3.16b,v3.16b,v5.16b
    75  ext v5.16b,v0.16b,v5.16b,#12
    76  eor v3.16b,v3.16b,v5.16b
    [all …]
  vpaes-armv8.S
    123  movi v17.16b, #0x0f
    150  adrp x11, .Lk_mc_forward+16
    151  add x11, x11, :lo12:.Lk_mc_forward+16
    153  ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    154  and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    155  ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    156  tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    157  // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    158  tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    159  eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    [all …]
/external/openscreen/third_party/boringssl/linux-aarch64/crypto/fipsmodule/
  aesv8-armx64.S
    38  stp x29,x30,[sp,#-16]!
    57  eor v0.16b,v0.16b,v0.16b
    58  ld1 {v3.16b},[x0],#16
    68  tbl v6.16b,{v3.16b},v2.16b
    69  ext v5.16b,v0.16b,v3.16b,#12
    70  st1 {v3.4s},[x2],#16
    71  aese v6.16b,v0.16b
    74  eor v3.16b,v3.16b,v5.16b
    75  ext v5.16b,v0.16b,v5.16b,#12
    76  eor v3.16b,v3.16b,v5.16b
    [all …]
/external/llvm-project/llvm/test/CodeGen/Hexagon/
  reg-scavenger-valid-slot.ll
    20  define void @foo(<16 x i32>* nocapture readnone %p) #0 {
    39  …, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 16
    52  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    53  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    54  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    55  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    56  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    57  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    58  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    59  …16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16 x i32>, <16…
    [all …]
  reg-scavengebug.ll
    7  declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
    10  declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #0
    13  declare <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32>, <16 x i32>) #0
    16  declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #0
    19  declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #0
    26  %v2 = bitcast i16* %a0 to <16 x i32>*
    27  %v3 = bitcast i8* %a3 to <16 x i32>*
    29  %v5 = bitcast i32* %v4 to <16 x i32>*
    30  %v6 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !0
    31  %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 32768)
    [all …]
/external/rust/crates/ring/pregenerated/
  aesv8-armx-ios64.S
    34  stp x29,x30,[sp,#-16]!
    53  eor v0.16b,v0.16b,v0.16b
    54  ld1 {v3.16b},[x0],#16
    64  tbl v6.16b,{v3.16b},v2.16b
    65  ext v5.16b,v0.16b,v3.16b,#12
    66  st1 {v3.4s},[x2],#16
    67  aese v6.16b,v0.16b
    70  eor v3.16b,v3.16b,v5.16b
    71  ext v5.16b,v0.16b,v5.16b,#12
    72  eor v3.16b,v3.16b,v5.16b
    [all …]
  aesv8-armx-linux64.S
    35  stp x29,x30,[sp,#-16]!
    54  eor v0.16b,v0.16b,v0.16b
    55  ld1 {v3.16b},[x0],#16
    65  tbl v6.16b,{v3.16b},v2.16b
    66  ext v5.16b,v0.16b,v3.16b,#12
    67  st1 {v3.4s},[x2],#16
    68  aese v6.16b,v0.16b
    71  eor v3.16b,v3.16b,v5.16b
    72  ext v5.16b,v0.16b,v5.16b,#12
    73  eor v3.16b,v3.16b,v5.16b
    [all …]
/external/libhevc/common/
  ihevc_quant_tables.c
    49  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    50  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    51  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    52  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    53  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    54  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    55  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    56  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    57  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    58  …16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16…
    [all …]
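These rows are consistent with HEVC's default flat scaling lists, in which every entry is the neutral factor 16. With the usual 4-bit normalization shift, a flat 16 is unity gain, as this one-line sketch shows (illustrative only, not libhevc's API; the real dequantization also folds in QP-dependent scaling):

#include <stdint.h>

/* A flat scaling-list entry of 16 cancels against the 4-bit shift,
 * so default (flat) scaling leaves coefficient levels unchanged. */
static int32_t scale_coeff(int32_t level, int32_t entry)
{
    return (level * entry) >> 4;   /* entry == 16  =>  identity */
}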
/external/llvm/test/CodeGen/SystemZ/
  frame-19.ll
    5  ; We need to allocate a 16-byte spill slot and save the 8 call-saved FPRs.
    6  ; The frame size should be exactly 160 + 16 + 8 * 8 = 240.
    7  define void @f1(<16 x i8> *%ptr) {
    30  %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
    31  %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
    32  %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
    33  %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
    34  %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
    35  %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
    36  %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
    [all …]
/external/llvm-project/llvm/test/CodeGen/SystemZ/
  frame-19.ll
    5  ; We need to allocate a 16-byte spill slot and save the 8 call-saved FPRs.
    6  ; The frame size should be exactly 160 + 16 + 8 * 8 = 240.
    7  define void @f1(<16 x i8> *%ptr) {
    30  %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
    31  %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
    32  %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
    33  %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
    34  %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
    35  %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
    36  %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
    [all …]
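The frame-size comment in both copies of frame-19.ll is plain arithmetic: the SystemZ ABI's 160-byte register save area, plus the 16-byte vector spill slot the test forces, plus eight call-saved FPRs at 8 bytes each. As a compile-time check of that sum (a sketch, not part of the test):

/* 160 (ABI register save area) + 16 (spill slot) + 8*8 (FPRs) = 240 */
enum { SAVE_AREA = 160, SPILL_SLOT = 16, FPR_SAVES = 8 * 8 };
_Static_assert(SAVE_AREA + SPILL_SLOT + FPR_SAVES == 240,
               "frame size expected by frame-19.ll");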
/external/libpng/powerpc/
  filter_vsx_intrinsics.c
    46  size_t unaligned_top = 16 - (((size_t)rp % 16));\
    48  if(unaligned_top == 16)\
    65  /* Altivec operations require 16-byte aligned data  in png_read_filter_row_up_vsx()
    76  while( istop >= 16 )  in png_read_filter_row_up_vsx()
    85  pp += 16;  in png_read_filter_row_up_vsx()
    86  rp += 16;  in png_read_filter_row_up_vsx()
    87  istop -= 16;  in png_read_filter_row_up_vsx()
    92  /* If byte count of row is not divisible by 16  in png_read_filter_row_up_vsx()
    104  …tic const vector unsigned char VSX_LEFTSHIFTED1_4 = {16,16,16,16, 0, 1, 2, 3,16,16,16,16,16,16,16,…
    105  …tic const vector unsigned char VSX_LEFTSHIFTED2_4 = {16,16,16,16,16,16,16,16, 4, 5, 6, 7,16,16,16,…
    [all …]
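The matches show the usual SIMD row-filter shape: peel a scalar prologue until the row pointer is 16-byte aligned, run the vector loop 16 bytes at a time, then mop up the remainder. A minimal sketch of that structure for PNG's "up" filter (each byte adds the byte directly above it), with a scalar stand-in for the VSX body and illustrative names:

#include <stddef.h>
#include <stdint.h>

/* Illustrative skeleton of the align-then-vectorize pattern used by
 * png_read_filter_row_up_vsx(); rp is the current row, pp the prior row. */
static void filter_up(uint8_t *rp, const uint8_t *pp, size_t len)
{
    size_t head = (16 - ((size_t)rp % 16)) % 16;  /* bytes until alignment */
    if (head > len) head = len;
    for (size_t i = 0; i < head; i++) rp[i] += pp[i];  /* scalar prologue */
    rp += head; pp += head; len -= head;
    while (len >= 16) {             /* aligned body (vec_add in the real code) */
        for (int i = 0; i < 16; i++) rp[i] += pp[i];
        rp += 16; pp += 16; len -= 16;
    }
    for (size_t i = 0; i < len; i++) rp[i] += pp[i];   /* remainder */
}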
/external/llvm-project/llvm/test/CodeGen/AArch64/
  misched-fusion-aes.ll
    11  declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
    12  declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
    13  declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
    14  declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %d)
    16  define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
    17  %d0 = load <16 x i8>, <16 x i8>* %a0
    18  %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
    19  %d1 = load <16 x i8>, <16 x i8>* %a1
    20  %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
    21  %d2 = load <16 x i8>, <16 x i8>* %a2
    [all …]
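misched-fusion-aes.ll checks that the machine scheduler keeps each aese next to the aesmc that consumes it, so cores that macro-fuse the pair can do so. In C, the same dependent pair falls out of the NEON crypto intrinsics; a sketch of one encrypt-round step (requires an AES-capable AArch64 target, e.g. -march=armv8-a+crypto):

#include <arm_neon.h>

/* One AES encrypt round step: aese (AddRoundKey + byte substitution
 * and shuffle) feeding aesmc (MixColumns). Keeping the two adjacent
 * is what lets the core fuse them into a single macro-op. */
static uint8x16_t aes_round(uint8x16_t state, uint8x16_t round_key)
{
    return vaesmcq_u8(vaeseq_u8(state, round_key));
}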
/external/llvm-project/llvm/test/Analysis/CostModel/X86/
  rem.ll
    23  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i32 = srem <16 x i32…
    26  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i16 = srem <16 x i16…
    29  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i8 = srem <16 x i8> …
    42  %V16i32 = srem <16 x i32> undef, undef
    46  %V16i16 = srem <16 x i16> undef, undef
    50  %V16i8 = srem <16 x i8> undef, undef
    66  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i32 = urem <16 x i32…
    69  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i16 = urem <16 x i16…
    72  ; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V16i8 = urem <16 x i8> …
    85  %V16i32 = urem <16 x i32> undef, undef
    [all …]