// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif

#if !defined(OPENSSL_NO_ASM)
#include <GFp/arm_arch.h>

.section __TEXT,__const


.align 7 // totally strategic alignment
_vpaes_consts:
Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Key schedule constants
//
Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE

Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2

.align 6

.text
##
##  _aes_preheat
##
##  Fills register %r10 -> .aes_consts (so you can -fPIC)
##  and %xmm9-%xmm15 as specified below.
##

.align 4
_vpaes_encrypt_preheat:
    adrp x10, Lk_inv@PAGE
    add x10, x10, Lk_inv@PAGEOFF
    movi v17.16b, #0x0f
    ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv
    ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo
    ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2
    ret


##
##  _aes_encrypt_core
##
##  AES-encrypt %xmm0.
##
##  Inputs:
##     %xmm0 = input
##     %xmm9-%xmm15 as in _vpaes_preheat
##    (%rdx) = scheduled keys
##
##  Output in %xmm0
##  Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
##  Preserves %xmm6 - %xmm8 so you get some local vectors
##
##

.align 4
_vpaes_encrypt_core:
    mov x9, x2
    ldr w8, [x2,#240] // pull rounds
    adrp x11, Lk_mc_forward@PAGE+16
    add x11, x11, Lk_mc_forward@PAGEOFF+16
    // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
    ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    b Lenc_entry

.align 4
Lenc_loop:
    // middle of middle round
    add x10, x11, #0x40
    tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
    ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
    tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
    ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
    tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
    eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
    tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
    tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
    eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
    and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
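    // (Reading of the table layout above: the ld1 post-increments x11 through
    // the four 16-byte rows of Lk_mc_forward; since _vpaes_consts is 128-byte
    // aligned, clearing bit 6 wraps the pointer back into that 64-byte table,
    // so the row index effectively advances mod 4, matching the "# ... mod 4"
    // note carried over from the x86 source.)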
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
    sub w8, w8, #1 // nr--

Lenc_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
    tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
    cbnz w8, Lenc_loop

    // middle of last round
    add x10, x11, #0x80
    // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
    tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
    tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
    ret


.globl _GFp_vpaes_encrypt
.private_extern _GFp_vpaes_encrypt

.align 4
_GFp_vpaes_encrypt:
    AARCH64_SIGN_LINK_REGISTER
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0

    ld1 {v7.16b}, [x0]
    bl _vpaes_encrypt_preheat
    bl _vpaes_encrypt_core
    st1 {v0.16b}, [x1]

    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret



.align 4
_vpaes_encrypt_2x:
    mov x9, x2
    ldr w8, [x2,#240] // pull rounds
    adrp x11, Lk_mc_forward@PAGE+16
    add x11, x11, Lk_mc_forward@PAGEOFF+16
    // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
    ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    and v9.16b, v15.16b, v17.16b
    ushr v8.16b, v15.16b, #4
    tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    tbl v9.16b, {v20.16b}, v9.16b
    // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    tbl v10.16b, {v21.16b}, v8.16b
    eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    eor v8.16b, v9.16b, v16.16b
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    eor v8.16b, v8.16b, v10.16b
    b Lenc_2x_entry

.align 4
Lenc_2x_loop:
    // middle of middle round
    add x10, x11, #0x40
    tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
    tbl v12.16b, {v25.16b}, v10.16b
    ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
    tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
    tbl v8.16b, {v24.16b}, v11.16b
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v12.16b, v12.16b, v16.16b
    tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
    tbl v13.16b, {v27.16b}, v10.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    eor v8.16b, v8.16b, v12.16b
    tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
    tbl v10.16b, {v26.16b}, v11.16b
    ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
    tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
    tbl v11.16b, {v8.16b}, v1.16b
    eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
    eor v10.16b, v10.16b, v13.16b
    tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
    tbl v8.16b, {v8.16b}, v4.16b
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
    eor v11.16b, v11.16b, v10.16b
    tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
    tbl v12.16b, {v11.16b},v1.16b
    eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
    eor v8.16b, v8.16b, v11.16b
    and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
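    // (Same table-pointer wrap as in Lenc_loop of _vpaes_encrypt_core above.)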
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
    eor v8.16b, v8.16b, v12.16b
    sub w8, w8, #1 // nr--

Lenc_2x_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
    and v9.16b, v8.16b, v17.16b
    ushr v8.16b, v8.16b, #4
    tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
    tbl v13.16b, {v19.16b},v9.16b
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    eor v9.16b, v9.16b, v8.16b
    tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v11.16b, {v18.16b},v8.16b
    tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    tbl v12.16b, {v18.16b},v9.16b
    eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v11.16b, v11.16b, v13.16b
    eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    eor v12.16b, v12.16b, v13.16b
    tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v10.16b, {v18.16b},v11.16b
    tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    tbl v11.16b, {v18.16b},v12.16b
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v10.16b, v10.16b, v9.16b
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    eor v11.16b, v11.16b, v8.16b
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
    cbnz w8, Lenc_2x_loop

    // middle of last round
    add x10, x11, #0x80
    // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
    tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    tbl v12.16b, {v22.16b}, v10.16b
    ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
    tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
    tbl v8.16b, {v23.16b}, v11.16b
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v12.16b, v12.16b, v16.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    eor v8.16b, v8.16b, v12.16b
    tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
    tbl v1.16b, {v8.16b},v1.16b
    ret

########################################################
##                                                    ##
##                  AES key schedule                  ##
##                                                    ##
########################################################

.align 4
_vpaes_key_preheat:
    adrp x10, Lk_inv@PAGE
    add x10, x10, Lk_inv@PAGEOFF
    movi v16.16b, #0x5b // Lk_s63
    adrp x11, Lk_sb1@PAGE
    add x11, x11, Lk_sb1@PAGEOFF
    movi v17.16b, #0x0f // Lk_s0F
    ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt
    adrp x10, Lk_dksd@PAGE
    add x10, x10, Lk_dksd@PAGEOFF
    ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1
    adrp x11, Lk_mc_forward@PAGE
    add x11, x11, Lk_mc_forward@PAGEOFF
    ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb
    ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9
    ld1 {v8.2d}, [x10] // Lk_rcon
    ld1 {v9.2d}, [x11] // Lk_mc_forward[0]
    ret



.align 4
_vpaes_schedule_core:
    AARCH64_SIGN_LINK_REGISTER
    stp x29, x30, [sp,#-16]!
    add x29,sp,#0

    bl _vpaes_key_preheat // load the tables

    ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)

    // input transform
    mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
    bl _vpaes_schedule_transform
    mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7

    adrp x10, Lk_sr@PAGE // lea Lk_sr(%rip),%r10
    add x10, x10, Lk_sr@PAGEOFF

    add x8, x8, x10

    // encrypting, output zeroth round key after transform
    st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)

    cmp w1, #192 // cmp $192, %esi
    b.hi Lschedule_256
    b.eq Lschedule_192
    // 128: fall through

##
##  .schedule_128
##
##  128-bit specific part of key schedule.
##
##  This schedule is really simple, because all its parts
##  are accomplished by the subroutines.
##
Lschedule_128:
    mov x0, #10 // mov $10, %esi

Loop_schedule_128:
    sub x0, x0, #1 // dec %esi
    bl _vpaes_schedule_round
    cbz x0, Lschedule_mangle_last
    bl _vpaes_schedule_mangle // write output
    b Loop_schedule_128

##
##  .aes_schedule_192
##
##  192-bit specific part of key schedule.
##
##  The main body of this schedule is the same as the 128-bit
##  schedule, but with more smearing. The long, high side is
##  stored in %xmm7 as before, and the short, low side is in
##  the high bits of %xmm6.
##
##  This schedule is somewhat nastier, however, because each
##  round produces 192 bits of key material, or 1.5 round keys.
##  Therefore, on each cycle we do 2 rounds and produce 3 round
##  keys.
##
.align 4
Lschedule_192:
    sub x0, x0, #8
    ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
    bl _vpaes_schedule_transform // input transform
    mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
    eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
    ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
    mov x0, #4 // mov $4, %esi

Loop_schedule_192:
    sub x0, x0, #1 // dec %esi
    bl _vpaes_schedule_round
    ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
    bl _vpaes_schedule_mangle // save key n
    bl _vpaes_schedule_192_smear
    bl _vpaes_schedule_mangle // save key n+1
    bl _vpaes_schedule_round
    cbz x0, Lschedule_mangle_last
    bl _vpaes_schedule_mangle // save key n+2
    bl _vpaes_schedule_192_smear
    b Loop_schedule_192

##
##  .aes_schedule_256
##
##  256-bit specific part of key schedule.
##
##  The structure here is very similar to the 128-bit
##  schedule, but with an additional "low side" in
##  %xmm6. The low side's rounds are the same as the
##  high side's, except no rcon and no rotation.
##
.align 4
Lschedule_256:
    ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
    bl _vpaes_schedule_transform // input transform
    mov x0, #7 // mov $7, %esi

Loop_schedule_256:
    sub x0, x0, #1 // dec %esi
    bl _vpaes_schedule_mangle // output low result
    mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6

    // high round
    bl _vpaes_schedule_round
    cbz x0, Lschedule_mangle_last
    bl _vpaes_schedule_mangle

    // low round. swap xmm7 and xmm6
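    // (As the .aes_schedule_256 header above notes, v7 holds the half being
    // expanded: the high half is parked in v5 while the low half takes a
    // round with no rcon and no rotation, then v7 is restored.)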
    dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
    movi v4.16b, #0
    mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
    mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
    bl _vpaes_schedule_low_round
    mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7

    b Loop_schedule_256

##
##  .aes_schedule_mangle_last
##
##  Mangler for last round of key schedule
##  Mangles %xmm0
##    when encrypting, outputs out(%xmm0) ^ 63
##    when decrypting, outputs unskew(%xmm0)
##
##  Always called right before return... jumps to cleanup and exits
##
.align 4
Lschedule_mangle_last:
    // schedule last round key from xmm0
    adrp x11, Lk_deskew@PAGE // lea Lk_deskew(%rip),%r11 # prepare to deskew
    add x11, x11, Lk_deskew@PAGEOFF

    cbnz w3, Lschedule_mangle_last_dec

    // encrypting
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
    adrp x11, Lk_opt@PAGE // lea Lk_opt(%rip), %r11 # prepare to output transform
    add x11, x11, Lk_opt@PAGEOFF
    add x2, x2, #32 // add $32, %rdx
    tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute

Lschedule_mangle_last_dec:
    ld1 {v20.2d,v21.2d}, [x11] // reload constants
    sub x2, x2, #16 // add $-16, %rdx
    eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0
    bl _vpaes_schedule_transform // output transform
    st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key

    // cleanup
    eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
    eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
    eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
    eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
    eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
    eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
    eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
    eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
    ldp x29, x30, [sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret


##
##  .aes_schedule_192_smear
##
##  Smear the short, low side in the 192-bit key schedule.
##
##  Inputs:
##    %xmm7: high side, b a x y
##    %xmm6: low side, d c 0 0
##    %xmm13: 0
##
##  Outputs:
##    %xmm6: b+c+d b+c 0 0
##    %xmm0: b+c+d b+c b a
##

.align 4
_vpaes_schedule_192_smear:
    movi v1.16b, #0
    dup v0.4s, v7.s[3]
    ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
    ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
    eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
    eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
    eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
    mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
    ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
    ret


##
##  .aes_schedule_round
##
##  Runs one main round of the key schedule on %xmm0, %xmm7
##
##  Specifically, runs subbytes on the high dword of %xmm0
##  then rotates it by one byte and xors into the low dword of
##  %xmm7.
##
##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
##  next rcon.
##
##  Smears the dwords of %xmm7 by xoring the low into the
##  second low, result into third, result into highest.
##
##  Returns results in %xmm7 = %xmm0.
##  Clobbers %xmm1-%xmm4, %r11.
553## 554 555.align 4 556_vpaes_schedule_round: 557 // extract rcon from xmm8 558 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 559 ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 560 ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 561 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 562 563 // rotate 564 dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 565 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 566 567 // fall through... 568 569 // low round: same as high round, but no rotation and no rcon. 570_vpaes_schedule_low_round: 571 // smear xmm7 572 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 573 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 574 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 575 576 // subbytes 577 and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k 578 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i 579 eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 580 tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k 581 eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j 582 tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i 583 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k 584 tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j 585 eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7 586 tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak 587 eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k 588 tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak 589 eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io 590 eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo 591 tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou 592 tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t 593 eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output 594 595 // add in smeared stuff 596 eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 597 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 598 ret 599 600 601## 602## .aes_schedule_transform 603## 604## Linear-transform %xmm0 according to tables at (%r11) 605## 606## Requires that %xmm9 = 0x0F0F... as in preheat 607## Output in %xmm0 608## Clobbers %xmm1, %xmm2 609## 610 611.align 4 612_vpaes_schedule_transform: 613 and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 614 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 615 // vmovdqa (%r11), %xmm2 # lo 616 tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 617 // vmovdqa 16(%r11), %xmm1 # hi 618 tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 619 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 620 ret 621 622 623## 624## .aes_schedule_mangle 625## 626## Mangle xmm0 from (basis-transformed) standard version 627## to our version. 
628## 629## On encrypt, 630## xor with 0x63 631## multiply by circulant 0,1,1,1 632## apply shiftrows transform 633## 634## On decrypt, 635## xor with 0x63 636## multiply by "inverse mixcolumns" circulant E,B,D,9 637## deskew 638## apply shiftrows transform 639## 640## 641## Writes out to (%rdx), and increments or decrements it 642## Keeps track of round number mod 4 in %r8 643## Preserves xmm0 644## Clobbers xmm1-xmm5 645## 646 647.align 4 648_vpaes_schedule_mangle: 649 mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later 650 // vmovdqa .Lk_mc_forward(%rip),%xmm5 651 652 // encrypting 653 eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4 654 add x2, x2, #16 // add $16, %rdx 655 tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 656 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 657 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 658 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 659 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 660 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 661 662Lschedule_mangle_both: 663 tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 664 add x8, x8, #64-16 // add $-16, %r8 665 and x8, x8, #~(1<<6) // and $0x30, %r8 666 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) 667 ret 668 669 670.globl _GFp_vpaes_set_encrypt_key 671.private_extern _GFp_vpaes_set_encrypt_key 672 673.align 4 674_GFp_vpaes_set_encrypt_key: 675 AARCH64_SIGN_LINK_REGISTER 676 stp x29,x30,[sp,#-16]! 677 add x29,sp,#0 678 stp d8,d9,[sp,#-16]! // ABI spec says so 679 680 lsr w9, w1, #5 // shr $5,%eax 681 add w9, w9, #5 // $5,%eax 682 str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; 683 684 mov w3, #0 // mov $0,%ecx 685 mov x8, #0x30 // mov $0x30,%r8d 686 bl _vpaes_schedule_core 687 eor x0, x0, x0 688 689 ldp d8,d9,[sp],#16 690 ldp x29,x30,[sp],#16 691 AARCH64_VALIDATE_LINK_REGISTER 692 ret 693 694.globl _GFp_vpaes_ctr32_encrypt_blocks 695.private_extern _GFp_vpaes_ctr32_encrypt_blocks 696 697.align 4 698_GFp_vpaes_ctr32_encrypt_blocks: 699 AARCH64_SIGN_LINK_REGISTER 700 stp x29,x30,[sp,#-16]! 701 add x29,sp,#0 702 stp d8,d9,[sp,#-16]! // ABI spec says so 703 stp d10,d11,[sp,#-16]! 704 stp d12,d13,[sp,#-16]! 705 stp d14,d15,[sp,#-16]! 706 707 cbz x2, Lctr32_done 708 709 // Note, unlike the other functions, x2 here is measured in blocks, 710 // not bytes. 711 mov x17, x2 712 mov x2, x3 713 714 // Load the IV and counter portion. 715 ldr w6, [x4, #12] 716 ld1 {v7.16b}, [x4] 717 718 bl _vpaes_encrypt_preheat 719 tst x17, #1 720 rev w6, w6 // The counter is big-endian. 721 b.eq Lctr32_prep_loop 722 723 // Handle one block so the remaining block count is even for 724 // _vpaes_encrypt_2x. 725 ld1 {v6.16b}, [x0], #16 // Load input ahead of time 726 bl _vpaes_encrypt_core 727 eor v0.16b, v0.16b, v6.16b // XOR input and result 728 st1 {v0.16b}, [x1], #16 729 subs x17, x17, #1 730 // Update the counter. 731 add w6, w6, #1 732 rev w7, w6 733 mov v7.s[3], w7 734 b.ls Lctr32_done 735 736Lctr32_prep_loop: 737 // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x 738 // uses v14 and v15. 739 mov v15.16b, v7.16b 740 mov v14.16b, v7.16b 741 add w6, w6, #1 742 rev w7, w6 743 mov v15.s[3], w7 744 745Lctr32_loop: 746 ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time 747 bl _vpaes_encrypt_2x 748 eor v0.16b, v0.16b, v6.16b // XOR input and result 749 eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) 750 st1 {v0.16b,v1.16b}, [x1], #32 751 subs x17, x17, #2 752 // Update the counter. 
    add w7, w6, #1
    add w6, w6, #2
    rev w7, w7
    mov v14.s[3], w7
    rev w7, w6
    mov v15.s[3], w7
    b.hi Lctr32_loop

Lctr32_done:
    ldp d14,d15,[sp],#16
    ldp d12,d13,[sp],#16
    ldp d10,d11,[sp],#16
    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    AARCH64_VALIDATE_LINK_REGISTER
    ret

#endif  // !OPENSSL_NO_ASM