Lines Matching +full:clang +full:- +full:8

2  *  Armv8-A Cryptographic Extension support functions for AArch64
5 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
13 #if __ARM_ARCH >= 8
19 /* TODO: Reconsider the above after https://reviews.llvm.org/D131064 is merged.
21 * The intrinsic declarations are guarded by predefined ACLE macros in clang:
22 * these are normally only enabled by the -march option on the command line.
24 * requiring -march on the command line.
30 /* See: https://arm-software.github.io/acle/main/acle.html#cryptographic-extensions
53 # error "Minimum version of Clang for MBEDTLS_AESCE_C on 32-bit Arm or Thumb is 11.0."
55 # error "Minimum version of Clang for MBEDTLS_AESCE_C on aarch64 is 4.0."
70 /* TODO: We haven't verified armclang for 32-bit Arm/Thumb prior to 6.20.
73 # error "Minimum version of armclang for MBEDTLS_AESCE_C on 32-bit Arm is 6.20."
83 # error "Must use minimum -march=armv8-a+crypto for MBEDTLS_AESCE_C"
85 # pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
89 # pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
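
A minimal illustration of what those pragmas buy: the rest of the file can be compiled for a baseline -march while individual functions opt in to the AES instructions. The function below is a hypothetical sketch (assuming a Clang recent enough that the arm_neon.h AES intrinsics are declared without -march), not code from the file:

    #include <arm_neon.h>

    /* Same effect as the pragma push/pop shown above, but scoped to one function. */
    __attribute__((target("aes")))
    static uint8x16_t aes_round_sketch(uint8x16_t state, uint8x16_t round_key)
    {
        /* AESE = AddRoundKey + SubBytes + ShiftRows; AESMC = MixColumns. */
        return vaesmcq_u8(vaeseq_u8(state, round_key));
    }
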
117 signed char mbedtls_aesce_has_support_result = -1;
126 * thread-safe, because we store the result in a char so cannot in mbedtls_aesce_has_support_impl()
127 * be vulnerable to non-atomic updates. in mbedtls_aesce_has_support_impl()
131 if (mbedtls_aesce_has_support_result == -1) { in mbedtls_aesce_has_support_impl()
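
The lazy-detection pattern those lines describe, as a short self-contained sketch (Linux/AArch64 only; getauxval() and the HWCAP_AES value are assumptions here, not necessarily what the file uses):

    #include <sys/auxv.h>

    #ifndef HWCAP_AES
    #define HWCAP_AES (1 << 3)   /* assumed value from the Linux AArch64 hwcap ABI */
    #endif

    static signed char aes_hw_support = -1;   /* -1 = not probed yet */

    static int aes_hw_available(void)
    {
        if (aes_hw_support == -1) {
            /* A char store is a single write, so a race at worst repeats the probe. */
            aes_hw_support = (getauxval(AT_HWCAP) & HWCAP_AES) ? 1 : 0;
        }
        return aes_hw_support;
    }
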
213 * It works because MixColumns is a linear operation over GF(2^8) and
215 * GF(2^8). (The inverse of MixColumns needs to be applied to the
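
One practical consequence of that linearity is the "equivalent inverse cipher" of FIPS 197: a decryption key schedule can be derived by applying InvMixColumns (the AESIMC instruction, vaesimcq_u8) to the middle encryption round keys in reverse order. A hedged sketch with illustrative names, assuming the crypto extension is enabled at compile time:

    #include <arm_neon.h>

    static void derive_decrypt_keys(const uint8x16_t *enc_rk, uint8x16_t *dec_rk, int nr)
    {
        dec_rk[0]  = enc_rk[nr];        /* first and last round keys are simply swapped */
        dec_rk[nr] = enc_rk[0];
        for (int i = 1; i < nr; i++) {
            dec_rk[i] = vaesimcq_u8(enc_rk[nr - i]);  /* InvMixColumns of the mirrored key */
        }
    }
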
263 * AES-ECB block en(de)cryption
271 unsigned char *keys = (unsigned char *) (ctx->buf + ctx->rk_offset); in mbedtls_aesce_crypt_ecb()
275 block = aesce_decrypt_block(block, keys, ctx->nr); in mbedtls_aesce_crypt_ecb()
281 block = aesce_encrypt_block(block, keys, ctx->nr); in mbedtls_aesce_crypt_ecb()
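
For context, the round structure a function like aesce_encrypt_block is built on looks roughly like the sketch below (standard AESE/AESMC pattern; the last round has no MixColumns). This is illustrative, not the file's exact code, and assumes arm_neon.h and the crypto extension as above:

    static uint8x16_t encrypt_block_sketch(uint8x16_t block, const uint8x16_t *rk, int nr)
    {
        for (int i = 0; i < nr - 1; i++) {
            block = vaesmcq_u8(vaeseq_u8(block, rk[i]));  /* full round */
        }
        block = vaeseq_u8(block, rk[nr - 1]);  /* last round: no MixColumns */
        return veorq_u8(block, rk[nr]);        /* final AddRoundKey */
    }
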
299 for (i = 1, j--; j > 0; i++, j--) { in mbedtls_aesce_inverse_key()
310 return (word << (32 - 8)) | (word >> 8); in aes_rot_word()
334 * - Section 5, Nr = Nk + 6 in aesce_setkey_enc()
335 * - Section 5.2, the length of round keys is Nb*(Nr+1) in aesce_setkey_enc()
350 size_t iteration = (size_t) (rki - (uint32_t *) rk) / key_len_in_words; in aesce_setkey_enc()
353 rko[0] = aes_rot_word(aes_sub_word(rki[key_len_in_words - 1])); in aesce_setkey_enc()
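
A worked example of that expansion step for AES-128 (key_len_in_words == 4, so Nr == 10 and Nb*(Nr+1) == 44 round-key words), reusing the aes_rot_word/aes_sub_word helpers shown above. The Rcon handling assumes little-endian word loads (Rcon in the low byte) and is illustrative only:

    #include <stdint.h>

    static void expand_key_128_sketch(const uint32_t key[4], uint32_t rk[44])
    {
        static const uint32_t rcon[10] = {
            0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
        };
        for (int i = 0; i < 4; i++) {
            rk[i] = key[i];
        }
        for (int i = 4; i < 44; i++) {
            uint32_t t = rk[i - 1];
            if (i % 4 == 0) {
                /* SubWord and RotWord commute, matching the order used above. */
                t = aes_rot_word(aes_sub_word(t)) ^ rcon[i / 4 - 1];
            }
            rk[i] = rk[i - 4] ^ t;
        }
    }
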
406 /* On clang for A32/T32, work around some missing intrinsics and types which are listed in
407 * [ACLE](https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#polynomial-1)
435 * an equivalent non-Neon implementation. Reverse bit order in each in vrbitq_u8()
437 asm ("ldm %[p], { r2-r5 } \n\t" in vrbitq_u8()
446 "stm %[p], { r2-r5 } \n\t" in vrbitq_u8()
468 /* Missing intrinsics common to both GCC 5 and Clang on 32-bit */
489 * poly64x1_t are different, and this differs between MSVC and Clang. */
492 /* MSVC reports `error C2440: 'type cast'` with cast. Clang does not report
494 * cast for clang also. */
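
A sketch of the kind of shim those comments describe: supply a missing vget_low_p64 by going through uint64 lanes, so no direct cast between poly64x1_t and poly64_t is needed (sidestepping the MSVC C2440 complaint). The real definition in the file may differ:

    #include <arm_neon.h>

    static inline poly64_t vget_low_p64_sketch(poly64x2_t a)
    {
        return (poly64_t) vgetq_lane_u64(vreinterpretq_u64_p64(a), 0);
    }
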
518 * Arm64 only has 64b->128b polynomial multipliers, so we need to do 4 64b
526 * |------------|-------------|-------------|
539 c = vextq_u8(b, b, 8); /* :c1:c0 = b0:b1 */ in poly_mult_128()
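
The 64x64->128 building block referred to there, as a hedged sketch: vmull_p64 is the ACLE carry-less multiply, and the reinterpret/lane helpers below are just one way to feed it from uint8x16_t operands (not necessarily the file's wrappers):

    #include <arm_neon.h>

    static inline uint8x16_t clmul_low64_sketch(uint8x16_t a, uint8x16_t b)
    {
        poly64_t a0 = (poly64_t) vgetq_lane_u64(vreinterpretq_u64_u8(a), 0);
        poly64_t b0 = (poly64_t) vgetq_lane_u64(vreinterpretq_u64_u8(b), 0);
        /* Carry-less product of the two low halves; three more of these
         * (low*high, high*low, high*high) make up the full 256-bit product. */
        return vreinterpretq_u8_p128(vmull_p64(a0, b0));
    }
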
559 * consider that z^128 ≡ r(z) (mod z^128 + r(z)), allowing us to write the 256-bit
575 uint8x16_t const MODULO = vreinterpretq_u8_u64(vshrq_n_u64(r, 64 - 8)); in poly_mult_reduce()
585 g = vextq_u8(ZERO, e, 8); /* : :g1:00 = e1:00 */ in poly_mult_reduce()
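
The same folding identity at a size small enough to check by hand: in GF(2^8) with the AES polynomial z^8 + z^4 + z^3 + z + 1 we have z^8 ≡ r(z) with r(z) = 0x1B, so the high byte of a carry-less product can be multiplied by r(z) and XORed into the low byte, repeating until nothing overflows. Purely illustrative; the file performs the analogous folding on 128-bit halves with PMULL:

    #include <stdint.h>

    static uint16_t clmul8(uint8_t a, uint8_t b)   /* shift-and-xor carry-less multiply */
    {
        uint16_t acc = 0;
        for (int i = 0; i < 8; i++) {
            if (b & (1u << i)) {
                acc ^= (uint16_t) (a << i);
            }
        }
        return acc;
    }

    static uint8_t reduce_mod_0x11B(uint16_t x)
    {
        while (x >> 8) {                                   /* fold h(z)*z^8 + l(z) -> h(z)*r(z) + l(z) */
            x = (uint16_t) ((x & 0xFF) ^ clmul8((uint8_t) (x >> 8), 0x1B));
        }
        return (uint8_t) x;
    }
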
609 #pragma clang attribute pop