/*
 * Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/base.h>

#include <string.h>

#include <openssl/mem.h>

#include "../../internal.h"
#include "../aes/internal.h"
#include "internal.h"


// kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
// bits of a |size_t|.
static const size_t kSizeTWithoutLower4Bits = (size_t) -16;


#define GCM_MUL(key, ctx, Xi) gcm_gmult_nohw((ctx)->Xi, (key)->Htable)
#define GHASH(key, ctx, in, len) \
  gcm_ghash_nohw((ctx)->Xi, (key)->Htable, in, len)
// GHASH_CHUNK is a "stride parameter" intended to mitigate cache-thrashing
// effects: the idea is to hash data while it is still in the L1 cache after
// the encryption pass.
#define GHASH_CHUNK (3 * 1024)

#if defined(GHASH_ASM_X86_64) || defined(GHASH_ASM_X86)
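// gcm_reduce_1bit multiplies |V| by x in GF(2^128), using GCM's bit-reflected
// representation: the element is shifted right by one bit and, if a bit fell
// off the low end, reduced by XORing in the reflected reduction polynomial
// x^128 + x^7 + x^2 + x + 1, which appears here as the constant 0xe1 << 120.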
static inline void gcm_reduce_1bit(u128 *V) {
  if (sizeof(crypto_word_t) == 8) {
    uint64_t T = UINT64_C(0xe100000000000000) & (0 - (V->lo & 1));
    V->lo = (V->hi << 63) | (V->lo >> 1);
    V->hi = (V->hi >> 1) ^ T;
  } else {
    uint32_t T = 0xe1000000U & (0 - (uint32_t)(V->lo & 1));
    V->lo = (V->hi << 63) | (V->lo >> 1);
    V->hi = (V->hi >> 1) ^ ((uint64_t)T << 32);
  }
}
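// gcm_init_ssse3 fills |Htable| with the products of H and each of the 16
// polynomials of degree < 4 in x: Htable[8] = H, and each gcm_reduce_1bit
// step gives H*x (Htable[4]), H*x^2 (Htable[2]), and H*x^3 (Htable[1]); the
// remaining entries are XOR combinations of those. The table is then
// transposed byte-wise so the SSSE3 code can index it with pshufb.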
void gcm_init_ssse3(u128 Htable[16], const uint64_t H[2]) {
  Htable[0].hi = 0;
  Htable[0].lo = 0;
  u128 V;
  V.hi = H[0];
  V.lo = H[1];

  Htable[8] = V;
  gcm_reduce_1bit(&V);
  Htable[4] = V;
  gcm_reduce_1bit(&V);
  Htable[2] = V;
  gcm_reduce_1bit(&V);
  Htable[1] = V;
  Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
  V = Htable[4];
  Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
  Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
  Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
  V = Htable[8];
  Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
  Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
  Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
  Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
  Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
  Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
  Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;

  // Treat |Htable| as a 16x16 byte table and transpose it. Thus, Htable[i]
  // contains the i'th byte of j*H for all j.
  uint8_t *Hbytes = (uint8_t *)Htable;
  for (int i = 0; i < 16; i++) {
    for (int j = 0; j < i; j++) {
      uint8_t tmp = Hbytes[16*i + j];
      Hbytes[16*i + j] = Hbytes[16*j + i];
      Hbytes[16*j + i] = tmp;
    }
  }
}
#endif  // GHASH_ASM_X86_64 || GHASH_ASM_X86

#ifdef GCM_FUNCREF
#undef GCM_MUL
#define GCM_MUL(key, ctx, Xi) (*gcm_gmult_p)((ctx)->Xi, (key)->Htable)
#undef GHASH
#define GHASH(key, ctx, in, len) \
  (*gcm_ghash_p)((ctx)->Xi, (key)->Htable, in, len)
#endif  // GCM_FUNCREF

#if defined(HW_GCM) && defined(OPENSSL_X86_64)
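// The VAES-based bulk functions below only process whole 16-byte blocks, so
// |len| is first rounded down to a multiple of the block size; afterwards the
// 32-bit big-endian counter at the end of |ivec| is advanced by the number of
// blocks processed.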
static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             uint8_t Xi[16], const u128 Htable[16],
                             enum gcm_impl_t impl) {
  switch (impl) {
    case gcm_x86_vaes_avx2:
      len &= kSizeTWithoutLower4Bits;
      aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi);
      CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
      return len;
    case gcm_x86_vaes_avx10_512:
      len &= kSizeTWithoutLower4Bits;
      aes_gcm_enc_update_vaes_avx10_512(in, out, len, key, ivec, Htable, Xi);
      CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
      return len;
    default:
      return aesni_gcm_encrypt(in, out, len, key, ivec, Htable, Xi);
  }
}

static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             uint8_t Xi[16], const u128 Htable[16],
                             enum gcm_impl_t impl) {
  switch (impl) {
    case gcm_x86_vaes_avx2:
      len &= kSizeTWithoutLower4Bits;
      aes_gcm_dec_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi);
      CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
      return len;
    case gcm_x86_vaes_avx10_512:
      len &= kSizeTWithoutLower4Bits;
      aes_gcm_dec_update_vaes_avx10_512(in, out, len, key, ivec, Htable, Xi);
      CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
      return len;
    default:
      return aesni_gcm_decrypt(in, out, len, key, ivec, Htable, Xi);
  }
}
#endif  // HW_GCM && X86_64

#if defined(HW_GCM) && defined(OPENSSL_AARCH64)

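// The AArch64 kernels take their input length in bits, hence the
// |len_blocks * 8| below; like the x86-64 paths, they only handle whole
// 16-byte blocks.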
static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             uint8_t Xi[16], const u128 Htable[16],
                             enum gcm_impl_t impl) {
  const size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (!len_blocks) {
    return 0;
  }
  aes_gcm_enc_kernel(in, len_blocks * 8, out, Xi, ivec, key, Htable);
  return len_blocks;
}

static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             uint8_t Xi[16], const u128 Htable[16],
                             enum gcm_impl_t impl) {
  const size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (!len_blocks) {
    return 0;
  }
  aes_gcm_dec_kernel(in, len_blocks * 8, out, Xi, ivec, key, Htable);
  return len_blocks;
}

#endif  // HW_GCM && AARCH64

void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
                       u128 out_table[16], const uint8_t gcm_key[16]) {
  // H is passed to |gcm_init_*| as a pair of byte-swapped, 64-bit values.
  uint64_t H[2] = {CRYPTO_load_u64_be(gcm_key),
                   CRYPTO_load_u64_be(gcm_key + 8)};

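  // Pick the fastest implementation the CPU supports, from widest to
  // narrowest: VPCLMULQDQ with AVX-512 or AVX2, then PCLMULQDQ (optionally
  // with AVX and MOVBE), then the SSSE3 table-lookup fallback, and finally
  // the portable C implementation.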
#if defined(GHASH_ASM_X86_64)
  if (crypto_gcm_clmul_enabled()) {
    if (CRYPTO_is_VPCLMULQDQ_capable() && CRYPTO_is_AVX2_capable()) {
      if (CRYPTO_is_AVX512BW_capable() && CRYPTO_is_AVX512VL_capable() &&
          CRYPTO_is_BMI2_capable() && !CRYPTO_cpu_avoid_zmm_registers()) {
        gcm_init_vpclmulqdq_avx10_512(out_table, H);
        *out_mult = gcm_gmult_vpclmulqdq_avx10;
        *out_hash = gcm_ghash_vpclmulqdq_avx10_512;
        return;
      }
      gcm_init_vpclmulqdq_avx2(out_table, H);
      *out_mult = gcm_gmult_vpclmulqdq_avx2;
      *out_hash = gcm_ghash_vpclmulqdq_avx2;
      return;
    }
    if (CRYPTO_is_AVX_capable() && CRYPTO_is_MOVBE_capable()) {
      gcm_init_avx(out_table, H);
      *out_mult = gcm_gmult_avx;
      *out_hash = gcm_ghash_avx;
      return;
    }
    gcm_init_clmul(out_table, H);
    *out_mult = gcm_gmult_clmul;
    *out_hash = gcm_ghash_clmul;
    return;
  }
  if (CRYPTO_is_SSSE3_capable()) {
    gcm_init_ssse3(out_table, H);
    *out_mult = gcm_gmult_ssse3;
    *out_hash = gcm_ghash_ssse3;
    return;
  }
#elif defined(GHASH_ASM_X86)
  if (crypto_gcm_clmul_enabled()) {
    gcm_init_clmul(out_table, H);
    *out_mult = gcm_gmult_clmul;
    *out_hash = gcm_ghash_clmul;
    return;
  }
  if (CRYPTO_is_SSSE3_capable()) {
    gcm_init_ssse3(out_table, H);
    *out_mult = gcm_gmult_ssse3;
    *out_hash = gcm_ghash_ssse3;
    return;
  }
#elif defined(GHASH_ASM_ARM)
  if (gcm_pmull_capable()) {
    gcm_init_v8(out_table, H);
    *out_mult = gcm_gmult_v8;
    *out_hash = gcm_ghash_v8;
    return;
  }

  if (gcm_neon_capable()) {
    gcm_init_neon(out_table, H);
    *out_mult = gcm_gmult_neon;
    *out_hash = gcm_ghash_neon;
    return;
  }
#endif

  gcm_init_nohw(out_table, H);
  *out_mult = gcm_gmult_nohw;
  *out_hash = gcm_ghash_nohw;
}

void CRYPTO_gcm128_init_aes_key(GCM128_KEY *gcm_key, const uint8_t *key,
                                size_t key_bytes) {
  switch (key_bytes) {
    case 16:
      boringssl_fips_inc_counter(fips_counter_evp_aes_128_gcm);
      break;

    case 32:
      boringssl_fips_inc_counter(fips_counter_evp_aes_256_gcm);
      break;
  }

  OPENSSL_memset(gcm_key, 0, sizeof(*gcm_key));
  int is_hwaes;
  gcm_key->ctr = aes_ctr_set_key(&gcm_key->aes, &is_hwaes, &gcm_key->block, key,
                                 key_bytes);

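  // The hash key H is the block cipher applied to the all-zero block.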
  uint8_t ghash_key[16];
  OPENSSL_memset(ghash_key, 0, sizeof(ghash_key));
  gcm_key->block(ghash_key, ghash_key, &gcm_key->aes);

  CRYPTO_ghash_init(&gcm_key->gmult, &gcm_key->ghash, gcm_key->Htable,
                    ghash_key);

#if !defined(OPENSSL_NO_ASM)
#if defined(OPENSSL_X86_64)
  if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx10_512 &&
      CRYPTO_is_VAES_capable()) {
    gcm_key->impl = gcm_x86_vaes_avx10_512;
  } else if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx2 &&
             CRYPTO_is_VAES_capable()) {
    gcm_key->impl = gcm_x86_vaes_avx2;
  } else if (gcm_key->ghash == gcm_ghash_avx && is_hwaes) {
    gcm_key->impl = gcm_x86_aesni;
  }
#elif defined(OPENSSL_AARCH64)
  if (gcm_pmull_capable() && is_hwaes) {
    gcm_key->impl = gcm_arm64_aes;
  }
#endif
#endif
}

void CRYPTO_gcm128_init_ctx(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                            const uint8_t *iv, size_t iv_len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
#endif

  OPENSSL_memset(&ctx->Yi, 0, sizeof(ctx->Yi));
  OPENSSL_memset(&ctx->Xi, 0, sizeof(ctx->Xi));
  ctx->len.aad = 0;
  ctx->len.msg = 0;
  ctx->ares = 0;
  ctx->mres = 0;

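  // Compute the pre-counter block J0 (NIST SP 800-38D, section 7.1): a 96-bit
  // IV is used directly with a counter of one appended, while any other IV
  // length is folded through GHASH together with a length block.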
  uint32_t ctr;
  if (iv_len == 12) {
    OPENSSL_memcpy(ctx->Yi, iv, 12);
    ctx->Yi[15] = 1;
    ctr = 1;
  } else {
    uint64_t len0 = iv_len;

    while (iv_len >= 16) {
      CRYPTO_xor16(ctx->Yi, ctx->Yi, iv);
      GCM_MUL(key, ctx, Yi);
      iv += 16;
      iv_len -= 16;
    }
    if (iv_len) {
      for (size_t i = 0; i < iv_len; ++i) {
        ctx->Yi[i] ^= iv[i];
      }
      GCM_MUL(key, ctx, Yi);
    }

    uint8_t len_block[16];
    OPENSSL_memset(len_block, 0, 8);
    CRYPTO_store_u64_be(len_block + 8, len0 << 3);
    CRYPTO_xor16(ctx->Yi, ctx->Yi, len_block);

    GCM_MUL(key, ctx, Yi);
    ctr = CRYPTO_load_u32_be(ctx->Yi + 12);
  }

  key->block(ctx->Yi, ctx->EK0, &key->aes);
  ++ctr;
  CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
}

int CRYPTO_gcm128_aad(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                      const uint8_t *aad, size_t aad_len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
  void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = key->ghash;
#endif

  if (ctx->len.msg != 0) {
    // The caller must have finished the AAD before providing other input.
    return 0;
  }

  uint64_t alen = ctx->len.aad + aad_len;
  if (alen > (UINT64_C(1) << 61) || (sizeof(aad_len) == 8 && alen < aad_len)) {
    return 0;
  }
  ctx->len.aad = alen;

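  // |ctx->ares| counts the bytes of a partial AAD block buffered in |Xi|.
  // Fill it first; once 16 bytes accumulate, flush with a GHASH multiply.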
  unsigned n = ctx->ares;
  if (n) {
    while (n && aad_len) {
      ctx->Xi[n] ^= *(aad++);
      --aad_len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(key, ctx, Xi);
    } else {
      ctx->ares = n;
      return 1;
    }
  }

  // Process a whole number of blocks.
  size_t len_blocks = aad_len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    GHASH(key, ctx, aad, len_blocks);
    aad += len_blocks;
    aad_len -= len_blocks;
  }

  // Process the remainder.
  if (aad_len != 0) {
    n = (unsigned int)aad_len;
    for (size_t i = 0; i < aad_len; ++i) {
      ctx->Xi[i] ^= aad[i];
    }
  }

  ctx->ares = n;
  return 1;
}

int CRYPTO_gcm128_encrypt(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                          const uint8_t *in, uint8_t *out, size_t len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
  void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = key->ghash;
#endif

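  // NIST SP 800-38D caps the plaintext at 2^39 - 256 bits, i.e. 2^36 - 32
  // bytes; the second test rejects overflow of the 64-bit running total.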
  uint64_t mlen = ctx->len.msg + len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.msg = mlen;

  if (ctx->ares) {
    // First call to encrypt finalizes GHASH(AAD)
    GCM_MUL(key, ctx, Xi);
    ctx->ares = 0;
  }

  unsigned n = ctx->mres;
  if (n) {
    while (n && len) {
      ctx->Xi[n] ^= *(out++) = *(in++) ^ ctx->EKi[n];
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(key, ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }

#if defined(HW_GCM)
  // Check |len| to work around a C language bug. See https://crbug.com/1019588.
  if (key->impl != gcm_separate && len > 0) {
    // |hw_gcm_encrypt| may not process all the input given to it. It may
    // not process *any* of its input if it is deemed too small.
    size_t bulk = hw_gcm_encrypt(in, out, len, &key->aes, ctx->Yi, ctx->Xi,
                                 key->Htable, key->impl);
    in += bulk;
    out += bulk;
    len -= bulk;
  }
#endif

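  // Bulk path: encrypt a GHASH_CHUNK's worth of data with the CTR stream
  // function, then hash the freshly written ciphertext while it is still in
  // the L1 cache.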
  uint32_t ctr = CRYPTO_load_u32_be(ctx->Yi + 12);
  ctr128_f stream = key->ctr;
  while (len >= GHASH_CHUNK) {
    (*stream)(in, out, GHASH_CHUNK / 16, &key->aes, ctx->Yi);
    ctr += GHASH_CHUNK / 16;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    GHASH(key, ctx, out, GHASH_CHUNK);
    out += GHASH_CHUNK;
    in += GHASH_CHUNK;
    len -= GHASH_CHUNK;
  }

  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    size_t j = len_blocks / 16;
    (*stream)(in, out, j, &key->aes, ctx->Yi);
    ctr += (uint32_t)j;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    in += len_blocks;
    len -= len_blocks;
    GHASH(key, ctx, out, len_blocks);
    out += len_blocks;
  }

  if (len) {
    key->block(ctx->Yi, ctx->EKi, &key->aes);
    ++ctr;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    while (len--) {
      ctx->Xi[n] ^= out[n] = in[n] ^ ctx->EKi[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

int CRYPTO_gcm128_decrypt(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                          const uint8_t *in, uint8_t *out, size_t len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
  void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = key->ghash;
#endif

  uint64_t mlen = ctx->len.msg + len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.msg = mlen;

  if (ctx->ares) {
    // First call to decrypt finalizes GHASH(AAD)
    GCM_MUL(key, ctx, Xi);
    ctx->ares = 0;
  }

  unsigned n = ctx->mres;
  if (n) {
    while (n && len) {
      uint8_t c = *(in++);
      *(out++) = c ^ ctx->EKi[n];
      ctx->Xi[n] ^= c;
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(key, ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }

#if defined(HW_GCM)
  // Check |len| to work around a C language bug. See https://crbug.com/1019588.
  if (key->impl != gcm_separate && len > 0) {
    // |hw_gcm_decrypt| may not process all the input given to it. It may
    // not process *any* of its input if it is deemed too small.
    size_t bulk = hw_gcm_decrypt(in, out, len, &key->aes, ctx->Yi, ctx->Xi,
                                 key->Htable, key->impl);
    in += bulk;
    out += bulk;
    len -= bulk;
  }
#endif

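  // GHASH always runs over the ciphertext, so when decrypting each chunk is
  // hashed before it is decrypted, the reverse of the order used in
  // CRYPTO_gcm128_encrypt.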
  uint32_t ctr = CRYPTO_load_u32_be(ctx->Yi + 12);
  ctr128_f stream = key->ctr;
  while (len >= GHASH_CHUNK) {
    GHASH(key, ctx, in, GHASH_CHUNK);
    (*stream)(in, out, GHASH_CHUNK / 16, &key->aes, ctx->Yi);
    ctr += GHASH_CHUNK / 16;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    out += GHASH_CHUNK;
    in += GHASH_CHUNK;
    len -= GHASH_CHUNK;
  }

  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    size_t j = len_blocks / 16;
    GHASH(key, ctx, in, len_blocks);
    (*stream)(in, out, j, &key->aes, ctx->Yi);
    ctr += (uint32_t)j;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    out += len_blocks;
    in += len_blocks;
    len -= len_blocks;
  }

  if (len) {
    key->block(ctx->Yi, ctx->EKi, &key->aes);
    ++ctr;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    while (len--) {
      uint8_t c = in[n];
      ctx->Xi[n] ^= c;
      out[n] = c ^ ctx->EKi[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

int CRYPTO_gcm128_finish(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                         const uint8_t *tag, size_t len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
#endif

  if (ctx->mres || ctx->ares) {
    GCM_MUL(key, ctx, Xi);
  }

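  // Fold in the final length block, len(AAD) || len(C) in bits, then mask the
  // GHASH output with EK0 to produce the tag. The tag comparison uses the
  // constant-time CRYPTO_memcmp.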
  uint8_t len_block[16];
  CRYPTO_store_u64_be(len_block, ctx->len.aad << 3);
  CRYPTO_store_u64_be(len_block + 8, ctx->len.msg << 3);
  CRYPTO_xor16(ctx->Xi, ctx->Xi, len_block);
  GCM_MUL(key, ctx, Xi);
  CRYPTO_xor16(ctx->Xi, ctx->Xi, ctx->EK0);

  if (tag && len <= sizeof(ctx->Xi)) {
    return CRYPTO_memcmp(ctx->Xi, tag, len) == 0;
  } else {
    return 0;
  }
}

void CRYPTO_gcm128_tag(const GCM128_KEY *key, GCM128_CONTEXT *ctx, uint8_t *tag,
                       size_t len) {
  CRYPTO_gcm128_finish(key, ctx, NULL, 0);
  OPENSSL_memcpy(tag, ctx->Xi, len <= sizeof(ctx->Xi) ? len : sizeof(ctx->Xi));
}

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
int crypto_gcm_clmul_enabled(void) {
#if defined(GHASH_ASM_X86) || defined(GHASH_ASM_X86_64)
  return CRYPTO_is_FXSR_capable() && CRYPTO_is_PCLMUL_capable();
#else
  return 0;
#endif
}
#endif