// Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <openssl/base.h>

#include <string.h>

#include <openssl/mem.h>

#include "../../internal.h"
#include "../aes/internal.h"
#include "internal.h"


// kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
// bits of a |size_t|.
static const size_t kSizeTWithoutLower4Bits = (size_t) -16;


#define GCM_MUL(key, ctx, Xi) gcm_gmult_nohw((ctx)->Xi, (key)->Htable)
#define GHASH(key, ctx, in, len) \
  gcm_ghash_nohw((ctx)->Xi, (key)->Htable, in, len)
// GHASH_CHUNK is a "stride parameter" intended to mitigate cache-thrashing
// effects: the idea is to hash data while it is still in the L1 cache after
// the encryption pass.
#define GHASH_CHUNK (3 * 1024)

#if defined(GHASH_ASM_X86_64) || defined(GHASH_ASM_X86)
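// gcm_reduce_1bit shifts |V| by one bit position, folding the carried-out bit
// back in with the GHASH reduction polynomial (0xe1 in the top byte). It is
// used below to build the 16-entry multiplication table consumed by the SSSE3
// GHASH implementation.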
static inline void gcm_reduce_1bit(u128 *V) {
  if (sizeof(crypto_word_t) == 8) {
    uint64_t T = UINT64_C(0xe100000000000000) & (0 - (V->hi & 1));
    V->hi = (V->lo << 63) | (V->hi >> 1);
    V->lo = (V->lo >> 1) ^ T;
  } else {
    uint32_t T = 0xe1000000U & (0 - (uint32_t)(V->hi & 1));
    V->hi = (V->lo << 63) | (V->hi >> 1);
    V->lo = (V->lo >> 1) ^ ((uint64_t)T << 32);
  }
}

void gcm_init_ssse3(u128 Htable[16], const uint64_t H[2]) {
  Htable[0].hi = 0;
  Htable[0].lo = 0;
  u128 V;
  V.hi = H[1];
  V.lo = H[0];

  Htable[8] = V;
  gcm_reduce_1bit(&V);
  Htable[4] = V;
  gcm_reduce_1bit(&V);
  Htable[2] = V;
  gcm_reduce_1bit(&V);
  Htable[1] = V;
  Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
  V = Htable[4];
  Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
  Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
  Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
  V = Htable[8];
  Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
  Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
  Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
  Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
  Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
  Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
  Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;

  // Treat |Htable| as a 16x16 byte table and transpose it. Thus, Htable[i]
  // contains the i'th byte of j*H for all j.
  uint8_t *Hbytes = (uint8_t *)Htable;
  for (int i = 0; i < 16; i++) {
    for (int j = 0; j < i; j++) {
      uint8_t tmp = Hbytes[16*i + j];
      Hbytes[16*i + j] = Hbytes[16*j + i];
      Hbytes[16*j + i] = tmp;
    }
  }
}
#endif  // GHASH_ASM_X86_64 || GHASH_ASM_X86

#ifdef GCM_FUNCREF
#undef GCM_MUL
#define GCM_MUL(key, ctx, Xi) (*gcm_gmult_p)((ctx)->Xi, (key)->Htable)
#undef GHASH
#define GHASH(key, ctx, in, len) \
  (*gcm_ghash_p)((ctx)->Xi, (key)->Htable, in, len)
#endif  // GCM_FUNCREF

#if defined(HW_GCM) && defined(OPENSSL_X86_64)
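// hw_gcm_encrypt and hw_gcm_decrypt (below) hand bulk data to a fused
// AES-CTR + GHASH assembly kernel. They process at most a whole number of
// 16-byte blocks and return the number of bytes actually consumed; the caller
// handles any remaining tail.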
static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             uint8_t Xi[16], const u128 Htable[16],
                             enum gcm_impl_t impl) {
  switch (impl) {
    case gcm_x86_vaes_avx2:
      len &= kSizeTWithoutLower4Bits;
      aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi);
      CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
      return len;
    case gcm_x86_vaes_avx512:
      len &= kSizeTWithoutLower4Bits;
      aes_gcm_enc_update_vaes_avx512(in, out, len, key, ivec, Htable, Xi);
      CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
      return len;
    default:
      return aesni_gcm_encrypt(in, out, len, key, ivec, Htable, Xi);
  }
}

static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             uint8_t Xi[16], const u128 Htable[16],
                             enum gcm_impl_t impl) {
  switch (impl) {
    case gcm_x86_vaes_avx2:
      len &= kSizeTWithoutLower4Bits;
      aes_gcm_dec_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi);
      CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
      return len;
    case gcm_x86_vaes_avx512:
      len &= kSizeTWithoutLower4Bits;
      aes_gcm_dec_update_vaes_avx512(in, out, len, key, ivec, Htable, Xi);
      CRYPTO_store_u32_be(&ivec[12], CRYPTO_load_u32_be(&ivec[12]) + len / 16);
      return len;
    default:
      return aesni_gcm_decrypt(in, out, len, key, ivec, Htable, Xi);
  }
}
#endif  // HW_GCM && X86_64

#if defined(HW_GCM) && defined(OPENSSL_AARCH64)

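// The AArch64 kernels take the input length in bits (hence |len_blocks * 8|
// below) and are only invoked for a whole number of blocks.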
static size_t hw_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             uint8_t Xi[16], const u128 Htable[16],
                             enum gcm_impl_t impl) {
  const size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (!len_blocks) {
    return 0;
  }
  aes_gcm_enc_kernel(in, len_blocks * 8, out, Xi, ivec, key, Htable);
  return len_blocks;
}

static size_t hw_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                             const AES_KEY *key, uint8_t ivec[16],
                             uint8_t Xi[16], const u128 Htable[16],
                             enum gcm_impl_t impl) {
  const size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (!len_blocks) {
    return 0;
  }
  aes_gcm_dec_kernel(in, len_blocks * 8, out, Xi, ivec, key, Htable);
  return len_blocks;
}

#endif  // HW_GCM && AARCH64

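// CRYPTO_ghash_init selects the fastest GHASH implementation available on the
// current CPU, writes the key-dependent table derived from |gcm_key| into
// |out_table|, and sets |*out_mult| and |*out_hash| to the matching functions.
// The portable |gcm_*_nohw| functions are the fallback.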
void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
                       u128 out_table[16], const uint8_t gcm_key[16]) {
  // H is passed to |gcm_init_*| as a pair of byte-swapped, 64-bit values.
  uint64_t H[2] = {CRYPTO_load_u64_be(gcm_key),
                   CRYPTO_load_u64_be(gcm_key + 8)};

#if defined(GHASH_ASM_X86_64)
  if (crypto_gcm_clmul_enabled()) {
    if (CRYPTO_is_VPCLMULQDQ_capable() && CRYPTO_is_AVX2_capable()) {
      if (CRYPTO_is_AVX512BW_capable() && CRYPTO_is_AVX512VL_capable() &&
          CRYPTO_is_BMI2_capable() && !CRYPTO_cpu_avoid_zmm_registers()) {
        gcm_init_vpclmulqdq_avx512(out_table, H);
        *out_mult = gcm_gmult_vpclmulqdq_avx512;
        *out_hash = gcm_ghash_vpclmulqdq_avx512;
        return;
      }
      gcm_init_vpclmulqdq_avx2(out_table, H);
      *out_mult = gcm_gmult_vpclmulqdq_avx2;
      *out_hash = gcm_ghash_vpclmulqdq_avx2;
      return;
    }
    if (CRYPTO_is_AVX_capable() && CRYPTO_is_MOVBE_capable()) {
      gcm_init_avx(out_table, H);
      *out_mult = gcm_gmult_avx;
      *out_hash = gcm_ghash_avx;
      return;
    }
    gcm_init_clmul(out_table, H);
    *out_mult = gcm_gmult_clmul;
    *out_hash = gcm_ghash_clmul;
    return;
  }
  if (CRYPTO_is_SSSE3_capable()) {
    gcm_init_ssse3(out_table, H);
    *out_mult = gcm_gmult_ssse3;
    *out_hash = gcm_ghash_ssse3;
    return;
  }
#elif defined(GHASH_ASM_X86)
  if (crypto_gcm_clmul_enabled()) {
    gcm_init_clmul(out_table, H);
    *out_mult = gcm_gmult_clmul;
    *out_hash = gcm_ghash_clmul;
    return;
  }
  if (CRYPTO_is_SSSE3_capable()) {
    gcm_init_ssse3(out_table, H);
    *out_mult = gcm_gmult_ssse3;
    *out_hash = gcm_ghash_ssse3;
    return;
  }
#elif defined(GHASH_ASM_ARM)
  if (gcm_pmull_capable()) {
    gcm_init_v8(out_table, H);
    *out_mult = gcm_gmult_v8;
    *out_hash = gcm_ghash_v8;
    return;
  }

  if (gcm_neon_capable()) {
    gcm_init_neon(out_table, H);
    *out_mult = gcm_gmult_neon;
    *out_hash = gcm_ghash_neon;
    return;
  }
#endif

  gcm_init_nohw(out_table, H);
  *out_mult = gcm_gmult_nohw;
  *out_hash = gcm_ghash_nohw;
}

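// A typical sealing operation with this API looks roughly like the following
// sketch (error handling elided; internal.h is the authoritative reference):
//
//   GCM128_KEY gcm_key;
//   CRYPTO_gcm128_init_aes_key(&gcm_key, key, key_len);
//
//   GCM128_CONTEXT ctx;
//   CRYPTO_gcm128_init_ctx(&gcm_key, &ctx, nonce, nonce_len);
//   CRYPTO_gcm128_aad(&gcm_key, &ctx, aad, aad_len);
//   CRYPTO_gcm128_encrypt(&gcm_key, &ctx, plaintext, out, plaintext_len);
//   CRYPTO_gcm128_tag(&gcm_key, &ctx, tag, 16);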
void CRYPTO_gcm128_init_aes_key(GCM128_KEY *gcm_key, const uint8_t *key,
                                size_t key_bytes) {
  switch (key_bytes) {
    case 16:
      boringssl_fips_inc_counter(fips_counter_evp_aes_128_gcm);
      break;

    case 32:
      boringssl_fips_inc_counter(fips_counter_evp_aes_256_gcm);
      break;
  }

  OPENSSL_memset(gcm_key, 0, sizeof(*gcm_key));
  int is_hwaes;
  gcm_key->ctr = aes_ctr_set_key(&gcm_key->aes, &is_hwaes, &gcm_key->block, key,
                                 key_bytes);

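  // The GHASH key H is the encryption of the all-zero block under the AES key.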
  uint8_t ghash_key[16];
  OPENSSL_memset(ghash_key, 0, sizeof(ghash_key));
  gcm_key->block(ghash_key, ghash_key, &gcm_key->aes);

  CRYPTO_ghash_init(&gcm_key->gmult, &gcm_key->ghash, gcm_key->Htable,
                    ghash_key);

#if !defined(OPENSSL_NO_ASM)
#if defined(OPENSSL_X86_64)
  if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx512 &&
      CRYPTO_is_VAES_capable()) {
    gcm_key->impl = gcm_x86_vaes_avx512;
  } else if (gcm_key->ghash == gcm_ghash_vpclmulqdq_avx2 &&
             CRYPTO_is_VAES_capable()) {
    gcm_key->impl = gcm_x86_vaes_avx2;
  } else if (gcm_key->ghash == gcm_ghash_avx && is_hwaes) {
    gcm_key->impl = gcm_x86_aesni;
  }
#elif defined(OPENSSL_AARCH64)
  if (gcm_pmull_capable() && is_hwaes) {
    gcm_key->impl = gcm_arm64_aes;
  }
#endif
#endif
}

void CRYPTO_gcm128_init_ctx(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                            const uint8_t *iv, size_t iv_len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
#endif

  OPENSSL_memset(&ctx->Yi, 0, sizeof(ctx->Yi));
  OPENSSL_memset(&ctx->Xi, 0, sizeof(ctx->Xi));
  ctx->len.aad = 0;
  ctx->len.msg = 0;
  ctx->ares = 0;
  ctx->mres = 0;

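  // Derive the pre-counter block J0. For the common 96-bit IV, J0 is simply
  // IV || 0^31 || 1. Otherwise, J0 = GHASH(IV zero-padded to a block boundary
  // || 0^64 || [len(IV)]_64), per NIST SP 800-38D.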
  uint32_t ctr;
  if (iv_len == 12) {
    OPENSSL_memcpy(ctx->Yi, iv, 12);
    ctx->Yi[15] = 1;
    ctr = 1;
  } else {
    uint64_t len0 = iv_len;

    while (iv_len >= 16) {
      CRYPTO_xor16(ctx->Yi, ctx->Yi, iv);
      GCM_MUL(key, ctx, Yi);
      iv += 16;
      iv_len -= 16;
    }
    if (iv_len) {
      for (size_t i = 0; i < iv_len; ++i) {
        ctx->Yi[i] ^= iv[i];
      }
      GCM_MUL(key, ctx, Yi);
    }

    uint8_t len_block[16];
    OPENSSL_memset(len_block, 0, 8);
    CRYPTO_store_u64_be(len_block + 8, len0 << 3);
    CRYPTO_xor16(ctx->Yi, ctx->Yi, len_block);

    GCM_MUL(key, ctx, Yi);
    ctr = CRYPTO_load_u32_be(ctx->Yi + 12);
  }

  key->block(ctx->Yi, ctx->EK0, &key->aes);
  ++ctr;
  CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
}

int CRYPTO_gcm128_aad(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                      const uint8_t *aad, size_t aad_len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
  void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = key->ghash;
#endif

  if (ctx->len.msg != 0) {
    // The caller must have finished the AAD before providing other input.
    return 0;
  }

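  // Cap the running AAD total at 2^61 bytes (2^64 bits) and reject overflow.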
  uint64_t alen = ctx->len.aad + aad_len;
  if (alen > (UINT64_C(1) << 61) || (sizeof(aad_len) == 8 && alen < aad_len)) {
    return 0;
  }
  ctx->len.aad = alen;

  unsigned n = ctx->ares;
  if (n) {
    while (n && aad_len) {
      ctx->Xi[n] ^= *(aad++);
      --aad_len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(key, ctx, Xi);
    } else {
      ctx->ares = n;
      return 1;
    }
  }

  // Process a whole number of blocks.
  size_t len_blocks = aad_len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    GHASH(key, ctx, aad, len_blocks);
    aad += len_blocks;
    aad_len -= len_blocks;
  }

  // Process the remainder.
  if (aad_len != 0) {
    n = (unsigned int)aad_len;
    for (size_t i = 0; i < aad_len; ++i) {
      ctx->Xi[i] ^= aad[i];
    }
  }

  ctx->ares = n;
  return 1;
}

int CRYPTO_gcm128_encrypt(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                          const uint8_t *in, uint8_t *out, size_t len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
  void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = key->ghash;
#endif

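  // NIST SP 800-38D caps the plaintext at 2^39 - 256 bits, i.e. 2^36 - 32
  // bytes, per invocation.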
  uint64_t mlen = ctx->len.msg + len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.msg = mlen;

  if (ctx->ares) {
    // First call to encrypt finalizes GHASH(AAD)
    GCM_MUL(key, ctx, Xi);
    ctx->ares = 0;
  }

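  // Finish any partial block left over from a previous call, reusing the
  // cached keystream block |EKi|.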
  unsigned n = ctx->mres;
  if (n) {
    while (n && len) {
      ctx->Xi[n] ^= *(out++) = *(in++) ^ ctx->EKi[n];
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(key, ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }

#if defined(HW_GCM)
  // Check |len| to work around a C language bug. See https://crbug.com/1019588.
  if (key->impl != gcm_separate && len > 0) {
    // |hw_gcm_encrypt| may not process all the input given to it. It may
    // not process *any* of its input if it is deemed too small.
    size_t bulk = hw_gcm_encrypt(in, out, len, &key->aes, ctx->Yi, ctx->Xi,
                                 key->Htable, key->impl);
    in += bulk;
    out += bulk;
    len -= bulk;
  }
#endif

  uint32_t ctr = CRYPTO_load_u32_be(ctx->Yi + 12);
  ctr128_f stream = key->ctr;
  while (len >= GHASH_CHUNK) {
    (*stream)(in, out, GHASH_CHUNK / 16, &key->aes, ctx->Yi);
    ctr += GHASH_CHUNK / 16;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    GHASH(key, ctx, out, GHASH_CHUNK);
    out += GHASH_CHUNK;
    in += GHASH_CHUNK;
    len -= GHASH_CHUNK;
  }

  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    size_t j = len_blocks / 16;
    (*stream)(in, out, j, &key->aes, ctx->Yi);
    ctr += (uint32_t)j;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    in += len_blocks;
    len -= len_blocks;
    GHASH(key, ctx, out, len_blocks);
    out += len_blocks;
  }

  if (len) {
    key->block(ctx->Yi, ctx->EKi, &key->aes);
    ++ctr;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    while (len--) {
      ctx->Xi[n] ^= out[n] = in[n] ^ ctx->EKi[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

int CRYPTO_gcm128_decrypt(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                          const uint8_t *in, uint8_t *out, size_t len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
  void (*gcm_ghash_p)(uint8_t Xi[16], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = key->ghash;
#endif

  uint64_t mlen = ctx->len.msg + len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.msg = mlen;

  if (ctx->ares) {
    // First call to decrypt finalizes GHASH(AAD)
    GCM_MUL(key, ctx, Xi);
    ctx->ares = 0;
  }

  unsigned n = ctx->mres;
  if (n) {
    while (n && len) {
      uint8_t c = *(in++);
      *(out++) = c ^ ctx->EKi[n];
      ctx->Xi[n] ^= c;
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(key, ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }

#if defined(HW_GCM)
  // Check |len| to work around a C language bug. See https://crbug.com/1019588.
  if (key->impl != gcm_separate && len > 0) {
    // |hw_gcm_decrypt| may not process all the input given to it. It may
    // not process *any* of its input if it is deemed too small.
    size_t bulk = hw_gcm_decrypt(in, out, len, &key->aes, ctx->Yi, ctx->Xi,
                                 key->Htable, key->impl);
    in += bulk;
    out += bulk;
    len -= bulk;
  }
#endif

  uint32_t ctr = CRYPTO_load_u32_be(ctx->Yi + 12);
  ctr128_f stream = key->ctr;
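  // For decryption, GHASH runs over the ciphertext, so each chunk is hashed
  // before it is decrypted.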
  while (len >= GHASH_CHUNK) {
    GHASH(key, ctx, in, GHASH_CHUNK);
    (*stream)(in, out, GHASH_CHUNK / 16, &key->aes, ctx->Yi);
    ctr += GHASH_CHUNK / 16;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    out += GHASH_CHUNK;
    in += GHASH_CHUNK;
    len -= GHASH_CHUNK;
  }

  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    size_t j = len_blocks / 16;
    GHASH(key, ctx, in, len_blocks);
    (*stream)(in, out, j, &key->aes, ctx->Yi);
    ctr += (uint32_t)j;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    out += len_blocks;
    in += len_blocks;
    len -= len_blocks;
  }

  if (len) {
    key->block(ctx->Yi, ctx->EKi, &key->aes);
    ++ctr;
    CRYPTO_store_u32_be(ctx->Yi + 12, ctr);
    while (len--) {
      uint8_t c = in[n];
      ctx->Xi[n] ^= c;
      out[n] = c ^ ctx->EKi[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

int CRYPTO_gcm128_finish(const GCM128_KEY *key, GCM128_CONTEXT *ctx,
                         const uint8_t *tag, size_t len) {
#ifdef GCM_FUNCREF
  void (*gcm_gmult_p)(uint8_t Xi[16], const u128 Htable[16]) = key->gmult;
#endif

  if (ctx->mres || ctx->ares) {
    GCM_MUL(key, ctx, Xi);
  }

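  // Absorb the final length block, [len(AAD)]_64 || [len(ciphertext)]_64 in
  // bits, then mask the result with EK0 to produce the tag.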
  uint8_t len_block[16];
  CRYPTO_store_u64_be(len_block, ctx->len.aad << 3);
  CRYPTO_store_u64_be(len_block + 8, ctx->len.msg << 3);
  CRYPTO_xor16(ctx->Xi, ctx->Xi, len_block);
  GCM_MUL(key, ctx, Xi);
  CRYPTO_xor16(ctx->Xi, ctx->Xi, ctx->EK0);

  if (tag && len <= sizeof(ctx->Xi)) {
    return CRYPTO_memcmp(ctx->Xi, tag, len) == 0;
  } else {
    return 0;
  }
}

void CRYPTO_gcm128_tag(const GCM128_KEY *key, GCM128_CONTEXT *ctx, uint8_t *tag,
                       size_t len) {
  CRYPTO_gcm128_finish(key, ctx, NULL, 0);
  OPENSSL_memcpy(tag, ctx->Xi, len <= sizeof(ctx->Xi) ? len : sizeof(ctx->Xi));
}

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
int crypto_gcm_clmul_enabled(void) {
#if defined(GHASH_ASM_X86) || defined(GHASH_ASM_X86_64)
  return CRYPTO_is_PCLMUL_capable() && CRYPTO_is_SSSE3_capable();
#else
  return 0;
#endif
}
#endif