/* Copyright (c) 2014, Google Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#include <assert.h>
#include <limits.h>
#include <string.h>

#include <openssl/aead.h>
#include <openssl/cipher.h>
#include <openssl/err.h>
#include <openssl/hmac.h>
#include <openssl/md5.h>
#include <openssl/mem.h>
#include <openssl/sha.h>
#include <openssl/type_check.h>

#include "../fipsmodule/cipher/internal.h"
#include "../internal.h"
#include "internal.h"


typedef struct {
  EVP_CIPHER_CTX cipher_ctx;
  HMAC_CTX hmac_ctx;
  // mac_key is the portion of the key used for the MAC. It is retained
  // separately for the constant-time CBC code.
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  uint8_t mac_key_len;
  // implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit
  // IV.
  char implicit_iv;
} AEAD_TLS_CTX;

OPENSSL_STATIC_ASSERT(EVP_MAX_MD_SIZE < 256,
                      "mac_key_len does not fit in uint8_t");

OPENSSL_STATIC_ASSERT(sizeof(((EVP_AEAD_CTX *)NULL)->state) >=
                          sizeof(AEAD_TLS_CTX),
                      "AEAD state is too small");
#if defined(__GNUC__) || defined(__clang__)
OPENSSL_STATIC_ASSERT(alignof(union evp_aead_ctx_st_state) >=
                          alignof(AEAD_TLS_CTX),
                      "AEAD state has insufficient alignment");
#endif

static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;
  EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx);
  HMAC_CTX_cleanup(&tls_ctx->hmac_ctx);
}

static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len,
                         size_t tag_len, enum evp_aead_direction_t dir,
                         const EVP_CIPHER *cipher, const EVP_MD *md,
                         char implicit_iv) {
  if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH &&
      tag_len != EVP_MD_size(md)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_TAG_SIZE);
    return 0;
  }

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
    return 0;
  }

  size_t mac_key_len = EVP_MD_size(md);
  size_t enc_key_len = EVP_CIPHER_key_length(cipher);
  assert(mac_key_len + enc_key_len +
         (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) == key_len);

  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;
  EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  HMAC_CTX_init(&tls_ctx->hmac_ctx);
  assert(mac_key_len <= EVP_MAX_MD_SIZE);
  OPENSSL_memcpy(tls_ctx->mac_key, key, mac_key_len);
  tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  tls_ctx->implicit_iv = implicit_iv;

  if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
                         implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
                         dir == evp_aead_seal) ||
      !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
    aead_tls_cleanup(ctx);
    return 0;
  }
  EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);

  return 1;
}
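
// As a concrete illustration of the key layout consumed above (not used by the
// code): for EVP_aead_aes_128_cbc_sha1_tls_implicit_iv, |key| is the 20-byte
// SHA-1 MAC key, followed by the 16-byte AES-128 key, followed by the 16-byte
// implicit IV, i.e. 52 bytes in total, matching the SHA_DIGEST_LENGTH + 16 + 16
// key length declared in the EVP_AEAD tables at the end of this file.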

static size_t aead_tls_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len,
                               const size_t extra_in_len) {
  assert(extra_in_len == 0);
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;

  const size_t hmac_len = HMAC_size(&tls_ctx->hmac_ctx);
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE) {
    // The NULL cipher.
    return hmac_len;
  }

  const size_t block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  // An overflow of |in_len + hmac_len| doesn't affect the result mod
  // |block_size|, provided that |block_size| is a smaller power of two.
  assert(block_size != 0 && (block_size & (block_size - 1)) == 0);
  const size_t pad_len = block_size - (in_len + hmac_len) % block_size;
  return hmac_len + pad_len;
}
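
// Worked example of the tag length computation above (illustrative numbers
// only): for AES-CBC with |block_size| = 16 and SHA-1 with |hmac_len| = 20, an
// |in_len| of 5 gives (5 + 20) % 16 = 9, so |pad_len| = 16 - 9 = 7 and the tag
// is 20 + 7 = 27 bytes. The sealed record is then |in_len| + 27 = 32 bytes, a
// whole number of cipher blocks.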

static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out,
                                 uint8_t *out_tag, size_t *out_tag_len,
                                 const size_t max_out_tag_len,
                                 const uint8_t *nonce, const size_t nonce_len,
                                 const uint8_t *in, const size_t in_len,
                                 const uint8_t *extra_in,
                                 const size_t extra_in_len, const uint8_t *ad,
                                 const size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;

  if (!tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_tag_len < aead_tls_tag_len(ctx, in_len, extra_in_len)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  // To allow for CBC mode which changes cipher length, |ad| doesn't include the
  // length for legacy ciphers.
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  // Compute the MAC. This must be first in case the operation is being done
  // in-place.
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&tls_ctx->hmac_ctx, in, in_len) ||
      !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len)) {
    return 0;
  }

  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  // Encrypt the input.
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);

  // Feed the MAC into the cipher in two steps. First complete the final partial
  // block from encrypting the input and split the result between |out| and
  // |out_tag|. Then feed the rest.

  const size_t early_mac_len =
      (block_size - (in_len % block_size)) % block_size;
  if (early_mac_len != 0) {
    assert(len + block_size - early_mac_len == in_len);
    uint8_t buf[EVP_MAX_BLOCK_LENGTH];
    int buf_len;
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, buf, &buf_len, mac,
                           (int)early_mac_len)) {
      return 0;
    }
    assert(buf_len == (int)block_size);
    OPENSSL_memcpy(out + len, buf, block_size - early_mac_len);
    OPENSSL_memcpy(out_tag, buf + block_size - early_mac_len, early_mac_len);
  }
  size_t tag_len = early_mac_len;

  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len,
                         mac + tag_len, mac_len - tag_len)) {
    return 0;
  }
  tag_len += len;

  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    // Compute padding and feed that into the cipher.
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    OPENSSL_memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len,
                           padding, (int)padding_len)) {
      return 0;
    }
    tag_len += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len)) {
    return 0;
  }
  assert(len == 0);  // Padding is explicit.
  assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len));

  *out_tag_len = tag_len;
  return 1;
}
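
// Worked example of the |out|/|out_tag| split above (illustrative numbers
// only): with AES-CBC (block size 16), SHA-1 (20-byte MAC) and |in_len| = 30,
// the first EVP_EncryptUpdate emits one 16-byte block to |out| and buffers the
// remaining 14 input bytes. |early_mac_len| = (16 - 30 % 16) % 16 = 2, so the
// first two MAC bytes complete that partial block; of the resulting ciphertext
// block, 14 bytes go to |out| (completing the 30 bytes there) and 2 to
// |out_tag|. The remaining 18 MAC bytes plus 14 bytes of padding (each byte
// 0x0d) fill two more blocks written to |out_tag|, giving a tag of
// 2 + 16 + 16 = 34 bytes, matching aead_tls_tag_len(ctx, 30, 0) = 20 + 14.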

static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
                         size_t max_out_len, const uint8_t *nonce,
                         size_t nonce_len, const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;

  if (tls_ctx->cipher_ctx.encrypt) {
    // Unlike a normal AEAD, a TLS AEAD may only be used in one direction.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION);
    return 0;
  }

  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    // This requires that the caller provide space for the MAC, even though it
    // will always be removed on return.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    // EVP_CIPHER takes int as input.
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
    return 0;
  }

  // Configure the explicit IV.
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  // Decrypt to get the plaintext + MAC + padding.
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  assert(total == in_len);

  CONSTTIME_SECRET(out, total);

  // Remove CBC padding. Code from here on is timing-sensitive with respect to
  // |padding_ok| and |data_plus_mac_len| for CBC ciphers.
  size_t data_plus_mac_len;
  crypto_word_t padding_ok;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    if (!EVP_tls_cbc_remove_padding(
            &padding_ok, &data_plus_mac_len, out, total,
            EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
            HMAC_size(&tls_ctx->hmac_ctx))) {
      // Publicly invalid. This can be rejected in non-constant time.
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = CONSTTIME_TRUE_W;
    data_plus_mac_len = total;
    // |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
    // already been checked against the MAC size at the top of the function.
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  size_t data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  // At this point, if the padding is valid, the first |data_plus_mac_len| bytes
  // after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is
  // still large enough to extract a MAC, but it will be irrelevant.

  // To allow for CBC mode which changes cipher length, |ad| doesn't include the
  // length for legacy ciphers.
  uint8_t ad_fixed[13];
  OPENSSL_memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  // Compute the MAC and extract the one in the record.
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));

    record_mac = record_mac_tmp;
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    // We should support the constant-time path for all CBC-mode ciphers
    // implemented.
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    unsigned mac_len_u;
    if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&tls_ctx->hmac_ctx, out, data_len) ||
        !HMAC_Final(&tls_ctx->hmac_ctx, mac, &mac_len_u)) {
      return 0;
    }
    mac_len = mac_len_u;

    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  // Perform the MAC check and the padding check in constant-time. It should be
  // safe to simply perform the padding check first, but it would not be under a
  // different choice of MAC location on padding failure. See
  // EVP_tls_cbc_remove_padding.
  crypto_word_t good =
      constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0);
  good &= padding_ok;
  CONSTTIME_DECLASSIFY(&good, sizeof(good));
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  CONSTTIME_DECLASSIFY(&data_len, sizeof(data_len));
  CONSTTIME_DECLASSIFY(out, data_len);

  // End of timing-sensitive code.

  *out_len = data_len;
  return 1;
}

static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}

static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}

static int aead_tls_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv,
                           size_t *out_iv_len) {
  const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)&ctx->state;
  const size_t iv_len = EVP_CIPHER_CTX_iv_length(&tls_ctx->cipher_ctx);
  if (iv_len <= 1) {
    return 0;
  }

  *out_iv = tls_ctx->cipher_ctx.iv;
  *out_iv_len = iv_len;
  return 1;
}

static int aead_null_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                   size_t key_len, size_t tag_len,
                                   enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_enc_null(),
                       EVP_sha1(), 1 /* implicit iv */);
}

static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16,  // key len (SHA1 + AES128)
    16,                      // nonce len (IV)
    16 + SHA_DIGEST_LENGTH,  // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,       // max tag length
    0,                       // seal_scatter_supports_extra_in

    NULL,  // init
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16,  // key len (SHA1 + AES128 + IV)
    0,                            // nonce len
    16 + SHA_DIGEST_LENGTH,       // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,            // max tag length
    0,                            // seal_scatter_supports_extra_in

    NULL,  // init
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32,  // key len (SHA1 + AES256)
    16,                      // nonce len (IV)
    16 + SHA_DIGEST_LENGTH,  // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,       // max tag length
    0,                       // seal_scatter_supports_extra_in

    NULL,  // init
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16,  // key len (SHA1 + AES256 + IV)
    0,                            // nonce len
    16 + SHA_DIGEST_LENGTH,       // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,            // max tag length
    0,                            // seal_scatter_supports_extra_in

    NULL,  // init
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24,  // key len (SHA1 + 3DES)
    8,                       // nonce len (IV)
    8 + SHA_DIGEST_LENGTH,   // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,       // max tag length
    0,                       // seal_scatter_supports_extra_in

    NULL,  // init
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8,  // key len (SHA1 + 3DES + IV)
    0,                           // nonce len
    8 + SHA_DIGEST_LENGTH,       // overhead (padding + SHA1)
    SHA_DIGEST_LENGTH,           // max tag length
    0,                           // seal_scatter_supports_extra_in

    NULL,  // init
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,             // open_gather
    aead_tls_get_iv,  // get_iv
    aead_tls_tag_len,
};

static const EVP_AEAD aead_null_sha1_tls = {
    SHA_DIGEST_LENGTH,  // key len
    0,                  // nonce len
    SHA_DIGEST_LENGTH,  // overhead (SHA1)
    SHA_DIGEST_LENGTH,  // max tag length
    0,                  // seal_scatter_supports_extra_in

    NULL,  // init
    aead_null_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_open,
    aead_tls_seal_scatter,
    NULL,  // open_gather
    NULL,  // get_iv
    aead_tls_tag_len,
};

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) {
  return &aead_aes_128_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) {
  return &aead_aes_256_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
  return &aead_des_ede3_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) {
  return &aead_des_ede3_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_null_sha1_tls(void) { return &aead_null_sha1_tls; }
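
// A minimal usage sketch, for illustration only (|key|, |nonce|, |ad|, |in|
// and the buffer sizes are hypothetical caller-provided values). Because these
// TLS AEADs are one-directional and their |init| slot above is NULL, a caller
// would set one up with EVP_AEAD_CTX_init_with_direction from the public AEAD
// API (<openssl/aead.h>, included above). For sealing one record with
// AES-128-CBC-SHA1, the 36-byte |key| is the 20-byte MAC key followed by the
// 16-byte AES key, |nonce| is the 16-byte explicit IV, and |ad| is the 11
// bytes of TLS additional data without the length:
//
//   EVP_AEAD_CTX ctx;
//   if (!EVP_AEAD_CTX_init_with_direction(
//           &ctx, EVP_aead_aes_128_cbc_sha1_tls(), key, sizeof(key),
//           EVP_AEAD_DEFAULT_TAG_LENGTH, evp_aead_seal) ||
//       !EVP_AEAD_CTX_seal(&ctx, out, &out_len, sizeof(out), nonce,
//                          sizeof(nonce), in, in_len, ad, sizeof(ad))) {
//     // handle error
//   }
//   EVP_AEAD_CTX_cleanup(&ctx);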