1 /* Copyright (c) 2014, Google Inc.
2 *
3 * Permission to use, copy, modify, and/or distribute this software for any
4 * purpose with or without fee is hereby granted, provided that the above
5 * copyright notice and this permission notice appear in all copies.
6 *
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
14
15 #include <assert.h>
16 #include <limits.h>
17 #include <string.h>
18
19 #include <openssl/aead.h>
20 #include <openssl/cipher.h>
21 #include <openssl/err.h>
22 #include <openssl/hmac.h>
23 #include <openssl/mem.h>
24 #include <openssl/sha.h>
25 #include <openssl/type_check.h>
26
27 #include "../crypto/internal.h"
28 #include "internal.h"
29
30
/* AEAD_TLS_CTX is the per-context state for the legacy TLS "stitched"
 * cipher suites (cipher + HMAC composed as MAC-then-encrypt). */
typedef struct {
  /* cipher_ctx holds the bulk cipher (RC4 or a CBC cipher), keyed for one
   * direction only. */
  EVP_CIPHER_CTX cipher_ctx;
  /* hmac_ctx is the keyed HMAC state; it is copied, never used directly, per
   * record. */
  HMAC_CTX hmac_ctx;
  /* mac_key is the portion of the key used for the MAC. It is retained
   * separately for the constant-time CBC code. */
  uint8_t mac_key[EVP_MAX_MD_SIZE];
  uint8_t mac_key_len;
  /* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit
   * IV. */
  char implicit_iv;
} AEAD_TLS_CTX;

/* mac_key_len above is a uint8_t, so every possible digest size must fit. */
OPENSSL_COMPILE_ASSERT(EVP_MAX_MD_SIZE < 256, mac_key_len_fits_in_uint8_t);
44
/* aead_tls_cleanup releases everything owned by |ctx->aead_state| and clears
 * the pointer. The retained MAC key is zeroed before the memory is freed. */
static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) {
  AEAD_TLS_CTX *state = (AEAD_TLS_CTX *)ctx->aead_state;
  /* Scrub secret key material first, then tear down the sub-contexts. */
  OPENSSL_cleanse(state->mac_key, sizeof(state->mac_key));
  HMAC_CTX_cleanup(&state->hmac_ctx);
  EVP_CIPHER_CTX_cleanup(&state->cipher_ctx);
  OPENSSL_free(state);
  ctx->aead_state = NULL;
}
53
/* aead_tls_init sets up |ctx| as a legacy TLS MAC-then-encrypt suite that
 * combines |cipher| (RC4 or CBC) with an HMAC over |md|. |key| is the
 * concatenation of the MAC key, the encryption key and, when |implicit_iv| is
 * non-zero (pre-TLS-1.1 CBC), the IV. Unlike a normal AEAD, the context may
 * only be used in the direction given by |dir|. Returns one on success and
 * zero on error. */
static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len,
                         size_t tag_len, enum evp_aead_direction_t dir,
                         const EVP_CIPHER *cipher, const EVP_MD *md,
                         char implicit_iv) {
  /* The "tag" is the HMAC output, so only the digest size (or the default
   * marker) is acceptable. */
  if (tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH &&
      tag_len != EVP_MD_size(md)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, CIPHER_R_UNSUPPORTED_TAG_SIZE);
    return 0;
  }

  if (key_len != EVP_AEAD_key_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, CIPHER_R_BAD_KEY_LENGTH);
    return 0;
  }

  size_t mac_key_len = EVP_MD_size(md);
  size_t enc_key_len = EVP_CIPHER_key_length(cipher);
  assert(mac_key_len + enc_key_len +
         (implicit_iv ? EVP_CIPHER_iv_length(cipher) : 0) == key_len);
  /* Although EVP_rc4() is a variable-length cipher, the default key size is
   * correct for TLS. */

  AEAD_TLS_CTX *tls_ctx = OPENSSL_malloc(sizeof(AEAD_TLS_CTX));
  if (tls_ctx == NULL) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_init, ERR_R_MALLOC_FAILURE);
    return 0;
  }
  EVP_CIPHER_CTX_init(&tls_ctx->cipher_ctx);
  HMAC_CTX_init(&tls_ctx->hmac_ctx);
  assert(mac_key_len <= EVP_MAX_MD_SIZE);
  /* Retain the MAC key separately for the constant-time CBC path. */
  memcpy(tls_ctx->mac_key, key, mac_key_len);
  tls_ctx->mac_key_len = (uint8_t)mac_key_len;
  tls_ctx->implicit_iv = implicit_iv;

  ctx->aead_state = tls_ctx;
  if (!EVP_CipherInit_ex(&tls_ctx->cipher_ctx, cipher, NULL, &key[mac_key_len],
                         implicit_iv ? &key[mac_key_len + enc_key_len] : NULL,
                         dir == evp_aead_seal) ||
      !HMAC_Init_ex(&tls_ctx->hmac_ctx, key, mac_key_len, md, NULL)) {
    /* aead_tls_cleanup frees |tls_ctx| and resets |ctx->aead_state| to NULL,
     * so no further unwinding is needed here. */
    aead_tls_cleanup(ctx);
    return 0;
  }
  /* TLS-style CBC padding is applied manually in the seal path; disable the
   * cipher's own padding. */
  EVP_CIPHER_CTX_set_padding(&tls_ctx->cipher_ctx, 0);

  return 1;
}
101
/* aead_tls_seal protects a TLS record with the legacy MAC-then-encrypt
 * construction: it HMACs |ad| || length || plaintext, then encrypts
 * plaintext || MAC (|| padding, for CBC ciphers) into |out|. |ad| must be the
 * TLS additional data with the two trailing length bytes omitted (11 bytes);
 * the length is appended here because CBC padding changes the ciphertext
 * length. The context must have been initialised for sealing. Returns one on
 * success and zero on error. */
static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;
  size_t total = 0;

  if (!tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_OPERATION);
    return 0;

  }

  /* The first clause rejects lengths whose padded total would overflow
   * size_t. */
  if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len ||
      in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_TOO_LARGE);
    return 0;
  }

  if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  /* The full TLS additional data is 13 bytes; callers pass it without the
   * final two length bytes. */
  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_seal, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. */
  uint8_t ad_extra[2];
  ad_extra[0] = (uint8_t)(in_len >> 8);
  ad_extra[1] = (uint8_t)(in_len & 0xff);

  /* Compute the MAC. This must be first in case the operation is being done
   * in-place. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  unsigned mac_len;
  HMAC_CTX hmac_ctx;
  HMAC_CTX_init(&hmac_ctx);
  /* Work on a copy so the long-lived keyed HMAC context is not consumed. */
  if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
      !HMAC_Update(&hmac_ctx, ad, ad_len) ||
      !HMAC_Update(&hmac_ctx, ad_extra, sizeof(ad_extra)) ||
      !HMAC_Update(&hmac_ctx, in, in_len) ||
      !HMAC_Final(&hmac_ctx, mac, &mac_len)) {
    HMAC_CTX_cleanup(&hmac_ctx);
    return 0;
  }
  HMAC_CTX_cleanup(&hmac_ctx);

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Encrypt the input. */
  int len;
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in,
                         (int)in_len)) {
    return 0;
  }
  total = len;

  /* Feed the MAC into the cipher. */
  if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac,
                         (int)mac_len)) {
    return 0;
  }
  total += len;

  unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx);
  if (block_size > 1) {
    assert(block_size <= 256);
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE);

    /* Compute padding and feed that into the cipher. Each TLS padding byte
     * holds the padding length minus one. */
    uint8_t padding[256];
    unsigned padding_len = block_size - ((in_len + mac_len) % block_size);
    memset(padding, padding_len - 1, padding_len);
    if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding,
                           (int)padding_len)) {
      return 0;
    }
    total += len;
  }

  if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;

  *out_len = total;
  return 1;
}
207
/* aead_tls_open authenticates and decrypts a TLS record: it decrypts |in|
 * into |out|, removes CBC padding where applicable, recomputes the HMAC over
 * |ad| || plaintext length || plaintext and compares it against the MAC in
 * the record. For CBC ciphers the padding and MAC checks run in constant time
 * so that invalid padding is indistinguishable from a bad MAC through timing.
 * |ad| is the 11-byte TLS additional data without the length bytes; the
 * caller must supply |max_out_len| >= |in_len| even though the MAC and
 * padding are stripped from the output. */
static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
                         size_t *out_len, size_t max_out_len,
                         const uint8_t *nonce, size_t nonce_len,
                         const uint8_t *in, size_t in_len,
                         const uint8_t *ad, size_t ad_len) {
  AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state;

  if (tls_ctx->cipher_ctx.encrypt) {
    /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_OPERATION);
    return 0;

  }

  /* Records too short to contain a MAC are publicly invalid. */
  if (in_len < HMAC_size(&tls_ctx->hmac_ctx)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  if (max_out_len < in_len) {
    /* This requires that the caller provide space for the MAC, even though it
     * will always be removed on return. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BUFFER_TOO_SMALL);
    return 0;
  }

  if (nonce_len != EVP_AEAD_nonce_length(ctx->aead)) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_NONCE_SIZE);
    return 0;
  }

  if (ad_len != 13 - 2 /* length bytes */) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_INVALID_AD_SIZE);
    return 0;
  }

  if (in_len > INT_MAX) {
    /* EVP_CIPHER takes int as input. */
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_TOO_LARGE);
    return 0;
  }

  /* Configure the explicit IV. */
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      !tls_ctx->implicit_iv &&
      !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) {
    return 0;
  }

  /* Decrypt to get the plaintext + MAC + padding. */
  size_t total = 0;
  int len;
  if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) {
    return 0;
  }
  total += len;
  if (!EVP_DecryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) {
    return 0;
  }
  total += len;
  /* Cipher padding is disabled, so decryption is length-preserving. */
  assert(total == in_len);

  /* Remove CBC padding. Code from here on is timing-sensitive with respect to
   * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */
  int padding_ok;
  unsigned data_plus_mac_len, data_len;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) {
    padding_ok = EVP_tls_cbc_remove_padding(
        &data_plus_mac_len, out, total,
        EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx),
        (unsigned)HMAC_size(&tls_ctx->hmac_ctx));
    /* Publicly invalid. This can be rejected in non-constant time. */
    if (padding_ok == 0) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
  } else {
    padding_ok = 1;
    data_plus_mac_len = total;
    /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has
     * already been checked against the MAC size at the top of the function. */
    assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx));
  }
  data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx);

  /* At this point, |padding_ok| is 1 or -1. If 1, the padding is valid and the
   * first |data_plus_mac_size| bytes after |out| are the plaintext and
   * MAC. Either way, |data_plus_mac_size| is large enough to extract a MAC. */

  /* To allow for CBC mode which changes cipher length, |ad| doesn't include the
   * length for legacy ciphers. */
  uint8_t ad_fixed[13];
  memcpy(ad_fixed, ad, 11);
  ad_fixed[11] = (uint8_t)(data_len >> 8);
  ad_fixed[12] = (uint8_t)(data_len & 0xff);
  ad_len += 2;

  /* Compute the MAC and extract the one in the record. */
  uint8_t mac[EVP_MAX_MD_SIZE];
  size_t mac_len;
  uint8_t record_mac_tmp[EVP_MAX_MD_SIZE];
  uint8_t *record_mac;
  if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
      EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
    /* Constant-time path: digest the record without revealing, via timing,
     * where the plaintext ends. */
    if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
                                   ad_fixed, out, data_plus_mac_len, total,
                                   tls_ctx->mac_key, tls_ctx->mac_key_len)) {
      OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
      return 0;
    }
    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));

    record_mac = record_mac_tmp;
    /* Copy out the record's MAC in constant time; its offset depends on the
     * (secret) padding length. */
    EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total);
  } else {
    /* We should support the constant-time path for all CBC-mode ciphers
     * implemented. */
    assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE);

    HMAC_CTX hmac_ctx;
    HMAC_CTX_init(&hmac_ctx);
    unsigned mac_len_u;
    if (!HMAC_CTX_copy_ex(&hmac_ctx, &tls_ctx->hmac_ctx) ||
        !HMAC_Update(&hmac_ctx, ad_fixed, ad_len) ||
        !HMAC_Update(&hmac_ctx, out, data_len) ||
        !HMAC_Final(&hmac_ctx, mac, &mac_len_u)) {
      HMAC_CTX_cleanup(&hmac_ctx);
      return 0;
    }
    mac_len = mac_len_u;
    HMAC_CTX_cleanup(&hmac_ctx);

    assert(mac_len == HMAC_size(&tls_ctx->hmac_ctx));
    record_mac = &out[data_len];
  }

  /* Perform the MAC check and the padding check in constant-time. It should be
   * safe to simply perform the padding check first, but it would not be under a
   * different choice of MAC location on padding failure. See
   * EVP_tls_cbc_remove_padding. */
  unsigned good = constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len),
                                       0);
  good &= constant_time_eq_int(padding_ok, 1);
  if (!good) {
    OPENSSL_PUT_ERROR(CIPHER, aead_tls_open, CIPHER_R_BAD_DECRYPT);
    return 0;
  }

  /* End of timing-sensitive code. */

  *out_len = data_len;
  return 1;
}
361
/* The aead_*_tls_init wrappers below bind one cipher/digest pair each to
 * |aead_tls_init|. The *_implicit_iv variants are for pre-TLS-1.1 CBC suites
 * whose IV is carried in the key material instead of the per-record nonce. */
static int aead_rc4_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                  size_t key_len, size_t tag_len,
                                  enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_rc4(), EVP_sha1(),
                       0);
}

static int aead_aes_128_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
                                          size_t key_len, size_t tag_len,
                                          enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 0);
}

static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha1(), 1);
}

static int aead_aes_256_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha256(), 0);
}

static int aead_aes_256_cbc_sha384_tls_init(EVP_AEAD_CTX *ctx,
                                            const uint8_t *key, size_t key_len,
                                            size_t tag_len,
                                            enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
                       EVP_sha384(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
                                           const uint8_t *key, size_t key_len,
                                           size_t tag_len,
                                           enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 0);
}

static int aead_des_ede3_cbc_sha1_tls_implicit_iv_init(
    EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, size_t tag_len,
    enum evp_aead_direction_t dir) {
  return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_des_ede3_cbc(),
                       EVP_sha1(), 1);
}
435
aead_rc4_sha1_tls_get_rc4_state(const EVP_AEAD_CTX * ctx,const RC4_KEY ** out_key)436 static int aead_rc4_sha1_tls_get_rc4_state(const EVP_AEAD_CTX *ctx,
437 const RC4_KEY **out_key) {
438 const AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX*) ctx->aead_state;
439 if (EVP_CIPHER_CTX_cipher(&tls_ctx->cipher_ctx) != EVP_rc4()) {
440 return 0;
441 }
442
443 *out_key = (const RC4_KEY*) tls_ctx->cipher_ctx.cipher_data;
444 return 1;
445 }
446
/* AEAD vtables, one per supported legacy cipher suite. The plain |init| slot
 * is NULL for all of them: these AEADs are direction-specific and are set up
 * through the direction-aware initialiser listed after it. Implicit-IV
 * variants declare a zero-length nonce and fold the IV into the key length. */
static const EVP_AEAD aead_rc4_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + RC4) */
    0,                      /* nonce len */
    SHA_DIGEST_LENGTH,      /* overhead */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_rc4_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    aead_rc4_sha1_tls_get_rc4_state, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_128_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_128_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA_DIGEST_LENGTH,         /* max tag length */
    NULL,                      /* init */
    aead_aes_128_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */
    16,                     /* nonce len (IV) */
    16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_aes_256_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */
    0,                           /* nonce len */
    16 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,           /* max tag length */
    NULL,                        /* init */
    aead_aes_256_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_256_cbc_sha256_tls = {
    SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */
    SHA_DIGEST_LENGTH,         /* max tag length */
    NULL,                      /* init */
    aead_aes_256_cbc_sha256_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
    SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */
    16,                        /* nonce len (IV) */
    16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */
    SHA_DIGEST_LENGTH,         /* max tag length */
    NULL,                      /* init */
    aead_aes_256_cbc_sha384_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
    SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */
    8,                      /* nonce len (IV) */
    8 + SHA_DIGEST_LENGTH,  /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,      /* max tag length */
    NULL,                   /* init */
    aead_des_ede3_cbc_sha1_tls_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};

static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = {
    SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */
    0,                          /* nonce len */
    8 + SHA_DIGEST_LENGTH,      /* overhead (padding + SHA1) */
    SHA_DIGEST_LENGTH,          /* max tag length */
    NULL,                       /* init */
    aead_des_ede3_cbc_sha1_tls_implicit_iv_init,
    aead_tls_cleanup,
    aead_tls_seal,
    aead_tls_open,
    NULL, /* get_rc4_state */
};
576
/* Public accessors: each returns a pointer to the corresponding static AEAD
 * vtable with static storage duration, so callers never free the result. */
const EVP_AEAD *EVP_aead_rc4_sha1_tls(void) { return &aead_rc4_sha1_tls; }

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) {
  return &aead_aes_128_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) {
  return &aead_aes_128_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) {
  return &aead_aes_256_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
  return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha256_tls(void) {
  return &aead_aes_256_cbc_sha256_tls;
}

const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void) {
  return &aead_aes_256_cbc_sha384_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
  return &aead_des_ede3_cbc_sha1_tls;
}

const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void) {
  return &aead_des_ede3_cbc_sha1_tls_implicit_iv;
}
614