// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * The following memory management concept is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together
 * with the RX SGL release.
 */
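
/*
 * A minimal user-space usage sketch (illustrative only, not compiled as
 * part of this file), assuming AES-GCM with a 16-byte key and tag. A
 * sendmsg() carrying ALG_SET_OP, ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN
 * control messages fills the TX SGL with AAD || PT; the subsequent
 * read()/recvmsg() supplies the RX buffer, triggers the cipher operation
 * and returns AAD || CT || Tag. Error handling is omitted; see
 * Documentation/crypto/userspace-if.rst for the full interface.
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *
 *	int opfd = accept(tfmfd, NULL, 0);
 */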

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_tfm {
	struct crypto_aead *aead;
	struct crypto_sync_skcipher *null_tfm;
};

static inline bool aead_sufficient_data(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and, in case of decryption, the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivsize = crypto_aead_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

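/*
 * Copy an SGL using the default null skcipher ("ecb(cipher_null)"): an
 * "encryption" with the null cipher is simply a copy from the source SGL
 * to the destination SGL.
 */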
static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
				struct scatterlist *src,
				struct scatterlist *dst, unsigned int len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

	skcipher_request_set_sync_tfm(skreq, null_tfm);
	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(skreq, src, dst, len, NULL);

	return crypto_skcipher_encrypt(skreq);
}

static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			 size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
	unsigned int i, as = crypto_aead_authsize(tfm);
	struct af_alg_async_req *areq;
	struct af_alg_tsgl *tsgl, *tmp;
	struct scatterlist *rsgl_src, *tsgl_src = NULL;
	int err = 0;
	size_t used = 0;	/* [in]  TX bufs to be en/decrypted */
	size_t outlen = 0;	/* [out] RX bufs produced by kernel */
	size_t usedpages = 0;	/* [in]  RX bufs to be used from user */
	size_t processed = 0;	/* [in]  TX bufs to be consumed */

	if (!ctx->init || ctx->more) {
		err = af_alg_wait_for_data(sk, flags, 0);
		if (err)
			return err;
	}

	/*
	 * Data length provided by caller via sendmsg/sendpage that has not
	 * yet been processed.
	 */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendmsg/sendpage
	 * inform the data sender that something is wrong, but they are
	 * irrelevant for maintaining kernel integrity. We need this check
	 * here too in case user space decides not to honor the error
	 * returned by sendmsg/sendpage and still calls recvmsg. This check
	 * protects the kernel's integrity.
	 */
	if (!aead_sufficient_data(sk))
		return -EINVAL;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag, which is consumed; only the plaintext,
	 * without a buffer for the tag, is returned to the caller.
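	 *
	 * For example, with a 16-byte tag, encrypting 20 bytes of AAD plus
	 * 80 bytes of plaintext (used = 100) produces outlen = 116
	 * (AAD || CT || Tag), while decrypting those 116 bytes produces
	 * outlen = 100 (AAD || PT).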
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_aead_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* convert iovecs of output buffers into RX SGL */
	err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
	if (err)
		goto free;

	/*
	 * Ensure the output buffer is sufficiently large. If the caller
	 * provides less buffer space, only process the correspondingly
	 * smaller amount of input. This allows AIO operation where the
	 * caller sent all data to be processed and each AIO call performs
	 * the operation on a different chunk of the input data.
	 */
	if (usedpages < outlen) {
		size_t less = outlen - usedpages;

		if (used < less) {
			err = -EINVAL;
			goto free;
		}
		used -= less;
		outlen -= less;
	}

	processed = used + ctx->aead_assoclen;
	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
		for (i = 0; i < tsgl->cur; i++) {
			struct scatterlist *process_sg = tsgl->sg + i;

			if (!(process_sg->length) || !sg_page(process_sg))
				continue;
			tsgl_src = process_sg;
			break;
		}
		if (tsgl_src)
			break;
	}
	if (processed && !tsgl_src) {
		err = -EFAULT;
		goto free;
	}

	/*
	 * Copy of AAD from source to destination
	 *
	 * The AAD is copied to the destination buffer without change. Even
	 * when user space uses an in-place cipher operation, the kernel will
	 * copy the data as it cannot tell whether such an in-place operation
	 * was requested.
	 *
	 * For efficiency, the following implementation ensures that the
	 * ciphers are invoked to perform the crypto operation in-place. This
	 * is achieved by the memory management described below.
	 */

	/* Use the RX SGL as source (and destination) for crypto op. */
	rsgl_src = areq->first_rsgl.sgl.sg;

	if (ctx->enc) {
		/*
		 * Encryption operation - The in-place cipher operation is
		 * achieved by the following operation:
		 *
		 * TX SGL: AAD || PT
		 *          |      |
		 *          | copy |
		 *          v      v
		 * RX SGL: AAD || PT || Tag
		 */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, processed);
		if (err)
			goto free;
		af_alg_pull_tsgl(sk, processed, NULL, 0);
	} else {
		/*
		 * Decryption operation - To achieve an in-place cipher
		 * operation, the following SGL structure is used:
		 *
		 * TX SGL: AAD || CT || Tag
		 *          |      |     ^
		 *          | copy |     | Create SGL link.
		 *          v      v     |
		 * RX SGL: AAD || CT ----+
		 */

		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
					   areq->first_rsgl.sgl.sg, outlen);
		if (err)
			goto free;

		/* Create TX SGL for tag and chain it to RX SGL. */
		areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
						       processed - as);
		if (!areq->tsgl_entries)
			areq->tsgl_entries = 1;
		areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
							 areq->tsgl_entries),
					  GFP_KERNEL);
		if (!areq->tsgl) {
			err = -ENOMEM;
			goto free;
		}
		sg_init_table(areq->tsgl, areq->tsgl_entries);

		/* Release TX SGL, except for tag data, and reassign tag data. */
		af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);

		/* chain the areq TX SGL holding the tag with RX SGL */
		if (usedpages) {
			/* RX SGL present */
			struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;

			sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
			sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
				 areq->tsgl);
		} else
			/* no RX SGL present (e.g. authentication only) */
			rsgl_src = areq->tsgl;
	}

	/* Initialize the crypto operation */
	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
			       areq->first_rsgl.sgl.sg, used, ctx->iv);
	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember output size that will be generated. */
		areq->outlen = outlen;

		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_SLEEP,
					  af_alg_async_cb, areq);
		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
				 crypto_aead_decrypt(&areq->cra_u.aead_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS)
			return -EIOCBQUEUED;

		sock_put(sk);
	} else {
		/* Synchronous operation */
		aead_request_set_callback(&areq->cra_u.aead_req,
					  CRYPTO_TFM_REQ_MAY_SLEEP |
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
				crypto_aead_encrypt(&areq->cra_u.aead_req) :
				crypto_aead_decrypt(&areq->cra_u.aead_req),
				&ctx->wait);
	}

free:
	af_alg_free_resources(areq);

	return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _aead_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, it must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

static struct proto_ops algif_aead_ops = {
	.family = PF_ALG,

	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.getname = sock_no_getname,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.getsockopt = sock_no_getsockopt,
	.mmap = sock_no_mmap,
	.bind = sock_no_bind,
	.accept = sock_no_accept,
	.setsockopt = sock_no_setsockopt,

	.release = af_alg_release,
	.sendmsg = aead_sendmsg,
	.sendpage = af_alg_sendpage,
	.recvmsg = aead_recvmsg,
	.poll = af_alg_poll,
};

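/*
 * Check whether the parent transform has been supplied with a key: a
 * socket accepted before setkey uses the _nokey ops below, which fail
 * with -ENOKEY until the tfm no longer reports CRYPTO_TFM_NEED_KEY.
 */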
static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (!atomic_read(&ask->nokey_refcnt))
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	atomic_dec(&pask->nokey_refcnt);
	atomic_set(&ask->nokey_refcnt, 0);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t size)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_sendmsg(sock, msg, size);
}

static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
				   int offset, size_t size, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return af_alg_sendpage(sock, page, offset, size, flags);
}

static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
			      size_t ignored, int flags)
{
	int err;

	err = aead_check_key(sock);
	if (err)
		return err;

	return aead_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_aead_ops_nokey = {
	.family = PF_ALG,

	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.getname = sock_no_getname,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.getsockopt = sock_no_getsockopt,
	.mmap = sock_no_mmap,
	.bind = sock_no_bind,
	.accept = sock_no_accept,
	.setsockopt = sock_no_setsockopt,

	.release = af_alg_release,
	.sendmsg = aead_sendmsg_nokey,
	.sendpage = aead_sendpage_nokey,
	.recvmsg = aead_recvmsg_nokey,
	.poll = af_alg_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	struct aead_tfm *tfm;
	struct crypto_aead *aead;
	struct crypto_sync_skcipher *null_tfm;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	aead = crypto_alloc_aead(name, type, mask);
	if (IS_ERR(aead)) {
		kfree(tfm);
		return ERR_CAST(aead);
	}

	null_tfm = crypto_get_default_null_skcipher();
	if (IS_ERR(null_tfm)) {
		crypto_free_aead(aead);
		kfree(tfm);
		return ERR_CAST(null_tfm);
	}

	tfm->aead = aead;
	tfm->null_tfm = null_tfm;

	return tfm;
}

static void aead_release(void *private)
{
	struct aead_tfm *tfm = private;

	crypto_free_aead(tfm->aead);
	crypto_put_default_null_skcipher();
	kfree(tfm);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setauthsize(tfm->aead, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct aead_tfm *tfm = private;

	return crypto_aead_setkey(tfm->aead, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivlen = crypto_aead_ivsize(tfm);

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_tfm *tfm = private;
	struct crypto_aead *aead = tfm->aead;
	unsigned int len = sizeof(*ctx);
	unsigned int ivlen = crypto_aead_ivsize(aead);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_tfm *tfm = private;

	if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return aead_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_aead = {
	.bind = aead_bind,
	.release = aead_release,
	.setkey = aead_setkey,
	.setauthsize = aead_setauthsize,
	.accept = aead_accept_parent,
	.accept_nokey = aead_accept_parent_nokey,
	.ops = &algif_aead_ops,
	.ops_nokey = &algif_aead_ops_nokey,
	.name = "aead",
	.owner = THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);

	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");