/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8

/* For AES-CCM, the full 16 bytes of IV are made of four fields of the
 * following sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The 'length' field is encoded in 'b0' as '(length width - 1)',
 * hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
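
/* As a rough illustration of the layout above (assuming the usual 4-byte
 * salt and 8-byte record sequence number; not code this header relies on),
 * a CCM IV could be assembled like this:
 *
 *	iv[0] = TLS_AES_CCM_IV_B0_BYTE;		// b0: length width 3, minus 1
 *	memcpy(iv + 1, salt, 4);		// implicit nonce
 *	memcpy(iv + 5, rec_seq, 8);		// explicit nonce
 *	// iv[13..15], the 3-byte length, is filled in by the CCM code
 */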

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define __TLS_DEC_STATS(net, field)				\
	__SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

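/* Rough meaning of the TX/RX configuration values below (summary only):
 *   TLS_BASE      - TLS ULP attached, but record crypto is not handled
 *                   by the kernel
 *   TLS_SW        - record crypto is done by the kernel in software
 *   TLS_HW        - record crypto is offloaded to the NIC
 *   TLS_HW_RECORD - the hardware handles the TLS records entirely
 */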
enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)

enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);
void tls_err_abort(struct sock *sk, int err);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	/* tx_list may be empty; list_first_entry() would return a bogus
	 * pointer in that case, so use the NULL-safe variant.
	 */
	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}
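
/* tls_bigint_increment() treats 'seq' as a big-endian integer and returns
 * true only when every byte wrapped to zero, i.e. the counter overflowed.
 * A short worked example (illustrative values only):
 *
 *	u8 a[2] = { 0x00, 0xff };
 *	tls_bigint_increment(a, 2);	// a = { 0x01, 0x00 }, returns false
 *	u8 b[2] = { 0xff, 0xff };
 *	tls_bigint_increment(b, 2);	// b = { 0x00, 0x00 }, returns true
 *
 * Callers such as tls_advance_record_sn() below treat overflow of the
 * record sequence number as a fatal protocol error.
 */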

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, -EBADMSG);

	if (prot->version != TLS_1_3_VERSION)
		tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type,
				    int version)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (version != TLS_1_3_VERSION) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
	}

	/* we cover the explicit nonce here as well, so buf should be of
	 * size TLS_HEADER_SIZE + the explicit nonce size (prot->iv_size)
	 */
	buf[0] = version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
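
/* A worked example of the prepend for TLS 1.2 AES-GCM-128 (illustrative
 * numbers, assuming prot->tag_size == 16 and prot->iv_size == 8): for a
 * 100-byte plaintext, pkt_len = 100 + 16 + 8 = 124 (0x7c), so the 5-byte
 * header becomes
 *
 *	0x17 0x03 0x03 0x00 0x7c
 *	type maj  min  length
 *
 * followed at TLS_NONCE_OFFSET by the 8-byte explicit nonce.
 */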

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				int record_sequence_size,
				unsigned char record_type,
				int version)
{
	if (version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, record_sequence_size);
		buf += 8;
	} else {
		size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	}

	buf[0] = version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}
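
/* For TLS 1.2 the resulting AAD occupies all TLS_AAD_SPACE_SIZE (13) bytes:
 *
 *	seq[8] || type[1] || version[2] || length[2]
 *
 * For TLS 1.3 only the 5 pseudo-header bytes are written, and 'size' is
 * grown by the tag size, since there the AAD is the record header and the
 * length field covers the full ciphertext.
 */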

static inline void xor_iv_with_seq(int version, char *iv, char *seq)
{
	int i;

	if (version == TLS_1_3_VERSION) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}
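
/* This implements the TLS 1.3 per-record nonce construction (RFC 8446,
 * section 5.3): the 8-byte record sequence number, left-padded with zeros
 * to the IV length, is XORed into the static IV. The zero pad corresponds
 * to the 4-byte salt at iv[0..3], so the sequence lands at iv[4..11].
 */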

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
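
/* A driver would typically overlay its own private struct on driver_state,
 * roughly like this ('struct my_tls_tx_state' is a hypothetical example):
 *
 *	struct my_tls_tx_state *st =
 *		tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *
 * together with a BUILD_BUG_ON(sizeof(struct my_tls_tx_state) >
 * TLS_DRIVER_STATE_SIZE_TX) to stay within the reserved room.
 */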
#endif

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
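
/* The request is packed into a single atomic64_t so the core can consume it
 * without locking: bits 63..32 carry the TCP sequence of the suspected
 * record header and bit 0 (RESYNC_REQ) marks the request as pending. The
 * async variant below additionally carries the length in bits 31..16 and
 * sets bit 1 (RESYNC_REQ_ASYNC).
 */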

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to be disabled until resync succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}
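
/* tls_offload_tx_resync_pending() pairs with tls_offload_tx_resync_request()
 * (declared below): the driver sets TLS_TX_SYNC_SCHED when it detects that
 * its TX record state ran out of sync with the TCP stream, and its own
 * sequence tracking is expected to stay disabled until the core clears the
 * bit once resync succeeds.
 */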

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
		     struct sk_buff *skb, struct strp_msg *rxm)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */