// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
        struct ethhdr eth;
        /* SecTAG */
        u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
        u8 short_length:6,
           unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
        u8 unused:2,
           short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
        __be32 packet_number;
        u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)                        \
        for (sc = rcu_dereference_bh(secy->rx_sc);     \
             sc;                                       \
             sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)                   \
        for (sc = rtnl_dereference(secy->rx_sc);       \
             sc;                                       \
             sc = rtnl_dereference(sc->next))

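/* pn_same_half() is true when two 32-bit packet numbers fall in the same
 * half of the PN space, i.e. their most significant bits (bit 31) match.
 * The XPN code uses this to decide whether the upper 32 bits of a 64-bit
 * PN need to be bumped when recovering a full PN from the 32 bits carried
 * in the SecTAG.
 */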
#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

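/* IV for GCM-AES-XPN (IEEE 802.1AEbw-2013): the 96-bit IV is the 32-bit
 * SSCI followed by the 64-bit PN, each XORed with the corresponding part
 * of the 12-byte key-derived salt (see macsec_fill_iv_xpn() below).
 */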
struct gcm_iv_xpn {
        union {
                u8 short_secure_channel_id[4];
                ssci_t ssci;
        };
        __be64 pn;
} __packed;

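/* IV for plain GCM-AES: the 96-bit IV is the 64-bit SCI followed by the
 * 32-bit PN (see macsec_fill_iv() below).
 */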
struct gcm_iv {
        union {
                u8 secure_channel_id[8];
                sci_t sci;
        };
        __be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
        struct macsec_dev_stats stats;
        struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
        struct macsec_secy secy;
        struct net_device *real_dev;
        struct pcpu_secy_stats __percpu *stats;
        struct list_head secys;
        struct gro_cells gro_cells;
        enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
        struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
        return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
        return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
        return rtnl_dereference(dev->rx_handler_data);
}

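/* Per-packet scratch state kept in skb->cb while a frame is in flight
 * through the (possibly asynchronous) AEAD transform; the BUILD_BUG_ON in
 * macsec_skb_cb() guarantees it fits in the 48 bytes of skb->cb.
 */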
struct macsec_cb {
        struct aead_request *req;
        union {
                struct macsec_tx_sa *tx_sa;
                struct macsec_rx_sa *rx_sa;
        };
        u8 assoc_num;
        bool valid;
        bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
        struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

        if (!sa || !sa->active)
                return NULL;

        if (!refcount_inc_not_zero(&sa->refcnt))
                return NULL;

        return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
        struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

        free_percpu(rx_sc->stats);
        kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
        return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
        if (refcount_dec_and_test(&sc->refcnt))
                call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
        struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

        crypto_free_aead(sa->key.tfm);
        free_percpu(sa->stats);
        kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
        if (refcount_dec_and_test(&sa->refcnt))
                call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
        struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

        if (!sa || !sa->active)
                return NULL;

        if (!refcount_inc_not_zero(&sa->refcnt))
                return NULL;

        return sa;
}

static void free_txsa(struct rcu_head *head)
{
        struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

        crypto_free_aead(sa->key.tfm);
        free_percpu(sa->stats);
        kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
        if (refcount_dec_and_test(&sa->refcnt))
                call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
        return (struct macsec_cb *)skb->cb;
}

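/* MACSEC_PORT_ES is the implicit port identifier (00-01) of an End
 * Station; MACSEC_PORT_SCB is the reserved port (00-00) used with the SCB
 * bit. The UNDEF values are all-ones sentinels meaning "not set".
 */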
#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW ((1 << 30) - 1)

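/* Encode the SCI in the SecTAG when explicitly configured to, or when the
 * peer cannot infer it: more than one receive SC exists and neither the
 * ES nor the SCB bit will be set (IEEE 802.1AE-2006 10.5.3).
 */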
static bool send_sci(const struct macsec_secy *secy)
{
        const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

        return tx_sc->send_sci ||
                (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

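/* An SCI is the 48-bit MAC address of the transmitting port followed by a
 * 16-bit port identifier, stored in network byte order so it can be copied
 * to/from the SecTAG directly; e.g. address 52:54:00:12:34:56 with port 1
 * gives the on-wire SCI 52:54:00:12:34:56:00:01.
 */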
static sci_t make_sci(u8 *addr, __be16 port)
{
        sci_t sci;

        memcpy(&sci, addr, ETH_ALEN);
        memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

        return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
        sci_t sci;

        if (sci_present)
                memcpy(&sci, hdr->secure_channel_id,
                       sizeof(hdr->secure_channel_id));
        else
                sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

        return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
        return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
        return macsec_sectag_len(sci_present) + ETH_HLEN;
}

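/* Octets by which a protected frame grows at the front: the MACsec
 * EtherType (the sizeof(__be16)) plus the SecTAG itself. The ICV added at
 * the tail is accounted for separately by MACSEC_NEEDED_TAILROOM.
 */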
static unsigned int macsec_extra_len(bool sci_present)
{
        return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
                               const struct macsec_secy *secy, u32 pn,
                               bool sci_present)
{
        const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

        memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
        h->eth.h_proto = htons(ETH_P_MACSEC);

        if (sci_present) {
                h->tci_an |= MACSEC_TCI_SC;
                memcpy(&h->secure_channel_id, &secy->sci,
                       sizeof(h->secure_channel_id));
        } else {
                if (tx_sc->end_station)
                        h->tci_an |= MACSEC_TCI_ES;
                if (tx_sc->scb)
                        h->tci_an |= MACSEC_TCI_SCB;
        }

        h->packet_number = htonl(pn);

        /* with GCM, C/E clear for !encrypt, both set for encrypt */
        if (tx_sc->encrypt)
                h->tci_an |= MACSEC_TCI_CONFID;
        else if (secy->icv_len != DEFAULT_ICV_LEN)
                h->tci_an |= MACSEC_TCI_C;

        h->tci_an |= tx_sc->encoding_sa;
}

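/* Set the Short Length field (IEEE 802.1AE-2006 9.7): it carries the
 * number of octets of secure data when that number is below 48, and is
 * left at zero (cleared by macsec_fill_sectag()) otherwise.
 */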
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
        if (data_len < MIN_NON_SHORT_LEN)
                h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
        if (macsec->offload == MACSEC_OFFLOAD_MAC ||
            macsec->offload == MACSEC_OFFLOAD_PHY)
                return true;

        return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
                                 struct macsec_dev *macsec)
{
        if (!macsec || !macsec->real_dev)
                return false;

        if (offload == MACSEC_OFFLOAD_PHY)
                return macsec->real_dev->phydev &&
                       macsec->real_dev->phydev->macsec_ops;
        else if (offload == MACSEC_OFFLOAD_MAC)
                return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
                       macsec->real_dev->macsec_ops;

        return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
                                                 struct macsec_dev *macsec,
                                                 struct macsec_context *ctx)
{
        if (ctx) {
                memset(ctx, 0, sizeof(*ctx));
                ctx->offload = offload;

                if (offload == MACSEC_OFFLOAD_PHY)
                        ctx->phydev = macsec->real_dev->phydev;
                else if (offload == MACSEC_OFFLOAD_MAC)
                        ctx->netdev = macsec->real_dev;
        }

        if (offload == MACSEC_OFFLOAD_PHY)
                return macsec->real_dev->phydev->macsec_ops;
        else
                return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
                                               struct macsec_context *ctx)
{
        if (!macsec_check_offload(macsec->offload, macsec))
                return NULL;

        return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
        struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
        int len = skb->len - 2 * ETH_ALEN;
        int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

        /* a) It comprises at least 17 octets */
        if (skb->len <= 16)
                return false;

        /* b) MACsec EtherType: already checked */

        /* c) V bit is clear */
        if (h->tci_an & MACSEC_TCI_VERSION)
                return false;

        /* d) ES or SCB => !SC */
        if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
            (h->tci_an & MACSEC_TCI_SC))
                return false;

        /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
        if (h->unused)
                return false;

        /* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
        if (!h->packet_number && !xpn)
                return false;

        /* length check, f) g) h) i) */
        if (h->short_length)
                return len == extra_len + h->short_length;
        return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
                               salt_t salt)
{
        struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

        gcm_iv->ssci = ssci ^ salt.ssci;
        gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
        struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

        gcm_iv->sci = sci;
        gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
        return (struct macsec_eth_header *)skb_mac_header(skb);
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
        return make_sci(dev->dev_addr, port);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
                                struct macsec_tx_sa *tx_sa)
{
        pr_debug("PN wrapped, transitioning to !oper\n");
        tx_sa->active = false;
        if (secy->protect_frames)
                secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
        spin_lock_bh(&tx_sa->lock);
        __macsec_pn_wrapped(secy, tx_sa);
        spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

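/* Reserve the next packet number for this SA under the SA lock and return
 * the pre-increment value for use in the SecTAG and IV. When the PN space
 * wraps, the SA is retired via __macsec_pn_wrapped().
 */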
static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
                            struct macsec_secy *secy)
{
        pn_t pn;

        spin_lock_bh(&tx_sa->lock);

        pn = tx_sa->next_pn_halves;
        if (secy->xpn)
                tx_sa->next_pn++;
        else
                tx_sa->next_pn_halves.lower++;

        if (tx_sa->next_pn == 0)
                __macsec_pn_wrapped(secy, tx_sa);
        spin_unlock_bh(&tx_sa->lock);

        return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
        struct macsec_dev *macsec = netdev_priv(dev);

        skb->dev = macsec->real_dev;
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
                            struct macsec_tx_sa *tx_sa)
{
        struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

        u64_stats_update_begin(&txsc_stats->syncp);
        if (tx_sc->encrypt) {
                txsc_stats->stats.OutOctetsEncrypted += skb->len;
                txsc_stats->stats.OutPktsEncrypted++;
                this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
        } else {
                txsc_stats->stats.OutOctetsProtected += skb->len;
                txsc_stats->stats.OutPktsProtected++;
                this_cpu_inc(tx_sa->stats->OutPktsProtected);
        }
        u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes += len;
                u64_stats_update_end(&stats->syncp);
        }
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;
        struct net_device *dev = skb->dev;
        struct macsec_dev *macsec = macsec_priv(dev);
        struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
        int len, ret;

        aead_request_free(macsec_skb_cb(skb)->req);

        rcu_read_lock_bh();
        macsec_encrypt_finish(skb, dev);
        macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
        len = skb->len;
        ret = dev_queue_xmit(skb);
        count_tx(dev, ret, len);
        rcu_read_unlock_bh();

        macsec_txsa_put(sa);
        dev_put(dev);
}

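/* Allocate the AEAD request, the 12-byte IV, and the scatterlist array in
 * a single GFP_ATOMIC allocation, with the scatterlist aligned to
 * __alignof__(struct scatterlist):
 *
 *   [ aead_request + crypto_aead_reqsize(tfm) | IV | sg[num_frags] ]
 *
 * so the completion path can release everything with one free.
 */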
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
                                             unsigned char **iv,
                                             struct scatterlist **sg,
                                             int num_frags)
{
        size_t size, iv_offset, sg_offset;
        struct aead_request *req;
        void *tmp;

        size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
        iv_offset = size;
        size += GCM_AES_IV_LEN;

        size = ALIGN(size, __alignof__(struct scatterlist));
        sg_offset = size;
        size += sizeof(struct scatterlist) * num_frags;

        tmp = kmalloc(size, GFP_ATOMIC);
        if (!tmp)
                return NULL;

        *iv = (unsigned char *)(tmp + iv_offset);
        *sg = (struct scatterlist *)(tmp + sg_offset);
        req = tmp;

        aead_request_set_tfm(req, tfm);

        return req;
}

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                                      struct net_device *dev)
{
        int ret;
        struct scatterlist *sg;
        struct sk_buff *trailer;
        unsigned char *iv;
        struct ethhdr *eth;
        struct macsec_eth_header *hh;
        size_t unprotected_len;
        struct aead_request *req;
        struct macsec_secy *secy;
        struct macsec_tx_sc *tx_sc;
        struct macsec_tx_sa *tx_sa;
        struct macsec_dev *macsec = macsec_priv(dev);
        bool sci_present;
        pn_t pn;

        secy = &macsec->secy;
        tx_sc = &secy->tx_sc;

        /* 10.5.1 TX SA assignment */
        tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
        if (!tx_sa) {
                secy->operational = false;
                kfree_skb(skb);
                return ERR_PTR(-EINVAL);
        }

        if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
                     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
                struct sk_buff *nskb = skb_copy_expand(skb,
                                                       MACSEC_NEEDED_HEADROOM,
                                                       MACSEC_NEEDED_TAILROOM,
                                                       GFP_ATOMIC);
                if (likely(nskb)) {
                        consume_skb(skb);
                        skb = nskb;
                } else {
                        macsec_txsa_put(tx_sa);
                        kfree_skb(skb);
                        return ERR_PTR(-ENOMEM);
                }
        } else {
                skb = skb_unshare(skb, GFP_ATOMIC);
                if (!skb) {
                        macsec_txsa_put(tx_sa);
                        return ERR_PTR(-ENOMEM);
                }
        }

        unprotected_len = skb->len;
        eth = eth_hdr(skb);
        sci_present = send_sci(secy);
        hh = skb_push(skb, macsec_extra_len(sci_present));
        memmove(hh, eth, 2 * ETH_ALEN);

        pn = tx_sa_update_pn(tx_sa, secy);
        if (pn.full64 == 0) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(-ENOLINK);
        }
        macsec_fill_sectag(hh, secy, pn.lower, sci_present);
        macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

        skb_put(skb, secy->icv_len);

        if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
                struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

                u64_stats_update_begin(&secy_stats->syncp);
                secy_stats->stats.OutPktsTooLong++;
                u64_stats_update_end(&secy_stats->syncp);

                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(-EINVAL);
        }

        ret = skb_cow_data(skb, 0, &trailer);
        if (unlikely(ret < 0)) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(ret);
        }

        req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
        if (!req) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
        }

        if (secy->xpn)
                macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
        else
                macsec_fill_iv(iv, secy->sci, pn.lower);

        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
                aead_request_free(req);
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(ret);
        }

        if (tx_sc->encrypt) {
                int len = skb->len - macsec_hdr_len(sci_present) -
                          secy->icv_len;
                aead_request_set_crypt(req, sg, sg, len, iv);
                aead_request_set_ad(req, macsec_hdr_len(sci_present));
        } else {
                aead_request_set_crypt(req, sg, sg, 0, iv);
                aead_request_set_ad(req, skb->len - secy->icv_len);
        }

        macsec_skb_cb(skb)->req = req;
        macsec_skb_cb(skb)->tx_sa = tx_sa;
        aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

        dev_hold(skb->dev);
        ret = crypto_aead_encrypt(req);
        if (ret == -EINPROGRESS) {
                return ERR_PTR(ret);
        } else if (ret != 0) {
                dev_put(skb->dev);
                kfree_skb(skb);
                aead_request_free(req);
                macsec_txsa_put(tx_sa);
                return ERR_PTR(-EINVAL);
        }

        dev_put(skb->dev);
        aead_request_free(req);
        macsec_txsa_put(tx_sa);

        return skb;
}

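/* Post-decryption bookkeeping and the second replay-protection check of
 * IEEE 802.1AE-2006 figure 10-5, run under the SA lock so that next_pn
 * updates are serialized against concurrent receives. Returns false when
 * the frame must be dropped.
 */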
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
        struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
        struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
        struct macsec_eth_header *hdr = macsec_ethhdr(skb);
        u32 lowest_pn = 0;

        spin_lock(&rx_sa->lock);
        if (rx_sa->next_pn_halves.lower >= secy->replay_window)
                lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

        /* Now perform replay protection check again
         * (see IEEE 802.1AE-2006 figure 10-5)
         */
        if (secy->replay_protect && pn < lowest_pn &&
            (!secy->xpn || pn_same_half(pn, lowest_pn))) {
                spin_unlock(&rx_sa->lock);
                u64_stats_update_begin(&rxsc_stats->syncp);
                rxsc_stats->stats.InPktsLate++;
                u64_stats_update_end(&rxsc_stats->syncp);
                return false;
        }

        if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
                u64_stats_update_begin(&rxsc_stats->syncp);
                if (hdr->tci_an & MACSEC_TCI_E)
                        rxsc_stats->stats.InOctetsDecrypted += skb->len;
                else
                        rxsc_stats->stats.InOctetsValidated += skb->len;
                u64_stats_update_end(&rxsc_stats->syncp);
        }

        if (!macsec_skb_cb(skb)->valid) {
                spin_unlock(&rx_sa->lock);

                /* 10.6.5 */
                if (hdr->tci_an & MACSEC_TCI_C ||
                    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
                        u64_stats_update_begin(&rxsc_stats->syncp);
                        rxsc_stats->stats.InPktsNotValid++;
                        u64_stats_update_end(&rxsc_stats->syncp);
                        return false;
                }

                u64_stats_update_begin(&rxsc_stats->syncp);
                if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
                        rxsc_stats->stats.InPktsInvalid++;
                        this_cpu_inc(rx_sa->stats->InPktsInvalid);
                } else if (pn < lowest_pn) {
                        rxsc_stats->stats.InPktsDelayed++;
                } else {
                        rxsc_stats->stats.InPktsUnchecked++;
                }
                u64_stats_update_end(&rxsc_stats->syncp);
        } else {
                u64_stats_update_begin(&rxsc_stats->syncp);
                if (pn < lowest_pn) {
                        rxsc_stats->stats.InPktsDelayed++;
                } else {
                        rxsc_stats->stats.InPktsOK++;
                        this_cpu_inc(rx_sa->stats->InPktsOK);
                }
                u64_stats_update_end(&rxsc_stats->syncp);

                /* Instead of "pn >=" - to support pn overflow in xpn */
                if (pn + 1 > rx_sa->next_pn_halves.lower) {
                        rx_sa->next_pn_halves.lower = pn + 1;
                } else if (secy->xpn &&
                           !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
                        rx_sa->next_pn_halves.upper++;
                        rx_sa->next_pn_halves.lower = pn + 1;
                }

                spin_unlock(&rx_sa->lock);
        }

        return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, dev);

        skb_reset_network_header(skb);
        if (!skb_transport_header_was_set(skb))
                skb_reset_transport_header(skb);
        skb_reset_mac_len(skb);
}

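/* Strip the SecTAG and ICV from a validated frame: move the Ethernet
 * addresses up over the tag, pull the header, and trim the ICV off the
 * tail.
 */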
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
        skb->ip_summed = CHECKSUM_NONE;
        memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
        skb_pull(skb, hdr_len);
        pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
        struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
        stats->rx_bytes += len;
        u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;
        struct net_device *dev = skb->dev;
        struct macsec_dev *macsec = macsec_priv(dev);
        struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
        struct macsec_rx_sc *rx_sc = rx_sa->sc;
        int len;
        u32 pn;

        aead_request_free(macsec_skb_cb(skb)->req);

        if (!err)
                macsec_skb_cb(skb)->valid = true;

        rcu_read_lock_bh();
        pn = ntohl(macsec_ethhdr(skb)->packet_number);
        if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
                rcu_read_unlock_bh();
                kfree_skb(skb);
                goto out;
        }

        macsec_finalize_skb(skb, macsec->secy.icv_len,
                            macsec_extra_len(macsec_skb_cb(skb)->has_sci));
        macsec_reset_skb(skb, macsec->secy.netdev);

        len = skb->len;
        if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
                count_rx(dev, len);

        rcu_read_unlock_bh();

out:
        macsec_rxsa_put(rx_sa);
        macsec_rxsc_put(rx_sc);
        dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
                                      struct net_device *dev,
                                      struct macsec_rx_sa *rx_sa,
                                      sci_t sci,
                                      struct macsec_secy *secy)
{
        int ret;
        struct scatterlist *sg;
        struct sk_buff *trailer;
        unsigned char *iv;
        struct aead_request *req;
        struct macsec_eth_header *hdr;
        u32 hdr_pn;
        u16 icv_len = secy->icv_len;

        macsec_skb_cb(skb)->valid = false;
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        ret = skb_cow_data(skb, 0, &trailer);
        if (unlikely(ret < 0)) {
                kfree_skb(skb);
                return ERR_PTR(ret);
        }
        req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
        if (!req) {
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
        }

        hdr = (struct macsec_eth_header *)skb->data;
        hdr_pn = ntohl(hdr->packet_number);

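        /* Recover the full 64-bit PN from the 32 bits carried in the
         * SecTAG (IEEE 802.1AEbw-2013): reuse the upper half of the SA's
         * next expected PN, bumping it when the received lower half has
         * wrapped into the next half of the PN space.
         */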
        if (secy->xpn) {
                pn_t recovered_pn = rx_sa->next_pn_halves;

                recovered_pn.lower = hdr_pn;
                if (hdr_pn < rx_sa->next_pn_halves.lower &&
                    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
                        recovered_pn.upper++;

                macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
                                   rx_sa->key.salt);
        } else {
                macsec_fill_iv(iv, sci, hdr_pn);
        }

        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
                aead_request_free(req);
                kfree_skb(skb);
                return ERR_PTR(ret);
        }

        if (hdr->tci_an & MACSEC_TCI_E) {
                /* confidentiality: ethernet + macsec header
                 * authenticated, encrypted payload
                 */
                int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

                aead_request_set_crypt(req, sg, sg, len, iv);
                aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
                skb = skb_unshare(skb, GFP_ATOMIC);
                if (!skb) {
                        aead_request_free(req);
                        return ERR_PTR(-ENOMEM);
                }
        } else {
                /* integrity only: all headers + data authenticated */
                aead_request_set_crypt(req, sg, sg, icv_len, iv);
                aead_request_set_ad(req, skb->len - icv_len);
        }

        macsec_skb_cb(skb)->req = req;
        skb->dev = dev;
        aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

        dev_hold(dev);
        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS) {
                return ERR_PTR(ret);
        } else if (ret != 0) {
                /* decryption/authentication failed
                 * 10.6 if validateFrames is disabled, deliver anyway
                 */
                if (ret != -EBADMSG) {
                        kfree_skb(skb);
                        skb = ERR_PTR(ret);
                }
        } else {
                macsec_skb_cb(skb)->valid = true;
        }
        dev_put(dev);

        aead_request_free(req);

        return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
        struct macsec_rx_sc *rx_sc;

        for_each_rxsc(secy, rx_sc) {
                if (rx_sc->sci == sci)
                        return rx_sc;
        }

        return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
        struct macsec_rx_sc *rx_sc;

        for_each_rxsc_rtnl(secy, rx_sc) {
                if (rx_sc->sci == sci)
                        return rx_sc;
        }

        return NULL;
}

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
        /* Deliver to the uncontrolled port by default */
        enum rx_handler_result ret = RX_HANDLER_PASS;
        struct ethhdr *hdr = eth_hdr(skb);
        struct macsec_rxh_data *rxd;
        struct macsec_dev *macsec;

        rcu_read_lock();
        rxd = macsec_data_rcu(skb->dev);

        list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
                struct sk_buff *nskb;
                struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
                struct net_device *ndev = macsec->secy.netdev;

                /* If h/w offloading is enabled, HW decodes frames and strips
                 * the SecTAG, so we have to deduce which port to deliver to.
                 */
                if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
                        if (ether_addr_equal_64bits(hdr->h_dest,
                                                    ndev->dev_addr)) {
                                /* exact match, divert skb to this port */
                                skb->dev = ndev;
                                skb->pkt_type = PACKET_HOST;
                                ret = RX_HANDLER_ANOTHER;
                                goto out;
                        } else if (is_multicast_ether_addr_64bits(
                                           hdr->h_dest)) {
                                /* multicast frame, deliver on this port too */
                                nskb = skb_clone(skb, GFP_ATOMIC);
                                if (!nskb)
                                        break;

                                nskb->dev = ndev;
                                if (ether_addr_equal_64bits(hdr->h_dest,
                                                            ndev->broadcast))
                                        nskb->pkt_type = PACKET_BROADCAST;
                                else
                                        nskb->pkt_type = PACKET_MULTICAST;

                                netif_rx(nskb);
                        }
                        continue;
                }

                /* 10.6 If the management control validateFrames is not
                 * Strict, frames without a SecTAG are received, counted, and
                 * delivered to the Controlled Port
                 */
                if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
                        u64_stats_update_begin(&secy_stats->syncp);
                        secy_stats->stats.InPktsNoTag++;
                        u64_stats_update_end(&secy_stats->syncp);
                        continue;
                }

                /* deliver on this port */
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        break;

                nskb->dev = ndev;

                if (netif_rx(nskb) == NET_RX_SUCCESS) {
                        u64_stats_update_begin(&secy_stats->syncp);
                        secy_stats->stats.InPktsUntagged++;
                        u64_stats_update_end(&secy_stats->syncp);
                }
        }

out:
        rcu_read_unlock();
        return ret;
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *dev = skb->dev;
        struct macsec_eth_header *hdr;
        struct macsec_secy *secy = NULL;
        struct macsec_rx_sc *rx_sc;
        struct macsec_rx_sa *rx_sa;
        struct macsec_rxh_data *rxd;
        struct macsec_dev *macsec;
        unsigned int len;
        sci_t sci;
        u32 hdr_pn;
        bool cbit;
        struct pcpu_rx_sc_stats *rxsc_stats;
        struct pcpu_secy_stats *secy_stats;
        bool pulled_sci;
        int ret;

        if (skb_headroom(skb) < ETH_HLEN)
                goto drop_direct;

        hdr = macsec_ethhdr(skb);
        if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
                return handle_not_macsec(skb);

        skb = skb_unshare(skb, GFP_ATOMIC);
        *pskb = skb;
        if (!skb)
                return RX_HANDLER_CONSUMED;

        pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
        if (!pulled_sci) {
                if (!pskb_may_pull(skb, macsec_extra_len(false)))
                        goto drop_direct;
        }

        hdr = macsec_ethhdr(skb);

        /* Frames with a SecTAG that has the TCI E bit set but the C
         * bit clear are discarded, as this reserved encoding is used
         * to identify frames with a SecTAG that are not to be
         * delivered to the Controlled Port.
         */
        if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
                return RX_HANDLER_PASS;

        /* now, pull the extra length */
        if (hdr->tci_an & MACSEC_TCI_SC) {
                if (!pulled_sci)
                        goto drop_direct;
        }

        /* ethernet header is part of crypto processing */
        skb_push(skb, ETH_HLEN);

        macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
        macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
        sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

        rcu_read_lock();
        rxd = macsec_data_rcu(skb->dev);

        list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
                struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

                sc = sc ? macsec_rxsc_get(sc) : NULL;

                if (sc) {
                        secy = &macsec->secy;
                        rx_sc = sc;
                        break;
                }
        }

        if (!secy)
                goto nosci;

        dev = secy->netdev;
        macsec = macsec_priv(dev);
        secy_stats = this_cpu_ptr(macsec->stats);
        rxsc_stats = this_cpu_ptr(rx_sc->stats);

        if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
                u64_stats_update_begin(&secy_stats->syncp);
                secy_stats->stats.InPktsBadTag++;
                u64_stats_update_end(&secy_stats->syncp);
                goto drop_nosa;
        }

        rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
        if (!rx_sa) {
                /* 10.6.1 if the SA is not in use */

                /* If validateFrames is Strict or the C bit in the
                 * SecTAG is set, discard
                 */
                if (hdr->tci_an & MACSEC_TCI_C ||
                    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
                        u64_stats_update_begin(&rxsc_stats->syncp);
                        rxsc_stats->stats.InPktsNotUsingSA++;
                        u64_stats_update_end(&rxsc_stats->syncp);
                        goto drop_nosa;
                }

                /* not Strict, the frame (with the SecTAG and ICV
                 * removed) is delivered to the Controlled Port.
                 */
                u64_stats_update_begin(&rxsc_stats->syncp);
                rxsc_stats->stats.InPktsUnusedSA++;
                u64_stats_update_end(&rxsc_stats->syncp);
                goto deliver;
        }

        /* First, PN check to avoid decrypting obviously wrong packets */
        hdr_pn = ntohl(hdr->packet_number);
        if (secy->replay_protect) {
                bool late;

                spin_lock(&rx_sa->lock);
                late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
                       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

                if (secy->xpn)
                        late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
                spin_unlock(&rx_sa->lock);

                if (late) {
                        u64_stats_update_begin(&rxsc_stats->syncp);
                        rxsc_stats->stats.InPktsLate++;
                        u64_stats_update_end(&rxsc_stats->syncp);
                        goto drop;
                }
        }

        macsec_skb_cb(skb)->rx_sa = rx_sa;

        /* Disabled && !changed text => skip validation */
        if (hdr->tci_an & MACSEC_TCI_C ||
            secy->validate_frames != MACSEC_VALIDATE_DISABLED)
                skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

        if (IS_ERR(skb)) {
                /* the decrypt callback needs the reference */
                if (PTR_ERR(skb) != -EINPROGRESS) {
                        macsec_rxsa_put(rx_sa);
                        macsec_rxsc_put(rx_sc);
                }
                rcu_read_unlock();
                *pskb = NULL;
                return RX_HANDLER_CONSUMED;
        }

        if (!macsec_post_decrypt(skb, secy, hdr_pn))
                goto drop;

deliver:
        macsec_finalize_skb(skb, secy->icv_len,
                            macsec_extra_len(macsec_skb_cb(skb)->has_sci));
        macsec_reset_skb(skb, secy->netdev);

        if (rx_sa)
                macsec_rxsa_put(rx_sa);
        macsec_rxsc_put(rx_sc);

        skb_orphan(skb);
        len = skb->len;
        ret = gro_cells_receive(&macsec->gro_cells, skb);
        if (ret == NET_RX_SUCCESS)
                count_rx(dev, len);
        else
                macsec->secy.netdev->stats.rx_dropped++;

        rcu_read_unlock();

        *pskb = NULL;
        return RX_HANDLER_CONSUMED;

drop:
        macsec_rxsa_put(rx_sa);
drop_nosa:
        macsec_rxsc_put(rx_sc);
        rcu_read_unlock();
drop_direct:
        kfree_skb(skb);
        *pskb = NULL;
        return RX_HANDLER_CONSUMED;

nosci:
        /* 10.6.1 if the SC is not found */
        cbit = !!(hdr->tci_an & MACSEC_TCI_C);
        if (!cbit)
                macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
                                    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

        list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
                struct sk_buff *nskb;

                secy_stats = this_cpu_ptr(macsec->stats);

                /* If validateFrames is Strict or the C bit in the
                 * SecTAG is set, discard
                 */
                if (cbit ||
                    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
                        u64_stats_update_begin(&secy_stats->syncp);
                        secy_stats->stats.InPktsNoSCI++;
                        u64_stats_update_end(&secy_stats->syncp);
                        continue;
                }

                /* not strict, the frame (with the SecTAG and ICV
                 * removed) is delivered to the Controlled Port.
                 */
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        break;

                macsec_reset_skb(nskb, macsec->secy.netdev);

                ret = netif_rx(nskb);
                if (ret == NET_RX_SUCCESS) {
                        u64_stats_update_begin(&secy_stats->syncp);
                        secy_stats->stats.InPktsUnknownSCI++;
                        u64_stats_update_end(&secy_stats->syncp);
                } else {
                        macsec->secy.netdev->stats.rx_dropped++;
                }
        }

        rcu_read_unlock();
        *pskb = skb;
        return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
        struct crypto_aead *tfm;
        int ret;

        /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
        tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);

        if (IS_ERR(tfm))
                return tfm;

        ret = crypto_aead_setkey(tfm, key, key_len);
        if (ret < 0)
                goto fail;

        ret = crypto_aead_setauthsize(tfm, icv_len);
        if (ret < 0)
                goto fail;

        return tfm;
fail:
        crypto_free_aead(tfm);
        return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
                      int icv_len)
{
        rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
        if (!rx_sa->stats)
                return -ENOMEM;

        rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
        if (IS_ERR(rx_sa->key.tfm)) {
                free_percpu(rx_sa->stats);
                return PTR_ERR(rx_sa->key.tfm);
        }

        rx_sa->ssci = MACSEC_UNDEF_SSCI;
        rx_sa->active = false;
        rx_sa->next_pn = 1;
        refcount_set(&rx_sa->refcnt, 1);
        spin_lock_init(&rx_sa->lock);

        return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
        rx_sa->active = false;

        macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
        int i;

        for (i = 0; i < MACSEC_NUM_AN; i++) {
                struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

                RCU_INIT_POINTER(rx_sc->sa[i], NULL);
                if (sa)
                        clear_rx_sa(sa);
        }

        macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
        struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

        for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
             rx_sc;
             rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
                if (rx_sc->sci == sci) {
                        if (rx_sc->active)
                                secy->n_rx_sc--;
                        rcu_assign_pointer(*rx_scp, rx_sc->next);
                        return rx_sc;
                }
        }

        return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
                                         bool active)
{
        struct macsec_rx_sc *rx_sc;
        struct macsec_dev *macsec;
        struct net_device *real_dev = macsec_priv(dev)->real_dev;
        struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
        struct macsec_secy *secy;

        list_for_each_entry(macsec, &rxd->secys, secys) {
                if (find_rx_sc_rtnl(&macsec->secy, sci))
                        return ERR_PTR(-EEXIST);
        }

        rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
        if (!rx_sc)
                return ERR_PTR(-ENOMEM);

        rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
        if (!rx_sc->stats) {
                kfree(rx_sc);
                return ERR_PTR(-ENOMEM);
        }

        rx_sc->sci = sci;
        rx_sc->active = active;
        refcount_set(&rx_sc->refcnt, 1);

        secy = &macsec_priv(dev)->secy;
        rcu_assign_pointer(rx_sc->next, secy->rx_sc);
        rcu_assign_pointer(secy->rx_sc, rx_sc);

        if (rx_sc->active)
                secy->n_rx_sc++;

        return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
                      int icv_len)
{
        tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
        if (!tx_sa->stats)
                return -ENOMEM;

        tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
        if (IS_ERR(tx_sa->key.tfm)) {
                free_percpu(tx_sa->stats);
                return PTR_ERR(tx_sa->key.tfm);
        }

        tx_sa->ssci = MACSEC_UNDEF_SSCI;
        tx_sa->active = false;
        refcount_set(&tx_sa->refcnt, 1);
        spin_lock_init(&tx_sa->lock);

        return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
        tx_sa->active = false;

        macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
                                          struct nlattr **attrs)
{
        int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
        struct net_device *dev;

        dev = __dev_get_by_index(net, ifindex);
        if (!dev)
                return ERR_PTR(-ENODEV);

        if (!netif_is_macsec(dev))
                return ERR_PTR(-ENODEV);

        return dev;
}

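/* sci_t and ssci_t are __bitwise network-order types; the helpers below
 * force-cast them to and from the plain integers carried in netlink
 * attributes.
 */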
static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
        return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
        return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
                       int padattr)
{
        return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
        return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
        return nla_put_u32(skb, attrtype, (__force u64)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
                                             struct nlattr **attrs,
                                             struct nlattr **tb_sa,
                                             struct net_device **devp,
                                             struct macsec_secy **secyp,
                                             struct macsec_tx_sc **scp,
                                             u8 *assoc_num)
{
        struct net_device *dev;
        struct macsec_secy *secy;
        struct macsec_tx_sc *tx_sc;
        struct macsec_tx_sa *tx_sa;

        if (!tb_sa[MACSEC_SA_ATTR_AN])
                return ERR_PTR(-EINVAL);

        *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

        dev = get_dev_from_nl(net, attrs);
        if (IS_ERR(dev))
                return ERR_CAST(dev);

        if (*assoc_num >= MACSEC_NUM_AN)
                return ERR_PTR(-EINVAL);

        secy = &macsec_priv(dev)->secy;
        tx_sc = &secy->tx_sc;

        tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
        if (!tx_sa)
                return ERR_PTR(-ENODEV);

        *devp = dev;
        *scp = tx_sc;
        *secyp = secy;
        return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
                                             struct nlattr **attrs,
                                             struct nlattr **tb_rxsc,
                                             struct net_device **devp,
                                             struct macsec_secy **secyp)
{
        struct net_device *dev;
        struct macsec_secy *secy;
        struct macsec_rx_sc *rx_sc;
        sci_t sci;

        dev = get_dev_from_nl(net, attrs);
        if (IS_ERR(dev))
                return ERR_CAST(dev);

        secy = &macsec_priv(dev)->secy;

        if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
                return ERR_PTR(-EINVAL);

        sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
        rx_sc = find_rx_sc_rtnl(secy, sci);
        if (!rx_sc)
                return ERR_PTR(-ENODEV);

        *secyp = secy;
        *devp = dev;

        return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
                                             struct nlattr **attrs,
                                             struct nlattr **tb_rxsc,
                                             struct nlattr **tb_sa,
                                             struct net_device **devp,
                                             struct macsec_secy **secyp,
                                             struct macsec_rx_sc **scp,
                                             u8 *assoc_num)
{
        struct macsec_rx_sc *rx_sc;
        struct macsec_rx_sa *rx_sa;

        if (!tb_sa[MACSEC_SA_ATTR_AN])
                return ERR_PTR(-EINVAL);

        *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
        if (*assoc_num >= MACSEC_NUM_AN)
                return ERR_PTR(-EINVAL);

        rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
        if (IS_ERR(rx_sc))
                return ERR_CAST(rx_sc);

        rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
        if (!rx_sa)
                return ERR_PTR(-ENODEV);

        *scp = rx_sc;
        return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
        [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
        [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
        [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
        [MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
        [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
        [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
        [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
        [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
        [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
        [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
                                   .len = MACSEC_KEYID_LEN, },
        [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
                                 .len = MACSEC_MAX_KEY_LEN, },
        [MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
        [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
                                  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
        [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
                          struct macsec_context *ctx)
{
        int ret;

        if (unlikely(!func))
                return 0;

        if (ctx->offload == MACSEC_OFFLOAD_PHY)
                mutex_lock(&ctx->phydev->lock);

        /* Phase I: prepare. The driver should fail here if there are going
         * to be issues in the commit phase.
         */
        ctx->prepare = true;
        ret = (*func)(ctx);
        if (ret)
                goto phy_unlock;

        /* Phase II: commit. This step cannot fail. */
        ctx->prepare = false;
        ret = (*func)(ctx);
        /* This should never happen: commit is not allowed to fail */
        if (unlikely(ret))
                WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
        if (ctx->offload == MACSEC_OFFLOAD_PHY)
                mutex_unlock(&ctx->phydev->lock);

        return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
        if (!attrs[MACSEC_ATTR_SA_CONFIG])
                return -EINVAL;

        if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
                return -EINVAL;

        return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
        if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
                return -EINVAL;

        if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
                return -EINVAL;

        return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
        if (!attrs[MACSEC_SA_ATTR_AN] ||
            !attrs[MACSEC_SA_ATTR_KEY] ||
            !attrs[MACSEC_SA_ATTR_KEYID])
                return false;

        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;

        if (attrs[MACSEC_SA_ATTR_PN] &&
            nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;

        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
                if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
                        return false;
        }

        if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
                return false;

        return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *dev;
        struct nlattr **attrs = info->attrs;
        struct macsec_secy *secy;
        struct macsec_rx_sc *rx_sc;
        struct macsec_rx_sa *rx_sa;
        unsigned char assoc_num;
        int pn_len;
        struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
        struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
        int err;

        if (!attrs[MACSEC_ATTR_IFINDEX])
                return -EINVAL;

        if (parse_sa_config(attrs, tb_sa))
                return -EINVAL;

        if (parse_rxsc_config(attrs, tb_rxsc))
                return -EINVAL;

        if (!validate_add_rxsa(tb_sa))
                return -EINVAL;

        rtnl_lock();
        rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
        if (IS_ERR(rx_sc)) {
                rtnl_unlock();
                return PTR_ERR(rx_sc);
        }

        assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

        if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
                pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
                          nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
                rtnl_unlock();
                return -EINVAL;
        }

        pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
        if (tb_sa[MACSEC_SA_ATTR_PN] &&
            nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
                pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
                          nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
                rtnl_unlock();
                return -EINVAL;
        }

        if (secy->xpn) {
                if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
                        rtnl_unlock();
                        return -EINVAL;
                }

                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
                                  MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
        }

        rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
        if (rx_sa) {
                rtnl_unlock();
                return -EBUSY;
        }

        rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
        if (!rx_sa) {
                rtnl_unlock();
                return -ENOMEM;
        }

        err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
                         secy->key_len, secy->icv_len);
        if (err < 0) {
                kfree(rx_sa);
                rtnl_unlock();
                return err;
        }

        if (tb_sa[MACSEC_SA_ATTR_PN]) {
                spin_lock_bh(&rx_sa->lock);
                rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
                spin_unlock_bh(&rx_sa->lock);
        }

        if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
                rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

        rx_sa->sc = rx_sc;

        /* If h/w offloading is available, propagate to the device */
        if (macsec_is_offloaded(netdev_priv(dev))) {
                const struct macsec_ops *ops;
                struct macsec_context ctx;

                ops = macsec_get_ops(netdev_priv(dev), &ctx);
                if (!ops) {
                        err = -EOPNOTSUPP;
                        goto cleanup;
                }

                ctx.sa.assoc_num = assoc_num;
                ctx.sa.rx_sa = rx_sa;
                ctx.secy = secy;
                memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
                       secy->key_len);

                err = macsec_offload(ops->mdo_add_rxsa, &ctx);
                memzero_explicit(ctx.sa.key, secy->key_len);
                if (err)
                        goto cleanup;
        }

        if (secy->xpn) {
                rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
                nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
                           MACSEC_SALT_LEN);
        }

        nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
        rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

        rtnl_unlock();

        return 0;

cleanup:
        macsec_rxsa_put(rx_sa);
        rtnl_unlock();
        return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
        if (!attrs[MACSEC_RXSC_ATTR_SCI])
                return false;

        if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
                if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
                        return false;
        }

        return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *dev;
        sci_t sci = MACSEC_UNDEF_SCI;
        struct nlattr **attrs = info->attrs;
        struct macsec_rx_sc *rx_sc;
        struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
        struct macsec_secy *secy;
        bool active = true;
        int ret;

        if (!attrs[MACSEC_ATTR_IFINDEX])
                return -EINVAL;

        if (parse_rxsc_config(attrs, tb_rxsc))
                return -EINVAL;

        if (!validate_add_rxsc(tb_rxsc))
                return -EINVAL;

        rtnl_lock();
        dev = get_dev_from_nl(genl_info_net(info), attrs);
        if (IS_ERR(dev)) {
                rtnl_unlock();
                return PTR_ERR(dev);
        }

        secy = &macsec_priv(dev)->secy;
        sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

        if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
                active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

        rx_sc = create_rx_sc(dev, sci, active);
        if (IS_ERR(rx_sc)) {
                rtnl_unlock();
                return PTR_ERR(rx_sc);
        }

        if (macsec_is_offloaded(netdev_priv(dev))) {
                const struct macsec_ops *ops;
                struct macsec_context ctx;

                ops = macsec_get_ops(netdev_priv(dev), &ctx);
                if (!ops) {
                        ret = -EOPNOTSUPP;
                        goto cleanup;
                }

                ctx.rx_sc = rx_sc;
                ctx.secy = secy;

                ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
                if (ret)
                        goto cleanup;
        }

        rtnl_unlock();

        return 0;

cleanup:
        del_rx_sc(secy, sci);
        free_rx_sc(rx_sc);
        rtnl_unlock();
        return ret;
}

1932 static bool validate_add_txsa(struct nlattr **attrs)
1933 {
1934 if (!attrs[MACSEC_SA_ATTR_AN] ||
1935 !attrs[MACSEC_SA_ATTR_PN] ||
1936 !attrs[MACSEC_SA_ATTR_KEY] ||
1937 !attrs[MACSEC_SA_ATTR_KEYID])
1938 return false;
1939
1940 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1941 return false;
1942
1943 if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
1944 return false;
1945
1946 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1947 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1948 return false;
1949 }
1950
1951 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1952 return false;
1953
1954 return true;
1955 }
1956
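/* MACSEC_CMD_ADD_TXSA handler. The key must match the cipher suite's key
 * length and the packet number attribute must be 32 bits wide, or 64 bits
 * plus SSCI and salt for XPN suites. The slot for the requested
 * association number must be free (-EBUSY otherwise), and the new SA is
 * only published with rcu_assign_pointer() once the offloading device, if
 * any, has accepted it. A typical iproute2 invocation looks like this
 * (key id and key purely illustrative):
 *
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 0123456789abcdef0123456789abcdef
 */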
1957 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1958 {
1959 struct net_device *dev;
1960 struct nlattr **attrs = info->attrs;
1961 struct macsec_secy *secy;
1962 struct macsec_tx_sc *tx_sc;
1963 struct macsec_tx_sa *tx_sa;
1964 unsigned char assoc_num;
1965 int pn_len;
1966 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1967 bool was_operational;
1968 int err;
1969
1970 if (!attrs[MACSEC_ATTR_IFINDEX])
1971 return -EINVAL;
1972
1973 if (parse_sa_config(attrs, tb_sa))
1974 return -EINVAL;
1975
1976 if (!validate_add_txsa(tb_sa))
1977 return -EINVAL;
1978
1979 rtnl_lock();
1980 dev = get_dev_from_nl(genl_info_net(info), attrs);
1981 if (IS_ERR(dev)) {
1982 rtnl_unlock();
1983 return PTR_ERR(dev);
1984 }
1985
1986 secy = &macsec_priv(dev)->secy;
1987 tx_sc = &secy->tx_sc;
1988
1989 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1990
1991 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1992 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
1993 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1994 rtnl_unlock();
1995 return -EINVAL;
1996 }
1997
1998 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1999 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2000 pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
2001 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2002 rtnl_unlock();
2003 return -EINVAL;
2004 }
2005
2006 if (secy->xpn) {
2007 if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2008 rtnl_unlock();
2009 return -EINVAL;
2010 }
2011
2012 if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2013 pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2014 nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2015 MACSEC_SALT_LEN);
2016 rtnl_unlock();
2017 return -EINVAL;
2018 }
2019 }
2020
2021 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2022 if (tx_sa) {
2023 rtnl_unlock();
2024 return -EBUSY;
2025 }
2026
2027 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2028 if (!tx_sa) {
2029 rtnl_unlock();
2030 return -ENOMEM;
2031 }
2032
2033 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2034 secy->key_len, secy->icv_len);
2035 if (err < 0) {
2036 kfree(tx_sa);
2037 rtnl_unlock();
2038 return err;
2039 }
2040
2041 spin_lock_bh(&tx_sa->lock);
2042 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2043 spin_unlock_bh(&tx_sa->lock);
2044
2045 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2046 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2047
2048 was_operational = secy->operational;
2049 if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2050 secy->operational = true;
2051
2052 /* If h/w offloading is available, propagate to the device */
2053 if (macsec_is_offloaded(netdev_priv(dev))) {
2054 const struct macsec_ops *ops;
2055 struct macsec_context ctx;
2056
2057 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2058 if (!ops) {
2059 err = -EOPNOTSUPP;
2060 goto cleanup;
2061 }
2062
2063 ctx.sa.assoc_num = assoc_num;
2064 ctx.sa.tx_sa = tx_sa;
2065 ctx.secy = secy;
2066 memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2067 secy->key_len);
2068
2069 err = macsec_offload(ops->mdo_add_txsa, &ctx);
2070 memzero_explicit(ctx.sa.key, secy->key_len);
2071 if (err)
2072 goto cleanup;
2073 }
2074
2075 if (secy->xpn) {
2076 tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2077 nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2078 MACSEC_SALT_LEN);
2079 }
2080
2081 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2082 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2083
2084 rtnl_unlock();
2085
2086 return 0;
2087
2088 cleanup:
2089 secy->operational = was_operational;
2090 macsec_txsa_put(tx_sa);
2091 rtnl_unlock();
2092 return err;
2093 }
2094
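/* MACSEC_CMD_DEL_RXSA handler. An SA must be deactivated before it can be
 * deleted (-EBUSY while rx_sa->active); the offloading device, if any, is
 * asked to drop the SA before the software pointer is cleared, so the
 * hardware entry never outlives the kernel state it mirrors.
 */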
2095 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2096 {
2097 struct nlattr **attrs = info->attrs;
2098 struct net_device *dev;
2099 struct macsec_secy *secy;
2100 struct macsec_rx_sc *rx_sc;
2101 struct macsec_rx_sa *rx_sa;
2102 u8 assoc_num;
2103 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2104 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2105 int ret;
2106
2107 if (!attrs[MACSEC_ATTR_IFINDEX])
2108 return -EINVAL;
2109
2110 if (parse_sa_config(attrs, tb_sa))
2111 return -EINVAL;
2112
2113 if (parse_rxsc_config(attrs, tb_rxsc))
2114 return -EINVAL;
2115
2116 rtnl_lock();
2117 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2118 &dev, &secy, &rx_sc, &assoc_num);
2119 if (IS_ERR(rx_sa)) {
2120 rtnl_unlock();
2121 return PTR_ERR(rx_sa);
2122 }
2123
2124 if (rx_sa->active) {
2125 rtnl_unlock();
2126 return -EBUSY;
2127 }
2128
2129 /* If h/w offloading is available, propagate to the device */
2130 if (macsec_is_offloaded(netdev_priv(dev))) {
2131 const struct macsec_ops *ops;
2132 struct macsec_context ctx;
2133
2134 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2135 if (!ops) {
2136 ret = -EOPNOTSUPP;
2137 goto cleanup;
2138 }
2139
2140 ctx.sa.assoc_num = assoc_num;
2141 ctx.sa.rx_sa = rx_sa;
2142 ctx.secy = secy;
2143
2144 ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2145 if (ret)
2146 goto cleanup;
2147 }
2148
2149 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2150 clear_rx_sa(rx_sa);
2151
2152 rtnl_unlock();
2153
2154 return 0;
2155
2156 cleanup:
2157 rtnl_unlock();
2158 return ret;
2159 }
2160
2161 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2162 {
2163 struct nlattr **attrs = info->attrs;
2164 struct net_device *dev;
2165 struct macsec_secy *secy;
2166 struct macsec_rx_sc *rx_sc;
2167 sci_t sci;
2168 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2169 int ret;
2170
2171 if (!attrs[MACSEC_ATTR_IFINDEX])
2172 return -EINVAL;
2173
2174 if (parse_rxsc_config(attrs, tb_rxsc))
2175 return -EINVAL;
2176
2177 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2178 return -EINVAL;
2179
2180 rtnl_lock();
2181 dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2182 if (IS_ERR(dev)) {
2183 rtnl_unlock();
2184 return PTR_ERR(dev);
2185 }
2186
2187 secy = &macsec_priv(dev)->secy;
2188 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2189
2190 rx_sc = del_rx_sc(secy, sci);
2191 if (!rx_sc) {
2192 rtnl_unlock();
2193 return -ENODEV;
2194 }
2195
2196 /* If h/w offloading is available, propagate to the device */
2197 if (macsec_is_offloaded(netdev_priv(dev))) {
2198 const struct macsec_ops *ops;
2199 struct macsec_context ctx;
2200
2201 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2202 if (!ops) {
2203 ret = -EOPNOTSUPP;
2204 goto cleanup;
2205 }
2206
2207 ctx.rx_sc = rx_sc;
2208 ctx.secy = secy;
2209 ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2210 if (ret)
2211 goto cleanup;
2212 }
2213
2214 free_rx_sc(rx_sc);
2215 rtnl_unlock();
2216
2217 return 0;
2218
2219 cleanup:
2220 rtnl_unlock();
2221 return ret;
2222 }
2223
2224 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2225 {
2226 struct nlattr **attrs = info->attrs;
2227 struct net_device *dev;
2228 struct macsec_secy *secy;
2229 struct macsec_tx_sc *tx_sc;
2230 struct macsec_tx_sa *tx_sa;
2231 u8 assoc_num;
2232 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2233 int ret;
2234
2235 if (!attrs[MACSEC_ATTR_IFINDEX])
2236 return -EINVAL;
2237
2238 if (parse_sa_config(attrs, tb_sa))
2239 return -EINVAL;
2240
2241 rtnl_lock();
2242 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2243 &dev, &secy, &tx_sc, &assoc_num);
2244 if (IS_ERR(tx_sa)) {
2245 rtnl_unlock();
2246 return PTR_ERR(tx_sa);
2247 }
2248
2249 if (tx_sa->active) {
2250 rtnl_unlock();
2251 return -EBUSY;
2252 }
2253
2254 /* If h/w offloading is available, propagate to the device */
2255 if (macsec_is_offloaded(netdev_priv(dev))) {
2256 const struct macsec_ops *ops;
2257 struct macsec_context ctx;
2258
2259 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2260 if (!ops) {
2261 ret = -EOPNOTSUPP;
2262 goto cleanup;
2263 }
2264
2265 ctx.sa.assoc_num = assoc_num;
2266 ctx.sa.tx_sa = tx_sa;
2267 ctx.secy = secy;
2268
2269 ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2270 if (ret)
2271 goto cleanup;
2272 }
2273
2274 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2275 clear_tx_sa(tx_sa);
2276
2277 rtnl_unlock();
2278
2279 return 0;
2280
2281 cleanup:
2282 rtnl_unlock();
2283 return ret;
2284 }
2285
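/* Common validation for SA updates (TX and RX): only the packet number
 * and the active flag may change. Key material (MACSEC_SA_ATTR_KEY,
 * MACSEC_SA_ATTR_KEYID) and the XPN parameters (SSCI, salt) are rejected
 * here, so rekeying amounts to installing a fresh SA under another
 * association number rather than mutating an existing one.
 */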
2286 static bool validate_upd_sa(struct nlattr **attrs)
2287 {
2288 if (!attrs[MACSEC_SA_ATTR_AN] ||
2289 attrs[MACSEC_SA_ATTR_KEY] ||
2290 attrs[MACSEC_SA_ATTR_KEYID] ||
2291 attrs[MACSEC_SA_ATTR_SSCI] ||
2292 attrs[MACSEC_SA_ATTR_SALT])
2293 return false;
2294
2295 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2296 return false;
2297
2298 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
2299 return false;
2300
2301 if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2302 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2303 return false;
2304 }
2305
2306 return true;
2307 }
2308
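/* MACSEC_CMD_UPD_TXSA handler. The previous packet number halves and the
 * old active/operational state are snapshotted so a failed offload update
 * can be rolled back; when the encoding SA is the one being updated, the
 * SecY's operational state follows the SA's active flag.
 */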
2309 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2310 {
2311 struct nlattr **attrs = info->attrs;
2312 struct net_device *dev;
2313 struct macsec_secy *secy;
2314 struct macsec_tx_sc *tx_sc;
2315 struct macsec_tx_sa *tx_sa;
2316 u8 assoc_num;
2317 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2318 bool was_operational, was_active;
2319 pn_t prev_pn;
2320 int ret = 0;
2321
2322 prev_pn.full64 = 0;
2323
2324 if (!attrs[MACSEC_ATTR_IFINDEX])
2325 return -EINVAL;
2326
2327 if (parse_sa_config(attrs, tb_sa))
2328 return -EINVAL;
2329
2330 if (!validate_upd_sa(tb_sa))
2331 return -EINVAL;
2332
2333 rtnl_lock();
2334 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2335 &dev, &secy, &tx_sc, &assoc_num);
2336 if (IS_ERR(tx_sa)) {
2337 rtnl_unlock();
2338 return PTR_ERR(tx_sa);
2339 }
2340
2341 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2342 int pn_len;
2343
2344 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2345 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2346 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2347 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2348 rtnl_unlock();
2349 return -EINVAL;
2350 }
2351
2352 spin_lock_bh(&tx_sa->lock);
2353 prev_pn = tx_sa->next_pn_halves;
2354 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2355 spin_unlock_bh(&tx_sa->lock);
2356 }
2357
2358 was_active = tx_sa->active;
2359 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2360 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2361
2362 was_operational = secy->operational;
2363 if (assoc_num == tx_sc->encoding_sa)
2364 secy->operational = tx_sa->active;
2365
2366 /* If h/w offloading is available, propagate to the device */
2367 if (macsec_is_offloaded(netdev_priv(dev))) {
2368 const struct macsec_ops *ops;
2369 struct macsec_context ctx;
2370
2371 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2372 if (!ops) {
2373 ret = -EOPNOTSUPP;
2374 goto cleanup;
2375 }
2376
2377 ctx.sa.assoc_num = assoc_num;
2378 ctx.sa.tx_sa = tx_sa;
2379 ctx.secy = secy;
2380
2381 ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2382 if (ret)
2383 goto cleanup;
2384 }
2385
2386 rtnl_unlock();
2387
2388 return 0;
2389
2390 cleanup:
2391 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2392 spin_lock_bh(&tx_sa->lock);
2393 tx_sa->next_pn_halves = prev_pn;
2394 spin_unlock_bh(&tx_sa->lock);
2395 }
2396 tx_sa->active = was_active;
2397 secy->operational = was_operational;
2398 rtnl_unlock();
2399 return ret;
2400 }
2401
2402 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2403 {
2404 struct nlattr **attrs = info->attrs;
2405 struct net_device *dev;
2406 struct macsec_secy *secy;
2407 struct macsec_rx_sc *rx_sc;
2408 struct macsec_rx_sa *rx_sa;
2409 u8 assoc_num;
2410 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2411 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2412 bool was_active;
2413 pn_t prev_pn;
2414 int ret = 0;
2415
2416 prev_pn.full64 = 0;
2417
2418 if (!attrs[MACSEC_ATTR_IFINDEX])
2419 return -EINVAL;
2420
2421 if (parse_rxsc_config(attrs, tb_rxsc))
2422 return -EINVAL;
2423
2424 if (parse_sa_config(attrs, tb_sa))
2425 return -EINVAL;
2426
2427 if (!validate_upd_sa(tb_sa))
2428 return -EINVAL;
2429
2430 rtnl_lock();
2431 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2432 &dev, &secy, &rx_sc, &assoc_num);
2433 if (IS_ERR(rx_sa)) {
2434 rtnl_unlock();
2435 return PTR_ERR(rx_sa);
2436 }
2437
2438 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2439 int pn_len;
2440
2441 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2442 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2443 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2444 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2445 rtnl_unlock();
2446 return -EINVAL;
2447 }
2448
2449 spin_lock_bh(&rx_sa->lock);
2450 prev_pn = rx_sa->next_pn_halves;
2451 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2452 spin_unlock_bh(&rx_sa->lock);
2453 }
2454
2455 was_active = rx_sa->active;
2456 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2457 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2458
2459 /* If h/w offloading is available, propagate to the device */
2460 if (macsec_is_offloaded(netdev_priv(dev))) {
2461 const struct macsec_ops *ops;
2462 struct macsec_context ctx;
2463
2464 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2465 if (!ops) {
2466 ret = -EOPNOTSUPP;
2467 goto cleanup;
2468 }
2469
2470 ctx.sa.assoc_num = assoc_num;
2471 ctx.sa.rx_sa = rx_sa;
2472 ctx.secy = secy;
2473
2474 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2475 if (ret)
2476 goto cleanup;
2477 }
2478
2479 rtnl_unlock();
2480 return 0;
2481
2482 cleanup:
2483 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2484 spin_lock_bh(&rx_sa->lock);
2485 rx_sa->next_pn_halves = prev_pn;
2486 spin_unlock_bh(&rx_sa->lock);
2487 }
2488 rx_sa->active = was_active;
2489 rtnl_unlock();
2490 return ret;
2491 }
2492
2493 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2494 {
2495 struct nlattr **attrs = info->attrs;
2496 struct net_device *dev;
2497 struct macsec_secy *secy;
2498 struct macsec_rx_sc *rx_sc;
2499 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2500 unsigned int prev_n_rx_sc;
2501 bool was_active;
2502 int ret;
2503
2504 if (!attrs[MACSEC_ATTR_IFINDEX])
2505 return -EINVAL;
2506
2507 if (parse_rxsc_config(attrs, tb_rxsc))
2508 return -EINVAL;
2509
2510 if (!validate_add_rxsc(tb_rxsc))
2511 return -EINVAL;
2512
2513 rtnl_lock();
2514 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2515 if (IS_ERR(rx_sc)) {
2516 rtnl_unlock();
2517 return PTR_ERR(rx_sc);
2518 }
2519
2520 was_active = rx_sc->active;
2521 prev_n_rx_sc = secy->n_rx_sc;
2522 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2523 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2524
2525 if (rx_sc->active != new)
2526 secy->n_rx_sc += new ? 1 : -1;
2527
2528 rx_sc->active = new;
2529 }
2530
2531 /* If h/w offloading is available, propagate to the device */
2532 if (macsec_is_offloaded(netdev_priv(dev))) {
2533 const struct macsec_ops *ops;
2534 struct macsec_context ctx;
2535
2536 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2537 if (!ops) {
2538 ret = -EOPNOTSUPP;
2539 goto cleanup;
2540 }
2541
2542 ctx.rx_sc = rx_sc;
2543 ctx.secy = secy;
2544
2545 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2546 if (ret)
2547 goto cleanup;
2548 }
2549
2550 rtnl_unlock();
2551
2552 return 0;
2553
2554 cleanup:
2555 secy->n_rx_sc = prev_n_rx_sc;
2556 rx_sc->active = was_active;
2557 rtnl_unlock();
2558 return ret;
2559 }
2560
2561 static bool macsec_is_configured(struct macsec_dev *macsec)
2562 {
2563 struct macsec_secy *secy = &macsec->secy;
2564 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2565 int i;
2566
2567 if (secy->rx_sc)
2568 return true;
2569
2570 for (i = 0; i < MACSEC_NUM_AN; i++)
2571 if (tx_sc->sa[i])
2572 return true;
2573
2574 return false;
2575 }
2576
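/* MACSEC_CMD_UPD_OFFLOAD handler: switches a SecY between software and
 * offloaded operation. The transition is refused while the interface is
 * up or once channels/SAs have been configured, as rule migration is not
 * supported; enabling offload maps to mdo_add_secy on the new provider,
 * disabling it to mdo_del_secy on the previous one. With iproute2, e.g.:
 *
 *   ip macsec offload macsec0 mac
 */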
2577 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2578 {
2579 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2580 enum macsec_offload offload, prev_offload;
2581 int (*func)(struct macsec_context *ctx);
2582 struct nlattr **attrs = info->attrs;
2583 struct net_device *dev;
2584 const struct macsec_ops *ops;
2585 struct macsec_context ctx;
2586 struct macsec_dev *macsec;
2587 int ret = 0;
2588
2589 if (!attrs[MACSEC_ATTR_IFINDEX])
2590 return -EINVAL;
2591
2592 if (!attrs[MACSEC_ATTR_OFFLOAD])
2593 return -EINVAL;
2594
2595 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2596 attrs[MACSEC_ATTR_OFFLOAD],
2597 macsec_genl_offload_policy, NULL))
2598 return -EINVAL;
2599
2600 rtnl_lock();
2601
2602 dev = get_dev_from_nl(genl_info_net(info), attrs);
2603 if (IS_ERR(dev)) {
2604 ret = PTR_ERR(dev);
2605 goto out;
2606 }
2607 macsec = macsec_priv(dev);
2608
2609 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
2610 ret = -EINVAL;
2611 goto out;
2612 }
2613
2614 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2615 if (macsec->offload == offload)
2616 goto out;
2617
2618 /* Check if the offloading mode is supported by the underlying layers */
2619 if (offload != MACSEC_OFFLOAD_OFF &&
2620 !macsec_check_offload(offload, macsec)) {
2621 ret = -EOPNOTSUPP;
2622 goto out;
2623 }
2624
2625 /* Check if the net device is busy. */
2626 if (netif_running(dev)) {
2627 ret = -EBUSY;
2628 goto out;
2629 }
2630
2631 prev_offload = macsec->offload;
2632 macsec->offload = offload;
2633
2634 /* Check if the device already has rules configured: we do not support
2635 * rule migration.
2636 */
2637 if (macsec_is_configured(macsec)) {
2638 ret = -EBUSY;
2639 goto rollback;
2640 }
2641
2642 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2643 macsec, &ctx);
2644 if (!ops) {
2645 ret = -EOPNOTSUPP;
2646 goto rollback;
2647 }
2648
2649 if (prev_offload == MACSEC_OFFLOAD_OFF)
2650 func = ops->mdo_add_secy;
2651 else
2652 func = ops->mdo_del_secy;
2653
2654 ctx.secy = &macsec->secy;
2655 ret = macsec_offload(func, &ctx);
2656 if (ret)
2657 goto rollback;
2658
2659 rtnl_unlock();
2660 return 0;
2661
2662 rollback:
2663 macsec->offload = prev_offload;
2664 out:
2665 rtnl_unlock();
2666 return ret;
2667 }
2668
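/* Statistics readers. For an offloaded SecY the counters are fetched from
 * the provider via the mdo_get_*_stats callbacks; in the software path
 * the per-CPU counters are summed here instead. Per-SA counters are plain
 * u32s, while the per-SC and per-SecY blocks are sampled under their
 * u64_stats syncp so 64-bit values read consistently on 32-bit machines.
 */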
2669 static void get_tx_sa_stats(struct net_device *dev, int an,
2670 struct macsec_tx_sa *tx_sa,
2671 struct macsec_tx_sa_stats *sum)
2672 {
2673 struct macsec_dev *macsec = macsec_priv(dev);
2674 int cpu;
2675
2676 /* If h/w offloading is available, propagate to the device */
2677 if (macsec_is_offloaded(macsec)) {
2678 const struct macsec_ops *ops;
2679 struct macsec_context ctx;
2680
2681 ops = macsec_get_ops(macsec, &ctx);
2682 if (ops) {
2683 ctx.sa.assoc_num = an;
2684 ctx.sa.tx_sa = tx_sa;
2685 ctx.stats.tx_sa_stats = sum;
2686 ctx.secy = &macsec_priv(dev)->secy;
2687 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2688 }
2689 return;
2690 }
2691
2692 for_each_possible_cpu(cpu) {
2693 const struct macsec_tx_sa_stats *stats =
2694 per_cpu_ptr(tx_sa->stats, cpu);
2695
2696 sum->OutPktsProtected += stats->OutPktsProtected;
2697 sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2698 }
2699 }
2700
2701 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2702 {
2703 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2704 sum->OutPktsProtected) ||
2705 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2706 sum->OutPktsEncrypted))
2707 return -EMSGSIZE;
2708
2709 return 0;
2710 }
2711
2712 static void get_rx_sa_stats(struct net_device *dev,
2713 struct macsec_rx_sc *rx_sc, int an,
2714 struct macsec_rx_sa *rx_sa,
2715 struct macsec_rx_sa_stats *sum)
2716 {
2717 struct macsec_dev *macsec = macsec_priv(dev);
2718 int cpu;
2719
2720 /* If h/w offloading is available, propagate to the device */
2721 if (macsec_is_offloaded(macsec)) {
2722 const struct macsec_ops *ops;
2723 struct macsec_context ctx;
2724
2725 ops = macsec_get_ops(macsec, &ctx);
2726 if (ops) {
2727 ctx.sa.assoc_num = an;
2728 ctx.sa.rx_sa = rx_sa;
2729 ctx.stats.rx_sa_stats = sum;
2730 ctx.secy = &macsec_priv(dev)->secy;
2731 ctx.rx_sc = rx_sc;
2732 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2733 }
2734 return;
2735 }
2736
2737 for_each_possible_cpu(cpu) {
2738 const struct macsec_rx_sa_stats *stats =
2739 per_cpu_ptr(rx_sa->stats, cpu);
2740
2741 sum->InPktsOK += stats->InPktsOK;
2742 sum->InPktsInvalid += stats->InPktsInvalid;
2743 sum->InPktsNotValid += stats->InPktsNotValid;
2744 sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2745 sum->InPktsUnusedSA += stats->InPktsUnusedSA;
2746 }
2747 }
2748
2749 static int copy_rx_sa_stats(struct sk_buff *skb,
2750 struct macsec_rx_sa_stats *sum)
2751 {
2752 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2753 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2754 sum->InPktsInvalid) ||
2755 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2756 sum->InPktsNotValid) ||
2757 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2758 sum->InPktsNotUsingSA) ||
2759 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2760 sum->InPktsUnusedSA))
2761 return -EMSGSIZE;
2762
2763 return 0;
2764 }
2765
2766 static void get_rx_sc_stats(struct net_device *dev,
2767 struct macsec_rx_sc *rx_sc,
2768 struct macsec_rx_sc_stats *sum)
2769 {
2770 struct macsec_dev *macsec = macsec_priv(dev);
2771 int cpu;
2772
2773 /* If h/w offloading is available, propagate to the device */
2774 if (macsec_is_offloaded(macsec)) {
2775 const struct macsec_ops *ops;
2776 struct macsec_context ctx;
2777
2778 ops = macsec_get_ops(macsec, &ctx);
2779 if (ops) {
2780 ctx.stats.rx_sc_stats = sum;
2781 ctx.secy = &macsec_priv(dev)->secy;
2782 ctx.rx_sc = rx_sc;
2783 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2784 }
2785 return;
2786 }
2787
2788 for_each_possible_cpu(cpu) {
2789 const struct pcpu_rx_sc_stats *stats;
2790 struct macsec_rx_sc_stats tmp;
2791 unsigned int start;
2792
2793 stats = per_cpu_ptr(rx_sc->stats, cpu);
2794 do {
2795 start = u64_stats_fetch_begin_irq(&stats->syncp);
2796 memcpy(&tmp, &stats->stats, sizeof(tmp));
2797 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2798
2799 sum->InOctetsValidated += tmp.InOctetsValidated;
2800 sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2801 sum->InPktsUnchecked += tmp.InPktsUnchecked;
2802 sum->InPktsDelayed += tmp.InPktsDelayed;
2803 sum->InPktsOK += tmp.InPktsOK;
2804 sum->InPktsInvalid += tmp.InPktsInvalid;
2805 sum->InPktsLate += tmp.InPktsLate;
2806 sum->InPktsNotValid += tmp.InPktsNotValid;
2807 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
2808 sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
2809 }
2810 }
2811
2812 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2813 {
2814 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2815 sum->InOctetsValidated,
2816 MACSEC_RXSC_STATS_ATTR_PAD) ||
2817 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2818 sum->InOctetsDecrypted,
2819 MACSEC_RXSC_STATS_ATTR_PAD) ||
2820 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2821 sum->InPktsUnchecked,
2822 MACSEC_RXSC_STATS_ATTR_PAD) ||
2823 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2824 sum->InPktsDelayed,
2825 MACSEC_RXSC_STATS_ATTR_PAD) ||
2826 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2827 sum->InPktsOK,
2828 MACSEC_RXSC_STATS_ATTR_PAD) ||
2829 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2830 sum->InPktsInvalid,
2831 MACSEC_RXSC_STATS_ATTR_PAD) ||
2832 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2833 sum->InPktsLate,
2834 MACSEC_RXSC_STATS_ATTR_PAD) ||
2835 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2836 sum->InPktsNotValid,
2837 MACSEC_RXSC_STATS_ATTR_PAD) ||
2838 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2839 sum->InPktsNotUsingSA,
2840 MACSEC_RXSC_STATS_ATTR_PAD) ||
2841 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2842 sum->InPktsUnusedSA,
2843 MACSEC_RXSC_STATS_ATTR_PAD))
2844 return -EMSGSIZE;
2845
2846 return 0;
2847 }
2848
2849 static void get_tx_sc_stats(struct net_device *dev,
2850 struct macsec_tx_sc_stats *sum)
2851 {
2852 struct macsec_dev *macsec = macsec_priv(dev);
2853 int cpu;
2854
2855 /* If h/w offloading is available, propagate to the device */
2856 if (macsec_is_offloaded(macsec)) {
2857 const struct macsec_ops *ops;
2858 struct macsec_context ctx;
2859
2860 ops = macsec_get_ops(macsec, &ctx);
2861 if (ops) {
2862 ctx.stats.tx_sc_stats = sum;
2863 ctx.secy = &macsec_priv(dev)->secy;
2864 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2865 }
2866 return;
2867 }
2868
2869 for_each_possible_cpu(cpu) {
2870 const struct pcpu_tx_sc_stats *stats;
2871 struct macsec_tx_sc_stats tmp;
2872 unsigned int start;
2873
2874 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2875 do {
2876 start = u64_stats_fetch_begin_irq(&stats->syncp);
2877 memcpy(&tmp, &stats->stats, sizeof(tmp));
2878 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2879
2880 sum->OutPktsProtected += tmp.OutPktsProtected;
2881 sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
2882 sum->OutOctetsProtected += tmp.OutOctetsProtected;
2883 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2884 }
2885 }
2886
2887 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2888 {
2889 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2890 sum->OutPktsProtected,
2891 MACSEC_TXSC_STATS_ATTR_PAD) ||
2892 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2893 sum->OutPktsEncrypted,
2894 MACSEC_TXSC_STATS_ATTR_PAD) ||
2895 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2896 sum->OutOctetsProtected,
2897 MACSEC_TXSC_STATS_ATTR_PAD) ||
2898 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2899 sum->OutOctetsEncrypted,
2900 MACSEC_TXSC_STATS_ATTR_PAD))
2901 return -EMSGSIZE;
2902
2903 return 0;
2904 }
2905
2906 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2907 {
2908 struct macsec_dev *macsec = macsec_priv(dev);
2909 int cpu;
2910
2911 /* If h/w offloading is available, propagate to the device */
2912 if (macsec_is_offloaded(macsec)) {
2913 const struct macsec_ops *ops;
2914 struct macsec_context ctx;
2915
2916 ops = macsec_get_ops(macsec, &ctx);
2917 if (ops) {
2918 ctx.stats.dev_stats = sum;
2919 ctx.secy = &macsec_priv(dev)->secy;
2920 macsec_offload(ops->mdo_get_dev_stats, &ctx);
2921 }
2922 return;
2923 }
2924
2925 for_each_possible_cpu(cpu) {
2926 const struct pcpu_secy_stats *stats;
2927 struct macsec_dev_stats tmp;
2928 unsigned int start;
2929
2930 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2931 do {
2932 start = u64_stats_fetch_begin_irq(&stats->syncp);
2933 memcpy(&tmp, &stats->stats, sizeof(tmp));
2934 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2935
2936 sum->OutPktsUntagged += tmp.OutPktsUntagged;
2937 sum->InPktsUntagged += tmp.InPktsUntagged;
2938 sum->OutPktsTooLong += tmp.OutPktsTooLong;
2939 sum->InPktsNoTag += tmp.InPktsNoTag;
2940 sum->InPktsBadTag += tmp.InPktsBadTag;
2941 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2942 sum->InPktsNoSCI += tmp.InPktsNoSCI;
2943 sum->InPktsOverrun += tmp.InPktsOverrun;
2944 }
2945 }
2946
2947 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2948 {
2949 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2950 sum->OutPktsUntagged,
2951 MACSEC_SECY_STATS_ATTR_PAD) ||
2952 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2953 sum->InPktsUntagged,
2954 MACSEC_SECY_STATS_ATTR_PAD) ||
2955 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2956 sum->OutPktsTooLong,
2957 MACSEC_SECY_STATS_ATTR_PAD) ||
2958 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2959 sum->InPktsNoTag,
2960 MACSEC_SECY_STATS_ATTR_PAD) ||
2961 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2962 sum->InPktsBadTag,
2963 MACSEC_SECY_STATS_ATTR_PAD) ||
2964 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2965 sum->InPktsUnknownSCI,
2966 MACSEC_SECY_STATS_ATTR_PAD) ||
2967 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2968 sum->InPktsNoSCI,
2969 MACSEC_SECY_STATS_ATTR_PAD) ||
2970 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
2971 sum->InPktsOverrun,
2972 MACSEC_SECY_STATS_ATTR_PAD))
2973 return -EMSGSIZE;
2974
2975 return 0;
2976 }
2977
2978 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2979 {
2980 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2981 struct nlattr *secy_nest = nla_nest_start_noflag(skb,
2982 MACSEC_ATTR_SECY);
2983 u64 csid;
2984
2985 if (!secy_nest)
2986 return 1;
2987
2988 switch (secy->key_len) {
2989 case MACSEC_GCM_AES_128_SAK_LEN:
2990 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
2991 break;
2992 case MACSEC_GCM_AES_256_SAK_LEN:
2993 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
2994 break;
2995 default:
2996 goto cancel;
2997 }
2998
2999 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
3000 MACSEC_SECY_ATTR_PAD) ||
3001 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
3002 csid, MACSEC_SECY_ATTR_PAD) ||
3003 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
3004 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
3005 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
3006 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3007 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3008 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3009 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3010 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3011 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3012 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3013 goto cancel;
3014
3015 if (secy->replay_protect) {
3016 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3017 goto cancel;
3018 }
3019
3020 nla_nest_end(skb, secy_nest);
3021 return 0;
3022
3023 cancel:
3024 nla_nest_cancel(skb, secy_nest);
3025 return 1;
3026 }
3027
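/* Emits one NLM_F_MULTI message per SecY for a MACSEC_CMD_GET_TXSC dump:
 * ifindex, offload nest, SecY configuration, TX-SC and SecY statistics,
 * the list of TX SAs, and finally the RX SCs, each with its own
 * statistics and RX SA list. Nested list entries are numbered from 1,
 * and every nest is cancelled on failure so no partial object is left in
 * the skb.
 */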
3028 static noinline_for_stack int
3029 dump_secy(struct macsec_secy *secy, struct net_device *dev,
3030 struct sk_buff *skb, struct netlink_callback *cb)
3031 {
3032 struct macsec_tx_sc_stats tx_sc_stats = {0, };
3033 struct macsec_tx_sa_stats tx_sa_stats = {0, };
3034 struct macsec_rx_sc_stats rx_sc_stats = {0, };
3035 struct macsec_rx_sa_stats rx_sa_stats = {0, };
3036 struct macsec_dev *macsec = netdev_priv(dev);
3037 struct macsec_dev_stats dev_stats = {0, };
3038 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3039 struct nlattr *txsa_list, *rxsc_list;
3040 struct macsec_rx_sc *rx_sc;
3041 struct nlattr *attr;
3042 void *hdr;
3043 int i, j;
3044
3045 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3046 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3047 if (!hdr)
3048 return -EMSGSIZE;
3049
3050 genl_dump_check_consistent(cb, hdr);
3051
3052 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3053 goto nla_put_failure;
3054
3055 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3056 if (!attr)
3057 goto nla_put_failure;
3058 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3059 goto nla_put_failure;
3060 nla_nest_end(skb, attr);
3061
3062 if (nla_put_secy(secy, skb))
3063 goto nla_put_failure;
3064
3065 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3066 if (!attr)
3067 goto nla_put_failure;
3068
3069 get_tx_sc_stats(dev, &tx_sc_stats);
3070 if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3071 nla_nest_cancel(skb, attr);
3072 goto nla_put_failure;
3073 }
3074 nla_nest_end(skb, attr);
3075
3076 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3077 if (!attr)
3078 goto nla_put_failure;
3079 get_secy_stats(dev, &dev_stats);
3080 if (copy_secy_stats(skb, &dev_stats)) {
3081 nla_nest_cancel(skb, attr);
3082 goto nla_put_failure;
3083 }
3084 nla_nest_end(skb, attr);
3085
3086 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3087 if (!txsa_list)
3088 goto nla_put_failure;
3089 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3090 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3091 struct nlattr *txsa_nest;
3092 u64 pn;
3093 int pn_len;
3094
3095 if (!tx_sa)
3096 continue;
3097
3098 txsa_nest = nla_nest_start_noflag(skb, j++);
3099 if (!txsa_nest) {
3100 nla_nest_cancel(skb, txsa_list);
3101 goto nla_put_failure;
3102 }
3103
3104 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3105 if (!attr) {
3106 nla_nest_cancel(skb, txsa_nest);
3107 nla_nest_cancel(skb, txsa_list);
3108 goto nla_put_failure;
3109 }
3110 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3111 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3112 if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3113 nla_nest_cancel(skb, attr);
3114 nla_nest_cancel(skb, txsa_nest);
3115 nla_nest_cancel(skb, txsa_list);
3116 goto nla_put_failure;
3117 }
3118 nla_nest_end(skb, attr);
3119
3120 if (secy->xpn) {
3121 pn = tx_sa->next_pn;
3122 pn_len = MACSEC_XPN_PN_LEN;
3123 } else {
3124 pn = tx_sa->next_pn_halves.lower;
3125 pn_len = MACSEC_DEFAULT_PN_LEN;
3126 }
3127
3128 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3129 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3130 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3131 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3132 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3133 nla_nest_cancel(skb, txsa_nest);
3134 nla_nest_cancel(skb, txsa_list);
3135 goto nla_put_failure;
3136 }
3137
3138 nla_nest_end(skb, txsa_nest);
3139 }
3140 nla_nest_end(skb, txsa_list);
3141
3142 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3143 if (!rxsc_list)
3144 goto nla_put_failure;
3145
3146 j = 1;
3147 for_each_rxsc_rtnl(secy, rx_sc) {
3148 int k;
3149 struct nlattr *rxsa_list;
3150 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3151
3152 if (!rxsc_nest) {
3153 nla_nest_cancel(skb, rxsc_list);
3154 goto nla_put_failure;
3155 }
3156
3157 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3158 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3159 MACSEC_RXSC_ATTR_PAD)) {
3160 nla_nest_cancel(skb, rxsc_nest);
3161 nla_nest_cancel(skb, rxsc_list);
3162 goto nla_put_failure;
3163 }
3164
3165 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3166 if (!attr) {
3167 nla_nest_cancel(skb, rxsc_nest);
3168 nla_nest_cancel(skb, rxsc_list);
3169 goto nla_put_failure;
3170 }
3171 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3172 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3173 if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3174 nla_nest_cancel(skb, attr);
3175 nla_nest_cancel(skb, rxsc_nest);
3176 nla_nest_cancel(skb, rxsc_list);
3177 goto nla_put_failure;
3178 }
3179 nla_nest_end(skb, attr);
3180
3181 rxsa_list = nla_nest_start_noflag(skb,
3182 MACSEC_RXSC_ATTR_SA_LIST);
3183 if (!rxsa_list) {
3184 nla_nest_cancel(skb, rxsc_nest);
3185 nla_nest_cancel(skb, rxsc_list);
3186 goto nla_put_failure;
3187 }
3188
3189 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3190 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3191 struct nlattr *rxsa_nest;
3192 u64 pn;
3193 int pn_len;
3194
3195 if (!rx_sa)
3196 continue;
3197
3198 rxsa_nest = nla_nest_start_noflag(skb, k++);
3199 if (!rxsa_nest) {
3200 nla_nest_cancel(skb, rxsa_list);
3201 nla_nest_cancel(skb, rxsc_nest);
3202 nla_nest_cancel(skb, rxsc_list);
3203 goto nla_put_failure;
3204 }
3205
3206 attr = nla_nest_start_noflag(skb,
3207 MACSEC_SA_ATTR_STATS);
3208 if (!attr) {
3209 nla_nest_cancel(skb, rxsa_list);
3210 nla_nest_cancel(skb, rxsc_nest);
3211 nla_nest_cancel(skb, rxsc_list);
3212 goto nla_put_failure;
3213 }
3214 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3215 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3216 if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3217 nla_nest_cancel(skb, attr);
3218 nla_nest_cancel(skb, rxsa_list);
3219 nla_nest_cancel(skb, rxsc_nest);
3220 nla_nest_cancel(skb, rxsc_list);
3221 goto nla_put_failure;
3222 }
3223 nla_nest_end(skb, attr);
3224
3225 if (secy->xpn) {
3226 pn = rx_sa->next_pn;
3227 pn_len = MACSEC_XPN_PN_LEN;
3228 } else {
3229 pn = rx_sa->next_pn_halves.lower;
3230 pn_len = MACSEC_DEFAULT_PN_LEN;
3231 }
3232
3233 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3234 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3235 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3236 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3237 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3238 nla_nest_cancel(skb, rxsa_nest);
3239 nla_nest_cancel(skb, rxsc_nest);
3240 nla_nest_cancel(skb, rxsc_list);
3241 goto nla_put_failure;
3242 }
3243 nla_nest_end(skb, rxsa_nest);
3244 }
3245
3246 nla_nest_end(skb, rxsa_list);
3247 nla_nest_end(skb, rxsc_nest);
3248 }
3249
3250 nla_nest_end(skb, rxsc_list);
3251
3252 genlmsg_end(skb, hdr);
3253
3254 return 0;
3255
3256 nla_put_failure:
3257 genlmsg_cancel(skb, hdr);
3258 return -EMSGSIZE;
3259 }
3260
3261 static int macsec_generation = 1; /* protected by RTNL */
3262
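/* Dump callback for MACSEC_CMD_GET_TXSC. cb->args[0] holds the number of
 * devices already walked, so a dump that fills the skb resumes where it
 * stopped; macsec_generation, bumped under RTNL whenever MACsec links
 * come or go, feeds genl_dump_check_consistent() so userspace sees
 * NLM_F_DUMP_INTR if the device set changed mid-dump.
 */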
3263 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3264 {
3265 struct net *net = sock_net(skb->sk);
3266 struct net_device *dev;
3267 int dev_idx, d;
3268
3269 dev_idx = cb->args[0];
3270
3271 d = 0;
3272 rtnl_lock();
3273
3274 cb->seq = macsec_generation;
3275
3276 for_each_netdev(net, dev) {
3277 struct macsec_secy *secy;
3278
3279 if (d < dev_idx)
3280 goto next;
3281
3282 if (!netif_is_macsec(dev))
3283 goto next;
3284
3285 secy = &macsec_priv(dev)->secy;
3286 if (dump_secy(secy, dev, skb, cb) < 0)
3287 goto done;
3288 next:
3289 d++;
3290 }
3291
3292 done:
3293 rtnl_unlock();
3294 cb->args[0] = d;
3295 return skb->len;
3296 }
3297
3298 static const struct genl_small_ops macsec_genl_ops[] = {
3299 {
3300 .cmd = MACSEC_CMD_GET_TXSC,
3301 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3302 .dumpit = macsec_dump_txsc,
3303 },
3304 {
3305 .cmd = MACSEC_CMD_ADD_RXSC,
3306 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3307 .doit = macsec_add_rxsc,
3308 .flags = GENL_ADMIN_PERM,
3309 },
3310 {
3311 .cmd = MACSEC_CMD_DEL_RXSC,
3312 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3313 .doit = macsec_del_rxsc,
3314 .flags = GENL_ADMIN_PERM,
3315 },
3316 {
3317 .cmd = MACSEC_CMD_UPD_RXSC,
3318 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3319 .doit = macsec_upd_rxsc,
3320 .flags = GENL_ADMIN_PERM,
3321 },
3322 {
3323 .cmd = MACSEC_CMD_ADD_TXSA,
3324 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3325 .doit = macsec_add_txsa,
3326 .flags = GENL_ADMIN_PERM,
3327 },
3328 {
3329 .cmd = MACSEC_CMD_DEL_TXSA,
3330 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3331 .doit = macsec_del_txsa,
3332 .flags = GENL_ADMIN_PERM,
3333 },
3334 {
3335 .cmd = MACSEC_CMD_UPD_TXSA,
3336 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3337 .doit = macsec_upd_txsa,
3338 .flags = GENL_ADMIN_PERM,
3339 },
3340 {
3341 .cmd = MACSEC_CMD_ADD_RXSA,
3342 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3343 .doit = macsec_add_rxsa,
3344 .flags = GENL_ADMIN_PERM,
3345 },
3346 {
3347 .cmd = MACSEC_CMD_DEL_RXSA,
3348 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3349 .doit = macsec_del_rxsa,
3350 .flags = GENL_ADMIN_PERM,
3351 },
3352 {
3353 .cmd = MACSEC_CMD_UPD_RXSA,
3354 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3355 .doit = macsec_upd_rxsa,
3356 .flags = GENL_ADMIN_PERM,
3357 },
3358 {
3359 .cmd = MACSEC_CMD_UPD_OFFLOAD,
3360 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3361 .doit = macsec_upd_offload,
3362 .flags = GENL_ADMIN_PERM,
3363 },
3364 };
3365
3366 static struct genl_family macsec_fam __ro_after_init = {
3367 .name = MACSEC_GENL_NAME,
3368 .hdrsize = 0,
3369 .version = MACSEC_GENL_VERSION,
3370 .maxattr = MACSEC_ATTR_MAX,
3371 .policy = macsec_genl_policy,
3372 .netnsok = true,
3373 .module = THIS_MODULE,
3374 .small_ops = macsec_genl_ops,
3375 .n_small_ops = ARRAY_SIZE(macsec_genl_ops),
3376 };
3377
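/* Transmit path. Offloaded SecYs hand the frame straight to the real
 * device untouched. Otherwise, with protect_frames disabled the frame
 * bypasses the SecTAG entirely and is counted as OutPktsUntagged (clause
 * 10.5); a non-operational SecY drops it; in the normal case
 * macsec_encrypt() tags and encrypts the skb, potentially asynchronously
 * (-EINPROGRESS means the crypto completion will finish the transmit
 * later).
 */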
3378 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3379 struct net_device *dev)
3380 {
3381 struct macsec_dev *macsec = netdev_priv(dev);
3382 struct macsec_secy *secy = &macsec->secy;
3383 struct pcpu_secy_stats *secy_stats;
3384 int ret, len;
3385
3386 if (macsec_is_offloaded(netdev_priv(dev))) {
3387 skb->dev = macsec->real_dev;
3388 return dev_queue_xmit(skb);
3389 }
3390
3391 /* 10.5 */
3392 if (!secy->protect_frames) {
3393 secy_stats = this_cpu_ptr(macsec->stats);
3394 u64_stats_update_begin(&secy_stats->syncp);
3395 secy_stats->stats.OutPktsUntagged++;
3396 u64_stats_update_end(&secy_stats->syncp);
3397 skb->dev = macsec->real_dev;
3398 len = skb->len;
3399 ret = dev_queue_xmit(skb);
3400 count_tx(dev, ret, len);
3401 return ret;
3402 }
3403
3404 if (!secy->operational) {
3405 kfree_skb(skb);
3406 dev->stats.tx_dropped++;
3407 return NETDEV_TX_OK;
3408 }
3409
3410 skb = macsec_encrypt(skb, dev);
3411 if (IS_ERR(skb)) {
3412 if (PTR_ERR(skb) != -EINPROGRESS)
3413 dev->stats.tx_dropped++;
3414 return NETDEV_TX_OK;
3415 }
3416
3417 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3418
3419 macsec_encrypt_finish(skb, dev);
3420 len = skb->len;
3421 ret = dev_queue_xmit(skb);
3422 count_tx(dev, ret, len);
3423 return ret;
3424 }
3425
3426 #define MACSEC_FEATURES \
3427 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3428
3429 static int macsec_dev_init(struct net_device *dev)
3430 {
3431 struct macsec_dev *macsec = macsec_priv(dev);
3432 struct net_device *real_dev = macsec->real_dev;
3433 int err;
3434
3435 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3436 if (!dev->tstats)
3437 return -ENOMEM;
3438
3439 err = gro_cells_init(&macsec->gro_cells, dev);
3440 if (err) {
3441 free_percpu(dev->tstats);
3442 return err;
3443 }
3444
3445 dev->features = real_dev->features & MACSEC_FEATURES;
3446 dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
3447
3448 dev->needed_headroom = real_dev->needed_headroom +
3449 MACSEC_NEEDED_HEADROOM;
3450 dev->needed_tailroom = real_dev->needed_tailroom +
3451 MACSEC_NEEDED_TAILROOM;
3452
3453 if (is_zero_ether_addr(dev->dev_addr))
3454 eth_hw_addr_inherit(dev, real_dev);
3455 if (is_zero_ether_addr(dev->broadcast))
3456 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3457
3458 return 0;
3459 }
3460
3461 static void macsec_dev_uninit(struct net_device *dev)
3462 {
3463 struct macsec_dev *macsec = macsec_priv(dev);
3464
3465 gro_cells_destroy(&macsec->gro_cells);
3466 free_percpu(dev->tstats);
3467 }
3468
3469 static netdev_features_t macsec_fix_features(struct net_device *dev,
3470 netdev_features_t features)
3471 {
3472 struct macsec_dev *macsec = macsec_priv(dev);
3473 struct net_device *real_dev = macsec->real_dev;
3474
3475 features &= (real_dev->features & MACSEC_FEATURES) |
3476 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3477 features |= NETIF_F_LLTX;
3478
3479 return features;
3480 }
3481
3482 static int macsec_dev_open(struct net_device *dev)
3483 {
3484 struct macsec_dev *macsec = macsec_priv(dev);
3485 struct net_device *real_dev = macsec->real_dev;
3486 int err;
3487
3488 err = dev_uc_add(real_dev, dev->dev_addr);
3489 if (err < 0)
3490 return err;
3491
3492 if (dev->flags & IFF_ALLMULTI) {
3493 err = dev_set_allmulti(real_dev, 1);
3494 if (err < 0)
3495 goto del_unicast;
3496 }
3497
3498 if (dev->flags & IFF_PROMISC) {
3499 err = dev_set_promiscuity(real_dev, 1);
3500 if (err < 0)
3501 goto clear_allmulti;
3502 }
3503
3504 /* If h/w offloading is available, propagate to the device */
3505 if (macsec_is_offloaded(macsec)) {
3506 const struct macsec_ops *ops;
3507 struct macsec_context ctx;
3508
3509 ops = macsec_get_ops(netdev_priv(dev), &ctx);
3510 if (!ops) {
3511 err = -EOPNOTSUPP;
3512 goto clear_allmulti;
3513 }
3514
3515 ctx.secy = &macsec->secy;
3516 err = macsec_offload(ops->mdo_dev_open, &ctx);
3517 if (err)
3518 goto clear_allmulti;
3519 }
3520
3521 if (netif_carrier_ok(real_dev))
3522 netif_carrier_on(dev);
3523
3524 return 0;
3525 clear_allmulti:
3526 if (dev->flags & IFF_ALLMULTI)
3527 dev_set_allmulti(real_dev, -1);
3528 del_unicast:
3529 dev_uc_del(real_dev, dev->dev_addr);
3530 netif_carrier_off(dev);
3531 return err;
3532 }
3533
3534 static int macsec_dev_stop(struct net_device *dev)
3535 {
3536 struct macsec_dev *macsec = macsec_priv(dev);
3537 struct net_device *real_dev = macsec->real_dev;
3538
3539 netif_carrier_off(dev);
3540
3541 /* If h/w offloading is available, propagate to the device */
3542 if (macsec_is_offloaded(macsec)) {
3543 const struct macsec_ops *ops;
3544 struct macsec_context ctx;
3545
3546 ops = macsec_get_ops(macsec, &ctx);
3547 if (ops) {
3548 ctx.secy = &macsec->secy;
3549 macsec_offload(ops->mdo_dev_stop, &ctx);
3550 }
3551 }
3552
3553 dev_mc_unsync(real_dev, dev);
3554 dev_uc_unsync(real_dev, dev);
3555
3556 if (dev->flags & IFF_ALLMULTI)
3557 dev_set_allmulti(real_dev, -1);
3558
3559 if (dev->flags & IFF_PROMISC)
3560 dev_set_promiscuity(real_dev, -1);
3561
3562 dev_uc_del(real_dev, dev->dev_addr);
3563
3564 return 0;
3565 }
3566
3567 static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3568 {
3569 struct net_device *real_dev = macsec_priv(dev)->real_dev;
3570
3571 if (!(dev->flags & IFF_UP))
3572 return;
3573
3574 if (change & IFF_ALLMULTI)
3575 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3576
3577 if (change & IFF_PROMISC)
3578 dev_set_promiscuity(real_dev,
3579 dev->flags & IFF_PROMISC ? 1 : -1);
3580 }
3581
3582 static void macsec_dev_set_rx_mode(struct net_device *dev)
3583 {
3584 struct net_device *real_dev = macsec_priv(dev)->real_dev;
3585
3586 dev_mc_sync(real_dev, dev);
3587 dev_uc_sync(real_dev, dev);
3588 }
3589
3590 static int macsec_set_mac_address(struct net_device *dev, void *p)
3591 {
3592 struct macsec_dev *macsec = macsec_priv(dev);
3593 struct net_device *real_dev = macsec->real_dev;
3594 struct sockaddr *addr = p;
3595 int err;
3596
3597 if (!is_valid_ether_addr(addr->sa_data))
3598 return -EADDRNOTAVAIL;
3599
3600 if (!(dev->flags & IFF_UP))
3601 goto out;
3602
3603 err = dev_uc_add(real_dev, addr->sa_data);
3604 if (err < 0)
3605 return err;
3606
3607 dev_uc_del(real_dev, dev->dev_addr);
3608
3609 out:
3610 ether_addr_copy(dev->dev_addr, addr->sa_data);
3611 macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
3612
3613 /* If h/w offloading is available, propagate to the device */
3614 if (macsec_is_offloaded(macsec)) {
3615 const struct macsec_ops *ops;
3616 struct macsec_context ctx;
3617
3618 ops = macsec_get_ops(macsec, &ctx);
3619 if (ops) {
3620 ctx.secy = &macsec->secy;
3621 macsec_offload(ops->mdo_upd_secy, &ctx);
3622 }
3623 }
3624
3625 return 0;
3626 }
3627
3628 static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3629 {
3630 struct macsec_dev *macsec = macsec_priv(dev);
3631 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3632
3633 if (macsec->real_dev->mtu - extra < new_mtu)
3634 return -ERANGE;
3635
3636 dev->mtu = new_mtu;
3637
3638 return 0;
3639 }
3640
3641 static void macsec_get_stats64(struct net_device *dev,
3642 struct rtnl_link_stats64 *s)
3643 {
3644 if (!dev->tstats)
3645 return;
3646
3647 dev_fetch_sw_netstats(s, dev->tstats);
3648
3649 s->rx_dropped = dev->stats.rx_dropped;
3650 s->tx_dropped = dev->stats.tx_dropped;
3651 }
3652
3653 static int macsec_get_iflink(const struct net_device *dev)
3654 {
3655 return macsec_priv(dev)->real_dev->ifindex;
3656 }
3657
3658 static const struct net_device_ops macsec_netdev_ops = {
3659 .ndo_init = macsec_dev_init,
3660 .ndo_uninit = macsec_dev_uninit,
3661 .ndo_open = macsec_dev_open,
3662 .ndo_stop = macsec_dev_stop,
3663 .ndo_fix_features = macsec_fix_features,
3664 .ndo_change_mtu = macsec_change_mtu,
3665 .ndo_set_rx_mode = macsec_dev_set_rx_mode,
3666 .ndo_change_rx_flags = macsec_dev_change_rx_flags,
3667 .ndo_set_mac_address = macsec_set_mac_address,
3668 .ndo_start_xmit = macsec_start_xmit,
3669 .ndo_get_stats64 = macsec_get_stats64,
3670 .ndo_get_iflink = macsec_get_iflink,
3671 };
3672
3673 static const struct device_type macsec_type = {
3674 .name = "macsec",
3675 };
3676
3677 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3678 [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
3679 [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
3680 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3681 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3682 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3683 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3684 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3685 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3686 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3687 [IFLA_MACSEC_ES] = { .type = NLA_U8 },
3688 [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3689 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3690 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3691 [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
3692 };
3693
3694 static void macsec_free_netdev(struct net_device *dev)
3695 {
3696 struct macsec_dev *macsec = macsec_priv(dev);
3697
3698 free_percpu(macsec->stats);
3699 free_percpu(macsec->secy.tx_sc.stats);
3700
3701 }
3702
3703 static void macsec_setup(struct net_device *dev)
3704 {
3705 ether_setup(dev);
3706 dev->min_mtu = 0;
3707 dev->max_mtu = ETH_MAX_MTU;
3708 dev->priv_flags |= IFF_NO_QUEUE;
3709 dev->netdev_ops = &macsec_netdev_ops;
3710 dev->needs_free_netdev = true;
3711 dev->priv_destructor = macsec_free_netdev;
3712 SET_NETDEV_DEVTYPE(dev, &macsec_type);
3713
3714 eth_zero_addr(dev->broadcast);
3715 }
3716
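/* Applies the IFLA_MACSEC_* attributes shared by newlink and changelink.
 * Picking a cipher suite sets both the expected key length and the XPN
 * flag, and changing the encoding SA recomputes the SecY's operational
 * state; for XPN suites, replay windows above
 * MACSEC_XPN_MAX_REPLAY_WINDOW are rejected (IEEE 802.1AEbw-2013 10.7.8).
 */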
3717 static int macsec_changelink_common(struct net_device *dev,
3718 struct nlattr *data[])
3719 {
3720 struct macsec_secy *secy;
3721 struct macsec_tx_sc *tx_sc;
3722
3723 secy = &macsec_priv(dev)->secy;
3724 tx_sc = &secy->tx_sc;
3725
3726 if (data[IFLA_MACSEC_ENCODING_SA]) {
3727 struct macsec_tx_sa *tx_sa;
3728
3729 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3730 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3731
3732 secy->operational = tx_sa && tx_sa->active;
3733 }
3734
3735 if (data[IFLA_MACSEC_ENCRYPT])
3736 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3737
3738 if (data[IFLA_MACSEC_PROTECT])
3739 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3740
3741 if (data[IFLA_MACSEC_INC_SCI])
3742 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3743
3744 if (data[IFLA_MACSEC_ES])
3745 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3746
3747 if (data[IFLA_MACSEC_SCB])
3748 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3749
3750 if (data[IFLA_MACSEC_REPLAY_PROTECT])
3751 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3752
3753 if (data[IFLA_MACSEC_VALIDATION])
3754 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3755
3756 if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3757 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3758 case MACSEC_CIPHER_ID_GCM_AES_128:
3759 case MACSEC_DEFAULT_CIPHER_ID:
3760 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3761 secy->xpn = false;
3762 break;
3763 case MACSEC_CIPHER_ID_GCM_AES_256:
3764 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3765 secy->xpn = false;
3766 break;
3767 case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3768 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3769 secy->xpn = true;
3770 break;
3771 case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3772 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3773 secy->xpn = true;
3774 break;
3775 default:
3776 return -EINVAL;
3777 }
3778 }
3779
3780 if (data[IFLA_MACSEC_WINDOW]) {
3781 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3782
3783 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
3784 * for XPN cipher suites */
3785 if (secy->xpn &&
3786 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
3787 return -EINVAL;
3788 }
3789
3790 return 0;
3791 }
3792
3793 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3794 struct nlattr *data[],
3795 struct netlink_ext_ack *extack)
3796 {
3797 struct macsec_dev *macsec = macsec_priv(dev);
3798 struct macsec_tx_sc tx_sc;
3799 struct macsec_secy secy;
3800 int ret;
3801
3802 if (!data)
3803 return 0;
3804
3805 if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3806 data[IFLA_MACSEC_ICV_LEN] ||
3807 data[IFLA_MACSEC_SCI] ||
3808 data[IFLA_MACSEC_PORT])
3809 return -EINVAL;
3810
3811 /* Keep a copy of unmodified secy and tx_sc, in case the offload
3812 * propagation fails, to revert macsec_changelink_common.
3813 */
3814 memcpy(&secy, &macsec->secy, sizeof(secy));
3815 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3816
3817 ret = macsec_changelink_common(dev, data);
3818 if (ret)
3819 goto cleanup;
3820
3821 /* If h/w offloading is available, propagate to the device */
3822 if (macsec_is_offloaded(macsec)) {
3823 const struct macsec_ops *ops;
3824 struct macsec_context ctx;
3825
3826 ops = macsec_get_ops(netdev_priv(dev), &ctx);
3827 if (!ops) {
3828 ret = -EOPNOTSUPP;
3829 goto cleanup;
3830 }
3831
3832 ctx.secy = &macsec->secy;
3833 ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3834 if (ret)
3835 goto cleanup;
3836 }
3837
3838 return 0;
3839
3840 cleanup:
3841 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3842 memcpy(&macsec->secy, &secy, sizeof(secy));
3843
3844 return ret;
3845 }
3846
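/* Detach and release every RX SC and TX SA of this SecY. Runs under
 * RTNL; concurrent readers are protected by RCU.
 */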
static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

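/* Teardown shared by macsec_dellink() and the NETDEV_UNREGISTER path:
 * notify an offloading driver, queue the netdev for unregistration,
 * drop the SecY from the per-device list and unlink from real_dev.
 */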
static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

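/* rtnl_link_ops ->dellink: remove one MACsec device; if it was the last
 * SecY on the underlying device, also unregister the rx_handler and
 * free the rx_handler data.
 */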
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

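/* Register the shared rx_handler on the underlying device the first
 * time a MACsec device is stacked on it, then add this SecY to the
 * device's list.
 */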
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

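/* Check whether another SecY on the same underlying device already uses
 * this SCI. Called under RTNL, which excludes concurrent list updates.
 */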
static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

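/* Allocate the per-CPU statistics and initialize the SecY and its
 * transmit SC with the defaults (strict validation, frame protection
 * on, replay protection off). When no SCI was supplied, one is derived
 * from the device's MAC address and the End Station port number.
 */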
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static struct lock_class_key macsec_netdev_addr_lock_key;

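/* rtnl_link_ops ->newlink: create a MACsec device on top of an Ethernet
 * real_dev. Validates the offload mode and SCI uniqueness, reserves MTU
 * headroom for the SecTAG and ICV, links the two devices, applies the
 * requested attributes and registers the SecY for receive handling.
 */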
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	/* send_sci must be true when an explicit transmit SCI is set */
	if ((data && data[IFLA_MACSEC_SCI]) &&
	    (data && data[IFLA_MACSEC_INC_SCI])) {
		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

		if (!send_sci)
			return -EINVAL;
	}

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

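/* rtnl_link_ops ->validate: sanity-check the netlink attributes before
 * creating or changing a link: known cipher suite, an ICV length the
 * AEAD implementation accepts, boolean flags limited to 0/1, mutually
 * exclusive TCI bits (SCI/ES/SCB), and a replay window whenever replay
 * protection is requested.
 */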
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

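/* rtnl_link_ops ->fill_info: dump the SecY configuration to userspace.
 * The cipher suite ID is reconstructed from the key length and the XPN
 * flag; the replay window is only reported while replay protection is
 * enabled.
 */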
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

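/* rtnetlink glue for the "macsec" link kind. With iproute2, a link of
 * this kind would typically be created with something like:
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *
 * (device names illustrative), which goes through macsec_validate_attr()
 * and macsec_newlink() above.
 */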
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

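/* Netdevice notifier: mirror the underlying device's operstate to the
 * stacked MACsec devices, clamp their MTU when the lower device's MTU
 * changes (leaving room for the SecTAG and ICV), and tear everything
 * down when the underlying device unregisters.
 */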
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

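/* Register the lower-device notifier, the rtnetlink link ops and the
 * genetlink family, unwinding in reverse order on failure.
 */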
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");