// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
#include <linux/if_bridge.h>

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

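	/* write the key words in reverse order, byte-swapped into the
	 * register layout the hardware expects
	 */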
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

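	/* setting the WRITE bit tells the hw to latch the staged key
	 * and salt into the Tx SA table slot selected by idx
	 */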
	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
			(__force u32)cpu_to_le32((__force u32)spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
				(__force u32)cpu_to_le32((__force u32)addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}
}

/**
 * ixgbe_ipsec_stop_data - halt the data paths and let them drain
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* If both Tx and Rx are ready there are no packets
	 * that we need to flush so the loopback configuration
	 * below is not necessary.
	 */
	t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_RDY;
	r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_RDY;
	if (t_rdy && r_rdy)
		return;

	/* If the Tx FIFO still has data but no link to drain it,
	 * we can't clear the Tx sec block, so set the MAC loopback
	 * to let the packets flush before the block clear.
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait up to about 200ms for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

/**
 * ixgbe_ipsec_stop_engine - disable the ipsec engines and SA lookup
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine - set up and enable the ipsec engines
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);

		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

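	/* the rx_sa_list hash is keyed on the SPI alone, so confirm a
	 * full match on spi/daddr/proto before taking a hold on the state
	 */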
	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * A key_len of 160 bits accounts for the 16 byte key plus a
	 * 4 byte salt; 128 bits is a key with no salt.
	 */
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

/**
 * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mfval, manc, reg;
	int num_filters = 4;
	bool manc_ipv4;
	u32 bmcipval;
	int i, j;

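/* manageability filter register layout - used only by this check,
 * so the definitions are kept local to this function
 */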
#define MANC_EN_IPV4_FILTER      BIT(24)
#define MFVAL_IPV4_FILTER_SHIFT  16
#define MFVAL_IPV6_FILTER_SHIFT  24
#define MIPAF_ARR(_m, _n)        (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))

#define IXGBE_BMCIP(_n)          (0x5050 + ((_n) * 4))
#define IXGBE_BMCIPVAL           0x5060
#define BMCIP_V4                 0x2
#define BMCIP_V6                 0x3
#define BMCIP_MASK               0x3

	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
	mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
	bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);

	if (xs->props.family == AF_INET) {
		/* are there any IPv4 filters to check? */
		if (manc_ipv4) {
			/* the 4 ipv4 filters are all in MIPAF(3, i) */
			for (i = 0; i < num_filters; i++) {
				if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
					continue;

				reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
				if (reg == xs->id.daddr.a4)
					return 1;
			}
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
			reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
			if (reg == xs->id.daddr.a4)
				return 1;
		}

	} else {
		/* if there are ipv4 filters, they are in the last ipv6 slot */
		if (manc_ipv4)
			num_filters = 3;

		for (i = 0; i < num_filters; i++) {
			if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
				continue;

			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}
	}

	return 0;
}

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

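		/* only enable hw decryption when the SA actually carries
		 * a crypto algorithm; otherwise the frames are passed up
		 * undecrypted for the stack to handle
		 */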
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		if (adapter->num_vfs &&
		    adapter->bridge_mode != BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

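		/* scrub the SA out of the hardware table and drop it
		 * from the driver's SPI hash
		 */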
		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

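	/* the handle must point at an SA slot we actually programmed */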
	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not being the
		 * same as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the ip and crypto headers in the data.
	 * We can assume no VLAN header is in the way because the hw
	 * wouldn't recognize the IPsec packet then, and in any case
	 * VLAN devices don't currently support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

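	/* pull the SPI from whichever IPsec header type the hw flagged */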
	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

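	/* record the matched state in the secpath so the xfrm input
	 * layer knows the hw has already handled the crypto
	 */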
	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ipsec *ipsec;
	u32 t_dis, r_dis;
	size_t size;

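	/* the 82598 has no hardware support for this IPsec offload */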
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* If there is no support for either Tx or Rx offload
	 * we should not be advertising support for IPsec.
	 */
	t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_OFF_DIS;
	r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_OFF_DIS;
	if (t_dis || r_dis)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}