1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
3
4 #include "i40e.h"
5 #include "i40e_type.h"
6 #include "i40e_adminq.h"
7 #include "i40e_prototype.h"
8 #include <linux/avf/virtchnl.h>
9
10 /**
11 * i40e_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure
13 *
14 * This function sets the mac type of the adapter based on the
15 * vendor ID and device ID stored in the hw structure.
16 **/
17 int i40e_set_mac_type(struct i40e_hw *hw)
18 {
19 int status = 0;
20
21 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
22 switch (hw->device_id) {
23 case I40E_DEV_ID_SFP_XL710:
24 case I40E_DEV_ID_QEMU:
25 case I40E_DEV_ID_KX_B:
26 case I40E_DEV_ID_KX_C:
27 case I40E_DEV_ID_QSFP_A:
28 case I40E_DEV_ID_QSFP_B:
29 case I40E_DEV_ID_QSFP_C:
30 case I40E_DEV_ID_1G_BASE_T_BC:
31 case I40E_DEV_ID_5G_BASE_T_BC:
32 case I40E_DEV_ID_10G_BASE_T:
33 case I40E_DEV_ID_10G_BASE_T4:
34 case I40E_DEV_ID_10G_BASE_T_BC:
35 case I40E_DEV_ID_10G_B:
36 case I40E_DEV_ID_10G_SFP:
37 case I40E_DEV_ID_20G_KR2:
38 case I40E_DEV_ID_20G_KR2_A:
39 case I40E_DEV_ID_25G_B:
40 case I40E_DEV_ID_25G_SFP28:
41 case I40E_DEV_ID_X710_N3000:
42 case I40E_DEV_ID_XXV710_N3000:
43 hw->mac.type = I40E_MAC_XL710;
44 break;
45 case I40E_DEV_ID_KX_X722:
46 case I40E_DEV_ID_QSFP_X722:
47 case I40E_DEV_ID_SFP_X722:
48 case I40E_DEV_ID_1G_BASE_T_X722:
49 case I40E_DEV_ID_10G_BASE_T_X722:
50 case I40E_DEV_ID_SFP_I_X722:
51 case I40E_DEV_ID_SFP_X722_A:
52 hw->mac.type = I40E_MAC_X722;
53 break;
54 default:
55 hw->mac.type = I40E_MAC_GENERIC;
56 break;
57 }
58 } else {
59 status = I40E_ERR_DEVICE_NOT_SUPPORTED;
60 }
61
62 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
63 hw->mac.type, status);
64 return status;
65 }
66
67 /**
68 * i40e_aq_str - convert AQ err code to a string
69 * @hw: pointer to the HW structure
70 * @aq_err: the AQ error code to convert
71 **/
72 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
73 {
74 switch (aq_err) {
75 case I40E_AQ_RC_OK:
76 return "OK";
77 case I40E_AQ_RC_EPERM:
78 return "I40E_AQ_RC_EPERM";
79 case I40E_AQ_RC_ENOENT:
80 return "I40E_AQ_RC_ENOENT";
81 case I40E_AQ_RC_ESRCH:
82 return "I40E_AQ_RC_ESRCH";
83 case I40E_AQ_RC_EINTR:
84 return "I40E_AQ_RC_EINTR";
85 case I40E_AQ_RC_EIO:
86 return "I40E_AQ_RC_EIO";
87 case I40E_AQ_RC_ENXIO:
88 return "I40E_AQ_RC_ENXIO";
89 case I40E_AQ_RC_E2BIG:
90 return "I40E_AQ_RC_E2BIG";
91 case I40E_AQ_RC_EAGAIN:
92 return "I40E_AQ_RC_EAGAIN";
93 case I40E_AQ_RC_ENOMEM:
94 return "I40E_AQ_RC_ENOMEM";
95 case I40E_AQ_RC_EACCES:
96 return "I40E_AQ_RC_EACCES";
97 case I40E_AQ_RC_EFAULT:
98 return "I40E_AQ_RC_EFAULT";
99 case I40E_AQ_RC_EBUSY:
100 return "I40E_AQ_RC_EBUSY";
101 case I40E_AQ_RC_EEXIST:
102 return "I40E_AQ_RC_EEXIST";
103 case I40E_AQ_RC_EINVAL:
104 return "I40E_AQ_RC_EINVAL";
105 case I40E_AQ_RC_ENOTTY:
106 return "I40E_AQ_RC_ENOTTY";
107 case I40E_AQ_RC_ENOSPC:
108 return "I40E_AQ_RC_ENOSPC";
109 case I40E_AQ_RC_ENOSYS:
110 return "I40E_AQ_RC_ENOSYS";
111 case I40E_AQ_RC_ERANGE:
112 return "I40E_AQ_RC_ERANGE";
113 case I40E_AQ_RC_EFLUSHED:
114 return "I40E_AQ_RC_EFLUSHED";
115 case I40E_AQ_RC_BAD_ADDR:
116 return "I40E_AQ_RC_BAD_ADDR";
117 case I40E_AQ_RC_EMODE:
118 return "I40E_AQ_RC_EMODE";
119 case I40E_AQ_RC_EFBIG:
120 return "I40E_AQ_RC_EFBIG";
121 }
122
123 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
124 return hw->err_str;
125 }
126
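/* Illustrative sketch (not part of the original file): callers typically pair
 * the AdminQ return code with this helper when logging a failed command, e.g.
 *
 *	ret = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (ret)
 *		hw_dbg(hw, "AQ command failed, aq_err %s\n",
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));
 *
 * where hw->aq.asq_last_status holds the last error code reported by firmware.
 */
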
127 /**
128 * i40e_debug_aq
129 * @hw: pointer to the hw struct
130 * @mask: debug mask
131 * @desc: pointer to admin queue descriptor
132 * @buffer: pointer to command buffer
133 * @buf_len: max length of buffer
134 *
135 * Dumps debug log about adminq command with descriptor contents.
136 **/
137 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
138 void *buffer, u16 buf_len)
139 {
140 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
141 u32 effective_mask = hw->debug_mask & mask;
142 char prefix[27];
143 u16 len;
144 u8 *buf = (u8 *)buffer;
145
146 if (!effective_mask || !desc)
147 return;
148
149 len = le16_to_cpu(aq_desc->datalen);
150
151 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
152 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
153 le16_to_cpu(aq_desc->opcode),
154 le16_to_cpu(aq_desc->flags),
155 le16_to_cpu(aq_desc->datalen),
156 le16_to_cpu(aq_desc->retval));
157 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
158 "\tcookie (h,l) 0x%08X 0x%08X\n",
159 le32_to_cpu(aq_desc->cookie_high),
160 le32_to_cpu(aq_desc->cookie_low));
161 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
162 "\tparam (0,1) 0x%08X 0x%08X\n",
163 le32_to_cpu(aq_desc->params.internal.param0),
164 le32_to_cpu(aq_desc->params.internal.param1));
165 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
166 "\taddr (h,l) 0x%08X 0x%08X\n",
167 le32_to_cpu(aq_desc->params.external.addr_high),
168 le32_to_cpu(aq_desc->params.external.addr_low));
169
170 if (buffer && buf_len != 0 && len != 0 &&
171 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
172 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
173 if (buf_len < len)
174 len = buf_len;
175
176 snprintf(prefix, sizeof(prefix),
177 "i40e %02x:%02x.%x: \t0x",
178 hw->bus.bus_id,
179 hw->bus.device,
180 hw->bus.func);
181
182 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
183 16, 1, buf, len, false);
184 }
185 }
186
187 /**
188 * i40e_check_asq_alive
189 * @hw: pointer to the hw struct
190 *
191 * Returns true if the admin send queue is enabled, else false.
192 **/
193 bool i40e_check_asq_alive(struct i40e_hw *hw)
194 {
195 if (hw->aq.asq.len)
196 return !!(rd32(hw, hw->aq.asq.len) &
197 I40E_PF_ATQLEN_ATQENABLE_MASK);
198 else
199 return false;
200 }
201
202 /**
203 * i40e_aq_queue_shutdown
204 * @hw: pointer to the hw struct
205 * @unloading: is the driver unloading itself
206 *
207 * Tell the Firmware that we're shutting down the AdminQ and whether
208 * or not the driver is unloading as well.
209 **/
210 int i40e_aq_queue_shutdown(struct i40e_hw *hw,
211 bool unloading)
212 {
213 struct i40e_aq_desc desc;
214 struct i40e_aqc_queue_shutdown *cmd =
215 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
216 int status;
217
218 i40e_fill_default_direct_cmd_desc(&desc,
219 i40e_aqc_opc_queue_shutdown);
220
221 if (unloading)
222 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
223 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
224
225 return status;
226 }
227
228 /**
229 * i40e_aq_get_set_rss_lut
230 * @hw: pointer to the hardware structure
231 * @vsi_id: vsi fw index
232 * @pf_lut: for PF table set true, for VSI table set false
233 * @lut: pointer to the lut buffer provided by the caller
234 * @lut_size: size of the lut buffer
235 * @set: set true to set the table, false to get the table
236 *
237 * Internal function to get or set the RSS lookup table
238 **/
239 static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
240 u16 vsi_id, bool pf_lut,
241 u8 *lut, u16 lut_size,
242 bool set)
243 {
244 struct i40e_aq_desc desc;
245 struct i40e_aqc_get_set_rss_lut *cmd_resp =
246 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
247 int status;
248
249 if (set)
250 i40e_fill_default_direct_cmd_desc(&desc,
251 i40e_aqc_opc_set_rss_lut);
252 else
253 i40e_fill_default_direct_cmd_desc(&desc,
254 i40e_aqc_opc_get_rss_lut);
255
256 /* Indirect command */
257 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
258 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
259
260 cmd_resp->vsi_id =
261 cpu_to_le16((u16)((vsi_id <<
262 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
263 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
264 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
265
266 if (pf_lut)
267 cmd_resp->flags |= cpu_to_le16((u16)
268 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
269 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
270 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
271 else
272 cmd_resp->flags |= cpu_to_le16((u16)
273 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
274 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
275 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
276
277 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
278
279 return status;
280 }
281
282 /**
283 * i40e_aq_get_rss_lut
284 * @hw: pointer to the hardware structure
285 * @vsi_id: vsi fw index
286 * @pf_lut: for PF table set true, for VSI table set false
287 * @lut: pointer to the lut buffer provided by the caller
288 * @lut_size: size of the lut buffer
289 *
290 * get the RSS lookup table, PF or VSI type
291 **/
292 int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
293 bool pf_lut, u8 *lut, u16 lut_size)
294 {
295 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
296 false);
297 }
298
299 /**
300 * i40e_aq_set_rss_lut
301 * @hw: pointer to the hardware structure
302 * @vsi_id: vsi fw index
303 * @pf_lut: for PF table set true, for VSI table set false
304 * @lut: pointer to the lut buffer provided by the caller
305 * @lut_size: size of the lut buffer
306 *
307 * set the RSS lookup table, PF or VSI type
308 **/
309 int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
310 bool pf_lut, u8 *lut, u16 lut_size)
311 {
312 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
313 }
314
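/* Illustrative sketch (not part of the original file): a caller that owns a
 * VSI might program a per-VSI lookup table as below, where vsi_id, lut and
 * lut_size come from the caller's context:
 *
 *	ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 *	if (ret)
 *		hw_dbg(hw, "set RSS LUT failed, aq_err %s\n",
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));
 */
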
315 /**
316 * i40e_aq_get_set_rss_key
317 * @hw: pointer to the hw struct
318 * @vsi_id: vsi fw index
319 * @key: pointer to key info struct
320 * @set: set true to set the key, false to get the key
321 *
322 * get or set the RSS key per VSI
323 **/
324 static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
325 u16 vsi_id,
326 struct i40e_aqc_get_set_rss_key_data *key,
327 bool set)
328 {
329 struct i40e_aq_desc desc;
330 struct i40e_aqc_get_set_rss_key *cmd_resp =
331 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
332 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
333 int status;
334
335 if (set)
336 i40e_fill_default_direct_cmd_desc(&desc,
337 i40e_aqc_opc_set_rss_key);
338 else
339 i40e_fill_default_direct_cmd_desc(&desc,
340 i40e_aqc_opc_get_rss_key);
341
342 /* Indirect command */
343 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
344 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
345
346 cmd_resp->vsi_id =
347 cpu_to_le16((u16)((vsi_id <<
348 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
349 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
350 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
351
352 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
353
354 return status;
355 }
356
357 /**
358 * i40e_aq_get_rss_key
359 * @hw: pointer to the hw struct
360 * @vsi_id: vsi fw index
361 * @key: pointer to key info struct
362 * get the RSS key per VSI
363 **/
364 int i40e_aq_get_rss_key(struct i40e_hw *hw,
365 u16 vsi_id,
366 struct i40e_aqc_get_set_rss_key_data *key)
367 {
368 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
369 }
370
371 /**
372 * i40e_aq_set_rss_key
373 * @hw: pointer to the hw struct
374 * @vsi_id: vsi fw index
375 * @key: pointer to key info struct
376 *
377 * set the RSS key per VSI
378 **/
379 int i40e_aq_set_rss_key(struct i40e_hw *hw,
380 u16 vsi_id,
381 struct i40e_aqc_get_set_rss_key_data *key)
382 {
383 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
384 }
385
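/* Illustrative sketch (not part of the original file): the key buffer is a
 * struct i40e_aqc_get_set_rss_key_data, so a caller might do something like
 * the following (standard_rss_key per the AdminQ key layout, vsi_id from the
 * caller's context):
 *
 *	struct i40e_aqc_get_set_rss_key_data key = {};
 *
 *	netdev_rss_key_fill(key.standard_rss_key,
 *			    sizeof(key.standard_rss_key));
 *	ret = i40e_aq_set_rss_key(hw, vsi_id, &key);
 */
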
386 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
387 * hardware to a bit-field that can be used by SW to more easily determine the
388 * packet type.
389 *
390 * Macros are used to shorten the table lines and make this table human
391 * readable.
392 *
393 * We store the PTYPE in the top byte of the bit field - this is just so that
394 * we can check that the table doesn't have a row missing, as the index into
395 * the table should be the PTYPE.
396 *
397 * Typical work flow:
398 *
399 * IF NOT i40e_ptype_lookup[ptype].known
400 * THEN
401 * Packet is unknown
402 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
403 * Use the rest of the fields to look at the tunnels, inner protocols, etc
404 * ELSE
405 * Use the enum i40e_rx_l2_ptype to decode the packet type
406 * ENDIF
407 */
408
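/* Illustrative sketch (not part of the original file): the Rx hot path applies
 * the workflow above roughly as follows, with ptype taken from the Rx
 * descriptor (field names per struct i40e_rx_ptype_decoded):
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
 *	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
 *		...pick checksum/hash handling from decoded.inner_prot...
 */
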
409 /* macro to make the table lines short, use explicit indexing with [PTYPE] */
410 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
411 [PTYPE] = { \
412 1, \
413 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
414 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
415 I40E_RX_PTYPE_##OUTER_FRAG, \
416 I40E_RX_PTYPE_TUNNEL_##T, \
417 I40E_RX_PTYPE_TUNNEL_END_##TE, \
418 I40E_RX_PTYPE_##TEF, \
419 I40E_RX_PTYPE_INNER_PROT_##I, \
420 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
421
422 #define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
423
424 /* shorter macros makes the table fit but are terse */
425 #define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
426 #define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
427 #define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
428
429 /* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
430 struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
431 /* L2 Packet types */
432 I40E_PTT_UNUSED_ENTRY(0),
433 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
434 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
435 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
436 I40E_PTT_UNUSED_ENTRY(4),
437 I40E_PTT_UNUSED_ENTRY(5),
438 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
439 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
440 I40E_PTT_UNUSED_ENTRY(8),
441 I40E_PTT_UNUSED_ENTRY(9),
442 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
443 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
444 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
445 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
446 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
447 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
448 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
449 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
450 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
451 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
452 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
453 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
454
455 /* Non Tunneled IPv4 */
456 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
457 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
458 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
459 I40E_PTT_UNUSED_ENTRY(25),
460 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
461 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
462 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
463
464 /* IPv4 --> IPv4 */
465 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
466 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
467 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
468 I40E_PTT_UNUSED_ENTRY(32),
469 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
470 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
471 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
472
473 /* IPv4 --> IPv6 */
474 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
475 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
476 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
477 I40E_PTT_UNUSED_ENTRY(39),
478 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
479 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
480 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
481
482 /* IPv4 --> GRE/NAT */
483 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
484
485 /* IPv4 --> GRE/NAT --> IPv4 */
486 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
487 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
488 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
489 I40E_PTT_UNUSED_ENTRY(47),
490 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
491 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
492 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
493
494 /* IPv4 --> GRE/NAT --> IPv6 */
495 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
496 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
497 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
498 I40E_PTT_UNUSED_ENTRY(54),
499 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
500 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
501 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
502
503 /* IPv4 --> GRE/NAT --> MAC */
504 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
505
506 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
507 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
508 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
509 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
510 I40E_PTT_UNUSED_ENTRY(62),
511 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
512 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
513 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
514
515 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
516 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
517 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
518 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
519 I40E_PTT_UNUSED_ENTRY(69),
520 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
521 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
522 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
523
524 /* IPv4 --> GRE/NAT --> MAC/VLAN */
525 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
526
527 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
528 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
529 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
530 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
531 I40E_PTT_UNUSED_ENTRY(77),
532 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
533 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
534 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
535
536 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
537 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
538 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
539 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
540 I40E_PTT_UNUSED_ENTRY(84),
541 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
542 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
543 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
544
545 /* Non Tunneled IPv6 */
546 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
547 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
548 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
549 I40E_PTT_UNUSED_ENTRY(91),
550 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
551 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
552 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
553
554 /* IPv6 --> IPv4 */
555 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
556 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
557 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
558 I40E_PTT_UNUSED_ENTRY(98),
559 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
560 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
561 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
562
563 /* IPv6 --> IPv6 */
564 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
565 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
566 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
567 I40E_PTT_UNUSED_ENTRY(105),
568 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
569 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
570 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
571
572 /* IPv6 --> GRE/NAT */
573 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
574
575 /* IPv6 --> GRE/NAT -> IPv4 */
576 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
577 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
578 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
579 I40E_PTT_UNUSED_ENTRY(113),
580 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
581 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
582 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
583
584 /* IPv6 --> GRE/NAT -> IPv6 */
585 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
586 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
587 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
588 I40E_PTT_UNUSED_ENTRY(120),
589 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
590 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
591 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
592
593 /* IPv6 --> GRE/NAT -> MAC */
594 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
595
596 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
597 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
598 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
599 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
600 I40E_PTT_UNUSED_ENTRY(128),
601 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
602 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
603 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
604
605 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
606 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
607 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
608 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
609 I40E_PTT_UNUSED_ENTRY(135),
610 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
611 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
612 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
613
614 /* IPv6 --> GRE/NAT -> MAC/VLAN */
615 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
616
617 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
618 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
619 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
620 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
621 I40E_PTT_UNUSED_ENTRY(143),
622 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
623 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
624 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
625
626 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
627 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
628 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
629 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
630 I40E_PTT_UNUSED_ENTRY(150),
631 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
632 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
633 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
634
635 /* unused entries */
636 [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
637 };
638
639 /**
640 * i40e_init_shared_code - Initialize the shared code
641 * @hw: pointer to hardware structure
642 *
643 * This assigns the MAC type and PHY code and inits the NVM.
644 * Does not touch the hardware. This function must be called prior to any
645 * other function in the shared code. The i40e_hw structure should be
646 * memset to 0 prior to calling this function. The following fields in
647 * hw structure should be filled in prior to calling this function:
648 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
649 * subsystem_vendor_id, and revision_id
650 **/
651 int i40e_init_shared_code(struct i40e_hw *hw)
652 {
653 u32 port, ari, func_rid;
654 int status = 0;
655
656 i40e_set_mac_type(hw);
657
658 switch (hw->mac.type) {
659 case I40E_MAC_XL710:
660 case I40E_MAC_X722:
661 break;
662 default:
663 return I40E_ERR_DEVICE_NOT_SUPPORTED;
664 }
665
666 hw->phy.get_link_info = true;
667
668 /* Determine port number and PF number */
669 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
670 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
671 hw->port = (u8)port;
672 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
673 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
674 func_rid = rd32(hw, I40E_PF_FUNC_RID);
675 if (ari)
676 hw->pf_id = (u8)(func_rid & 0xff);
677 else
678 hw->pf_id = (u8)(func_rid & 0x7);
679
680 status = i40e_init_nvm(hw);
681 return status;
682 }
683
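/* Illustrative sketch (not part of the original file): the probe path is
 * expected to zero the hw struct and fill in the identification fields listed
 * above before calling this function, e.g. (pdev from the caller's context):
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
 *			      pci_resource_len(pdev, 0));
 *	err = i40e_init_shared_code(hw);
 */
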
684 /**
685 * i40e_aq_mac_address_read - Retrieve the MAC addresses
686 * @hw: pointer to the hw struct
687 * @flags: a return indicator of what addresses were added to the addr store
688 * @addrs: the requestor's mac addr store
689 * @cmd_details: pointer to command details structure or NULL
690 **/
691 static int
692 i40e_aq_mac_address_read(struct i40e_hw *hw,
693 u16 *flags,
694 struct i40e_aqc_mac_address_read_data *addrs,
695 struct i40e_asq_cmd_details *cmd_details)
696 {
697 struct i40e_aq_desc desc;
698 struct i40e_aqc_mac_address_read *cmd_data =
699 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
700 int status;
701
702 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
703 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
704
705 status = i40e_asq_send_command(hw, &desc, addrs,
706 sizeof(*addrs), cmd_details);
707 *flags = le16_to_cpu(cmd_data->command_flags);
708
709 return status;
710 }
711
712 /**
713 * i40e_aq_mac_address_write - Change the MAC addresses
714 * @hw: pointer to the hw struct
715 * @flags: indicates which MAC to be written
716 * @mac_addr: address to write
717 * @cmd_details: pointer to command details structure or NULL
718 **/
719 int i40e_aq_mac_address_write(struct i40e_hw *hw,
720 u16 flags, u8 *mac_addr,
721 struct i40e_asq_cmd_details *cmd_details)
722 {
723 struct i40e_aq_desc desc;
724 struct i40e_aqc_mac_address_write *cmd_data =
725 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
726 int status;
727
728 i40e_fill_default_direct_cmd_desc(&desc,
729 i40e_aqc_opc_mac_address_write);
730 cmd_data->command_flags = cpu_to_le16(flags);
731 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
732 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
733 ((u32)mac_addr[3] << 16) |
734 ((u32)mac_addr[4] << 8) |
735 mac_addr[5]);
736
737 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
738
739 return status;
740 }
741
742 /**
743 * i40e_get_mac_addr - get MAC address
744 * @hw: pointer to the HW structure
745 * @mac_addr: pointer to MAC address
746 *
747 * Reads the adapter's MAC address from register
748 **/
749 int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
750 {
751 struct i40e_aqc_mac_address_read_data addrs;
752 u16 flags = 0;
753 int status;
754
755 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
756
757 if (flags & I40E_AQC_LAN_ADDR_VALID)
758 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
759
760 return status;
761 }
762
763 /**
764 * i40e_get_port_mac_addr - get Port MAC address
765 * @hw: pointer to the HW structure
766 * @mac_addr: pointer to Port MAC address
767 *
768 * Reads the adapter's Port MAC address
769 **/
770 int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
771 {
772 struct i40e_aqc_mac_address_read_data addrs;
773 u16 flags = 0;
774 int status;
775
776 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
777 if (status)
778 return status;
779
780 if (flags & I40E_AQC_PORT_ADDR_VALID)
781 ether_addr_copy(mac_addr, addrs.port_mac);
782 else
783 status = I40E_ERR_INVALID_MAC_ADDR;
784
785 return status;
786 }
787
788 /**
789 * i40e_pre_tx_queue_cfg - pre tx queue configure
790 * @hw: pointer to the HW structure
791 * @queue: target PF queue index
792 * @enable: state change request
793 *
794 * Handles hw requirement to indicate intention to enable
795 * or disable target queue.
796 **/
797 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
798 {
799 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
800 u32 reg_block = 0;
801 u32 reg_val;
802
803 if (abs_queue_idx >= 128) {
804 reg_block = abs_queue_idx / 128;
805 abs_queue_idx %= 128;
806 }
807
808 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
809 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
810 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
811
812 if (enable)
813 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
814 else
815 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
816
817 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
818 }
819
820 /**
821 * i40e_read_pba_string - Reads part number string from EEPROM
822 * @hw: pointer to hardware structure
823 * @pba_num: stores the part number string from the EEPROM
824 * @pba_num_size: part number string buffer length
825 *
826 * Reads the part number string from the EEPROM.
827 **/
828 int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
829 u32 pba_num_size)
830 {
831 u16 pba_word = 0;
832 u16 pba_size = 0;
833 u16 pba_ptr = 0;
834 int status = 0;
835 u16 i = 0;
836
837 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
838 if (status || (pba_word != 0xFAFA)) {
839 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
840 return status;
841 }
842
843 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
844 if (status) {
845 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
846 return status;
847 }
848
849 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
850 if (status) {
851 hw_dbg(hw, "Failed to read PBA Block size.\n");
852 return status;
853 }
854
855 /* Subtract one to get PBA word count (PBA Size word is included in
856 * total size)
857 */
858 pba_size--;
859 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
860 hw_dbg(hw, "Buffer too small for PBA data.\n");
861 return I40E_ERR_PARAM;
862 }
863
864 for (i = 0; i < pba_size; i++) {
865 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
866 if (status) {
867 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
868 return status;
869 }
870
871 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
872 pba_num[(i * 2) + 1] = pba_word & 0xFF;
873 }
874 pba_num[(pba_size * 2)] = '\0';
875
876 return status;
877 }
878
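/* Illustrative sketch (not part of the original file): the caller must size
 * the buffer for two characters per PBA word plus a terminating NUL.  Using a
 * hypothetical on-stack buffer:
 *
 *	u8 pba[64];
 *
 *	if (!i40e_read_pba_string(hw, pba, sizeof(pba)))
 *		hw_dbg(hw, "PBA: %s\n", pba);
 */
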
879 /**
880 * i40e_get_media_type - Gets media type
881 * @hw: pointer to the hardware structure
882 **/
883 static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
884 {
885 enum i40e_media_type media;
886
887 switch (hw->phy.link_info.phy_type) {
888 case I40E_PHY_TYPE_10GBASE_SR:
889 case I40E_PHY_TYPE_10GBASE_LR:
890 case I40E_PHY_TYPE_1000BASE_SX:
891 case I40E_PHY_TYPE_1000BASE_LX:
892 case I40E_PHY_TYPE_40GBASE_SR4:
893 case I40E_PHY_TYPE_40GBASE_LR4:
894 case I40E_PHY_TYPE_25GBASE_LR:
895 case I40E_PHY_TYPE_25GBASE_SR:
896 media = I40E_MEDIA_TYPE_FIBER;
897 break;
898 case I40E_PHY_TYPE_100BASE_TX:
899 case I40E_PHY_TYPE_1000BASE_T:
900 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
901 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
902 case I40E_PHY_TYPE_10GBASE_T:
903 media = I40E_MEDIA_TYPE_BASET;
904 break;
905 case I40E_PHY_TYPE_10GBASE_CR1_CU:
906 case I40E_PHY_TYPE_40GBASE_CR4_CU:
907 case I40E_PHY_TYPE_10GBASE_CR1:
908 case I40E_PHY_TYPE_40GBASE_CR4:
909 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
910 case I40E_PHY_TYPE_40GBASE_AOC:
911 case I40E_PHY_TYPE_10GBASE_AOC:
912 case I40E_PHY_TYPE_25GBASE_CR:
913 case I40E_PHY_TYPE_25GBASE_AOC:
914 case I40E_PHY_TYPE_25GBASE_ACC:
915 media = I40E_MEDIA_TYPE_DA;
916 break;
917 case I40E_PHY_TYPE_1000BASE_KX:
918 case I40E_PHY_TYPE_10GBASE_KX4:
919 case I40E_PHY_TYPE_10GBASE_KR:
920 case I40E_PHY_TYPE_40GBASE_KR4:
921 case I40E_PHY_TYPE_20GBASE_KR2:
922 case I40E_PHY_TYPE_25GBASE_KR:
923 media = I40E_MEDIA_TYPE_BACKPLANE;
924 break;
925 case I40E_PHY_TYPE_SGMII:
926 case I40E_PHY_TYPE_XAUI:
927 case I40E_PHY_TYPE_XFI:
928 case I40E_PHY_TYPE_XLAUI:
929 case I40E_PHY_TYPE_XLPPI:
930 default:
931 media = I40E_MEDIA_TYPE_UNKNOWN;
932 break;
933 }
934
935 return media;
936 }
937
938 /**
939 * i40e_poll_globr - Poll for Global Reset completion
940 * @hw: pointer to the hardware structure
941 * @retry_limit: how many times to retry before failure
942 **/
943 static int i40e_poll_globr(struct i40e_hw *hw,
944 u32 retry_limit)
945 {
946 u32 cnt, reg = 0;
947
948 for (cnt = 0; cnt < retry_limit; cnt++) {
949 reg = rd32(hw, I40E_GLGEN_RSTAT);
950 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
951 return 0;
952 msleep(100);
953 }
954
955 hw_dbg(hw, "Global reset failed.\n");
956 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
957
958 return I40E_ERR_RESET_FAILED;
959 }
960
961 #define I40E_PF_RESET_WAIT_COUNT_A0 200
962 #define I40E_PF_RESET_WAIT_COUNT 200
963 /**
964 * i40e_pf_reset - Reset the PF
965 * @hw: pointer to the hardware structure
966 *
967 * Assuming someone else has triggered a global reset,
968 * assure the global reset is complete and then reset the PF
969 **/
970 int i40e_pf_reset(struct i40e_hw *hw)
971 {
972 u32 cnt = 0;
973 u32 cnt1 = 0;
974 u32 reg = 0;
975 u32 grst_del;
976
977 /* Poll for Global Reset steady state in case of recent GRST.
978 * The grst delay value is in 100ms units, and we'll wait a
979 * couple counts longer to be sure we don't just miss the end.
980 */
981 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
982 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
983 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
984
985 /* It can take up to 15 secs for GRST steady state.
986 * Bump it to 16 secs max to be safe.
987 */
988 grst_del = grst_del * 20;
989
990 for (cnt = 0; cnt < grst_del; cnt++) {
991 reg = rd32(hw, I40E_GLGEN_RSTAT);
992 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
993 break;
994 msleep(100);
995 }
996 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
997 hw_dbg(hw, "Global reset polling failed to complete.\n");
998 return I40E_ERR_RESET_FAILED;
999 }
1000
1001 /* Now Wait for the FW to be ready */
1002 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1003 reg = rd32(hw, I40E_GLNVM_ULD);
1004 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1005 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1006 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1007 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1008 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1009 break;
1010 }
1011 usleep_range(10000, 20000);
1012 }
1013 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1014 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1015 hw_dbg(hw, "wait for FW Reset complete timed out\n");
1016 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1017 return I40E_ERR_RESET_FAILED;
1018 }
1019
1020 /* If there was a Global Reset in progress when we got here,
1021 * we don't need to do the PF Reset
1022 */
1023 if (!cnt) {
1024 u32 reg2 = 0;
1025 if (hw->revision_id == 0)
1026 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1027 else
1028 cnt = I40E_PF_RESET_WAIT_COUNT;
1029 reg = rd32(hw, I40E_PFGEN_CTRL);
1030 wr32(hw, I40E_PFGEN_CTRL,
1031 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1032 for (; cnt; cnt--) {
1033 reg = rd32(hw, I40E_PFGEN_CTRL);
1034 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1035 break;
1036 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1037 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1038 break;
1039 usleep_range(1000, 2000);
1040 }
1041 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1042 if (i40e_poll_globr(hw, grst_del))
1043 return I40E_ERR_RESET_FAILED;
1044 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1045 hw_dbg(hw, "PF reset polling failed to complete.\n");
1046 return I40E_ERR_RESET_FAILED;
1047 }
1048 }
1049
1050 i40e_clear_pxe_mode(hw);
1051
1052 return 0;
1053 }
1054
1055 /**
1056 * i40e_clear_hw - clear out any left over hw state
1057 * @hw: pointer to the hw struct
1058 *
1059 * Clear queues and interrupts, typically called at init time,
1060 * but after the capabilities have been found so we know how many
1061 * queues and msix vectors have been allocated.
1062 **/
1063 void i40e_clear_hw(struct i40e_hw *hw)
1064 {
1065 u32 num_queues, base_queue;
1066 u32 num_pf_int;
1067 u32 num_vf_int;
1068 u32 num_vfs;
1069 u32 i, j;
1070 u32 val;
1071 u32 eol = 0x7ff;
1072
1073 /* get number of interrupts, queues, and VFs */
1074 val = rd32(hw, I40E_GLPCI_CNF2);
1075 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1076 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1077 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1078 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1079
1080 val = rd32(hw, I40E_PFLAN_QALLOC);
1081 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1082 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1083 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1084 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1085 if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
1086 num_queues = (j - base_queue) + 1;
1087 else
1088 num_queues = 0;
1089
1090 val = rd32(hw, I40E_PF_VT_PFALLOC);
1091 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1092 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1093 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1094 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1095 if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
1096 num_vfs = (j - i) + 1;
1097 else
1098 num_vfs = 0;
1099
1100 /* stop all the interrupts */
1101 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1102 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1103 for (i = 0; i < num_pf_int - 2; i++)
1104 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1105
1106 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1107 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1108 wr32(hw, I40E_PFINT_LNKLST0, val);
1109 for (i = 0; i < num_pf_int - 2; i++)
1110 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1111 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1112 for (i = 0; i < num_vfs; i++)
1113 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1114 for (i = 0; i < num_vf_int - 2; i++)
1115 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1116
1117 /* warn the HW of the coming Tx disables */
1118 for (i = 0; i < num_queues; i++) {
1119 u32 abs_queue_idx = base_queue + i;
1120 u32 reg_block = 0;
1121
1122 if (abs_queue_idx >= 128) {
1123 reg_block = abs_queue_idx / 128;
1124 abs_queue_idx %= 128;
1125 }
1126
1127 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1128 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1129 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1130 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1131
1132 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1133 }
1134 udelay(400);
1135
1136 /* stop all the queues */
1137 for (i = 0; i < num_queues; i++) {
1138 wr32(hw, I40E_QINT_TQCTL(i), 0);
1139 wr32(hw, I40E_QTX_ENA(i), 0);
1140 wr32(hw, I40E_QINT_RQCTL(i), 0);
1141 wr32(hw, I40E_QRX_ENA(i), 0);
1142 }
1143
1144 /* short wait for all queue disables to settle */
1145 udelay(50);
1146 }
1147
1148 /**
1149 * i40e_clear_pxe_mode - clear pxe operations mode
1150 * @hw: pointer to the hw struct
1151 *
1152 * Make sure all PXE mode settings are cleared, including things
1153 * like descriptor fetch/write-back mode.
1154 **/
1155 void i40e_clear_pxe_mode(struct i40e_hw *hw)
1156 {
1157 u32 reg;
1158
1159 if (i40e_check_asq_alive(hw))
1160 i40e_aq_clear_pxe_mode(hw, NULL);
1161
1162 /* Clear single descriptor fetch/write-back mode */
1163 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1164
1165 if (hw->revision_id == 0) {
1166 /* As a workaround, clear PXE_MODE instead of setting it */
1167 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1168 } else {
1169 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1170 }
1171 }
1172
1173 /**
1174 * i40e_led_is_mine - helper to find matching led
1175 * @hw: pointer to the hw struct
1176 * @idx: index into GPIO registers
1177 *
1178 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1179 */
1180 static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1181 {
1182 u32 gpio_val = 0;
1183 u32 port;
1184
1185 if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1186 !hw->func_caps.led[idx])
1187 return 0;
1188 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1189 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1190 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1191
1192 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1193 * if it is not our port then ignore
1194 */
1195 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1196 (port != hw->port))
1197 return 0;
1198
1199 return gpio_val;
1200 }
1201
1202 #define I40E_FW_LED BIT(4)
1203 #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1204 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1205
1206 #define I40E_LED0 22
1207
1208 #define I40E_PIN_FUNC_SDP 0x0
1209 #define I40E_PIN_FUNC_LED 0x1
1210
1211 /**
1212 * i40e_led_get - return current on/off mode
1213 * @hw: pointer to the hw struct
1214 *
1215 * The value returned is the 'mode' field as defined in the
1216 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1217 * values are variations of possible behaviors relating to
1218 * blink, link, and wire.
1219 **/
1220 u32 i40e_led_get(struct i40e_hw *hw)
1221 {
1222 u32 mode = 0;
1223 int i;
1224
1225 /* as per the documentation GPIO 22-29 are the LED
1226 * GPIO pins named LED0..LED7
1227 */
1228 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1229 u32 gpio_val = i40e_led_is_mine(hw, i);
1230
1231 if (!gpio_val)
1232 continue;
1233
1234 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1235 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1236 break;
1237 }
1238
1239 return mode;
1240 }
1241
1242 /**
1243 * i40e_led_set - set new on/off mode
1244 * @hw: pointer to the hw struct
1245 * @mode: 0=off, 0xf=on (else see manual for mode details)
1246 * @blink: true if the LED should blink when on, false if steady
1247 *
1248 * If this function is used to turn on the blink, it should also
1249 * be used to disable the blink when restoring the original state.
1250 **/
1251 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1252 {
1253 int i;
1254
1255 if (mode & ~I40E_LED_MODE_VALID) {
1256 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1257 return;
1258 }
1259
1260 /* as per the documentation GPIO 22-29 are the LED
1261 * GPIO pins named LED0..LED7
1262 */
1263 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1264 u32 gpio_val = i40e_led_is_mine(hw, i);
1265
1266 if (!gpio_val)
1267 continue;
1268
1269 if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1270 u32 pin_func = 0;
1271
1272 if (mode & I40E_FW_LED)
1273 pin_func = I40E_PIN_FUNC_SDP;
1274 else
1275 pin_func = I40E_PIN_FUNC_LED;
1276
1277 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1278 gpio_val |= ((pin_func <<
1279 I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1280 I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1281 }
1282 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1283 /* this & is a bit of paranoia, but serves as a range check */
1284 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1285 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1286
1287 if (blink)
1288 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1289 else
1290 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1291
1292 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1293 break;
1294 }
1295 }
1296
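/* Illustrative sketch (not part of the original file): LED identification,
 * as done for ethtool, saves the current mode, blinks, then restores it:
 *
 *	orig_mode = i40e_led_get(hw);
 *	i40e_led_set(hw, 0xf, true);
 *	...
 *	i40e_led_set(hw, orig_mode, false);
 */
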
1297 /* Admin command wrappers */
1298
1299 /**
1300 * i40e_aq_get_phy_capabilities
1301 * @hw: pointer to the hw struct
1302 * @abilities: structure for PHY capabilities to be filled
1303 * @qualified_modules: report Qualified Modules
1304 * @report_init: report init capabilities (active are default)
1305 * @cmd_details: pointer to command details structure or NULL
1306 *
1307 * Returns the various PHY abilities supported on the Port.
1308 **/
1309 int
1310 i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1311 bool qualified_modules, bool report_init,
1312 struct i40e_aq_get_phy_abilities_resp *abilities,
1313 struct i40e_asq_cmd_details *cmd_details)
1314 {
1315 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1316 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1317 struct i40e_aq_desc desc;
1318 int status;
1319
1320 if (!abilities)
1321 return I40E_ERR_PARAM;
1322
1323 do {
1324 i40e_fill_default_direct_cmd_desc(&desc,
1325 i40e_aqc_opc_get_phy_abilities);
1326
1327 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1328 if (abilities_size > I40E_AQ_LARGE_BUF)
1329 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1330
1331 if (qualified_modules)
1332 desc.params.external.param0 |=
1333 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1334
1335 if (report_init)
1336 desc.params.external.param0 |=
1337 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1338
1339 status = i40e_asq_send_command(hw, &desc, abilities,
1340 abilities_size, cmd_details);
1341
1342 switch (hw->aq.asq_last_status) {
1343 case I40E_AQ_RC_EIO:
1344 status = I40E_ERR_UNKNOWN_PHY;
1345 break;
1346 case I40E_AQ_RC_EAGAIN:
1347 usleep_range(1000, 2000);
1348 total_delay++;
1349 status = I40E_ERR_TIMEOUT;
1350 break;
1351 /* also covers I40E_AQ_RC_OK */
1352 default:
1353 break;
1354 }
1355
1356 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1357 (total_delay < max_delay));
1358
1359 if (status)
1360 return status;
1361
1362 if (report_init) {
1363 if (hw->mac.type == I40E_MAC_XL710 &&
1364 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1365 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1366 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1367 } else {
1368 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1369 hw->phy.phy_types |=
1370 ((u64)abilities->phy_type_ext << 32);
1371 }
1372 }
1373
1374 return status;
1375 }
1376
1377 /**
1378 * i40e_aq_set_phy_config
1379 * @hw: pointer to the hw struct
1380 * @config: structure with PHY configuration to be set
1381 * @cmd_details: pointer to command details structure or NULL
1382 *
1383 * Set the various PHY configuration parameters
1384 * supported on the Port. One or more of the Set PHY config parameters may be
1385 * ignored in an MFP mode as the PF may not have the privilege to set some
1386 * of the PHY Config parameters. This status will be indicated by the
1387 * command response.
1388 **/
1389 int i40e_aq_set_phy_config(struct i40e_hw *hw,
1390 struct i40e_aq_set_phy_config *config,
1391 struct i40e_asq_cmd_details *cmd_details)
1392 {
1393 struct i40e_aq_desc desc;
1394 struct i40e_aq_set_phy_config *cmd =
1395 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1396 int status;
1397
1398 if (!config)
1399 return I40E_ERR_PARAM;
1400
1401 i40e_fill_default_direct_cmd_desc(&desc,
1402 i40e_aqc_opc_set_phy_config);
1403
1404 *cmd = *config;
1405
1406 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1407
1408 return status;
1409 }
1410
1411 static noinline_for_stack int
1412 i40e_set_fc_status(struct i40e_hw *hw,
1413 struct i40e_aq_get_phy_abilities_resp *abilities,
1414 bool atomic_restart)
1415 {
1416 struct i40e_aq_set_phy_config config;
1417 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1418 u8 pause_mask = 0x0;
1419
1420 switch (fc_mode) {
1421 case I40E_FC_FULL:
1422 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1423 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1424 break;
1425 case I40E_FC_RX_PAUSE:
1426 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1427 break;
1428 case I40E_FC_TX_PAUSE:
1429 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1430 break;
1431 default:
1432 break;
1433 }
1434
1435 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1436 /* clear the old pause settings */
1437 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1438 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1439 /* set the new abilities */
1440 config.abilities |= pause_mask;
1441 /* If the abilities have changed, then set the new config */
1442 if (config.abilities == abilities->abilities)
1443 return 0;
1444
1445 /* Auto restart link so settings take effect */
1446 if (atomic_restart)
1447 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1448 /* Copy over all the old settings */
1449 config.phy_type = abilities->phy_type;
1450 config.phy_type_ext = abilities->phy_type_ext;
1451 config.link_speed = abilities->link_speed;
1452 config.eee_capability = abilities->eee_capability;
1453 config.eeer = abilities->eeer_val;
1454 config.low_power_ctrl = abilities->d3_lpan;
1455 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1456 I40E_AQ_PHY_FEC_CONFIG_MASK;
1457
1458 return i40e_aq_set_phy_config(hw, &config, NULL);
1459 }
1460
1461 /**
1462 * i40e_set_fc
1463 * @hw: pointer to the hw struct
1464 * @aq_failures: buffer to return AdminQ failure information
1465 * @atomic_restart: whether to enable atomic link restart
1466 *
1467 * Set the requested flow control mode using set_phy_config.
1468 **/
1469 int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1470 bool atomic_restart)
1471 {
1472 struct i40e_aq_get_phy_abilities_resp abilities;
1473 int status;
1474
1475 *aq_failures = 0x0;
1476
1477 /* Get the current phy config */
1478 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1479 NULL);
1480 if (status) {
1481 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1482 return status;
1483 }
1484
1485 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1486 if (status)
1487 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1488
1489 /* Update the link info */
1490 status = i40e_update_link_info(hw);
1491 if (status) {
1492 /* Wait a little bit (on 40G cards it sometimes takes a really
1493 * long time for link to come back from the atomic reset)
1494 * and try once more
1495 */
1496 msleep(1000);
1497 status = i40e_update_link_info(hw);
1498 }
1499 if (status)
1500 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1501
1502 return status;
1503 }
1504
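/* Illustrative sketch (not part of the original file): callers set
 * hw->fc.requested_mode first and then decode the aq_failures bitmask to see
 * which step failed, e.g.
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	ret = i40e_set_fc(hw, &aq_failures, false);
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET)
 *		hw_dbg(hw, "get PHY abilities failed, aq_err %s\n",
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));
 */
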
1505 /**
1506 * i40e_aq_clear_pxe_mode
1507 * @hw: pointer to the hw struct
1508 * @cmd_details: pointer to command details structure or NULL
1509 *
1510 * Tell the firmware that the driver is taking over from PXE
1511 **/
1512 int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1513 struct i40e_asq_cmd_details *cmd_details)
1514 {
1515 struct i40e_aq_desc desc;
1516 struct i40e_aqc_clear_pxe *cmd =
1517 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1518 int status;
1519
1520 i40e_fill_default_direct_cmd_desc(&desc,
1521 i40e_aqc_opc_clear_pxe_mode);
1522
1523 cmd->rx_cnt = 0x2;
1524
1525 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1526
1527 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1528
1529 return status;
1530 }
1531
1532 /**
1533 * i40e_aq_set_link_restart_an
1534 * @hw: pointer to the hw struct
1535 * @enable_link: if true: enable link, if false: disable link
1536 * @cmd_details: pointer to command details structure or NULL
1537 *
1538 * Sets up the link and restarts the Auto-Negotiation over the link.
1539 **/
1540 int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1541 bool enable_link,
1542 struct i40e_asq_cmd_details *cmd_details)
1543 {
1544 struct i40e_aq_desc desc;
1545 struct i40e_aqc_set_link_restart_an *cmd =
1546 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1547 int status;
1548
1549 i40e_fill_default_direct_cmd_desc(&desc,
1550 i40e_aqc_opc_set_link_restart_an);
1551
1552 cmd->command = I40E_AQ_PHY_RESTART_AN;
1553 if (enable_link)
1554 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1555 else
1556 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1557
1558 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1559
1560 return status;
1561 }
1562
1563 /**
1564 * i40e_aq_get_link_info
1565 * @hw: pointer to the hw struct
1566 * @enable_lse: enable/disable LinkStatusEvent reporting
1567 * @link: pointer to link status structure - optional
1568 * @cmd_details: pointer to command details structure or NULL
1569 *
1570 * Returns the link status of the adapter.
1571 **/
1572 int i40e_aq_get_link_info(struct i40e_hw *hw,
1573 bool enable_lse, struct i40e_link_status *link,
1574 struct i40e_asq_cmd_details *cmd_details)
1575 {
1576 struct i40e_aq_desc desc;
1577 struct i40e_aqc_get_link_status *resp =
1578 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1579 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1580 bool tx_pause, rx_pause;
1581 u16 command_flags;
1582 int status;
1583
1584 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1585
1586 if (enable_lse)
1587 command_flags = I40E_AQ_LSE_ENABLE;
1588 else
1589 command_flags = I40E_AQ_LSE_DISABLE;
1590 resp->command_flags = cpu_to_le16(command_flags);
1591
1592 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1593
1594 if (status)
1595 goto aq_get_link_info_exit;
1596
1597 /* save off old link status information */
1598 hw->phy.link_info_old = *hw_link_info;
1599
1600 /* update link status */
1601 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1602 hw->phy.media_type = i40e_get_media_type(hw);
1603 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1604 hw_link_info->link_info = resp->link_info;
1605 hw_link_info->an_info = resp->an_info;
1606 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1607 I40E_AQ_CONFIG_FEC_RS_ENA);
1608 hw_link_info->ext_info = resp->ext_info;
1609 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1610 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1611 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1612
1613 /* update fc info */
1614 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1615 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1616 if (tx_pause & rx_pause)
1617 hw->fc.current_mode = I40E_FC_FULL;
1618 else if (tx_pause)
1619 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1620 else if (rx_pause)
1621 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1622 else
1623 hw->fc.current_mode = I40E_FC_NONE;
1624
1625 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1626 hw_link_info->crc_enable = true;
1627 else
1628 hw_link_info->crc_enable = false;
1629
1630 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1631 hw_link_info->lse_enable = true;
1632 else
1633 hw_link_info->lse_enable = false;
1634
1635 if ((hw->mac.type == I40E_MAC_XL710) &&
1636 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1637 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1638 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1639
1640 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1641 hw->mac.type != I40E_MAC_X722) {
1642 __le32 tmp;
1643
1644 memcpy(&tmp, resp->link_type, sizeof(tmp));
1645 hw->phy.phy_types = le32_to_cpu(tmp);
1646 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1647 }
1648
1649 /* save link status information */
1650 if (link)
1651 *link = *hw_link_info;
1652
1653 /* flag cleared so helper functions don't call AQ again */
1654 hw->phy.get_link_info = false;
1655
1656 aq_get_link_info_exit:
1657 return status;
1658 }
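/* Usage sketch (illustrative only): request fresh link data and enable LSE
 * reporting in one call; the result is also cached in hw->phy.link_info,
 * so the optional link argument may be NULL:
 *
 *	struct i40e_link_status link;
 *
 *	if (!i40e_aq_get_link_info(hw, true, &link, NULL) &&
 *	    (link.link_info & I40E_AQ_LINK_UP))
 *		i40e_debug(hw, I40E_DEBUG_LINK, "link up, speed %d\n",
 *			   link.link_speed);
 */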
1659
1660 /**
1661 * i40e_aq_set_phy_int_mask
1662 * @hw: pointer to the hw struct
1663 * @mask: interrupt mask to be set
1664 * @cmd_details: pointer to command details structure or NULL
1665 *
1666 * Set link interrupt mask.
1667 **/
1668 int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1669 u16 mask,
1670 struct i40e_asq_cmd_details *cmd_details)
1671 {
1672 struct i40e_aq_desc desc;
1673 struct i40e_aqc_set_phy_int_mask *cmd =
1674 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1675 int status;
1676
1677 i40e_fill_default_direct_cmd_desc(&desc,
1678 i40e_aqc_opc_set_phy_int_mask);
1679
1680 cmd->event_mask = cpu_to_le16(mask);
1681
1682 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1683
1684 return status;
1685 }
1686
1687 /**
1688 * i40e_aq_set_phy_debug
1689 * @hw: pointer to the hw struct
1690 * @cmd_flags: debug command flags
1691 * @cmd_details: pointer to command details structure or NULL
1692 *
1693  * Set PHY debug command flags, e.g. to reset the external PHY.
1694 **/
1695 int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1696 struct i40e_asq_cmd_details *cmd_details)
1697 {
1698 struct i40e_aq_desc desc;
1699 struct i40e_aqc_set_phy_debug *cmd =
1700 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1701 int status;
1702
1703 i40e_fill_default_direct_cmd_desc(&desc,
1704 i40e_aqc_opc_set_phy_debug);
1705
1706 cmd->command_flags = cmd_flags;
1707
1708 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1709
1710 return status;
1711 }
1712
1713 /**
1714 * i40e_is_aq_api_ver_ge
1715 * @aq: pointer to AdminQ info containing HW API version to compare
1716 * @maj: API major value
1717 * @min: API minor value
1718 *
1719  * Return true if the current HW API version is >= the provided version.
1720 **/
1721 static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1722 u16 min)
1723 {
1724 return (aq->api_maj_ver > maj ||
1725 (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1726 }
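/* Example (illustrative only): with hw->aq.api_maj_ver == 1 and
 * hw->aq.api_min_ver == 7, i40e_is_aq_api_ver_ge(&hw->aq, 1, 5) is true
 * while i40e_is_aq_api_ver_ge(&hw->aq, 2, 0) is false; the promiscuous
 * helpers below use this to gate the RX_ONLY flag on FW API >= 1.5.
 */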
1727
1728 /**
1729 * i40e_aq_add_vsi
1730 * @hw: pointer to the hw struct
1731 * @vsi_ctx: pointer to a vsi context struct
1732 * @cmd_details: pointer to command details structure or NULL
1733 *
1734 * Add a VSI context to the hardware.
1735 **/
1736 int i40e_aq_add_vsi(struct i40e_hw *hw,
1737 struct i40e_vsi_context *vsi_ctx,
1738 struct i40e_asq_cmd_details *cmd_details)
1739 {
1740 struct i40e_aq_desc desc;
1741 struct i40e_aqc_add_get_update_vsi *cmd =
1742 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1743 struct i40e_aqc_add_get_update_vsi_completion *resp =
1744 (struct i40e_aqc_add_get_update_vsi_completion *)
1745 &desc.params.raw;
1746 int status;
1747
1748 i40e_fill_default_direct_cmd_desc(&desc,
1749 i40e_aqc_opc_add_vsi);
1750
1751 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1752 cmd->connection_type = vsi_ctx->connection_type;
1753 cmd->vf_id = vsi_ctx->vf_num;
1754 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1755
1756 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1757
1758 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
1759 sizeof(vsi_ctx->info),
1760 cmd_details, true);
1761
1762 if (status)
1763 goto aq_add_vsi_exit;
1764
1765 vsi_ctx->seid = le16_to_cpu(resp->seid);
1766 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1767 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1768 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1769
1770 aq_add_vsi_exit:
1771 return status;
1772 }
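/* Usage sketch (illustrative only; uplink_seid, connection_type and
 * vsi_flags are hypothetical caller-supplied values): fill a context,
 * add the VSI and read back the SEID that firmware assigned:
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.uplink_seid = uplink_seid;
 *	ctx.connection_type = connection_type;
 *	ctx.flags = vsi_flags;
 *	(fill ctx.info with the desired VSI properties)
 *	if (!i40e_aq_add_vsi(hw, &ctx, NULL))
 *		new_vsi_seid = ctx.seid;
 */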
1773
1774 /**
1775 * i40e_aq_set_default_vsi
1776 * @hw: pointer to the hw struct
1777 * @seid: vsi number
1778 * @cmd_details: pointer to command details structure or NULL
1779 **/
1780 int i40e_aq_set_default_vsi(struct i40e_hw *hw,
1781 u16 seid,
1782 struct i40e_asq_cmd_details *cmd_details)
1783 {
1784 struct i40e_aq_desc desc;
1785 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1786 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1787 &desc.params.raw;
1788 int status;
1789
1790 i40e_fill_default_direct_cmd_desc(&desc,
1791 i40e_aqc_opc_set_vsi_promiscuous_modes);
1792
1793 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1794 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1795 cmd->seid = cpu_to_le16(seid);
1796
1797 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1798
1799 return status;
1800 }
1801
1802 /**
1803 * i40e_aq_clear_default_vsi
1804 * @hw: pointer to the hw struct
1805 * @seid: vsi number
1806 * @cmd_details: pointer to command details structure or NULL
1807 **/
1808 int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
1809 u16 seid,
1810 struct i40e_asq_cmd_details *cmd_details)
1811 {
1812 struct i40e_aq_desc desc;
1813 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1814 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1815 &desc.params.raw;
1816 int status;
1817
1818 i40e_fill_default_direct_cmd_desc(&desc,
1819 i40e_aqc_opc_set_vsi_promiscuous_modes);
1820
1821 cmd->promiscuous_flags = cpu_to_le16(0);
1822 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1823 cmd->seid = cpu_to_le16(seid);
1824
1825 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1826
1827 return status;
1828 }
1829
1830 /**
1831 * i40e_aq_set_vsi_unicast_promiscuous
1832 * @hw: pointer to the hw struct
1833 * @seid: vsi number
1834 * @set: set unicast promiscuous enable/disable
1835 * @cmd_details: pointer to command details structure or NULL
1836 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
1837 **/
1838 int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1839 u16 seid, bool set,
1840 struct i40e_asq_cmd_details *cmd_details,
1841 bool rx_only_promisc)
1842 {
1843 struct i40e_aq_desc desc;
1844 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1845 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1846 u16 flags = 0;
1847 int status;
1848
1849 i40e_fill_default_direct_cmd_desc(&desc,
1850 i40e_aqc_opc_set_vsi_promiscuous_modes);
1851
1852 if (set) {
1853 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1854 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1855 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1856 }
1857
1858 cmd->promiscuous_flags = cpu_to_le16(flags);
1859
1860 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1861 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1862 cmd->valid_flags |=
1863 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1864
1865 cmd->seid = cpu_to_le16(seid);
1866 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1867
1868 return status;
1869 }
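/* Usage sketch (illustrative only; vsi_seid is a hypothetical
 * caller-supplied SEID): enable unicast promiscuous on a VSI, limited to
 * received traffic where the firmware API allows it (API >= 1.5, checked
 * internally via i40e_is_aq_api_ver_ge()):
 *
 *	err = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi_seid, true,
 *						  NULL, true);
 */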
1870
1871 /**
1872 * i40e_aq_set_vsi_multicast_promiscuous
1873 * @hw: pointer to the hw struct
1874 * @seid: vsi number
1875 * @set: set multicast promiscuous enable/disable
1876 * @cmd_details: pointer to command details structure or NULL
1877 **/
1878 int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
1879 u16 seid, bool set,
1880 struct i40e_asq_cmd_details *cmd_details)
1881 {
1882 struct i40e_aq_desc desc;
1883 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1884 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1885 u16 flags = 0;
1886 int status;
1887
1888 i40e_fill_default_direct_cmd_desc(&desc,
1889 i40e_aqc_opc_set_vsi_promiscuous_modes);
1890
1891 if (set)
1892 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1893
1894 cmd->promiscuous_flags = cpu_to_le16(flags);
1895
1896 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1897
1898 cmd->seid = cpu_to_le16(seid);
1899 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1900
1901 return status;
1902 }
1903
1904 /**
1905 * i40e_aq_set_vsi_mc_promisc_on_vlan
1906 * @hw: pointer to the hw struct
1907 * @seid: vsi number
1908  * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
1909 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
1910 * @cmd_details: pointer to command details structure or NULL
1911 **/
1912 int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
1913 u16 seid, bool enable,
1914 u16 vid,
1915 struct i40e_asq_cmd_details *cmd_details)
1916 {
1917 struct i40e_aq_desc desc;
1918 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1919 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1920 u16 flags = 0;
1921 int status;
1922
1923 i40e_fill_default_direct_cmd_desc(&desc,
1924 i40e_aqc_opc_set_vsi_promiscuous_modes);
1925
1926 if (enable)
1927 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1928
1929 cmd->promiscuous_flags = cpu_to_le16(flags);
1930 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1931 cmd->seid = cpu_to_le16(seid);
1932 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1933
1934 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
1935 cmd_details, true);
1936
1937 return status;
1938 }
1939
1940 /**
1941 * i40e_aq_set_vsi_uc_promisc_on_vlan
1942 * @hw: pointer to the hw struct
1943 * @seid: vsi number
1944 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
1945 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
1946 * @cmd_details: pointer to command details structure or NULL
1947 **/
1948 int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
1949 u16 seid, bool enable,
1950 u16 vid,
1951 struct i40e_asq_cmd_details *cmd_details)
1952 {
1953 struct i40e_aq_desc desc;
1954 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1955 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1956 u16 flags = 0;
1957 int status;
1958
1959 i40e_fill_default_direct_cmd_desc(&desc,
1960 i40e_aqc_opc_set_vsi_promiscuous_modes);
1961
1962 if (enable) {
1963 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1964 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1965 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1966 }
1967
1968 cmd->promiscuous_flags = cpu_to_le16(flags);
1969 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1970 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1971 cmd->valid_flags |=
1972 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1973 cmd->seid = cpu_to_le16(seid);
1974 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1975
1976 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
1977 cmd_details, true);
1978
1979 return status;
1980 }
1981
1982 /**
1983 * i40e_aq_set_vsi_bc_promisc_on_vlan
1984 * @hw: pointer to the hw struct
1985 * @seid: vsi number
1986 * @enable: set broadcast promiscuous enable/disable for a given VLAN
1987 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
1988 * @cmd_details: pointer to command details structure or NULL
1989 **/
1990 int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
1991 u16 seid, bool enable, u16 vid,
1992 struct i40e_asq_cmd_details *cmd_details)
1993 {
1994 struct i40e_aq_desc desc;
1995 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1996 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1997 u16 flags = 0;
1998 int status;
1999
2000 i40e_fill_default_direct_cmd_desc(&desc,
2001 i40e_aqc_opc_set_vsi_promiscuous_modes);
2002
2003 if (enable)
2004 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2005
2006 cmd->promiscuous_flags = cpu_to_le16(flags);
2007 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2008 cmd->seid = cpu_to_le16(seid);
2009 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2010
2011 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2012
2013 return status;
2014 }
2015
2016 /**
2017 * i40e_aq_set_vsi_broadcast
2018 * @hw: pointer to the hw struct
2019 * @seid: vsi number
2020 * @set_filter: true to set filter, false to clear filter
2021 * @cmd_details: pointer to command details structure or NULL
2022 *
2023 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2024 **/
2025 int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2026 u16 seid, bool set_filter,
2027 struct i40e_asq_cmd_details *cmd_details)
2028 {
2029 struct i40e_aq_desc desc;
2030 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2031 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2032 int status;
2033
2034 i40e_fill_default_direct_cmd_desc(&desc,
2035 i40e_aqc_opc_set_vsi_promiscuous_modes);
2036
2037 if (set_filter)
2038 cmd->promiscuous_flags
2039 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2040 else
2041 cmd->promiscuous_flags
2042 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2043
2044 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2045 cmd->seid = cpu_to_le16(seid);
2046 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2047
2048 return status;
2049 }
2050
2051 /**
2052 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2053 * @hw: pointer to the hw struct
2054 * @seid: vsi number
2055  * @enable: set VLAN promiscuous enable/disable for the VSI
2056 * @cmd_details: pointer to command details structure or NULL
2057 **/
2058 int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2059 u16 seid, bool enable,
2060 struct i40e_asq_cmd_details *cmd_details)
2061 {
2062 struct i40e_aq_desc desc;
2063 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2064 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2065 u16 flags = 0;
2066 int status;
2067
2068 i40e_fill_default_direct_cmd_desc(&desc,
2069 i40e_aqc_opc_set_vsi_promiscuous_modes);
2070 if (enable)
2071 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2072
2073 cmd->promiscuous_flags = cpu_to_le16(flags);
2074 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2075 cmd->seid = cpu_to_le16(seid);
2076
2077 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2078
2079 return status;
2080 }
2081
2082 /**
2083 * i40e_aq_get_vsi_params - get VSI configuration info
2084 * @hw: pointer to the hw struct
2085 * @vsi_ctx: pointer to a vsi context struct
2086 * @cmd_details: pointer to command details structure or NULL
2087 **/
2088 int i40e_aq_get_vsi_params(struct i40e_hw *hw,
2089 struct i40e_vsi_context *vsi_ctx,
2090 struct i40e_asq_cmd_details *cmd_details)
2091 {
2092 struct i40e_aq_desc desc;
2093 struct i40e_aqc_add_get_update_vsi *cmd =
2094 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2095 struct i40e_aqc_add_get_update_vsi_completion *resp =
2096 (struct i40e_aqc_add_get_update_vsi_completion *)
2097 &desc.params.raw;
2098 int status;
2099
2100 i40e_fill_default_direct_cmd_desc(&desc,
2101 i40e_aqc_opc_get_vsi_parameters);
2102
2103 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2104
2105 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2106
2107 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2108 sizeof(vsi_ctx->info), NULL);
2109
2110 if (status)
2111 goto aq_get_vsi_params_exit;
2112
2113 vsi_ctx->seid = le16_to_cpu(resp->seid);
2114 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2115 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2116 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2117
2118 aq_get_vsi_params_exit:
2119 return status;
2120 }
2121
2122 /**
2123 * i40e_aq_update_vsi_params
2124 * @hw: pointer to the hw struct
2125 * @vsi_ctx: pointer to a vsi context struct
2126 * @cmd_details: pointer to command details structure or NULL
2127 *
2128 * Update a VSI context.
2129 **/
2130 int i40e_aq_update_vsi_params(struct i40e_hw *hw,
2131 struct i40e_vsi_context *vsi_ctx,
2132 struct i40e_asq_cmd_details *cmd_details)
2133 {
2134 struct i40e_aq_desc desc;
2135 struct i40e_aqc_add_get_update_vsi *cmd =
2136 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2137 struct i40e_aqc_add_get_update_vsi_completion *resp =
2138 (struct i40e_aqc_add_get_update_vsi_completion *)
2139 &desc.params.raw;
2140 int status;
2141
2142 i40e_fill_default_direct_cmd_desc(&desc,
2143 i40e_aqc_opc_update_vsi_parameters);
2144 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2145
2146 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2147
2148 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
2149 sizeof(vsi_ctx->info),
2150 cmd_details, true);
2151
2152 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2153 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2154
2155 return status;
2156 }
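/* Usage sketch (illustrative only): the typical read-modify-write flow
 * for changing a VSI is to fetch its parameters, edit ctx.info and write
 * it back; vsi_seid is a hypothetical caller-supplied SEID:
 *
 *	struct i40e_vsi_context ctx = {};
 *	int err;
 *
 *	ctx.seid = vsi_seid;
 *	err = i40e_aq_get_vsi_params(hw, &ctx, NULL);
 *	if (!err) {
 *		(modify the relevant ctx.info fields)
 *		err = i40e_aq_update_vsi_params(hw, &ctx, NULL);
 *	}
 */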
2157
2158 /**
2159 * i40e_aq_get_switch_config
2160 * @hw: pointer to the hardware structure
2161 * @buf: pointer to the result buffer
2162  * @buf_size: length of the result buffer
2163 * @start_seid: seid to start for the report, 0 == beginning
2164 * @cmd_details: pointer to command details structure or NULL
2165 *
2166 * Fill the buf with switch configuration returned from AdminQ command
2167 **/
2168 int i40e_aq_get_switch_config(struct i40e_hw *hw,
2169 struct i40e_aqc_get_switch_config_resp *buf,
2170 u16 buf_size, u16 *start_seid,
2171 struct i40e_asq_cmd_details *cmd_details)
2172 {
2173 struct i40e_aq_desc desc;
2174 struct i40e_aqc_switch_seid *scfg =
2175 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2176 int status;
2177
2178 i40e_fill_default_direct_cmd_desc(&desc,
2179 i40e_aqc_opc_get_switch_config);
2180 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2181 if (buf_size > I40E_AQ_LARGE_BUF)
2182 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2183 scfg->seid = cpu_to_le16(*start_seid);
2184
2185 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2186 *start_seid = le16_to_cpu(scfg->seid);
2187
2188 return status;
2189 }
2190
2191 /**
2192 * i40e_aq_set_switch_config
2193 * @hw: pointer to the hardware structure
2194 * @flags: bit flag values to set
2196 * @valid_flags: which bit flags to set
2197 * @mode: cloud filter mode
2198 * @cmd_details: pointer to command details structure or NULL
2199 *
2200 * Set switch configuration bits
2201 **/
2202 int i40e_aq_set_switch_config(struct i40e_hw *hw,
2203 u16 flags,
2204 u16 valid_flags, u8 mode,
2205 struct i40e_asq_cmd_details *cmd_details)
2206 {
2207 struct i40e_aq_desc desc;
2208 struct i40e_aqc_set_switch_config *scfg =
2209 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2210 int status;
2211
2212 i40e_fill_default_direct_cmd_desc(&desc,
2213 i40e_aqc_opc_set_switch_config);
2214 scfg->flags = cpu_to_le16(flags);
2215 scfg->valid_flags = cpu_to_le16(valid_flags);
2216 scfg->mode = mode;
2217 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2218 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2219 scfg->first_tag = cpu_to_le16(hw->first_tag);
2220 scfg->second_tag = cpu_to_le16(hw->second_tag);
2221 }
2222 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2223
2224 return status;
2225 }
2226
2227 /**
2228 * i40e_aq_get_firmware_version
2229 * @hw: pointer to the hw struct
2230 * @fw_major_version: firmware major version
2231 * @fw_minor_version: firmware minor version
2232 * @fw_build: firmware build number
2233  * @api_major_version: admin queue API major version
2234  * @api_minor_version: admin queue API minor version
2235 * @cmd_details: pointer to command details structure or NULL
2236 *
2237 * Get the firmware version from the admin queue commands
2238 **/
2239 int i40e_aq_get_firmware_version(struct i40e_hw *hw,
2240 u16 *fw_major_version, u16 *fw_minor_version,
2241 u32 *fw_build,
2242 u16 *api_major_version, u16 *api_minor_version,
2243 struct i40e_asq_cmd_details *cmd_details)
2244 {
2245 struct i40e_aq_desc desc;
2246 struct i40e_aqc_get_version *resp =
2247 (struct i40e_aqc_get_version *)&desc.params.raw;
2248 int status;
2249
2250 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2251
2252 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2253
2254 if (!status) {
2255 if (fw_major_version)
2256 *fw_major_version = le16_to_cpu(resp->fw_major);
2257 if (fw_minor_version)
2258 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2259 if (fw_build)
2260 *fw_build = le32_to_cpu(resp->fw_build);
2261 if (api_major_version)
2262 *api_major_version = le16_to_cpu(resp->api_major);
2263 if (api_minor_version)
2264 *api_minor_version = le16_to_cpu(resp->api_minor);
2265 }
2266
2267 return status;
2268 }
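/* Usage sketch (illustrative only): any of the output pointers may be
 * NULL when the caller does not need that particular value:
 *
 *	u16 fw_maj, fw_min, api_maj, api_min;
 *	u32 fw_build;
 *
 *	err = i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min, &fw_build,
 *					   &api_maj, &api_min, NULL);
 */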
2269
2270 /**
2271 * i40e_aq_send_driver_version
2272 * @hw: pointer to the hw struct
2273  * @dv: driver version info (major, minor, build, subbuild and driver string)
2274 * @cmd_details: pointer to command details structure or NULL
2275 *
2276 * Send the driver version to the firmware
2277 **/
2278 int i40e_aq_send_driver_version(struct i40e_hw *hw,
2279 struct i40e_driver_version *dv,
2280 struct i40e_asq_cmd_details *cmd_details)
2281 {
2282 struct i40e_aq_desc desc;
2283 struct i40e_aqc_driver_version *cmd =
2284 (struct i40e_aqc_driver_version *)&desc.params.raw;
2285 int status;
2286 u16 len;
2287
2288 if (dv == NULL)
2289 return I40E_ERR_PARAM;
2290
2291 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2292
2293 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2294 cmd->driver_major_ver = dv->major_version;
2295 cmd->driver_minor_ver = dv->minor_version;
2296 cmd->driver_build_ver = dv->build_version;
2297 cmd->driver_subbuild_ver = dv->subbuild_version;
2298
2299 len = 0;
2300 while (len < sizeof(dv->driver_string) &&
2301 (dv->driver_string[len] < 0x80) &&
2302 dv->driver_string[len])
2303 len++;
2304 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2305 len, cmd_details);
2306
2307 return status;
2308 }
2309
2310 /**
2311 * i40e_get_link_status - get status of the HW network link
2312 * @hw: pointer to the hw struct
2313 * @link_up: pointer to bool (true/false = linkup/linkdown)
2314 *
2315  * Sets *link_up to true if the link is up and false if it is down.
2316  * The value written to *link_up is not valid if the returned status is non-zero.
2317 *
2318 * Side effect: LinkStatusEvent reporting becomes enabled
2319 **/
2320 int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2321 {
2322 int status = 0;
2323
2324 if (hw->phy.get_link_info) {
2325 status = i40e_update_link_info(hw);
2326
2327 if (status)
2328 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2329 status);
2330 }
2331
2332 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2333
2334 return status;
2335 }
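/* Usage sketch (illustrative only): poll the link state; when
 * hw->phy.get_link_info is set, this also refreshes the cached link info
 * and, as a side effect, enables LSE reporting:
 *
 *	bool link_up;
 *
 *	if (!i40e_get_link_status(hw, &link_up) && link_up)
 *		(link is up, e.g. start the queues)
 */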
2336
2337 /**
2338 * i40e_update_link_info - update status of the HW network link
2339 * @hw: pointer to the hw struct
2340 **/
2341 noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
2342 {
2343 struct i40e_aq_get_phy_abilities_resp abilities;
2344 int status = 0;
2345
2346 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2347 if (status)
2348 return status;
2349
2350 /* extra checking needed to ensure link info to user is timely */
2351 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2352 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2353 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2354 status = i40e_aq_get_phy_capabilities(hw, false, false,
2355 &abilities, NULL);
2356 if (status)
2357 return status;
2358
2359 if (abilities.fec_cfg_curr_mod_ext_info &
2360 I40E_AQ_ENABLE_FEC_AUTO)
2361 hw->phy.link_info.req_fec_info =
2362 (I40E_AQ_REQUEST_FEC_KR |
2363 I40E_AQ_REQUEST_FEC_RS);
2364 else
2365 hw->phy.link_info.req_fec_info =
2366 abilities.fec_cfg_curr_mod_ext_info &
2367 (I40E_AQ_REQUEST_FEC_KR |
2368 I40E_AQ_REQUEST_FEC_RS);
2369
2370 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2371 sizeof(hw->phy.link_info.module_type));
2372 }
2373
2374 return status;
2375 }
2376
2377 /**
2378 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2379 * @hw: pointer to the hw struct
2380 * @uplink_seid: the MAC or other gizmo SEID
2381 * @downlink_seid: the VSI SEID
2382 * @enabled_tc: bitmap of TCs to be enabled
2383 * @default_port: true for default port VSI, false for control port
2384 * @veb_seid: pointer to where to put the resulting VEB SEID
2385 * @enable_stats: true to turn on VEB stats
2386 * @cmd_details: pointer to command details structure or NULL
2387 *
2388 * This asks the FW to add a VEB between the uplink and downlink
2389 * elements. If the uplink SEID is 0, this will be a floating VEB.
2390 **/
2391 int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2392 u16 downlink_seid, u8 enabled_tc,
2393 bool default_port, u16 *veb_seid,
2394 bool enable_stats,
2395 struct i40e_asq_cmd_details *cmd_details)
2396 {
2397 struct i40e_aq_desc desc;
2398 struct i40e_aqc_add_veb *cmd =
2399 (struct i40e_aqc_add_veb *)&desc.params.raw;
2400 struct i40e_aqc_add_veb_completion *resp =
2401 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2402 u16 veb_flags = 0;
2403 int status;
2404
2405 /* SEIDs need to either both be set or both be 0 for floating VEB */
2406 if (!!uplink_seid != !!downlink_seid)
2407 return I40E_ERR_PARAM;
2408
2409 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2410
2411 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2412 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2413 cmd->enable_tcs = enabled_tc;
2414 if (!uplink_seid)
2415 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2416 if (default_port)
2417 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2418 else
2419 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2420
2421 /* reverse logic here: set the bitflag to disable the stats */
2422 if (!enable_stats)
2423 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2424
2425 cmd->veb_flags = cpu_to_le16(veb_flags);
2426
2427 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2428
2429 if (!status && veb_seid)
2430 *veb_seid = le16_to_cpu(resp->veb_seid);
2431
2432 return status;
2433 }
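/* Usage sketch (illustrative only; mac_seid, vsi_seid and enabled_tc are
 * hypothetical caller-supplied values): create a regular, non-floating
 * VEB with statistics enabled; passing 0 for both SEIDs would instead
 * request a floating VEB:
 *
 *	u16 veb_seid;
 *
 *	err = i40e_aq_add_veb(hw, mac_seid, vsi_seid, enabled_tc,
 *			      false, &veb_seid, true, NULL);
 */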
2434
2435 /**
2436 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2437 * @hw: pointer to the hw struct
2438 * @veb_seid: the SEID of the VEB to query
2439 * @switch_id: the uplink switch id
2440 * @floating: set to true if the VEB is floating
2441 * @statistic_index: index of the stats counter block for this VEB
2442  * @vebs_used: number of VEBs used by the function
2443  * @vebs_free: total VEBs not reserved by any function
2444 * @cmd_details: pointer to command details structure or NULL
2445 *
2446 * This retrieves the parameters for a particular VEB, specified by
2447  * veb_seid, and returns them to the caller.
2448 **/
2449 int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2450 u16 veb_seid, u16 *switch_id,
2451 bool *floating, u16 *statistic_index,
2452 u16 *vebs_used, u16 *vebs_free,
2453 struct i40e_asq_cmd_details *cmd_details)
2454 {
2455 struct i40e_aq_desc desc;
2456 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2457 (struct i40e_aqc_get_veb_parameters_completion *)
2458 &desc.params.raw;
2459 int status;
2460
2461 if (veb_seid == 0)
2462 return I40E_ERR_PARAM;
2463
2464 i40e_fill_default_direct_cmd_desc(&desc,
2465 i40e_aqc_opc_get_veb_parameters);
2466 cmd_resp->seid = cpu_to_le16(veb_seid);
2467
2468 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2469 if (status)
2470 goto get_veb_exit;
2471
2472 if (switch_id)
2473 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2474 if (statistic_index)
2475 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2476 if (vebs_used)
2477 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2478 if (vebs_free)
2479 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2480 if (floating) {
2481 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2482
2483 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2484 *floating = true;
2485 else
2486 *floating = false;
2487 }
2488
2489 get_veb_exit:
2490 return status;
2491 }
2492
2493 /**
2494 * i40e_prepare_add_macvlan
2495 * @mv_list: list of macvlans to be added
2496 * @desc: pointer to AQ descriptor structure
2497 * @count: length of the list
2498 * @seid: VSI for the mac address
2499 *
2500 * Internal helper function that prepares the add macvlan request
2501 * and returns the buffer size.
2502 **/
2503 static u16
2504 i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
2505 struct i40e_aq_desc *desc, u16 count, u16 seid)
2506 {
2507 struct i40e_aqc_macvlan *cmd =
2508 (struct i40e_aqc_macvlan *)&desc->params.raw;
2509 u16 buf_size;
2510 int i;
2511
2512 buf_size = count * sizeof(*mv_list);
2513
2514 /* prep the rest of the request */
2515 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
2516 cmd->num_addresses = cpu_to_le16(count);
2517 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2518 cmd->seid[1] = 0;
2519 cmd->seid[2] = 0;
2520
2521 for (i = 0; i < count; i++)
2522 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2523 mv_list[i].flags |=
2524 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2525
2526 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2527 if (buf_size > I40E_AQ_LARGE_BUF)
2528 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2529
2530 return buf_size;
2531 }
2532
2533 /**
2534 * i40e_aq_add_macvlan
2535 * @hw: pointer to the hw struct
2536 * @seid: VSI for the mac address
2537 * @mv_list: list of macvlans to be added
2538 * @count: length of the list
2539 * @cmd_details: pointer to command details structure or NULL
2540 *
2541 * Add MAC/VLAN addresses to the HW filtering
2542 **/
2543 int
2544 i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2545 struct i40e_aqc_add_macvlan_element_data *mv_list,
2546 u16 count, struct i40e_asq_cmd_details *cmd_details)
2547 {
2548 struct i40e_aq_desc desc;
2549 u16 buf_size;
2550
2551 if (count == 0 || !mv_list || !hw)
2552 return I40E_ERR_PARAM;
2553
2554 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2555
2556 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2557 cmd_details, true);
2558 }
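/* Usage sketch (illustrative only; vsi_seid and mac are hypothetical
 * caller-supplied values, and the perfect-match flag is just one possible
 * filter type): add a single MAC filter to a VSI:
 *
 *	struct i40e_aqc_add_macvlan_element_data elem = {};
 *
 *	ether_addr_copy(elem.mac_addr, mac);
 *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
 *	err = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
 */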
2559
2560 /**
2561 * i40e_aq_add_macvlan_v2
2562 * @hw: pointer to the hw struct
2563 * @seid: VSI for the mac address
2564 * @mv_list: list of macvlans to be added
2565 * @count: length of the list
2566 * @cmd_details: pointer to command details structure or NULL
2567 * @aq_status: pointer to Admin Queue status return value
2568 *
2569 * Add MAC/VLAN addresses to the HW filtering.
2570 * The _v2 version returns the last Admin Queue status in aq_status
2571 * to avoid race conditions in access to hw->aq.asq_last_status.
2572 * It also calls _v2 versions of asq_send_command functions to
2573 * get the aq_status on the stack.
2574 **/
2575 int
2576 i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
2577 struct i40e_aqc_add_macvlan_element_data *mv_list,
2578 u16 count, struct i40e_asq_cmd_details *cmd_details,
2579 enum i40e_admin_queue_err *aq_status)
2580 {
2581 struct i40e_aq_desc desc;
2582 u16 buf_size;
2583
2584 if (count == 0 || !mv_list || !hw)
2585 return I40E_ERR_PARAM;
2586
2587 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2588
2589 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2590 cmd_details, true, aq_status);
2591 }
2592
2593 /**
2594 * i40e_aq_remove_macvlan
2595 * @hw: pointer to the hw struct
2596 * @seid: VSI for the mac address
2597 * @mv_list: list of macvlans to be removed
2598 * @count: length of the list
2599 * @cmd_details: pointer to command details structure or NULL
2600 *
2601 * Remove MAC/VLAN addresses from the HW filtering
2602 **/
2603 int
2604 i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2605 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2606 u16 count, struct i40e_asq_cmd_details *cmd_details)
2607 {
2608 struct i40e_aq_desc desc;
2609 struct i40e_aqc_macvlan *cmd =
2610 (struct i40e_aqc_macvlan *)&desc.params.raw;
2611 u16 buf_size;
2612 int status;
2613
2614 if (count == 0 || !mv_list || !hw)
2615 return I40E_ERR_PARAM;
2616
2617 buf_size = count * sizeof(*mv_list);
2618
2619 /* prep the rest of the request */
2620 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2621 cmd->num_addresses = cpu_to_le16(count);
2622 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2623 cmd->seid[1] = 0;
2624 cmd->seid[2] = 0;
2625
2626 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2627 if (buf_size > I40E_AQ_LARGE_BUF)
2628 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2629
2630 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2631 cmd_details, true);
2632
2633 return status;
2634 }
2635
2636 /**
2637 * i40e_aq_remove_macvlan_v2
2638 * @hw: pointer to the hw struct
2639 * @seid: VSI for the mac address
2640 * @mv_list: list of macvlans to be removed
2641 * @count: length of the list
2642 * @cmd_details: pointer to command details structure or NULL
2643 * @aq_status: pointer to Admin Queue status return value
2644 *
2645 * Remove MAC/VLAN addresses from the HW filtering.
2646 * The _v2 version returns the last Admin Queue status in aq_status
2647 * to avoid race conditions in access to hw->aq.asq_last_status.
2648 * It also calls _v2 versions of asq_send_command functions to
2649 * get the aq_status on the stack.
2650 **/
2651 int
2652 i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
2653 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2654 u16 count, struct i40e_asq_cmd_details *cmd_details,
2655 enum i40e_admin_queue_err *aq_status)
2656 {
2657 struct i40e_aqc_macvlan *cmd;
2658 struct i40e_aq_desc desc;
2659 u16 buf_size;
2660
2661 if (count == 0 || !mv_list || !hw)
2662 return I40E_ERR_PARAM;
2663
2664 buf_size = count * sizeof(*mv_list);
2665
2666 /* prep the rest of the request */
2667 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2668 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
2669 cmd->num_addresses = cpu_to_le16(count);
2670 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2671 cmd->seid[1] = 0;
2672 cmd->seid[2] = 0;
2673
2674 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2675 if (buf_size > I40E_AQ_LARGE_BUF)
2676 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2677
2678 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2679 cmd_details, true, aq_status);
2680 }
2681
2682 /**
2683 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2684 * @hw: pointer to the hw struct
2685 * @opcode: AQ opcode for add or delete mirror rule
2686 * @sw_seid: Switch SEID (to which rule refers)
2687 * @rule_type: Rule Type (ingress/egress/VLAN)
2688 * @id: Destination VSI SEID or Rule ID
2689 * @count: length of the list
2690 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2691 * @cmd_details: pointer to command details structure or NULL
2692 * @rule_id: Rule ID returned from FW
2693 * @rules_used: Number of rules used in internal switch
2694 * @rules_free: Number of rules free in internal switch
2695 *
2696 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2697 * VEBs/VEPA elements only
2698 **/
2699 static int i40e_mirrorrule_op(struct i40e_hw *hw,
2700 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2701 u16 count, __le16 *mr_list,
2702 struct i40e_asq_cmd_details *cmd_details,
2703 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2704 {
2705 struct i40e_aq_desc desc;
2706 struct i40e_aqc_add_delete_mirror_rule *cmd =
2707 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2708 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2709 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2710 u16 buf_size;
2711 int status;
2712
2713 buf_size = count * sizeof(*mr_list);
2714
2715 /* prep the rest of the request */
2716 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2717 cmd->seid = cpu_to_le16(sw_seid);
2718 cmd->rule_type = cpu_to_le16(rule_type &
2719 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2720 cmd->num_entries = cpu_to_le16(count);
2721 /* Dest VSI for add, rule_id for delete */
2722 cmd->destination = cpu_to_le16(id);
2723 if (mr_list) {
2724 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2725 I40E_AQ_FLAG_RD));
2726 if (buf_size > I40E_AQ_LARGE_BUF)
2727 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2728 }
2729
2730 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2731 cmd_details);
2732 if (!status ||
2733 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2734 if (rule_id)
2735 *rule_id = le16_to_cpu(resp->rule_id);
2736 if (rules_used)
2737 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2738 if (rules_free)
2739 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2740 }
2741 return status;
2742 }
2743
2744 /**
2745 * i40e_aq_add_mirrorrule - add a mirror rule
2746 * @hw: pointer to the hw struct
2747 * @sw_seid: Switch SEID (to which rule refers)
2748 * @rule_type: Rule Type (ingress/egress/VLAN)
2749 * @dest_vsi: SEID of VSI to which packets will be mirrored
2750 * @count: length of the list
2751 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2752 * @cmd_details: pointer to command details structure or NULL
2753 * @rule_id: Rule ID returned from FW
2754 * @rules_used: Number of rules used in internal switch
2755 * @rules_free: Number of rules free in internal switch
2756 *
2757 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2758 **/
2759 int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2760 u16 rule_type, u16 dest_vsi, u16 count,
2761 __le16 *mr_list,
2762 struct i40e_asq_cmd_details *cmd_details,
2763 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2764 {
2765 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2766 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2767 if (count == 0 || !mr_list)
2768 return I40E_ERR_PARAM;
2769 }
2770
2771 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2772 rule_type, dest_vsi, count, mr_list,
2773 cmd_details, rule_id, rules_used, rules_free);
2774 }
2775
2776 /**
2777 * i40e_aq_delete_mirrorrule - delete a mirror rule
2778 * @hw: pointer to the hw struct
2779 * @sw_seid: Switch SEID (to which rule refers)
2780 * @rule_type: Rule Type (ingress/egress/VLAN)
2781 * @count: length of the list
2782 * @rule_id: Rule ID that is returned in the receive desc as part of
2783 * add_mirrorrule.
2784 * @mr_list: list of mirrored VLAN IDs to be removed
2785 * @cmd_details: pointer to command details structure or NULL
2786 * @rules_used: Number of rules used in internal switch
2787 * @rules_free: Number of rules free in internal switch
2788 *
2789 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2790 **/
2791 int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2792 u16 rule_type, u16 rule_id, u16 count,
2793 __le16 *mr_list,
2794 struct i40e_asq_cmd_details *cmd_details,
2795 u16 *rules_used, u16 *rules_free)
2796 {
2797 	/* The rule_id must be valid except for rule_type INGRESS VLAN mirroring */
2798 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2799 /* count and mr_list shall be valid for rule_type INGRESS VLAN
2800 		 * mirroring. For other rule_type values, count and mr_list
2801 		 * do not matter.
2802 */
2803 if (count == 0 || !mr_list)
2804 return I40E_ERR_PARAM;
2805 }
2806
2807 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2808 rule_type, rule_id, count, mr_list,
2809 cmd_details, NULL, rules_used, rules_free);
2810 }
2811
2812 /**
2813 * i40e_aq_send_msg_to_vf
2814 * @hw: pointer to the hardware structure
2815 * @vfid: VF id to send msg
2816 * @v_opcode: opcodes for VF-PF communication
2817 * @v_retval: return error code
2818 * @msg: pointer to the msg buffer
2819 * @msglen: msg length
2820 * @cmd_details: pointer to command details
2821 *
2822  * Send a message to the specified VF using the admin queue.
2823 **/
2824 int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2825 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2826 struct i40e_asq_cmd_details *cmd_details)
2827 {
2828 struct i40e_aq_desc desc;
2829 struct i40e_aqc_pf_vf_message *cmd =
2830 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2831 int status;
2832
2833 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2834 cmd->id = cpu_to_le32(vfid);
2835 desc.cookie_high = cpu_to_le32(v_opcode);
2836 desc.cookie_low = cpu_to_le32(v_retval);
2837 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2838 if (msglen) {
2839 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2840 I40E_AQ_FLAG_RD));
2841 if (msglen > I40E_AQ_LARGE_BUF)
2842 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2843 desc.datalen = cpu_to_le16(msglen);
2844 }
2845 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2846
2847 return status;
2848 }
2849
2850 /**
2851 * i40e_aq_debug_read_register
2852 * @hw: pointer to the hw struct
2853 * @reg_addr: register address
2854 * @reg_val: register value
2855 * @cmd_details: pointer to command details structure or NULL
2856 *
2857 * Read the register using the admin queue commands
2858 **/
2859 int i40e_aq_debug_read_register(struct i40e_hw *hw,
2860 u32 reg_addr, u64 *reg_val,
2861 struct i40e_asq_cmd_details *cmd_details)
2862 {
2863 struct i40e_aq_desc desc;
2864 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2865 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2866 int status;
2867
2868 if (reg_val == NULL)
2869 return I40E_ERR_PARAM;
2870
2871 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2872
2873 cmd_resp->address = cpu_to_le32(reg_addr);
2874
2875 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2876
2877 if (!status) {
2878 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2879 (u64)le32_to_cpu(cmd_resp->value_low);
2880 }
2881
2882 return status;
2883 }
2884
2885 /**
2886 * i40e_aq_debug_write_register
2887 * @hw: pointer to the hw struct
2888 * @reg_addr: register address
2889 * @reg_val: register value
2890 * @cmd_details: pointer to command details structure or NULL
2891 *
2892 * Write to a register using the admin queue commands
2893 **/
2894 int i40e_aq_debug_write_register(struct i40e_hw *hw,
2895 u32 reg_addr, u64 reg_val,
2896 struct i40e_asq_cmd_details *cmd_details)
2897 {
2898 struct i40e_aq_desc desc;
2899 struct i40e_aqc_debug_reg_read_write *cmd =
2900 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2901 int status;
2902
2903 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2904
2905 cmd->address = cpu_to_le32(reg_addr);
2906 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2907 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2908
2909 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2910
2911 return status;
2912 }
2913
2914 /**
2915 * i40e_aq_request_resource
2916 * @hw: pointer to the hw struct
2917 * @resource: resource id
2918 * @access: access type
2919 * @sdp_number: resource number
2920 * @timeout: the maximum time in ms that the driver may hold the resource
2921 * @cmd_details: pointer to command details structure or NULL
2922 *
2923  * Requests a common resource using the admin queue commands.
2924 **/
2925 int i40e_aq_request_resource(struct i40e_hw *hw,
2926 enum i40e_aq_resources_ids resource,
2927 enum i40e_aq_resource_access_type access,
2928 u8 sdp_number, u64 *timeout,
2929 struct i40e_asq_cmd_details *cmd_details)
2930 {
2931 struct i40e_aq_desc desc;
2932 struct i40e_aqc_request_resource *cmd_resp =
2933 (struct i40e_aqc_request_resource *)&desc.params.raw;
2934 int status;
2935
2936 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
2937
2938 cmd_resp->resource_id = cpu_to_le16(resource);
2939 cmd_resp->access_type = cpu_to_le16(access);
2940 cmd_resp->resource_number = cpu_to_le32(sdp_number);
2941
2942 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2943 /* The completion specifies the maximum time in ms that the driver
2944 * may hold the resource in the Timeout field.
2945 * If the resource is held by someone else, the command completes with
2946 * busy return value and the timeout field indicates the maximum time
2947 * the current owner of the resource has to free it.
2948 */
2949 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
2950 *timeout = le32_to_cpu(cmd_resp->timeout);
2951
2952 return status;
2953 }
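/* Usage sketch (illustrative only): request/release bracket accesses to a
 * shared resource such as the NVM; on contention the returned timeout is
 * how long the current owner may still hold the resource:
 *
 *	u64 timeout = 0;
 *
 *	err = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *				       I40E_RESOURCE_READ, 0, &timeout, NULL);
 *	if (!err) {
 *		(read the protected resource)
 *		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
 *	}
 */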
2954
2955 /**
2956 * i40e_aq_release_resource
2957 * @hw: pointer to the hw struct
2958 * @resource: resource id
2959 * @sdp_number: resource number
2960 * @cmd_details: pointer to command details structure or NULL
2961 *
2962  * Releases a common resource using the admin queue commands.
2963 **/
2964 int i40e_aq_release_resource(struct i40e_hw *hw,
2965 enum i40e_aq_resources_ids resource,
2966 u8 sdp_number,
2967 struct i40e_asq_cmd_details *cmd_details)
2968 {
2969 struct i40e_aq_desc desc;
2970 struct i40e_aqc_request_resource *cmd =
2971 (struct i40e_aqc_request_resource *)&desc.params.raw;
2972 int status;
2973
2974 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
2975
2976 cmd->resource_id = cpu_to_le16(resource);
2977 cmd->resource_number = cpu_to_le32(sdp_number);
2978
2979 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2980
2981 return status;
2982 }
2983
2984 /**
2985 * i40e_aq_read_nvm
2986 * @hw: pointer to the hw struct
2987 * @module_pointer: module pointer location in words from the NVM beginning
2988 * @offset: byte offset from the module beginning
2989 * @length: length of the section to be read (in bytes from the offset)
2990 * @data: command buffer (size [bytes] = length)
2991 * @last_command: tells if this is the last command in a series
2992 * @cmd_details: pointer to command details structure or NULL
2993 *
2994 * Read the NVM using the admin queue commands
2995 **/
2996 int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
2997 u32 offset, u16 length, void *data,
2998 bool last_command,
2999 struct i40e_asq_cmd_details *cmd_details)
3000 {
3001 struct i40e_aq_desc desc;
3002 struct i40e_aqc_nvm_update *cmd =
3003 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3004 int status;
3005
3006 	/* The highest byte of the offset must be zero. */
3007 if (offset & 0xFF000000) {
3008 status = I40E_ERR_PARAM;
3009 goto i40e_aq_read_nvm_exit;
3010 }
3011
3012 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3013
3014 /* If this is the last command in a series, set the proper flag. */
3015 if (last_command)
3016 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3017 cmd->module_pointer = module_pointer;
3018 cmd->offset = cpu_to_le32(offset);
3019 cmd->length = cpu_to_le16(length);
3020
3021 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3022 if (length > I40E_AQ_LARGE_BUF)
3023 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3024
3025 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3026
3027 i40e_aq_read_nvm_exit:
3028 return status;
3029 }
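/* Usage sketch (illustrative only), mirroring the OCP config read done in
 * i40e_parse_discover_capabilities() below: take the NVM semaphore, read a
 * single word and release the semaphore again:
 *
 *	u16 word;
 *
 *	if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
 *		err = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
 *				       2 * I40E_SR_OCP_CFG_WORD0,
 *				       sizeof(word), &word, true, NULL);
 *		i40e_release_nvm(hw);
 *	}
 */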
3030
3031 /**
3032 * i40e_aq_erase_nvm
3033 * @hw: pointer to the hw struct
3034 * @module_pointer: module pointer location in words from the NVM beginning
3035 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3036 * @length: length of the section to be erased (expressed in 4 KB)
3037 * @last_command: tells if this is the last command in a series
3038 * @cmd_details: pointer to command details structure or NULL
3039 *
3040 * Erase the NVM sector using the admin queue commands
3041 **/
3042 int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3043 u32 offset, u16 length, bool last_command,
3044 struct i40e_asq_cmd_details *cmd_details)
3045 {
3046 struct i40e_aq_desc desc;
3047 struct i40e_aqc_nvm_update *cmd =
3048 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3049 int status;
3050
3051 	/* The highest byte of the offset must be zero. */
3052 if (offset & 0xFF000000) {
3053 status = I40E_ERR_PARAM;
3054 goto i40e_aq_erase_nvm_exit;
3055 }
3056
3057 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3058
3059 /* If this is the last command in a series, set the proper flag. */
3060 if (last_command)
3061 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3062 cmd->module_pointer = module_pointer;
3063 cmd->offset = cpu_to_le32(offset);
3064 cmd->length = cpu_to_le16(length);
3065
3066 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3067
3068 i40e_aq_erase_nvm_exit:
3069 return status;
3070 }
3071
3072 /**
3073 * i40e_parse_discover_capabilities
3074 * @hw: pointer to the hw struct
3075 * @buff: pointer to a buffer containing device/function capability records
3076 * @cap_count: number of capability records in the list
3077 * @list_type_opc: type of capabilities list to parse
3078 *
3079 * Parse the device/function capabilities list.
3080 **/
3081 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3082 u32 cap_count,
3083 enum i40e_admin_queue_opc list_type_opc)
3084 {
3085 struct i40e_aqc_list_capabilities_element_resp *cap;
3086 u32 valid_functions, num_functions;
3087 u32 number, logical_id, phys_id;
3088 struct i40e_hw_capabilities *p;
3089 u16 id, ocp_cfg_word0;
3090 u8 major_rev;
3091 int status;
3092 u32 i = 0;
3093
3094 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3095
3096 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3097 p = &hw->dev_caps;
3098 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3099 p = &hw->func_caps;
3100 else
3101 return;
3102
3103 for (i = 0; i < cap_count; i++, cap++) {
3104 id = le16_to_cpu(cap->id);
3105 number = le32_to_cpu(cap->number);
3106 logical_id = le32_to_cpu(cap->logical_id);
3107 phys_id = le32_to_cpu(cap->phys_id);
3108 major_rev = cap->major_rev;
3109
3110 switch (id) {
3111 case I40E_AQ_CAP_ID_SWITCH_MODE:
3112 p->switch_mode = number;
3113 break;
3114 case I40E_AQ_CAP_ID_MNG_MODE:
3115 p->management_mode = number;
3116 if (major_rev > 1) {
3117 p->mng_protocols_over_mctp = logical_id;
3118 i40e_debug(hw, I40E_DEBUG_INIT,
3119 "HW Capability: Protocols over MCTP = %d\n",
3120 p->mng_protocols_over_mctp);
3121 } else {
3122 p->mng_protocols_over_mctp = 0;
3123 }
3124 break;
3125 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3126 p->npar_enable = number;
3127 break;
3128 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3129 p->os2bmc = number;
3130 break;
3131 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3132 p->valid_functions = number;
3133 break;
3134 case I40E_AQ_CAP_ID_SRIOV:
3135 if (number == 1)
3136 p->sr_iov_1_1 = true;
3137 break;
3138 case I40E_AQ_CAP_ID_VF:
3139 p->num_vfs = number;
3140 p->vf_base_id = logical_id;
3141 break;
3142 case I40E_AQ_CAP_ID_VMDQ:
3143 if (number == 1)
3144 p->vmdq = true;
3145 break;
3146 case I40E_AQ_CAP_ID_8021QBG:
3147 if (number == 1)
3148 p->evb_802_1_qbg = true;
3149 break;
3150 case I40E_AQ_CAP_ID_8021QBR:
3151 if (number == 1)
3152 p->evb_802_1_qbh = true;
3153 break;
3154 case I40E_AQ_CAP_ID_VSI:
3155 p->num_vsis = number;
3156 break;
3157 case I40E_AQ_CAP_ID_DCB:
3158 if (number == 1) {
3159 p->dcb = true;
3160 p->enabled_tcmap = logical_id;
3161 p->maxtc = phys_id;
3162 }
3163 break;
3164 case I40E_AQ_CAP_ID_FCOE:
3165 if (number == 1)
3166 p->fcoe = true;
3167 break;
3168 case I40E_AQ_CAP_ID_ISCSI:
3169 if (number == 1)
3170 p->iscsi = true;
3171 break;
3172 case I40E_AQ_CAP_ID_RSS:
3173 p->rss = true;
3174 p->rss_table_size = number;
3175 p->rss_table_entry_width = logical_id;
3176 break;
3177 case I40E_AQ_CAP_ID_RXQ:
3178 p->num_rx_qp = number;
3179 p->base_queue = phys_id;
3180 break;
3181 case I40E_AQ_CAP_ID_TXQ:
3182 p->num_tx_qp = number;
3183 p->base_queue = phys_id;
3184 break;
3185 case I40E_AQ_CAP_ID_MSIX:
3186 p->num_msix_vectors = number;
3187 i40e_debug(hw, I40E_DEBUG_INIT,
3188 "HW Capability: MSIX vector count = %d\n",
3189 p->num_msix_vectors);
3190 break;
3191 case I40E_AQ_CAP_ID_VF_MSIX:
3192 p->num_msix_vectors_vf = number;
3193 break;
3194 case I40E_AQ_CAP_ID_FLEX10:
3195 if (major_rev == 1) {
3196 if (number == 1) {
3197 p->flex10_enable = true;
3198 p->flex10_capable = true;
3199 }
3200 } else {
3201 /* Capability revision >= 2 */
3202 if (number & 1)
3203 p->flex10_enable = true;
3204 if (number & 2)
3205 p->flex10_capable = true;
3206 }
3207 p->flex10_mode = logical_id;
3208 p->flex10_status = phys_id;
3209 break;
3210 case I40E_AQ_CAP_ID_CEM:
3211 if (number == 1)
3212 p->mgmt_cem = true;
3213 break;
3214 case I40E_AQ_CAP_ID_IWARP:
3215 if (number == 1)
3216 p->iwarp = true;
3217 break;
3218 case I40E_AQ_CAP_ID_LED:
3219 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3220 p->led[phys_id] = true;
3221 break;
3222 case I40E_AQ_CAP_ID_SDP:
3223 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3224 p->sdp[phys_id] = true;
3225 break;
3226 case I40E_AQ_CAP_ID_MDIO:
3227 if (number == 1) {
3228 p->mdio_port_num = phys_id;
3229 p->mdio_port_mode = logical_id;
3230 }
3231 break;
3232 case I40E_AQ_CAP_ID_1588:
3233 if (number == 1)
3234 p->ieee_1588 = true;
3235 break;
3236 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3237 p->fd = true;
3238 p->fd_filters_guaranteed = number;
3239 p->fd_filters_best_effort = logical_id;
3240 break;
3241 case I40E_AQ_CAP_ID_WSR_PROT:
3242 p->wr_csr_prot = (u64)number;
3243 p->wr_csr_prot |= (u64)logical_id << 32;
3244 break;
3245 case I40E_AQ_CAP_ID_NVM_MGMT:
3246 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3247 p->sec_rev_disabled = true;
3248 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3249 p->update_disabled = true;
3250 break;
3251 default:
3252 break;
3253 }
3254 }
3255
3256 if (p->fcoe)
3257 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3258
3259 /* Software override ensuring FCoE is disabled in NPAR or MFP
3260 * (Flex10) mode, because it is not supported in these modes.
3261 */
3262 if (p->npar_enable || p->flex10_enable)
3263 p->fcoe = false;
3264
3265 /* count the enabled ports (aka the "not disabled" ports) */
3266 hw->num_ports = 0;
3267 for (i = 0; i < 4; i++) {
3268 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3269 u64 port_cfg = 0;
3270
3271 /* use AQ read to get the physical register offset instead
3272 * of the port relative offset
3273 */
3274 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3275 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3276 hw->num_ports++;
3277 }
3278
3279 /* OCP cards case: if a mezzanine card is removed, the Ethernet port is
3280 * left in a disabled state in the PRTGEN_CNF register. An additional NVM
3281 * read is needed to check whether we are dealing with an OCP card.
3282 * Those cards have at least 4 PFs, so using PRTGEN_CNF for counting
3283 * physical ports results in a wrong partition id calculation and thus
3284 * breaks WoL support.
3285 */
3286 if (hw->mac.type == I40E_MAC_X722) {
3287 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3288 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3289 2 * I40E_SR_OCP_CFG_WORD0,
3290 sizeof(ocp_cfg_word0),
3291 &ocp_cfg_word0, true, NULL);
3292 if (!status &&
3293 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3294 hw->num_ports = 4;
3295 i40e_release_nvm(hw);
3296 }
3297 }
3298
3299 valid_functions = p->valid_functions;
3300 num_functions = 0;
3301 while (valid_functions) {
3302 if (valid_functions & 1)
3303 num_functions++;
3304 valid_functions >>= 1;
3305 }
3306
3307 /* partition id is 1-based, and functions are evenly spread
3308 * across the ports as partitions
3309 */
3310 if (hw->num_ports != 0) {
3311 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3312 hw->num_partitions = num_functions / hw->num_ports;
3313 }
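	/* Worked example (illustrative only): with valid_functions = 0xff
	 * (8 functions) spread over 4 enabled ports, num_partitions is
	 * 8 / 4 = 2, and a PF with pf_id 5 lands in partition
	 * (5 / 4) + 1 = 2.
	 */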
3314
3315 /* additional HW specific goodies that might
3316 * someday be HW version specific
3317 */
3318 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3319 }
3320
3321 /**
3322 * i40e_aq_discover_capabilities
3323 * @hw: pointer to the hw struct
3324 * @buff: a virtual buffer to hold the capabilities
3325 * @buff_size: Size of the virtual buffer
3326 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3327 * @list_type_opc: capabilities type to discover - pass in the command opcode
3328 * @cmd_details: pointer to command details structure or NULL
3329 *
3330 * Get the device capabilities descriptions from the firmware
3331 **/
3332 int i40e_aq_discover_capabilities(struct i40e_hw *hw,
3333 void *buff, u16 buff_size, u16 *data_size,
3334 enum i40e_admin_queue_opc list_type_opc,
3335 struct i40e_asq_cmd_details *cmd_details)
3336 {
3337 struct i40e_aqc_list_capabilites *cmd;
3338 struct i40e_aq_desc desc;
3339 int status = 0;
3340
3341 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3342
3343 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3344 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3345 status = I40E_ERR_PARAM;
3346 goto exit;
3347 }
3348
3349 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3350
3351 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3352 if (buff_size > I40E_AQ_LARGE_BUF)
3353 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3354
3355 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3356 *data_size = le16_to_cpu(desc.datalen);
3357
3358 if (status)
3359 goto exit;
3360
3361 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3362 list_type_opc);
3363
3364 exit:
3365 return status;
3366 }
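
/* Illustrative sketch, not part of the driver: callers typically invoke
 * i40e_aq_discover_capabilities() in a retry loop, growing the buffer when
 * firmware answers ENOMEM and reports the required size back in *data_size.
 * The parsed capabilities land in hw->dev_caps.  The helper name, the
 * initial 40-element sizing and the kzalloc()/kfree() buffer handling
 * (linux/slab.h) are assumptions made for this example only.
 */
static int __maybe_unused example_discover_dev_caps(struct i40e_hw *hw)
{
	u16 data_size = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	void *buff;
	int status;

	do {
		buff = kzalloc(data_size, GFP_KERNEL);
		if (!buff)
			return I40E_ERR_NO_MEMORY;

		status = i40e_aq_discover_capabilities(hw, buff, data_size,
						       &data_size,
						       i40e_aqc_opc_list_dev_capabilities,
						       NULL);
		kfree(buff);
		/* on ENOMEM, data_size now holds the size firmware needs */
	} while (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM);

	return status;
}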
3367
3368 /**
3369 * i40e_aq_update_nvm
3370 * @hw: pointer to the hw struct
3371 * @module_pointer: module pointer location in words from the NVM beginning
3372 * @offset: byte offset from the module beginning
3373 * @length: length of the section to be written (in bytes from the offset)
3374 * @data: command buffer (size [bytes] = length)
3375 * @last_command: tells if this is the last command in a series
3376 * @preservation_flags: Preservation mode flags
3377 * @cmd_details: pointer to command details structure or NULL
3378 *
3379 * Update the NVM using the admin queue commands
3380 **/
3381 int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3382 u32 offset, u16 length, void *data,
3383 bool last_command, u8 preservation_flags,
3384 struct i40e_asq_cmd_details *cmd_details)
3385 {
3386 struct i40e_aq_desc desc;
3387 struct i40e_aqc_nvm_update *cmd =
3388 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3389 int status;
3390
3391 /* The highest byte of the offset must be zero. */
3392 if (offset & 0xFF000000) {
3393 status = I40E_ERR_PARAM;
3394 goto i40e_aq_update_nvm_exit;
3395 }
3396
3397 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3398
3399 /* If this is the last command in a series, set the proper flag. */
3400 if (last_command)
3401 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3402 if (hw->mac.type == I40E_MAC_X722) {
3403 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3404 cmd->command_flags |=
3405 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3406 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3407 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3408 cmd->command_flags |=
3409 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3410 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3411 }
3412 cmd->module_pointer = module_pointer;
3413 cmd->offset = cpu_to_le32(offset);
3414 cmd->length = cpu_to_le16(length);
3415
3416 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3417 if (length > I40E_AQ_LARGE_BUF)
3418 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3419
3420 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3421
3422 i40e_aq_update_nvm_exit:
3423 return status;
3424 }
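
/* Illustrative sketch, not part of the driver: write a buffer to the NVM in
 * chunks, marking only the final chunk with last_command so firmware knows
 * the update series is complete.  The helper name, the 4 KB chunk size and
 * the use of module pointer 0 with "preserve all" semantics are assumptions
 * made for this example only.
 */
static int __maybe_unused example_update_nvm(struct i40e_hw *hw, u32 offset,
					     u8 *data, u16 len)
{
	u16 chunk = 4096;	/* assumed per-command transfer size */
	u16 done = 0;
	int status;

	status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
	if (status)
		return status;

	while (done < len) {
		u16 this_len = len - done;
		bool last;

		if (this_len > chunk)
			this_len = chunk;
		last = (done + this_len == len);

		status = i40e_aq_update_nvm(hw, 0, offset + done, this_len,
					    data + done, last,
					    I40E_NVM_PRESERVATION_FLAGS_ALL,
					    NULL);
		if (status)
			break;
		done += this_len;
	}

	i40e_release_nvm(hw);
	return status;
}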
3425
3426 /**
3427 * i40e_aq_rearrange_nvm
3428 * @hw: pointer to the hw struct
3429 * @rearrange_nvm: defines direction of rearrangement
3430 * @cmd_details: pointer to command details structure or NULL
3431 *
3432 * Rearrange NVM structure, available only for transition FW
3433 **/
3434 int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3435 u8 rearrange_nvm,
3436 struct i40e_asq_cmd_details *cmd_details)
3437 {
3438 struct i40e_aqc_nvm_update *cmd;
3439 struct i40e_aq_desc desc;
3440 int status;
3441
3442 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3443
3444 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3445
3446 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3447 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3448
3449 if (!rearrange_nvm) {
3450 status = I40E_ERR_PARAM;
3451 goto i40e_aq_rearrange_nvm_exit;
3452 }
3453
3454 cmd->command_flags |= rearrange_nvm;
3455 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3456
3457 i40e_aq_rearrange_nvm_exit:
3458 return status;
3459 }
3460
3461 /**
3462 * i40e_aq_get_lldp_mib
3463 * @hw: pointer to the hw struct
3464 * @bridge_type: type of bridge requested
3465 * @mib_type: Local, Remote or both Local and Remote MIBs
3466 * @buff: pointer to a user supplied buffer to store the MIB block
3467 * @buff_size: size of the buffer (in bytes)
3468 * @local_len: length of the returned Local LLDP MIB
3469 * @remote_len: length of the returned Remote LLDP MIB
3470 * @cmd_details: pointer to command details structure or NULL
3471 *
3472 * Requests the complete LLDP MIB (entire packet).
3473 **/
3474 int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3475 u8 mib_type, void *buff, u16 buff_size,
3476 u16 *local_len, u16 *remote_len,
3477 struct i40e_asq_cmd_details *cmd_details)
3478 {
3479 struct i40e_aq_desc desc;
3480 struct i40e_aqc_lldp_get_mib *cmd =
3481 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3482 struct i40e_aqc_lldp_get_mib *resp =
3483 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3484 int status;
3485
3486 if (buff_size == 0 || !buff)
3487 return I40E_ERR_PARAM;
3488
3489 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3490 /* Indirect Command */
3491 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3492
3493 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3494 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3495 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3496
3497 desc.datalen = cpu_to_le16(buff_size);
3498
3499 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3500 if (buff_size > I40E_AQ_LARGE_BUF)
3501 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3502
3503 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3504 if (!status) {
3505 if (local_len != NULL)
3506 *local_len = le16_to_cpu(resp->local_len);
3507 if (remote_len != NULL)
3508 *remote_len = le16_to_cpu(resp->remote_len);
3509 }
3510
3511 return status;
3512 }
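
/* Illustrative sketch, not part of the driver: fetch the locally
 * administered LLDP MIB of the nearest bridge into a caller-provided
 * buffer.  The helper name is an assumption; the MIB and bridge type
 * values are the ones defined in i40e_adminq_cmd.h.
 */
static int __maybe_unused example_get_local_mib(struct i40e_hw *hw, void *buff,
						u16 buff_size, u16 *mib_len)
{
	return i40e_aq_get_lldp_mib(hw,
				    I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				    I40E_AQ_LLDP_MIB_LOCAL,
				    buff, buff_size, mib_len, NULL, NULL);
}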
3513
3514 /**
3515 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3516 * @hw: pointer to the hw struct
3517 * @mib_type: Local, Remote or both Local and Remote MIBs
3518 * @buff: pointer to a user supplied buffer to store the MIB block
3519 * @buff_size: size of the buffer (in bytes)
3520 * @cmd_details: pointer to command details structure or NULL
3521 *
3522 * Set the LLDP MIB.
3523 **/
3524 int
3525 i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3526 u8 mib_type, void *buff, u16 buff_size,
3527 struct i40e_asq_cmd_details *cmd_details)
3528 {
3529 struct i40e_aqc_lldp_set_local_mib *cmd;
3530 struct i40e_aq_desc desc;
3531 int status;
3532
3533 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3534 if (buff_size == 0 || !buff)
3535 return I40E_ERR_PARAM;
3536
3537 i40e_fill_default_direct_cmd_desc(&desc,
3538 i40e_aqc_opc_lldp_set_local_mib);
3539 /* Indirect Command */
3540 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3541 if (buff_size > I40E_AQ_LARGE_BUF)
3542 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3543 desc.datalen = cpu_to_le16(buff_size);
3544
3545 cmd->type = mib_type;
3546 cmd->length = cpu_to_le16(buff_size);
3547 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3548 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3549
3550 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3551 return status;
3552 }
3553
3554 /**
3555 * i40e_aq_cfg_lldp_mib_change_event
3556 * @hw: pointer to the hw struct
3557 * @enable_update: Enable or Disable event posting
3558 * @cmd_details: pointer to command details structure or NULL
3559 *
3560 * Enable or Disable posting of an event on ARQ when LLDP MIB
3561 * associated with the interface changes
3562 **/
3563 int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3564 bool enable_update,
3565 struct i40e_asq_cmd_details *cmd_details)
3566 {
3567 struct i40e_aq_desc desc;
3568 struct i40e_aqc_lldp_update_mib *cmd =
3569 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3570 int status;
3571
3572 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3573
3574 if (!enable_update)
3575 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3576
3577 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3578
3579 return status;
3580 }
3581
3582 /**
3583 * i40e_aq_restore_lldp
3584 * @hw: pointer to the hw struct
3585 * @setting: pointer to factory setting variable or NULL
3586 * @restore: True if factory settings should be restored
3587 * @cmd_details: pointer to command details structure or NULL
3588 *
3589 * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
3590 * only return the factory setting in the AQ response.
3591 **/
3592 int
3593 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3594 struct i40e_asq_cmd_details *cmd_details)
3595 {
3596 struct i40e_aq_desc desc;
3597 struct i40e_aqc_lldp_restore *cmd =
3598 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3599 int status;
3600
3601 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3602 i40e_debug(hw, I40E_DEBUG_ALL,
3603 "Restore LLDP not supported by current FW version.\n");
3604 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3605 }
3606
3607 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3608
3609 if (restore)
3610 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3611
3612 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3613
3614 if (setting)
3615 *setting = cmd->command & 1;
3616
3617 return status;
3618 }
3619
3620 /**
3621 * i40e_aq_stop_lldp
3622 * @hw: pointer to the hw struct
3623 * @shutdown_agent: True if the LLDP Agent needs to be shut down
3624 * @persist: True if stop of LLDP should be persistent across power cycles
3625 * @cmd_details: pointer to command details structure or NULL
3626 *
3627 * Stop or Shutdown the embedded LLDP Agent
3628 **/
3629 int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3630 bool persist,
3631 struct i40e_asq_cmd_details *cmd_details)
3632 {
3633 struct i40e_aq_desc desc;
3634 struct i40e_aqc_lldp_stop *cmd =
3635 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3636 int status;
3637
3638 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3639
3640 if (shutdown_agent)
3641 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3642
3643 if (persist) {
3644 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3645 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3646 else
3647 i40e_debug(hw, I40E_DEBUG_ALL,
3648 "Persistent Stop LLDP not supported by current FW version.\n");
3649 }
3650
3651 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3652
3653 return status;
3654 }
3655
3656 /**
3657 * i40e_aq_start_lldp
3658 * @hw: pointer to the hw struct
3659 * @persist: True if start of LLDP should be persistent across power cycles
3660 * @cmd_details: pointer to command details structure or NULL
3661 *
3662 * Start the embedded LLDP Agent on all ports.
3663 **/
3664 int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3665 struct i40e_asq_cmd_details *cmd_details)
3666 {
3667 struct i40e_aq_desc desc;
3668 struct i40e_aqc_lldp_start *cmd =
3669 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3670 int status;
3671
3672 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3673
3674 cmd->command = I40E_AQ_LLDP_AGENT_START;
3675
3676 if (persist) {
3677 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3678 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3679 else
3680 i40e_debug(hw, I40E_DEBUG_ALL,
3681 "Persistent Start LLDP not supported by current FW version.\n");
3682 }
3683
3684 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3685
3686 return status;
3687 }
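
/* Illustrative sketch, not part of the driver: disable and later re-enable
 * the firmware LLDP agent, persisting the choice across power cycles when
 * the firmware supports it (see I40E_HW_FLAG_FW_LLDP_PERSISTENT above).
 * The helper names are assumptions made for this example only.
 */
static int __maybe_unused example_disable_fw_lldp(struct i40e_hw *hw)
{
	/* shutdown_agent = true, persist = true */
	return i40e_aq_stop_lldp(hw, true, true, NULL);
}

static int __maybe_unused example_enable_fw_lldp(struct i40e_hw *hw)
{
	return i40e_aq_start_lldp(hw, true, NULL);
}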
3688
3689 /**
3690 * i40e_aq_set_dcb_parameters
3691 * @hw: pointer to the hw struct
3692 * @cmd_details: pointer to command details structure or NULL
3693 * @dcb_enable: True if DCB configuration needs to be applied
3694 *
3695 **/
3696 int
3697 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3698 struct i40e_asq_cmd_details *cmd_details)
3699 {
3700 struct i40e_aq_desc desc;
3701 struct i40e_aqc_set_dcb_parameters *cmd =
3702 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3703 int status;
3704
3705 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3706 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3707
3708 i40e_fill_default_direct_cmd_desc(&desc,
3709 i40e_aqc_opc_set_dcb_parameters);
3710
3711 if (dcb_enable) {
3712 cmd->valid_flags = I40E_DCB_VALID;
3713 cmd->command = I40E_AQ_DCB_SET_AGENT;
3714 }
3715 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3716
3717 return status;
3718 }
3719
3720 /**
3721 * i40e_aq_get_cee_dcb_config
3722 * @hw: pointer to the hw struct
3723 * @buff: response buffer that stores CEE operational configuration
3724 * @buff_size: size of the buffer passed
3725 * @cmd_details: pointer to command details structure or NULL
3726 *
3727 * Get CEE DCBX mode operational configuration from firmware
3728 **/
3729 int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3730 void *buff, u16 buff_size,
3731 struct i40e_asq_cmd_details *cmd_details)
3732 {
3733 struct i40e_aq_desc desc;
3734 int status;
3735
3736 if (buff_size == 0 || !buff)
3737 return I40E_ERR_PARAM;
3738
3739 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3740
3741 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3742 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3743 cmd_details);
3744
3745 return status;
3746 }
3747
3748 /**
3749 * i40e_aq_add_udp_tunnel
3750 * @hw: pointer to the hw struct
3751 * @udp_port: the UDP port to add in Host byte order
3752 * @protocol_index: protocol index type
3753 * @filter_index: pointer to filter index
3754 * @cmd_details: pointer to command details structure or NULL
3755 *
3756 * Note: Firmware expects the udp_port value to be in Little Endian format,
3757 * and this function will call cpu_to_le16 to convert from Host byte order to
3758 * Little Endian order.
3759 **/
3760 int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3761 u16 udp_port, u8 protocol_index,
3762 u8 *filter_index,
3763 struct i40e_asq_cmd_details *cmd_details)
3764 {
3765 struct i40e_aq_desc desc;
3766 struct i40e_aqc_add_udp_tunnel *cmd =
3767 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3768 struct i40e_aqc_del_udp_tunnel_completion *resp =
3769 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3770 int status;
3771
3772 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3773
3774 cmd->udp_port = cpu_to_le16(udp_port);
3775 cmd->protocol_type = protocol_index;
3776
3777 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3778
3779 if (!status && filter_index)
3780 *filter_index = resp->index;
3781
3782 return status;
3783 }
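
/* Illustrative sketch, not part of the driver: offload a VXLAN UDP port and
 * remember the returned filter index so the filter can later be removed
 * with i40e_aq_del_udp_tunnel().  The helper name is an assumption; the
 * port is passed in host byte order, as the kernel-doc above describes.
 */
static int __maybe_unused example_add_vxlan_port(struct i40e_hw *hw, u16 port,
						 u8 *filter_index)
{
	return i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
				      filter_index, NULL);
}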
3784
3785 /**
3786 * i40e_aq_del_udp_tunnel
3787 * @hw: pointer to the hw struct
3788 * @index: filter index
3789 * @cmd_details: pointer to command details structure or NULL
3790 **/
3791 int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3792 struct i40e_asq_cmd_details *cmd_details)
3793 {
3794 struct i40e_aq_desc desc;
3795 struct i40e_aqc_remove_udp_tunnel *cmd =
3796 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3797 int status;
3798
3799 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3800
3801 cmd->index = index;
3802
3803 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3804
3805 return status;
3806 }
3807
3808 /**
3809 * i40e_aq_delete_element - Delete switch element
3810 * @hw: pointer to the hw struct
3811 * @seid: the SEID to delete from the switch
3812 * @cmd_details: pointer to command details structure or NULL
3813 *
3814 * This deletes a switch element from the switch.
3815 **/
3816 int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3817 struct i40e_asq_cmd_details *cmd_details)
3818 {
3819 struct i40e_aq_desc desc;
3820 struct i40e_aqc_switch_seid *cmd =
3821 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3822 int status;
3823
3824 if (seid == 0)
3825 return I40E_ERR_PARAM;
3826
3827 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3828
3829 cmd->seid = cpu_to_le16(seid);
3830
3831 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
3832 cmd_details, true);
3833
3834 return status;
3835 }
3836
3837 /**
3838 * i40e_aq_dcb_updated - DCB Updated Command
3839 * @hw: pointer to the hw struct
3840 * @cmd_details: pointer to command details structure or NULL
3841 *
3842 * EMP will return when the shared RPB settings have been
3843 * recomputed and modified. The retval field in the descriptor
3844 * will be set to 0 when RPB is modified.
3845 **/
3846 int i40e_aq_dcb_updated(struct i40e_hw *hw,
3847 struct i40e_asq_cmd_details *cmd_details)
3848 {
3849 struct i40e_aq_desc desc;
3850 int status;
3851
3852 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3853
3854 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3855
3856 return status;
3857 }
3858
3859 /**
3860 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3861 * @hw: pointer to the hw struct
3862 * @seid: seid for the physical port/switching component/vsi
3863 * @buff: Indirect buffer to hold data parameters and response
3864 * @buff_size: Indirect buffer size
3865 * @opcode: Tx scheduler AQ command opcode
3866 * @cmd_details: pointer to command details structure or NULL
3867 *
3868 * Generic command handler for Tx scheduler AQ commands
3869 **/
3870 static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3871 void *buff, u16 buff_size,
3872 enum i40e_admin_queue_opc opcode,
3873 struct i40e_asq_cmd_details *cmd_details)
3874 {
3875 struct i40e_aq_desc desc;
3876 struct i40e_aqc_tx_sched_ind *cmd =
3877 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3878 int status;
3879 bool cmd_param_flag = false;
3880
3881 switch (opcode) {
3882 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3883 case i40e_aqc_opc_configure_vsi_tc_bw:
3884 case i40e_aqc_opc_enable_switching_comp_ets:
3885 case i40e_aqc_opc_modify_switching_comp_ets:
3886 case i40e_aqc_opc_disable_switching_comp_ets:
3887 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3888 case i40e_aqc_opc_configure_switching_comp_bw_config:
3889 cmd_param_flag = true;
3890 break;
3891 case i40e_aqc_opc_query_vsi_bw_config:
3892 case i40e_aqc_opc_query_vsi_ets_sla_config:
3893 case i40e_aqc_opc_query_switching_comp_ets_config:
3894 case i40e_aqc_opc_query_port_ets_config:
3895 case i40e_aqc_opc_query_switching_comp_bw_config:
3896 cmd_param_flag = false;
3897 break;
3898 default:
3899 return I40E_ERR_PARAM;
3900 }
3901
3902 i40e_fill_default_direct_cmd_desc(&desc, opcode);
3903
3904 /* Indirect command */
3905 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3906 if (cmd_param_flag)
3907 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3908 if (buff_size > I40E_AQ_LARGE_BUF)
3909 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3910
3911 desc.datalen = cpu_to_le16(buff_size);
3912
3913 cmd->vsi_seid = cpu_to_le16(seid);
3914
3915 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3916
3917 return status;
3918 }
3919
3920 /**
3921 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3922 * @hw: pointer to the hw struct
3923 * @seid: VSI seid
3924 * @credit: BW limit credits (0 = disabled)
3925 * @max_credit: Max BW limit credits
3926 * @cmd_details: pointer to command details structure or NULL
3927 **/
3928 int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3929 u16 seid, u16 credit, u8 max_credit,
3930 struct i40e_asq_cmd_details *cmd_details)
3931 {
3932 struct i40e_aq_desc desc;
3933 struct i40e_aqc_configure_vsi_bw_limit *cmd =
3934 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
3935 int status;
3936
3937 i40e_fill_default_direct_cmd_desc(&desc,
3938 i40e_aqc_opc_configure_vsi_bw_limit);
3939
3940 cmd->vsi_seid = cpu_to_le16(seid);
3941 cmd->credit = cpu_to_le16(credit);
3942 cmd->max_credit = max_credit;
3943
3944 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3945
3946 return status;
3947 }
3948
3949 /**
3950 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
3951 * @hw: pointer to the hw struct
3952 * @seid: VSI seid
3953 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
3954 * @cmd_details: pointer to command details structure or NULL
3955 **/
3956 int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
3957 u16 seid,
3958 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
3959 struct i40e_asq_cmd_details *cmd_details)
3960 {
3961 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3962 i40e_aqc_opc_configure_vsi_tc_bw,
3963 cmd_details);
3964 }
3965
3966 /**
3967 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
3968 * @hw: pointer to the hw struct
3969 * @seid: seid of the switching component connected to Physical Port
3970 * @ets_data: Buffer holding ETS parameters
3971 * @opcode: Tx scheduler AQ command opcode
3972 * @cmd_details: pointer to command details structure or NULL
3973 **/
3974 int
3975 i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
3976 u16 seid,
3977 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
3978 enum i40e_admin_queue_opc opcode,
3979 struct i40e_asq_cmd_details *cmd_details)
3980 {
3981 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
3982 sizeof(*ets_data), opcode, cmd_details);
3983 }
3984
3985 /**
3986 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
3987 * @hw: pointer to the hw struct
3988 * @seid: seid of the switching component
3989 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
3990 * @cmd_details: pointer to command details structure or NULL
3991 **/
3992 int
3993 i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
3994 u16 seid,
3995 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
3996 struct i40e_asq_cmd_details *cmd_details)
3997 {
3998 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3999 i40e_aqc_opc_configure_switching_comp_bw_config,
4000 cmd_details);
4001 }
4002
4003 /**
4004 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4005 * @hw: pointer to the hw struct
4006 * @seid: seid of the VSI
4007 * @bw_data: Buffer to hold VSI BW configuration
4008 * @cmd_details: pointer to command details structure or NULL
4009 **/
4010 int
4011 i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4012 u16 seid,
4013 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4014 struct i40e_asq_cmd_details *cmd_details)
4015 {
4016 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4017 i40e_aqc_opc_query_vsi_bw_config,
4018 cmd_details);
4019 }
4020
4021 /**
4022 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4023 * @hw: pointer to the hw struct
4024 * @seid: seid of the VSI
4025 * @bw_data: Buffer to hold VSI BW configuration per TC
4026 * @cmd_details: pointer to command details structure or NULL
4027 **/
4028 int
4029 i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4030 u16 seid,
4031 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4032 struct i40e_asq_cmd_details *cmd_details)
4033 {
4034 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4035 i40e_aqc_opc_query_vsi_ets_sla_config,
4036 cmd_details);
4037 }
4038
4039 /**
4040 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4041 * @hw: pointer to the hw struct
4042 * @seid: seid of the switching component
4043 * @bw_data: Buffer to hold switching component's per TC BW config
4044 * @cmd_details: pointer to command details structure or NULL
4045 **/
4046 int
4047 i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4048 u16 seid,
4049 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4050 struct i40e_asq_cmd_details *cmd_details)
4051 {
4052 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4053 i40e_aqc_opc_query_switching_comp_ets_config,
4054 cmd_details);
4055 }
4056
4057 /**
4058 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4059 * @hw: pointer to the hw struct
4060 * @seid: seid of the VSI or switching component connected to Physical Port
4061 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4062 * @cmd_details: pointer to command details structure or NULL
4063 **/
4064 int
4065 i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4066 u16 seid,
4067 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4068 struct i40e_asq_cmd_details *cmd_details)
4069 {
4070 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4071 i40e_aqc_opc_query_port_ets_config,
4072 cmd_details);
4073 }
4074
4075 /**
4076 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4077 * @hw: pointer to the hw struct
4078 * @seid: seid of the switching component
4079 * @bw_data: Buffer to hold switching component's BW configuration
4080 * @cmd_details: pointer to command details structure or NULL
4081 **/
4082 int
4083 i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4084 u16 seid,
4085 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4086 struct i40e_asq_cmd_details *cmd_details)
4087 {
4088 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4089 i40e_aqc_opc_query_switching_comp_bw_config,
4090 cmd_details);
4091 }
4092
4093 /**
4094 * i40e_validate_filter_settings
4095 * @hw: pointer to the hardware structure
4096 * @settings: Filter control settings
4097 *
4098 * Check and validate the filter control settings passed.
4099 * The function checks for the valid filter/context sizes being
4100 * passed for FCoE and PE.
4101 *
4102 * Returns 0 if the values passed are valid and within
4103 * range else returns an error.
4104 **/
4105 static int
4106 i40e_validate_filter_settings(struct i40e_hw *hw,
4107 struct i40e_filter_control_settings *settings)
4108 {
4109 u32 fcoe_cntx_size, fcoe_filt_size;
4110 u32 fcoe_fmax;
4111 u32 val;
4112
4113 /* Validate FCoE settings passed */
4114 switch (settings->fcoe_filt_num) {
4115 case I40E_HASH_FILTER_SIZE_1K:
4116 case I40E_HASH_FILTER_SIZE_2K:
4117 case I40E_HASH_FILTER_SIZE_4K:
4118 case I40E_HASH_FILTER_SIZE_8K:
4119 case I40E_HASH_FILTER_SIZE_16K:
4120 case I40E_HASH_FILTER_SIZE_32K:
4121 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4122 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4123 break;
4124 default:
4125 return I40E_ERR_PARAM;
4126 }
4127
4128 switch (settings->fcoe_cntx_num) {
4129 case I40E_DMA_CNTX_SIZE_512:
4130 case I40E_DMA_CNTX_SIZE_1K:
4131 case I40E_DMA_CNTX_SIZE_2K:
4132 case I40E_DMA_CNTX_SIZE_4K:
4133 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4134 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4135 break;
4136 default:
4137 return I40E_ERR_PARAM;
4138 }
4139
4140 /* Validate PE settings passed */
4141 switch (settings->pe_filt_num) {
4142 case I40E_HASH_FILTER_SIZE_1K:
4143 case I40E_HASH_FILTER_SIZE_2K:
4144 case I40E_HASH_FILTER_SIZE_4K:
4145 case I40E_HASH_FILTER_SIZE_8K:
4146 case I40E_HASH_FILTER_SIZE_16K:
4147 case I40E_HASH_FILTER_SIZE_32K:
4148 case I40E_HASH_FILTER_SIZE_64K:
4149 case I40E_HASH_FILTER_SIZE_128K:
4150 case I40E_HASH_FILTER_SIZE_256K:
4151 case I40E_HASH_FILTER_SIZE_512K:
4152 case I40E_HASH_FILTER_SIZE_1M:
4153 break;
4154 default:
4155 return I40E_ERR_PARAM;
4156 }
4157
4158 switch (settings->pe_cntx_num) {
4159 case I40E_DMA_CNTX_SIZE_512:
4160 case I40E_DMA_CNTX_SIZE_1K:
4161 case I40E_DMA_CNTX_SIZE_2K:
4162 case I40E_DMA_CNTX_SIZE_4K:
4163 case I40E_DMA_CNTX_SIZE_8K:
4164 case I40E_DMA_CNTX_SIZE_16K:
4165 case I40E_DMA_CNTX_SIZE_32K:
4166 case I40E_DMA_CNTX_SIZE_64K:
4167 case I40E_DMA_CNTX_SIZE_128K:
4168 case I40E_DMA_CNTX_SIZE_256K:
4169 break;
4170 default:
4171 return I40E_ERR_PARAM;
4172 }
4173
4174 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4175 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4176 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4177 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4178 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4179 return I40E_ERR_INVALID_SIZE;
4180
4181 return 0;
4182 }
4183
4184 /**
4185 * i40e_set_filter_control
4186 * @hw: pointer to the hardware structure
4187 * @settings: Filter control settings
4188 *
4189 * Set the Queue Filters for PE/FCoE and enable filters required
4190 * for a single PF. It is expected that these settings are programmed
4191 * at the driver initialization time.
4192 **/
4193 int i40e_set_filter_control(struct i40e_hw *hw,
4194 struct i40e_filter_control_settings *settings)
4195 {
4196 u32 hash_lut_size = 0;
4197 int ret = 0;
4198 u32 val;
4199
4200 if (!settings)
4201 return I40E_ERR_PARAM;
4202
4203 /* Validate the input settings */
4204 ret = i40e_validate_filter_settings(hw, settings);
4205 if (ret)
4206 return ret;
4207
4208 /* Read the PF Queue Filter control register */
4209 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4210
4211 /* Program required PE hash buckets for the PF */
4212 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4213 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4214 I40E_PFQF_CTL_0_PEHSIZE_MASK;
4215 /* Program required PE contexts for the PF */
4216 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4217 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4218 I40E_PFQF_CTL_0_PEDSIZE_MASK;
4219
4220 /* Program required FCoE hash buckets for the PF */
4221 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4222 val |= ((u32)settings->fcoe_filt_num <<
4223 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4224 I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4225 /* Program required FCoE DDP contexts for the PF */
4226 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4227 val |= ((u32)settings->fcoe_cntx_num <<
4228 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4229 I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4230
4231 /* Program Hash LUT size for the PF */
4232 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4233 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4234 hash_lut_size = 1;
4235 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4236 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4237
4238 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4239 if (settings->enable_fdir)
4240 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4241 if (settings->enable_ethtype)
4242 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4243 if (settings->enable_macvlan)
4244 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4245
4246 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4247
4248 return 0;
4249 }
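
/* Illustrative sketch, not part of the driver: program the PF queue filter
 * sizes once at initialization time.  The helper name and the particular
 * sizes chosen below are assumptions made for this example only; a real
 * caller derives them from the capabilities discovered earlier.
 */
static int __maybe_unused example_init_filter_control(struct i40e_hw *hw)
{
	struct i40e_filter_control_settings settings = {
		.hash_lut_size	= I40E_HASH_LUT_SIZE_512,
		.fcoe_filt_num	= I40E_HASH_FILTER_SIZE_1K,
		.fcoe_cntx_num	= I40E_DMA_CNTX_SIZE_512,
		.pe_filt_num	= I40E_HASH_FILTER_SIZE_1K,
		.pe_cntx_num	= I40E_DMA_CNTX_SIZE_512,
		.enable_fdir	= true,
		.enable_ethtype	= true,
		.enable_macvlan	= true,
	};

	return i40e_set_filter_control(hw, &settings);
}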
4250
4251 /**
4252 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4253 * @hw: pointer to the hw struct
4254 * @mac_addr: MAC address to use in the filter
4255 * @ethtype: Ethertype to use in the filter
4256 * @flags: Flags that need to be applied to the filter
4257 * @vsi_seid: seid of the control VSI
4258 * @queue: VSI queue number to send the packet to
4259 * @is_add: Add control packet filter if True else remove
4260 * @stats: Structure to hold information on control filter counts
4261 * @cmd_details: pointer to command details structure or NULL
4262 *
4263 * This command will add or remove a control packet filter for a control VSI.
4264 * On return it updates the total number of perfect filters in
4265 * the stats member.
4266 **/
4267 int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4268 u8 *mac_addr, u16 ethtype, u16 flags,
4269 u16 vsi_seid, u16 queue, bool is_add,
4270 struct i40e_control_filter_stats *stats,
4271 struct i40e_asq_cmd_details *cmd_details)
4272 {
4273 struct i40e_aq_desc desc;
4274 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4275 (struct i40e_aqc_add_remove_control_packet_filter *)
4276 &desc.params.raw;
4277 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4278 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4279 &desc.params.raw;
4280 int status;
4281
4282 if (vsi_seid == 0)
4283 return I40E_ERR_PARAM;
4284
4285 if (is_add) {
4286 i40e_fill_default_direct_cmd_desc(&desc,
4287 i40e_aqc_opc_add_control_packet_filter);
4288 cmd->queue = cpu_to_le16(queue);
4289 } else {
4290 i40e_fill_default_direct_cmd_desc(&desc,
4291 i40e_aqc_opc_remove_control_packet_filter);
4292 }
4293
4294 if (mac_addr)
4295 ether_addr_copy(cmd->mac, mac_addr);
4296
4297 cmd->etype = cpu_to_le16(ethtype);
4298 cmd->flags = cpu_to_le16(flags);
4299 cmd->seid = cpu_to_le16(vsi_seid);
4300
4301 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4302
4303 if (!status && stats) {
4304 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4305 stats->etype_used = le16_to_cpu(resp->etype_used);
4306 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4307 stats->etype_free = le16_to_cpu(resp->etype_free);
4308 }
4309
4310 return status;
4311 }
4312
4313 /**
4314 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4315 * @hw: pointer to the hw struct
4316 * @seid: VSI seid to add the ethertype filter to
4317 **/
4318 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4319 u16 seid)
4320 {
4321 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4322 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4323 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4324 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4325 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4326 int status;
4327
4328 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4329 seid, 0, true, NULL,
4330 NULL);
4331 if (status)
4332 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4333 }
4334
4335 /**
4336 * i40e_aq_alternate_read
4337 * @hw: pointer to the hardware structure
4338 * @reg_addr0: address of first dword to be read
4339 * @reg_val0: pointer for data read from 'reg_addr0'
4340 * @reg_addr1: address of second dword to be read
4341 * @reg_val1: pointer for data read from 'reg_addr1'
4342 *
4343 * Read one or two dwords from alternate structure. Fields are indicated
4344 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4345 * is not passed then only register at 'reg_addr0' is read.
4346 *
4347 **/
4348 static int i40e_aq_alternate_read(struct i40e_hw *hw,
4349 u32 reg_addr0, u32 *reg_val0,
4350 u32 reg_addr1, u32 *reg_val1)
4351 {
4352 struct i40e_aq_desc desc;
4353 struct i40e_aqc_alternate_write *cmd_resp =
4354 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4355 int status;
4356
4357 if (!reg_val0)
4358 return I40E_ERR_PARAM;
4359
4360 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4361 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4362 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4363
4364 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4365
4366 if (!status) {
4367 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4368
4369 if (reg_val1)
4370 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4371 }
4372
4373 return status;
4374 }
4375
4376 /**
4377 * i40e_aq_suspend_port_tx
4378 * @hw: pointer to the hardware structure
4379 * @seid: port seid
4380 * @cmd_details: pointer to command details structure or NULL
4381 *
4382 * Suspend port's Tx traffic
4383 **/
4384 int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4385 struct i40e_asq_cmd_details *cmd_details)
4386 {
4387 struct i40e_aqc_tx_sched_ind *cmd;
4388 struct i40e_aq_desc desc;
4389 int status;
4390
4391 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4392 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4393 cmd->vsi_seid = cpu_to_le16(seid);
4394 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4395
4396 return status;
4397 }
4398
4399 /**
4400 * i40e_aq_resume_port_tx
4401 * @hw: pointer to the hardware structure
4402 * @cmd_details: pointer to command details structure or NULL
4403 *
4404 * Resume port's Tx traffic
4405 **/
4406 int i40e_aq_resume_port_tx(struct i40e_hw *hw,
4407 struct i40e_asq_cmd_details *cmd_details)
4408 {
4409 struct i40e_aq_desc desc;
4410 int status;
4411
4412 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4413
4414 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4415
4416 return status;
4417 }
4418
4419 /**
4420 * i40e_set_pci_config_data - store PCI bus info
4421 * @hw: pointer to hardware structure
4422 * @link_status: the link status word from PCI config space
4423 *
4424 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4425 **/
4426 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4427 {
4428 hw->bus.type = i40e_bus_type_pci_express;
4429
4430 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4431 case PCI_EXP_LNKSTA_NLW_X1:
4432 hw->bus.width = i40e_bus_width_pcie_x1;
4433 break;
4434 case PCI_EXP_LNKSTA_NLW_X2:
4435 hw->bus.width = i40e_bus_width_pcie_x2;
4436 break;
4437 case PCI_EXP_LNKSTA_NLW_X4:
4438 hw->bus.width = i40e_bus_width_pcie_x4;
4439 break;
4440 case PCI_EXP_LNKSTA_NLW_X8:
4441 hw->bus.width = i40e_bus_width_pcie_x8;
4442 break;
4443 default:
4444 hw->bus.width = i40e_bus_width_unknown;
4445 break;
4446 }
4447
4448 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4449 case PCI_EXP_LNKSTA_CLS_2_5GB:
4450 hw->bus.speed = i40e_bus_speed_2500;
4451 break;
4452 case PCI_EXP_LNKSTA_CLS_5_0GB:
4453 hw->bus.speed = i40e_bus_speed_5000;
4454 break;
4455 case PCI_EXP_LNKSTA_CLS_8_0GB:
4456 hw->bus.speed = i40e_bus_speed_8000;
4457 break;
4458 default:
4459 hw->bus.speed = i40e_bus_speed_unknown;
4460 break;
4461 }
4462 }
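
/* Illustrative sketch, not part of the driver: read the PCIe Link Status
 * register of the underlying device and let i40e_set_pci_config_data()
 * decode it into hw->bus.  The helper name is an assumption;
 * pcie_capability_read_word() and PCI_EXP_LNKSTA come from linux/pci.h.
 */
static void __maybe_unused example_cache_pci_info(struct i40e_hw *hw,
						  struct pci_dev *pdev)
{
	u16 link_status = 0;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);
}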
4463
4464 /**
4465 * i40e_aq_debug_dump
4466 * @hw: pointer to the hardware structure
4467 * @cluster_id: specific cluster to dump
4468 * @table_id: table id within cluster
4469 * @start_index: index of line in the block to read
4470 * @buff_size: dump buffer size
4471 * @buff: dump buffer
4472 * @ret_buff_size: actual buffer size returned
4473 * @ret_next_table: next block to read
4474 * @ret_next_index: next index to read
4475 * @cmd_details: pointer to command details structure or NULL
4476 *
4477 * Dump internal FW/HW data for debug purposes.
4478 *
4479 **/
4480 int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4481 u8 table_id, u32 start_index, u16 buff_size,
4482 void *buff, u16 *ret_buff_size,
4483 u8 *ret_next_table, u32 *ret_next_index,
4484 struct i40e_asq_cmd_details *cmd_details)
4485 {
4486 struct i40e_aq_desc desc;
4487 struct i40e_aqc_debug_dump_internals *cmd =
4488 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4489 struct i40e_aqc_debug_dump_internals *resp =
4490 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4491 int status;
4492
4493 if (buff_size == 0 || !buff)
4494 return I40E_ERR_PARAM;
4495
4496 i40e_fill_default_direct_cmd_desc(&desc,
4497 i40e_aqc_opc_debug_dump_internals);
4498 /* Indirect Command */
4499 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4500 if (buff_size > I40E_AQ_LARGE_BUF)
4501 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4502
4503 cmd->cluster_id = cluster_id;
4504 cmd->table_id = table_id;
4505 cmd->idx = cpu_to_le32(start_index);
4506
4507 desc.datalen = cpu_to_le16(buff_size);
4508
4509 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4510 if (!status) {
4511 if (ret_buff_size)
4512 *ret_buff_size = le16_to_cpu(desc.datalen);
4513 if (ret_next_table)
4514 *ret_next_table = resp->table_id;
4515 if (ret_next_index)
4516 *ret_next_index = le32_to_cpu(resp->idx);
4517 }
4518
4519 return status;
4520 }
4521
4522 /**
4523 * i40e_read_bw_from_alt_ram
4524 * @hw: pointer to the hardware structure
4525 * @max_bw: pointer for max_bw read
4526 * @min_bw: pointer for min_bw read
4527 * @min_valid: pointer for bool that is true if min_bw is a valid value
4528 * @max_valid: pointer for bool that is true if max_bw is a valid value
4529 *
4530 * Read the bandwidth values from the alternate RAM for the given PF
4531 **/
4532 int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4533 u32 *max_bw, u32 *min_bw,
4534 bool *min_valid, bool *max_valid)
4535 {
4536 u32 max_bw_addr, min_bw_addr;
4537 int status;
4538
4539 /* Calculate the address of the min/max bw registers */
4540 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4541 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4542 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4543 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4544 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4545 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4546
4547 /* Read the bandwidths from alt ram */
4548 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4549 min_bw_addr, min_bw);
4550
4551 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4552 *min_valid = true;
4553 else
4554 *min_valid = false;
4555
4556 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4557 *max_valid = true;
4558 else
4559 *max_valid = false;
4560
4561 return status;
4562 }
4563
4564 /**
4565 * i40e_aq_configure_partition_bw
4566 * @hw: pointer to the hardware structure
4567 * @bw_data: Buffer holding valid pfs and bw limits
4568 * @cmd_details: pointer to command details
4569 *
4570 * Configure partitions guaranteed/max bw
4571 **/
4572 int
4573 i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4574 struct i40e_aqc_configure_partition_bw_data *bw_data,
4575 struct i40e_asq_cmd_details *cmd_details)
4576 {
4577 u16 bwd_size = sizeof(*bw_data);
4578 struct i40e_aq_desc desc;
4579 int status;
4580
4581 i40e_fill_default_direct_cmd_desc(&desc,
4582 i40e_aqc_opc_configure_partition_bw);
4583
4584 /* Indirect command */
4585 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4586 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4587
4588 if (bwd_size > I40E_AQ_LARGE_BUF)
4589 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4590
4591 desc.datalen = cpu_to_le16(bwd_size);
4592
4593 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4594 cmd_details);
4595
4596 return status;
4597 }
4598
4599 /**
4600 * i40e_read_phy_register_clause22
4601 * @hw: pointer to the HW structure
4602 * @reg: register address in the page
4603 * @phy_addr: PHY address on MDIO interface
4604 * @value: PHY register value
4605 *
4606 * Reads specified PHY register value
4607 **/
4608 int i40e_read_phy_register_clause22(struct i40e_hw *hw,
4609 u16 reg, u8 phy_addr, u16 *value)
4610 {
4611 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4612 int status = I40E_ERR_TIMEOUT;
4613 u32 command = 0;
4614 u16 retry = 1000;
4615
4616 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4617 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4618 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4619 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4620 (I40E_GLGEN_MSCA_MDICMD_MASK);
4621 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4622 do {
4623 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4624 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4625 status = 0;
4626 break;
4627 }
4628 udelay(10);
4629 retry--;
4630 } while (retry);
4631
4632 if (status) {
4633 i40e_debug(hw, I40E_DEBUG_PHY,
4634 "PHY: Can't write command to external PHY.\n");
4635 } else {
4636 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4637 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4638 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4639 }
4640
4641 return status;
4642 }
4643
4644 /**
4645 * i40e_write_phy_register_clause22
4646 * @hw: pointer to the HW structure
4647 * @reg: register address in the page
4648 * @phy_addr: PHY address on MDIO interface
4649 * @value: PHY register value
4650 *
4651 * Writes specified PHY register value
4652 **/
4653 int i40e_write_phy_register_clause22(struct i40e_hw *hw,
4654 u16 reg, u8 phy_addr, u16 value)
4655 {
4656 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4657 int status = I40E_ERR_TIMEOUT;
4658 u32 command = 0;
4659 u16 retry = 1000;
4660
4661 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4662 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4663
4664 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4665 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4666 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4667 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4668 (I40E_GLGEN_MSCA_MDICMD_MASK);
4669
4670 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4671 do {
4672 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4673 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4674 status = 0;
4675 break;
4676 }
4677 udelay(10);
4678 retry--;
4679 } while (retry);
4680
4681 return status;
4682 }
4683
4684 /**
4685 * i40e_read_phy_register_clause45
4686 * @hw: pointer to the HW structure
4687 * @page: registers page number
4688 * @reg: register address in the page
4689 * @phy_addr: PHY address on MDIO interface
4690 * @value: PHY register value
4691 *
4692 * Reads specified PHY register value
4693 **/
4694 int i40e_read_phy_register_clause45(struct i40e_hw *hw,
4695 u8 page, u16 reg, u8 phy_addr, u16 *value)
4696 {
4697 u8 port_num = hw->func_caps.mdio_port_num;
4698 int status = I40E_ERR_TIMEOUT;
4699 u32 command = 0;
4700 u16 retry = 1000;
4701
4702 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4703 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4704 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4705 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4706 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4707 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4708 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4709 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4710 do {
4711 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4712 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4713 status = 0;
4714 break;
4715 }
4716 usleep_range(10, 20);
4717 retry--;
4718 } while (retry);
4719
4720 if (status) {
4721 i40e_debug(hw, I40E_DEBUG_PHY,
4722 "PHY: Can't write command to external PHY.\n");
4723 goto phy_read_end;
4724 }
4725
4726 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4727 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4728 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4729 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4730 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4731 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4732 status = I40E_ERR_TIMEOUT;
4733 retry = 1000;
4734 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4735 do {
4736 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4737 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4738 status = 0;
4739 break;
4740 }
4741 usleep_range(10, 20);
4742 retry--;
4743 } while (retry);
4744
4745 if (!status) {
4746 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4747 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4748 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4749 } else {
4750 i40e_debug(hw, I40E_DEBUG_PHY,
4751 "PHY: Can't read register value from external PHY.\n");
4752 }
4753
4754 phy_read_end:
4755 return status;
4756 }
4757
4758 /**
4759 * i40e_write_phy_register_clause45
4760 * @hw: pointer to the HW structure
4761 * @page: registers page number
4762 * @reg: register address in the page
4763 * @phy_addr: PHY address on MDIO interface
4764 * @value: PHY register value
4765 *
4766 * Writes value to specified PHY register
4767 **/
4768 int i40e_write_phy_register_clause45(struct i40e_hw *hw,
4769 u8 page, u16 reg, u8 phy_addr, u16 value)
4770 {
4771 u8 port_num = hw->func_caps.mdio_port_num;
4772 int status = I40E_ERR_TIMEOUT;
4773 u16 retry = 1000;
4774 u32 command = 0;
4775
4776 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4777 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4778 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4779 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4780 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4781 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4782 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4783 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4784 do {
4785 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4786 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4787 status = 0;
4788 break;
4789 }
4790 usleep_range(10, 20);
4791 retry--;
4792 } while (retry);
4793 if (status) {
4794 i40e_debug(hw, I40E_DEBUG_PHY,
4795 "PHY: Can't write command to external PHY.\n");
4796 goto phy_write_end;
4797 }
4798
4799 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4800 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4801
4802 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4803 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4804 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4805 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4806 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4807 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4808 status = I40E_ERR_TIMEOUT;
4809 retry = 1000;
4810 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4811 do {
4812 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4813 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4814 status = 0;
4815 break;
4816 }
4817 usleep_range(10, 20);
4818 retry--;
4819 } while (retry);
4820
4821 phy_write_end:
4822 return status;
4823 }
4824
4825 /**
4826 * i40e_write_phy_register
4827 * @hw: pointer to the HW structure
4828 * @page: registers page number
4829 * @reg: register address in the page
4830 * @phy_addr: PHY address on MDIO interface
4831 * @value: PHY register value
4832 *
4833 * Writes value to specified PHY register
4834 **/
4835 int i40e_write_phy_register(struct i40e_hw *hw,
4836 u8 page, u16 reg, u8 phy_addr, u16 value)
4837 {
4838 int status;
4839
4840 switch (hw->device_id) {
4841 case I40E_DEV_ID_1G_BASE_T_X722:
4842 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4843 value);
4844 break;
4845 case I40E_DEV_ID_1G_BASE_T_BC:
4846 case I40E_DEV_ID_5G_BASE_T_BC:
4847 case I40E_DEV_ID_10G_BASE_T:
4848 case I40E_DEV_ID_10G_BASE_T4:
4849 case I40E_DEV_ID_10G_BASE_T_BC:
4850 case I40E_DEV_ID_10G_BASE_T_X722:
4851 case I40E_DEV_ID_25G_B:
4852 case I40E_DEV_ID_25G_SFP28:
4853 status = i40e_write_phy_register_clause45(hw, page, reg,
4854 phy_addr, value);
4855 break;
4856 default:
4857 status = I40E_ERR_UNKNOWN_PHY;
4858 break;
4859 }
4860
4861 return status;
4862 }
4863
4864 /**
4865 * i40e_read_phy_register
4866 * @hw: pointer to the HW structure
4867 * @page: registers page number
4868 * @reg: register address in the page
4869 * @phy_addr: PHY address on MDIO interface
4870 * @value: PHY register value
4871 *
4872 * Reads specified PHY register value
4873 **/
4874 int i40e_read_phy_register(struct i40e_hw *hw,
4875 u8 page, u16 reg, u8 phy_addr, u16 *value)
4876 {
4877 int status;
4878
4879 switch (hw->device_id) {
4880 case I40E_DEV_ID_1G_BASE_T_X722:
4881 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4882 value);
4883 break;
4884 case I40E_DEV_ID_1G_BASE_T_BC:
4885 case I40E_DEV_ID_5G_BASE_T_BC:
4886 case I40E_DEV_ID_10G_BASE_T:
4887 case I40E_DEV_ID_10G_BASE_T4:
4888 case I40E_DEV_ID_10G_BASE_T_BC:
4889 case I40E_DEV_ID_10G_BASE_T_X722:
4890 case I40E_DEV_ID_25G_B:
4891 case I40E_DEV_ID_25G_SFP28:
4892 status = i40e_read_phy_register_clause45(hw, page, reg,
4893 phy_addr, value);
4894 break;
4895 default:
4896 status = I40E_ERR_UNKNOWN_PHY;
4897 break;
4898 }
4899
4900 return status;
4901 }
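/*
 * Usage sketch (an assumption, not upstream code): a caller typically resolves
 * the PHY address for the current port first and then lets the wrapper above
 * pick clause 22 or clause 45 based on the device ID.
 */
#if 0	/* illustrative sketch only */
static int example_read_phy(struct i40e_hw *hw, u8 page, u16 reg, u16 *val)
{
	u32 portnum = rd32(hw, I40E_PFGEN_PORTNUM);
	u8 port = (u8)(portnum & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	u8 phy_addr = i40e_get_phy_address(hw, port);

	return i40e_read_phy_register(hw, page, reg, phy_addr, val);
}
#endif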
4902
4903 /**
4904 * i40e_get_phy_address
4905 * @hw: pointer to the HW structure
4906 * @dev_num: PHY port number whose address is wanted
4907 *
4908 * Gets PHY address for current port
4909 **/
4910 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4911 {
4912 u8 port_num = hw->func_caps.mdio_port_num;
4913 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4914
4915 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4916 }
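/*
 * Worked example (comment only, an illustration rather than upstream text):
 * I40E_GLGEN_MDIO_I2C_SEL packs several 5-bit PHY addresses into one 32-bit
 * register. For dev_num = 0 the function above shifts right by (0 + 1) * 5 =
 * 5 bits and masks with 0x1f, so a register value of 0x00000240 yields PHY
 * address (0x240 >> 5) & 0x1f = 0x12.
 */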
4917
4918 /**
4919 * i40e_blink_phy_link_led
4920 * @hw: pointer to the HW structure
4921 * @time: how long the LED will blink, in seconds
4922 * @interval: gap between LED on and off in msecs
4923 *
4924 * Blinks PHY link LED
4925 **/
4926 int i40e_blink_phy_link_led(struct i40e_hw *hw,
4927 u32 time, u32 interval)
4928 {
4929 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4930 u16 gpio_led_port;
4931 u8 phy_addr = 0;
4932 int status = 0;
4933 u16 led_ctl;
4934 u8 port_num;
4935 u16 led_reg;
4936 u32 i;
4937
4938 i = rd32(hw, I40E_PFGEN_PORTNUM);
4939 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4940 phy_addr = i40e_get_phy_address(hw, port_num);
4941
4942 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4943 led_addr++) {
4944 status = i40e_read_phy_register_clause45(hw,
4945 I40E_PHY_COM_REG_PAGE,
4946 led_addr, phy_addr,
4947 &led_reg);
4948 if (status)
4949 goto phy_blinking_end;
4950 led_ctl = led_reg;
4951 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4952 led_reg = 0;
4953 status = i40e_write_phy_register_clause45(hw,
4954 I40E_PHY_COM_REG_PAGE,
4955 led_addr, phy_addr,
4956 led_reg);
4957 if (status)
4958 goto phy_blinking_end;
4959 break;
4960 }
4961 }
4962
4963 if (time > 0 && interval > 0) {
4964 for (i = 0; i < time * 1000; i += interval) {
4965 status = i40e_read_phy_register_clause45(hw,
4966 I40E_PHY_COM_REG_PAGE,
4967 led_addr, phy_addr, &led_reg);
4968 if (status)
4969 goto restore_config;
4970 if (led_reg & I40E_PHY_LED_MANUAL_ON)
4971 led_reg = 0;
4972 else
4973 led_reg = I40E_PHY_LED_MANUAL_ON;
4974 status = i40e_write_phy_register_clause45(hw,
4975 I40E_PHY_COM_REG_PAGE,
4976 led_addr, phy_addr, led_reg);
4977 if (status)
4978 goto restore_config;
4979 msleep(interval);
4980 }
4981 }
4982
4983 restore_config:
4984 status = i40e_write_phy_register_clause45(hw,
4985 I40E_PHY_COM_REG_PAGE,
4986 led_addr, phy_addr, led_ctl);
4987
4988 phy_blinking_end:
4989 return status;
4990 }
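/*
 * Usage sketch (an assumption, not upstream code): blink the port
 * identification LED for five seconds, toggling every 500 ms. The routine
 * restores the original LED configuration before returning.
 */
#if 0	/* illustrative sketch only */
static int example_identify_port(struct i40e_hw *hw)
{
	return i40e_blink_phy_link_led(hw, 5, 500);
}
#endif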
4991
4992 /**
4993 * i40e_led_get_reg - read LED register
4994 * @hw: pointer to the HW structure
4995 * @led_addr: LED register address
4996 * @reg_val: read register value
4997 **/
4998 static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
4999 u32 *reg_val)
5000 {
5001 u8 phy_addr = 0;
5002 u8 port_num;
5003 int status;
5004 u32 i;
5005
5006 *reg_val = 0;
5007 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5008 status =
5009 i40e_aq_get_phy_register(hw,
5010 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5011 I40E_PHY_COM_REG_PAGE, true,
5012 I40E_PHY_LED_PROV_REG_1,
5013 reg_val, NULL);
5014 } else {
5015 i = rd32(hw, I40E_PFGEN_PORTNUM);
5016 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5017 phy_addr = i40e_get_phy_address(hw, port_num);
5018 status = i40e_read_phy_register_clause45(hw,
5019 I40E_PHY_COM_REG_PAGE,
5020 led_addr, phy_addr,
5021 (u16 *)reg_val);
5022 }
5023 return status;
5024 }
5025
5026 /**
5027 * i40e_led_set_reg - write LED register
5028 * @hw: pointer to the HW structure
5029 * @led_addr: LED register address
5030 * @reg_val: register value to write
5031 **/
5032 static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5033 u32 reg_val)
5034 {
5035 u8 phy_addr = 0;
5036 u8 port_num;
5037 int status;
5038 u32 i;
5039
5040 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5041 status =
5042 i40e_aq_set_phy_register(hw,
5043 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5044 I40E_PHY_COM_REG_PAGE, true,
5045 I40E_PHY_LED_PROV_REG_1,
5046 reg_val, NULL);
5047 } else {
5048 i = rd32(hw, I40E_PFGEN_PORTNUM);
5049 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5050 phy_addr = i40e_get_phy_address(hw, port_num);
5051 status = i40e_write_phy_register_clause45(hw,
5052 I40E_PHY_COM_REG_PAGE,
5053 led_addr, phy_addr,
5054 (u16)reg_val);
5055 }
5056
5057 return status;
5058 }
5059
5060 /**
5061 * i40e_led_get_phy - return current on/off mode
5062 * @hw: pointer to the hw struct
5063 * @led_addr: address of led register to use
5064 * @val: original value of register to use
5065 *
5066 **/
5067 int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5068 u16 *val)
5069 {
5070 u16 gpio_led_port;
5071 u8 phy_addr = 0;
5072 u32 reg_val_aq;
5073 int status = 0;
5074 u16 temp_addr;
5075 u16 reg_val;
5076 u8 port_num;
5077 u32 i;
5078
5079 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5080 status =
5081 i40e_aq_get_phy_register(hw,
5082 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5083 I40E_PHY_COM_REG_PAGE, true,
5084 I40E_PHY_LED_PROV_REG_1,
5085 &reg_val_aq, NULL);
5086 if (status == I40E_SUCCESS)
5087 *val = (u16)reg_val_aq;
5088 return status;
5089 }
5090 temp_addr = I40E_PHY_LED_PROV_REG_1;
5091 i = rd32(hw, I40E_PFGEN_PORTNUM);
5092 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5093 phy_addr = i40e_get_phy_address(hw, port_num);
5094
5095 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5096 temp_addr++) {
5097 status = i40e_read_phy_register_clause45(hw,
5098 I40E_PHY_COM_REG_PAGE,
5099 temp_addr, phy_addr,
5100 &reg_val);
5101 if (status)
5102 return status;
5103 *val = reg_val;
5104 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5105 *led_addr = temp_addr;
5106 break;
5107 }
5108 }
5109 return status;
5110 }
5111
5112 /**
5113 * i40e_led_set_phy
5114 * @hw: pointer to the HW structure
5115 * @on: true to turn the LED on, false to turn it off
5116 * @led_addr: address of led register to use
5117 * @mode: original register value, plus a bit indicating whether to restore it
5118 *
5119 * Set LEDs on or off when controlled by the PHY
5120 *
5121 **/
5122 int i40e_led_set_phy(struct i40e_hw *hw, bool on,
5123 u16 led_addr, u32 mode)
5124 {
5125 u32 led_ctl = 0;
5126 u32 led_reg = 0;
5127 int status = 0;
5128
5129 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5130 if (status)
5131 return status;
5132 led_ctl = led_reg;
5133 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5134 led_reg = 0;
5135 status = i40e_led_set_reg(hw, led_addr, led_reg);
5136 if (status)
5137 return status;
5138 }
5139 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5140 if (status)
5141 goto restore_config;
5142 if (on)
5143 led_reg = I40E_PHY_LED_MANUAL_ON;
5144 else
5145 led_reg = 0;
5146
5147 status = i40e_led_set_reg(hw, led_addr, led_reg);
5148 if (status)
5149 goto restore_config;
5150 if (mode & I40E_PHY_LED_MODE_ORIG) {
5151 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5152 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5153 }
5154 return status;
5155
5156 restore_config:
5157 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5158 return status;
5159 }
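/*
 * Usage sketch (an assumption, not upstream code): a caller can save the
 * current LED provisioning, force the LED on for identification, and later
 * restore the saved value by passing it back with I40E_PHY_LED_MODE_ORIG set.
 */
#if 0	/* illustrative sketch only */
static int example_led_identify(struct i40e_hw *hw, bool start,
				u16 *saved_addr, u16 *saved_val)
{
	int status;

	if (start) {
		status = i40e_led_get_phy(hw, saved_addr, saved_val);
		if (status)
			return status;
		return i40e_led_set_phy(hw, true, *saved_addr, 0);
	}

	/* restore the original LED configuration */
	return i40e_led_set_phy(hw, false, *saved_addr,
				*saved_val | I40E_PHY_LED_MODE_ORIG);
}
#endif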
5160
5161 /**
5162 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5163 * @hw: pointer to the hw struct
5164 * @reg_addr: register address
5165 * @reg_val: ptr to register value
5166 * @cmd_details: pointer to command details structure or NULL
5167 *
5168 * Use the firmware to read the Rx control register,
5169 * especially useful if the Rx unit is under heavy pressure
5170 **/
5171 int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5172 u32 reg_addr, u32 *reg_val,
5173 struct i40e_asq_cmd_details *cmd_details)
5174 {
5175 struct i40e_aq_desc desc;
5176 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5177 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5178 int status;
5179
5180 if (!reg_val)
5181 return I40E_ERR_PARAM;
5182
5183 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5184
5185 cmd_resp->address = cpu_to_le32(reg_addr);
5186
5187 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5188
5189 if (status == 0)
5190 *reg_val = le32_to_cpu(cmd_resp->value);
5191
5192 return status;
5193 }
5194
5195 /**
5196 * i40e_read_rx_ctl - read from an Rx control register
5197 * @hw: pointer to the hw struct
5198 * @reg_addr: register address
5199 **/
5200 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5201 {
5202 bool use_register;
5203 int status = 0;
5204 int retry = 5;
5205 u32 val = 0;
5206
5207 use_register = (((hw->aq.api_maj_ver == 1) &&
5208 (hw->aq.api_min_ver < 5)) ||
5209 (hw->mac.type == I40E_MAC_X722));
5210 if (!use_register) {
5211 do_retry:
5212 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5213 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5214 usleep_range(1000, 2000);
5215 retry--;
5216 goto do_retry;
5217 }
5218 }
5219
5220 /* if the AQ access failed, try the old-fashioned way */
5221 if (status || use_register)
5222 val = rd32(hw, reg_addr);
5223
5224 return val;
5225 }
5226
5227 /**
5228 * i40e_aq_rx_ctl_write_register
5229 * @hw: pointer to the hw struct
5230 * @reg_addr: register address
5231 * @reg_val: register value
5232 * @cmd_details: pointer to command details structure or NULL
5233 *
5234 * Use the firmware to write to an Rx control register,
5235 * especially useful if the Rx unit is under heavy pressure
5236 **/
5237 int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5238 u32 reg_addr, u32 reg_val,
5239 struct i40e_asq_cmd_details *cmd_details)
5240 {
5241 struct i40e_aq_desc desc;
5242 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5243 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5244 int status;
5245
5246 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5247
5248 cmd->address = cpu_to_le32(reg_addr);
5249 cmd->value = cpu_to_le32(reg_val);
5250
5251 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5252
5253 return status;
5254 }
5255
5256 /**
5257 * i40e_write_rx_ctl - write to an Rx control register
5258 * @hw: pointer to the hw struct
5259 * @reg_addr: register address
5260 * @reg_val: register value
5261 **/
5262 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5263 {
5264 bool use_register;
5265 int status = 0;
5266 int retry = 5;
5267
5268 use_register = (((hw->aq.api_maj_ver == 1) &&
5269 (hw->aq.api_min_ver < 5)) ||
5270 (hw->mac.type == I40E_MAC_X722));
5271 if (!use_register) {
5272 do_retry:
5273 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5274 reg_val, NULL);
5275 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5276 usleep_range(1000, 2000);
5277 retry--;
5278 goto do_retry;
5279 }
5280 }
5281
5282 /* if the AQ access failed, try the old-fashioned way */
5283 if (status || use_register)
5284 wr32(hw, reg_addr, reg_val);
5285 }
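/*
 * Usage sketch (an assumption, not upstream code): Rx control registers are
 * read and written through these helpers so newer firmware can service the
 * access over the AdminQ, with a fallback to direct MMIO. A typical
 * read-modify-write looks like this; the register and bit are placeholders.
 */
#if 0	/* illustrative sketch only */
static void example_set_rx_ctl_bit(struct i40e_hw *hw, u32 reg_addr, u32 bit)
{
	u32 val = i40e_read_rx_ctl(hw, reg_addr);

	i40e_write_rx_ctl(hw, reg_addr, val | bit);
}
#endif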
5286
5287 /**
5288 * i40e_mdio_if_number_selection - MDIO I/F number selection
5289 * @hw: pointer to the hw struct
5290 * @set_mdio: use MDIO I/F number specified by mdio_num
5291 * @mdio_num: MDIO I/F number
5292 * @cmd: pointer to PHY Register command structure
5293 **/
5294 static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5295 u8 mdio_num,
5296 struct i40e_aqc_phy_register_access *cmd)
5297 {
5298 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5299 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5300 cmd->cmd_flags |=
5301 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5302 ((mdio_num <<
5303 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5304 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5305 else
5306 i40e_debug(hw, I40E_DEBUG_PHY,
5307 "MDIO I/F number selection not supported by current FW version.\n");
5308 }
5309 }
5310
5311 /**
5312 * i40e_aq_set_phy_register_ext
5313 * @hw: pointer to the hw struct
5314 * @phy_select: select which phy should be accessed
5315 * @dev_addr: PHY device address
5316 * @page_change: flag to indicate if phy page should be updated
5317 * @set_mdio: use MDIO I/F number specified by mdio_num
5318 * @mdio_num: MDIO I/F number
5319 * @reg_addr: PHY register address
5320 * @reg_val: new register value
5321 * @cmd_details: pointer to command details structure or NULL
5322 *
5323 * Write the external PHY register.
5324 * NOTE: In most cases the MDIO I/F number should not be changed, that's why
5325 * you may use the simple wrapper i40e_aq_set_phy_register.
5326 **/
5327 int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5328 u8 phy_select, u8 dev_addr, bool page_change,
5329 bool set_mdio, u8 mdio_num,
5330 u32 reg_addr, u32 reg_val,
5331 struct i40e_asq_cmd_details *cmd_details)
5332 {
5333 struct i40e_aq_desc desc;
5334 struct i40e_aqc_phy_register_access *cmd =
5335 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5336 int status;
5337
5338 i40e_fill_default_direct_cmd_desc(&desc,
5339 i40e_aqc_opc_set_phy_register);
5340
5341 cmd->phy_interface = phy_select;
5342 cmd->dev_address = dev_addr;
5343 cmd->reg_address = cpu_to_le32(reg_addr);
5344 cmd->reg_value = cpu_to_le32(reg_val);
5345
5346 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5347
5348 if (!page_change)
5349 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5350
5351 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5352
5353 return status;
5354 }
5355
5356 /**
5357 * i40e_aq_get_phy_register_ext
5358 * @hw: pointer to the hw struct
5359 * @phy_select: select which phy should be accessed
5360 * @dev_addr: PHY device address
5361 * @page_change: flag to indicate if phy page should be updated
5362 * @set_mdio: use MDIO I/F number specified by mdio_num
5363 * @mdio_num: MDIO I/F number
5364 * @reg_addr: PHY register address
5365 * @reg_val: read register value
5366 * @cmd_details: pointer to command details structure or NULL
5367 *
5368 * Read the external PHY register.
5369 * NOTE: In most cases the MDIO I/F number should not be changed, that's why
5370 * you may use the simple wrapper i40e_aq_get_phy_register.
5371 **/
5372 int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5373 u8 phy_select, u8 dev_addr, bool page_change,
5374 bool set_mdio, u8 mdio_num,
5375 u32 reg_addr, u32 *reg_val,
5376 struct i40e_asq_cmd_details *cmd_details)
5377 {
5378 struct i40e_aq_desc desc;
5379 struct i40e_aqc_phy_register_access *cmd =
5380 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5381 int status;
5382
5383 i40e_fill_default_direct_cmd_desc(&desc,
5384 i40e_aqc_opc_get_phy_register);
5385
5386 cmd->phy_interface = phy_select;
5387 cmd->dev_address = dev_addr;
5388 cmd->reg_address = cpu_to_le32(reg_addr);
5389
5390 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5391
5392 if (!page_change)
5393 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5394
5395 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5396 if (!status)
5397 *reg_val = le32_to_cpu(cmd->reg_value);
5398
5399 return status;
5400 }
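/*
 * Usage sketch (an assumption, not upstream code): with AQ-capable firmware an
 * external PHY register can be read without touching MSCA/MSRWD directly.
 * Passing set_mdio = false and mdio_num = 0 leaves the MDIO I/F selection to
 * firmware, which is what the simple wrappers mentioned above do.
 */
#if 0	/* illustrative sketch only */
static int example_aq_read_ext_phy(struct i40e_hw *hw, u8 dev_addr,
				   u32 reg_addr, u32 *reg_val)
{
	return i40e_aq_get_phy_register_ext(hw,
					    I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					    dev_addr, true, false, 0,
					    reg_addr, reg_val, NULL);
}
#endif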
5401
5402 /**
5403 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5404 * @hw: pointer to the hw struct
5405 * @buff: command buffer (size in bytes = buff_size)
5406 * @buff_size: buffer size in bytes
5407 * @track_id: package tracking id
5408 * @error_offset: returns error offset
5409 * @error_info: returns error information
5410 * @cmd_details: pointer to command details structure or NULL
5411 **/
5412 int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5413 u16 buff_size, u32 track_id,
5414 u32 *error_offset, u32 *error_info,
5415 struct i40e_asq_cmd_details *cmd_details)
5416 {
5417 struct i40e_aq_desc desc;
5418 struct i40e_aqc_write_personalization_profile *cmd =
5419 (struct i40e_aqc_write_personalization_profile *)
5420 &desc.params.raw;
5421 struct i40e_aqc_write_ddp_resp *resp;
5422 int status;
5423
5424 i40e_fill_default_direct_cmd_desc(&desc,
5425 i40e_aqc_opc_write_personalization_profile);
5426
5427 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5428 if (buff_size > I40E_AQ_LARGE_BUF)
5429 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5430
5431 desc.datalen = cpu_to_le16(buff_size);
5432
5433 cmd->profile_track_id = cpu_to_le32(track_id);
5434
5435 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5436 if (!status) {
5437 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5438 if (error_offset)
5439 *error_offset = le32_to_cpu(resp->error_offset);
5440 if (error_info)
5441 *error_info = le32_to_cpu(resp->error_info);
5442 }
5443
5444 return status;
5445 }
5446
5447 /**
5448 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5449 * @hw: pointer to the hw struct
5450 * @buff: command buffer (size in bytes = buff_size)
5451 * @buff_size: buffer size in bytes
5452 * @flags: AdminQ command flags
5453 * @cmd_details: pointer to command details structure or NULL
5454 **/
5455 int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5456 u16 buff_size, u8 flags,
5457 struct i40e_asq_cmd_details *cmd_details)
5458 {
5459 struct i40e_aq_desc desc;
5460 struct i40e_aqc_get_applied_profiles *cmd =
5461 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5462 int status;
5463
5464 i40e_fill_default_direct_cmd_desc(&desc,
5465 i40e_aqc_opc_get_personalization_profile_list);
5466
5467 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5468 if (buff_size > I40E_AQ_LARGE_BUF)
5469 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5470 desc.datalen = cpu_to_le16(buff_size);
5471
5472 cmd->flags = flags;
5473
5474 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5475
5476 return status;
5477 }
5478
5479 /**
5480 * i40e_find_segment_in_package
5481 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5482 * @pkg_hdr: pointer to the package header to be searched
5483 *
5484 * This function searches a package file for a particular segment type. On
5485 * success it returns a pointer to the segment header, otherwise it will
5486 * return NULL.
5487 **/
5488 struct i40e_generic_seg_header *
5489 i40e_find_segment_in_package(u32 segment_type,
5490 struct i40e_package_header *pkg_hdr)
5491 {
5492 struct i40e_generic_seg_header *segment;
5493 u32 i;
5494
5495 /* Search all package segments for the requested segment type */
5496 for (i = 0; i < pkg_hdr->segment_count; i++) {
5497 segment =
5498 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5499 pkg_hdr->segment_offset[i]);
5500
5501 if (segment->type == segment_type)
5502 return segment;
5503 }
5504
5505 return NULL;
5506 }
5507
5508 /* Get section table in profile */
5509 #define I40E_SECTION_TABLE(profile, sec_tbl) \
5510 do { \
5511 struct i40e_profile_segment *p = (profile); \
5512 u32 count; \
5513 u32 *nvm; \
5514 count = p->device_table_count; \
5515 nvm = (u32 *)&p->device_table[count]; \
5516 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5517 } while (0)
5518
5519 /* Get section header in profile */
5520 #define I40E_SECTION_HEADER(profile, offset) \
5521 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
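/*
 * Layout note (comment only, an illustration rather than upstream text): a
 * profile segment is laid out as the segment header, a device table of
 * device_table_count entries, an NVM table whose first 32-bit word holds the
 * number of words that follow it, and then the section table.
 * I40E_SECTION_TABLE walks exactly that chain: it skips the device table,
 * reads nvm[0] to hop over the NVM table, and lands on the section table,
 * whose section_offset[] entries are byte offsets from the start of the
 * profile used by I40E_SECTION_HEADER. For example, with
 * device_table_count = 1 and nvm[0] = 3, sec_tbl starts at &nvm[4], i.e.
 * 16 bytes past the end of the device table.
 */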
5522
5523 /**
5524 * i40e_find_section_in_profile
5525 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5526 * @profile: pointer to the i40e segment header to be searched
5527 *
5528 * This function searches the i40e segment for a particular section type. On
5529 * success it returns a pointer to the section header, otherwise it will
5530 * return NULL.
5531 **/
5532 struct i40e_profile_section_header *
5533 i40e_find_section_in_profile(u32 section_type,
5534 struct i40e_profile_segment *profile)
5535 {
5536 struct i40e_profile_section_header *sec;
5537 struct i40e_section_table *sec_tbl;
5538 u32 sec_off;
5539 u32 i;
5540
5541 if (profile->header.type != SEGMENT_TYPE_I40E)
5542 return NULL;
5543
5544 I40E_SECTION_TABLE(profile, sec_tbl);
5545
5546 for (i = 0; i < sec_tbl->section_count; i++) {
5547 sec_off = sec_tbl->section_offset[i];
5548 sec = I40E_SECTION_HEADER(profile, sec_off);
5549 if (sec->section.type == section_type)
5550 return sec;
5551 }
5552
5553 return NULL;
5554 }
5555
5556 /**
5557 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5558 * @hw: pointer to the hw struct
5559 * @aq: command buffer containing all data to execute AQ
5560 **/
5561 static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5562 struct i40e_profile_aq_section *aq)
5563 {
5564 struct i40e_aq_desc desc;
5565 u8 *msg = NULL;
5566 u16 msglen;
5567 int status;
5568
5569 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5570 desc.flags |= cpu_to_le16(aq->flags);
5571 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5572
5573 msglen = aq->datalen;
5574 if (msglen) {
5575 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5576 I40E_AQ_FLAG_RD));
5577 if (msglen > I40E_AQ_LARGE_BUF)
5578 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5579 desc.datalen = cpu_to_le16(msglen);
5580 msg = &aq->data[0];
5581 }
5582
5583 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5584
5585 if (status) {
5586 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5587 "unable to exec DDP AQ opcode %u, error %d\n",
5588 aq->opcode, status);
5589 return status;
5590 }
5591
5592 /* copy returned desc to aq_buf */
5593 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5594
5595 return 0;
5596 }
5597
5598 /**
5599 * i40e_validate_profile
5600 * @hw: pointer to the hardware structure
5601 * @profile: pointer to the profile segment of the package to be validated
5602 * @track_id: package tracking id
5603 * @rollback: flag indicating whether the profile is for rollback.
5604 *
5605 * Validates supported devices and profile's sections.
5606 */
5607 static int
5608 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5609 u32 track_id, bool rollback)
5610 {
5611 struct i40e_profile_section_header *sec = NULL;
5612 struct i40e_section_table *sec_tbl;
5613 u32 vendor_dev_id;
5614 int status = 0;
5615 u32 dev_cnt;
5616 u32 sec_off;
5617 u32 i;
5618
5619 if (track_id == I40E_DDP_TRACKID_INVALID) {
5620 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5621 return I40E_NOT_SUPPORTED;
5622 }
5623
5624 dev_cnt = profile->device_table_count;
5625 for (i = 0; i < dev_cnt; i++) {
5626 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5627 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5628 hw->device_id == (vendor_dev_id & 0xFFFF))
5629 break;
5630 }
5631 if (dev_cnt && i == dev_cnt) {
5632 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5633 "Device doesn't support DDP\n");
5634 return I40E_ERR_DEVICE_NOT_SUPPORTED;
5635 }
5636
5637 I40E_SECTION_TABLE(profile, sec_tbl);
5638
5639 /* Validate sections types */
5640 for (i = 0; i < sec_tbl->section_count; i++) {
5641 sec_off = sec_tbl->section_offset[i];
5642 sec = I40E_SECTION_HEADER(profile, sec_off);
5643 if (rollback) {
5644 if (sec->section.type == SECTION_TYPE_MMIO ||
5645 sec->section.type == SECTION_TYPE_AQ ||
5646 sec->section.type == SECTION_TYPE_RB_AQ) {
5647 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5648 "Not a roll-back package\n");
5649 return I40E_NOT_SUPPORTED;
5650 }
5651 } else {
5652 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5653 sec->section.type == SECTION_TYPE_RB_MMIO) {
5654 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5655 "Not an original package\n");
5656 return I40E_NOT_SUPPORTED;
5657 }
5658 }
5659 }
5660
5661 return status;
5662 }
5663
5664 /**
5665 * i40e_write_profile
5666 * @hw: pointer to the hardware structure
5667 * @profile: pointer to the profile segment of the package to be downloaded
5668 * @track_id: package tracking id
5669 *
5670 * Handles the download of a complete package.
5671 */
5672 int
5673 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5674 u32 track_id)
5675 {
5676 struct i40e_profile_section_header *sec = NULL;
5677 struct i40e_profile_aq_section *ddp_aq;
5678 struct i40e_section_table *sec_tbl;
5679 u32 offset = 0, info = 0;
5680 u32 section_size = 0;
5681 int status = 0;
5682 u32 sec_off;
5683 u32 i;
5684
5685 status = i40e_validate_profile(hw, profile, track_id, false);
5686 if (status)
5687 return status;
5688
5689 I40E_SECTION_TABLE(profile, sec_tbl);
5690
5691 for (i = 0; i < sec_tbl->section_count; i++) {
5692 sec_off = sec_tbl->section_offset[i];
5693 sec = I40E_SECTION_HEADER(profile, sec_off);
5694 /* Process generic admin command */
5695 if (sec->section.type == SECTION_TYPE_AQ) {
5696 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5697 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5698 if (status) {
5699 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5700 "Failed to execute aq: section %d, opcode %u\n",
5701 i, ddp_aq->opcode);
5702 break;
5703 }
5704 sec->section.type = SECTION_TYPE_RB_AQ;
5705 }
5706
5707 /* Skip any non-mmio sections */
5708 if (sec->section.type != SECTION_TYPE_MMIO)
5709 continue;
5710
5711 section_size = sec->section.size +
5712 sizeof(struct i40e_profile_section_header);
5713
5714 /* Write MMIO section */
5715 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5716 track_id, &offset, &info, NULL);
5717 if (status) {
5718 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5719 "Failed to write profile: section %d, offset %d, info %d\n",
5720 i, offset, info);
5721 break;
5722 }
5723 }
5724 return status;
5725 }
5726
5727 /**
5728 * i40e_rollback_profile
5729 * @hw: pointer to the hardware structure
5730 * @profile: pointer to the profile segment of the package to be removed
5731 * @track_id: package tracking id
5732 *
5733 * Rolls back previously loaded package.
5734 */
5735 int
5736 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5737 u32 track_id)
5738 {
5739 struct i40e_profile_section_header *sec = NULL;
5740 struct i40e_section_table *sec_tbl;
5741 u32 offset = 0, info = 0;
5742 u32 section_size = 0;
5743 int status = 0;
5744 u32 sec_off;
5745 int i;
5746
5747 status = i40e_validate_profile(hw, profile, track_id, true);
5748 if (status)
5749 return status;
5750
5751 I40E_SECTION_TABLE(profile, sec_tbl);
5752
5753 /* For rollback write sections in reverse */
5754 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5755 sec_off = sec_tbl->section_offset[i];
5756 sec = I40E_SECTION_HEADER(profile, sec_off);
5757
5758 /* Skip any non-rollback sections */
5759 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5760 continue;
5761
5762 section_size = sec->section.size +
5763 sizeof(struct i40e_profile_section_header);
5764
5765 /* Write roll-back MMIO section */
5766 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5767 track_id, &offset, &info, NULL);
5768 if (status) {
5769 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5770 "Failed to write profile: section %d, offset %d, info %d\n",
5771 i, offset, info);
5772 break;
5773 }
5774 }
5775 return status;
5776 }
5777
5778 /**
5779 * i40e_add_pinfo_to_list
5780 * @hw: pointer to the hardware structure
5781 * @profile: pointer to the profile segment of the package
5782 * @profile_info_sec: buffer for information section
5783 * @track_id: package tracking id
5784 *
5785 * Register a profile to the list of loaded profiles.
5786 */
5787 int
5788 i40e_add_pinfo_to_list(struct i40e_hw *hw,
5789 struct i40e_profile_segment *profile,
5790 u8 *profile_info_sec, u32 track_id)
5791 {
5792 struct i40e_profile_section_header *sec = NULL;
5793 struct i40e_profile_info *pinfo;
5794 u32 offset = 0, info = 0;
5795 int status = 0;
5796
5797 sec = (struct i40e_profile_section_header *)profile_info_sec;
5798 sec->tbl_size = 1;
5799 sec->data_end = sizeof(struct i40e_profile_section_header) +
5800 sizeof(struct i40e_profile_info);
5801 sec->section.type = SECTION_TYPE_INFO;
5802 sec->section.offset = sizeof(struct i40e_profile_section_header);
5803 sec->section.size = sizeof(struct i40e_profile_info);
5804 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5805 sec->section.offset);
5806 pinfo->track_id = track_id;
5807 pinfo->version = profile->version;
5808 pinfo->op = I40E_DDP_ADD_TRACKID;
5809 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5810
5811 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5812 track_id, &offset, &info, NULL);
5813
5814 return status;
5815 }
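/*
 * Flow sketch (an assumption, not upstream code): loading a DDP package ties
 * the routines above together: locate the i40e segment in the package, write
 * the profile, then register it in the applied-profile list. Buffer
 * allocation and track-id extraction are the caller's responsibility and are
 * only hinted at here.
 */
#if 0	/* illustrative sketch only */
static int example_load_ddp(struct i40e_hw *hw,
			    struct i40e_package_header *pkg_hdr,
			    u32 track_id, u8 *profile_info_sec)
{
	struct i40e_profile_segment *profile;
	int status;

	profile = (struct i40e_profile_segment *)
		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (!profile)
		return I40E_ERR_PARAM;

	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	return i40e_add_pinfo_to_list(hw, profile, profile_info_sec, track_id);
}
#endif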
5816
5817 /**
5818 * i40e_aq_add_cloud_filters
5819 * @hw: pointer to the hardware structure
5820 * @seid: VSI seid to add cloud filters to
5821 * @filters: Buffer which contains the filters to be added
5822 * @filter_count: number of filters contained in the buffer
5823 *
5824 * Set the cloud filters for a given VSI. The contents of the
5825 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5826 * of the function.
5827 *
5828 **/
5829 int
5830 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5831 struct i40e_aqc_cloud_filters_element_data *filters,
5832 u8 filter_count)
5833 {
5834 struct i40e_aq_desc desc;
5835 struct i40e_aqc_add_remove_cloud_filters *cmd =
5836 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5837 u16 buff_len;
5838 int status;
5839
5840 i40e_fill_default_direct_cmd_desc(&desc,
5841 i40e_aqc_opc_add_cloud_filters);
5842
5843 buff_len = filter_count * sizeof(*filters);
5844 desc.datalen = cpu_to_le16(buff_len);
5845 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5846 cmd->num_filters = filter_count;
5847 cmd->seid = cpu_to_le16(seid);
5848
5849 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5850
5851 return status;
5852 }
5853
5854 /**
5855 * i40e_aq_add_cloud_filters_bb
5856 * @hw: pointer to the hardware structure
5857 * @seid: VSI seid to add cloud filters to
5858 * @filters: Buffer which contains the filters in big buffer to be added
5859 * @filter_count: number of filters contained in the buffer
5860 *
5861 * Set the big buffer cloud filters for a given VSI. The contents of the
5862 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5863 * function.
5864 *
5865 **/
5866 int
5867 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5868 struct i40e_aqc_cloud_filters_element_bb *filters,
5869 u8 filter_count)
5870 {
5871 struct i40e_aq_desc desc;
5872 struct i40e_aqc_add_remove_cloud_filters *cmd =
5873 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5874 u16 buff_len;
5875 int status;
5876 int i;
5877
5878 i40e_fill_default_direct_cmd_desc(&desc,
5879 i40e_aqc_opc_add_cloud_filters);
5880
5881 buff_len = filter_count * sizeof(*filters);
5882 desc.datalen = cpu_to_le16(buff_len);
5883 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5884 cmd->num_filters = filter_count;
5885 cmd->seid = cpu_to_le16(seid);
5886 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5887
5888 for (i = 0; i < filter_count; i++) {
5889 u16 tnl_type;
5890 u32 ti;
5891
5892 tnl_type = (le16_to_cpu(filters[i].element.flags) &
5893 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5894 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5895
5896 /* Due to hardware eccentricities, the VNI for Geneve is shifted
5897 * one more byte further than normally used for Tenant ID in
5898 * other tunnel types.
5899 */
5900 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5901 ti = le32_to_cpu(filters[i].element.tenant_id);
5902 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5903 }
5904 }
5905
5906 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5907
5908 return status;
5909 }
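/*
 * Worked example (comment only, an illustration rather than upstream text):
 * for a Geneve filter with VNI 0x123456 the loop above rewrites the
 * little-endian tenant_id as 0x123456 << 8 = 0x12345600, placing the 24-bit
 * VNI one byte higher than the Tenant ID position used by other tunnel types.
 */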
5910
5911 /**
5912 * i40e_aq_rem_cloud_filters
5913 * @hw: pointer to the hardware structure
5914 * @seid: VSI seid to remove cloud filters from
5915 * @filters: Buffer which contains the filters to be removed
5916 * @filter_count: number of filters contained in the buffer
5917 *
5918 * Remove the cloud filters for a given VSI. The contents of the
5919 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5920 * of the function.
5921 *
5922 **/
5923 int
5924 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5925 struct i40e_aqc_cloud_filters_element_data *filters,
5926 u8 filter_count)
5927 {
5928 struct i40e_aq_desc desc;
5929 struct i40e_aqc_add_remove_cloud_filters *cmd =
5930 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5931 u16 buff_len;
5932 int status;
5933
5934 i40e_fill_default_direct_cmd_desc(&desc,
5935 i40e_aqc_opc_remove_cloud_filters);
5936
5937 buff_len = filter_count * sizeof(*filters);
5938 desc.datalen = cpu_to_le16(buff_len);
5939 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5940 cmd->num_filters = filter_count;
5941 cmd->seid = cpu_to_le16(seid);
5942
5943 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5944
5945 return status;
5946 }
5947
5948 /**
5949 * i40e_aq_rem_cloud_filters_bb
5950 * @hw: pointer to the hardware structure
5951 * @seid: VSI seid to remove cloud filters from
5952 * @filters: Buffer which contains the filters in big buffer to be removed
5953 * @filter_count: number of filters contained in the buffer
5954 *
5955 * Remove the big buffer cloud filters for a given VSI. The contents of the
5956 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5957 * function.
5958 *
5959 **/
5960 int
5961 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5962 struct i40e_aqc_cloud_filters_element_bb *filters,
5963 u8 filter_count)
5964 {
5965 struct i40e_aq_desc desc;
5966 struct i40e_aqc_add_remove_cloud_filters *cmd =
5967 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5968 u16 buff_len;
5969 int status;
5970 int i;
5971
5972 i40e_fill_default_direct_cmd_desc(&desc,
5973 i40e_aqc_opc_remove_cloud_filters);
5974
5975 buff_len = filter_count * sizeof(*filters);
5976 desc.datalen = cpu_to_le16(buff_len);
5977 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5978 cmd->num_filters = filter_count;
5979 cmd->seid = cpu_to_le16(seid);
5980 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5981
5982 for (i = 0; i < filter_count; i++) {
5983 u16 tnl_type;
5984 u32 ti;
5985
5986 tnl_type = (le16_to_cpu(filters[i].element.flags) &
5987 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5988 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5989
5990 /* Due to hardware eccentricities, the VNI for Geneve is shifted
5991 * one more byte further than normally used for Tenant ID in
5992 * other tunnel types.
5993 */
5994 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5995 ti = le32_to_cpu(filters[i].element.tenant_id);
5996 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5997 }
5998 }
5999
6000 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6001
6002 return status;
6003 }
6004