1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7 #include "ice_flow.h"
8
9 #define ICE_PF_RESET_WAIT_COUNT 300
10
11 /**
12 * ice_set_mac_type - Sets MAC type
13 * @hw: pointer to the HW structure
14 *
15 * This function sets the MAC type of the adapter based on the
16 * vendor ID and device ID stored in the HW structure.
17 */
18 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
19 {
20 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
21 return ICE_ERR_DEVICE_NOT_SUPPORTED;
22
23 switch (hw->device_id) {
24 case ICE_DEV_ID_E810C_BACKPLANE:
25 case ICE_DEV_ID_E810C_QSFP:
26 case ICE_DEV_ID_E810C_SFP:
27 case ICE_DEV_ID_E810_XXV_BACKPLANE:
28 case ICE_DEV_ID_E810_XXV_QSFP:
29 case ICE_DEV_ID_E810_XXV_SFP:
30 hw->mac_type = ICE_MAC_E810;
31 break;
32 case ICE_DEV_ID_E823C_10G_BASE_T:
33 case ICE_DEV_ID_E823C_BACKPLANE:
34 case ICE_DEV_ID_E823C_QSFP:
35 case ICE_DEV_ID_E823C_SFP:
36 case ICE_DEV_ID_E823C_SGMII:
37 case ICE_DEV_ID_E822C_10G_BASE_T:
38 case ICE_DEV_ID_E822C_BACKPLANE:
39 case ICE_DEV_ID_E822C_QSFP:
40 case ICE_DEV_ID_E822C_SFP:
41 case ICE_DEV_ID_E822C_SGMII:
42 case ICE_DEV_ID_E822L_10G_BASE_T:
43 case ICE_DEV_ID_E822L_BACKPLANE:
44 case ICE_DEV_ID_E822L_SFP:
45 case ICE_DEV_ID_E822L_SGMII:
46 case ICE_DEV_ID_E823L_10G_BASE_T:
47 case ICE_DEV_ID_E823L_1GBE:
48 case ICE_DEV_ID_E823L_BACKPLANE:
49 case ICE_DEV_ID_E823L_QSFP:
50 case ICE_DEV_ID_E823L_SFP:
51 hw->mac_type = ICE_MAC_GENERIC;
52 break;
53 default:
54 hw->mac_type = ICE_MAC_UNKNOWN;
55 break;
56 }
57
58 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
59 return 0;
60 }
61
62 /**
63 * ice_clear_pf_cfg - Clear PF configuration
64 * @hw: pointer to the hardware structure
65 *
66 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
67 * configuration, flow director filters, etc.).
68 */
69 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
70 {
71 struct ice_aq_desc desc;
72
73 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
74
75 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
76 }
77
78 /**
79 * ice_aq_manage_mac_read - manage MAC address read command
80 * @hw: pointer to the HW struct
81 * @buf: a virtual buffer to hold the manage MAC read response
82 * @buf_size: Size of the virtual buffer
83 * @cd: pointer to command details structure or NULL
84 *
85 * This function is used to return the per-PF station MAC address (0x0107).
86 * NOTE: Upon successful completion of this command, MAC address information
87 * is returned in the user-specified buffer, which should be interpreted as a
88 * "manage_mac_read" response.
89 * Responses such as the various MAC addresses are stored in the HW struct
90 * (port.mac). ice_discover_dev_caps is expected to be called before this
91 * function is called.
92 */
93 static enum ice_status
94 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
95 struct ice_sq_cd *cd)
96 {
97 struct ice_aqc_manage_mac_read_resp *resp;
98 struct ice_aqc_manage_mac_read *cmd;
99 struct ice_aq_desc desc;
100 enum ice_status status;
101 u16 flags;
102 u8 i;
103
104 cmd = &desc.params.mac_read;
105
106 if (buf_size < sizeof(*resp))
107 return ICE_ERR_BUF_TOO_SHORT;
108
109 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
110
111 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
112 if (status)
113 return status;
114
115 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
116 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
117
118 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
119 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
120 return ICE_ERR_CFG;
121 }
122
123 /* A single port can report up to two (LAN and WoL) addresses */
124 for (i = 0; i < cmd->num_addr; i++)
125 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
126 ether_addr_copy(hw->port_info->mac.lan_addr,
127 resp[i].mac_addr);
128 ether_addr_copy(hw->port_info->mac.perm_addr,
129 resp[i].mac_addr);
130 break;
131 }
132
133 return 0;
134 }
135
136 /**
137 * ice_aq_get_phy_caps - returns PHY capabilities
138 * @pi: port information structure
139 * @qual_mods: report qualified modules
140 * @report_mode: report mode capabilities
141 * @pcaps: structure for PHY capabilities to be filled
142 * @cd: pointer to command details structure or NULL
143 *
144 * Returns the various PHY capabilities supported on the Port (0x0600)
145 */
146 enum ice_status
147 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
148 struct ice_aqc_get_phy_caps_data *pcaps,
149 struct ice_sq_cd *cd)
150 {
151 struct ice_aqc_get_phy_caps *cmd;
152 u16 pcaps_size = sizeof(*pcaps);
153 struct ice_aq_desc desc;
154 enum ice_status status;
155 struct ice_hw *hw;
156
157 cmd = &desc.params.get_phy;
158
159 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
160 return ICE_ERR_PARAM;
161 hw = pi->hw;
162
163 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
164
165 if (qual_mods)
166 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
167
168 cmd->param0 |= cpu_to_le16(report_mode);
169 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
170
171 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
172 report_mode);
173 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
174 (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
175 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
176 (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
177 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
178 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
179 pcaps->low_power_ctrl_an);
180 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
181 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
182 pcaps->eeer_value);
183 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
184 pcaps->link_fec_options);
185 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
186 pcaps->module_compliance_enforcement);
187 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
188 pcaps->extended_compliance_code);
189 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
190 pcaps->module_type[0]);
191 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
192 pcaps->module_type[1]);
193 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
194 pcaps->module_type[2]);
195
196 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
197 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
198 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
199 memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
200 sizeof(pi->phy.link_info.module_type));
201 }
202
203 return status;
204 }
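/* Illustrative usage sketch (not part of the driver): callers of
 * ice_aq_get_phy_caps() typically allocate an ice_aqc_get_phy_caps_data
 * buffer, query the topology capabilities, and free the buffer afterwards,
 * mirroring what ice_init_hw() does later in this file.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(hw->port_info, false,
 *				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
 *	devm_kfree(ice_hw_to_dev(hw), pcaps);
 */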
205
206 /**
207 * ice_aq_get_link_topo_handle - get link topology node return status
208 * @pi: port information structure
209 * @node_type: requested node type
210 * @cd: pointer to command details structure or NULL
211 *
212 * Get link topology node return status for specified node type (0x06E0)
213 *
214 * Node type cage can be used to determine if cage is present. If AQC
215 * returns error (ENOENT), then no cage present. If no cage present, then
216 * connection type is backplane or BASE-T.
217 */
218 static enum ice_status
219 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
220 struct ice_sq_cd *cd)
221 {
222 struct ice_aqc_get_link_topo *cmd;
223 struct ice_aq_desc desc;
224
225 cmd = &desc.params.get_link_topo;
226
227 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
228
229 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
230 ICE_AQC_LINK_TOPO_NODE_CTX_S);
231
232 /* set node type */
233 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
234
235 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
236 }
237
238 /**
239 * ice_is_media_cage_present
240 * @pi: port information structure
241 *
242 * Returns true if media cage is present, else false. If no cage, then
243 * media type is backplane or BASE-T.
244 */
245 static bool ice_is_media_cage_present(struct ice_port_info *pi)
246 {
247 /* Node type cage can be used to determine if cage is present. If AQC
248 * returns error (ENOENT), then no cage present. If no cage present then
249 * connection type is backplane or BASE-T.
250 */
251 return !ice_aq_get_link_topo_handle(pi,
252 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
253 NULL);
254 }
255
256 /**
257 * ice_get_media_type - Gets media type
258 * @pi: port information structure
259 */
260 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
261 {
262 struct ice_link_status *hw_link_info;
263
264 if (!pi)
265 return ICE_MEDIA_UNKNOWN;
266
267 hw_link_info = &pi->phy.link_info;
268 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
269 /* If more than one media type is selected, report unknown */
270 return ICE_MEDIA_UNKNOWN;
271
272 if (hw_link_info->phy_type_low) {
273 /* 1G SGMII is a special case where some DA cable PHYs
274 * may show this as an option when it really shouldn't
275 * be since SGMII is meant to be between a MAC and a PHY
276 * in a backplane. Try to detect this case and handle it
277 */
278 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
279 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
280 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
281 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
282 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
283 return ICE_MEDIA_DA;
284
285 switch (hw_link_info->phy_type_low) {
286 case ICE_PHY_TYPE_LOW_1000BASE_SX:
287 case ICE_PHY_TYPE_LOW_1000BASE_LX:
288 case ICE_PHY_TYPE_LOW_10GBASE_SR:
289 case ICE_PHY_TYPE_LOW_10GBASE_LR:
290 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
291 case ICE_PHY_TYPE_LOW_25GBASE_SR:
292 case ICE_PHY_TYPE_LOW_25GBASE_LR:
293 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
294 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
295 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
296 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
297 case ICE_PHY_TYPE_LOW_50GBASE_SR:
298 case ICE_PHY_TYPE_LOW_50GBASE_FR:
299 case ICE_PHY_TYPE_LOW_50GBASE_LR:
300 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
301 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
302 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
303 case ICE_PHY_TYPE_LOW_100GBASE_DR:
304 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
305 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
306 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
307 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
308 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
309 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
310 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
311 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
312 return ICE_MEDIA_FIBER;
313 case ICE_PHY_TYPE_LOW_100BASE_TX:
314 case ICE_PHY_TYPE_LOW_1000BASE_T:
315 case ICE_PHY_TYPE_LOW_2500BASE_T:
316 case ICE_PHY_TYPE_LOW_5GBASE_T:
317 case ICE_PHY_TYPE_LOW_10GBASE_T:
318 case ICE_PHY_TYPE_LOW_25GBASE_T:
319 return ICE_MEDIA_BASET;
320 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
321 case ICE_PHY_TYPE_LOW_25GBASE_CR:
322 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
323 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
324 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
325 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
326 case ICE_PHY_TYPE_LOW_50GBASE_CP:
327 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
328 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
329 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
330 return ICE_MEDIA_DA;
331 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
332 case ICE_PHY_TYPE_LOW_40G_XLAUI:
333 case ICE_PHY_TYPE_LOW_50G_LAUI2:
334 case ICE_PHY_TYPE_LOW_50G_AUI2:
335 case ICE_PHY_TYPE_LOW_50G_AUI1:
336 case ICE_PHY_TYPE_LOW_100G_AUI4:
337 case ICE_PHY_TYPE_LOW_100G_CAUI4:
338 if (ice_is_media_cage_present(pi))
339 return ICE_MEDIA_DA;
340 fallthrough;
341 case ICE_PHY_TYPE_LOW_1000BASE_KX:
342 case ICE_PHY_TYPE_LOW_2500BASE_KX:
343 case ICE_PHY_TYPE_LOW_2500BASE_X:
344 case ICE_PHY_TYPE_LOW_5GBASE_KR:
345 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
346 case ICE_PHY_TYPE_LOW_25GBASE_KR:
347 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
348 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
349 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
350 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
351 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
352 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
353 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
354 return ICE_MEDIA_BACKPLANE;
355 }
356 } else {
357 switch (hw_link_info->phy_type_high) {
358 case ICE_PHY_TYPE_HIGH_100G_AUI2:
359 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
360 if (ice_is_media_cage_present(pi))
361 return ICE_MEDIA_DA;
362 fallthrough;
363 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
364 return ICE_MEDIA_BACKPLANE;
365 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
366 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
367 return ICE_MEDIA_FIBER;
368 }
369 }
370 return ICE_MEDIA_UNKNOWN;
371 }
372
373 /**
374 * ice_aq_get_link_info
375 * @pi: port information structure
376 * @ena_lse: enable/disable LinkStatusEvent reporting
377 * @link: pointer to link status structure - optional
378 * @cd: pointer to command details structure or NULL
379 *
380 * Get Link Status (0x0607). Returns the link status of the adapter.
381 */
382 enum ice_status
383 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
384 struct ice_link_status *link, struct ice_sq_cd *cd)
385 {
386 struct ice_aqc_get_link_status_data link_data = { 0 };
387 struct ice_aqc_get_link_status *resp;
388 struct ice_link_status *li_old, *li;
389 enum ice_media_type *hw_media_type;
390 struct ice_fc_info *hw_fc_info;
391 bool tx_pause, rx_pause;
392 struct ice_aq_desc desc;
393 enum ice_status status;
394 struct ice_hw *hw;
395 u16 cmd_flags;
396
397 if (!pi)
398 return ICE_ERR_PARAM;
399 hw = pi->hw;
400 li_old = &pi->phy.link_info_old;
401 hw_media_type = &pi->phy.media_type;
402 li = &pi->phy.link_info;
403 hw_fc_info = &pi->fc;
404
405 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
406 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
407 resp = &desc.params.get_link_status;
408 resp->cmd_flags = cpu_to_le16(cmd_flags);
409 resp->lport_num = pi->lport;
410
411 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
412
413 if (status)
414 return status;
415
416 /* save off old link status information */
417 *li_old = *li;
418
419 /* update current link status information */
420 li->link_speed = le16_to_cpu(link_data.link_speed);
421 li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
422 li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
423 *hw_media_type = ice_get_media_type(pi);
424 li->link_info = link_data.link_info;
425 li->an_info = link_data.an_info;
426 li->ext_info = link_data.ext_info;
427 li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
428 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
429 li->topo_media_conflict = link_data.topo_media_conflict;
430 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
431 ICE_AQ_CFG_PACING_TYPE_M);
432
433 /* update fc info */
434 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
435 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
436 if (tx_pause && rx_pause)
437 hw_fc_info->current_mode = ICE_FC_FULL;
438 else if (tx_pause)
439 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
440 else if (rx_pause)
441 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
442 else
443 hw_fc_info->current_mode = ICE_FC_NONE;
444
445 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
446
447 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
448 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
449 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
450 (unsigned long long)li->phy_type_low);
451 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
452 (unsigned long long)li->phy_type_high);
453 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
454 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
455 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
456 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
457 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
458 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
459 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
460 li->max_frame_size);
461 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
462
463 /* save link status information */
464 if (link)
465 *link = *li;
466
467 /* flag cleared so calling functions don't call AQ again */
468 pi->phy.get_link_info = false;
469
470 return 0;
471 }
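/* Illustrative usage sketch (not part of the driver): refresh the cached link
 * state and read back the reported speed. ICE_AQ_LINK_UP is assumed to be the
 * link-up bit defined in ice_adminq_cmd.h; it is not defined in this file.
 *
 *	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
 *	if (!status &&
 *	    (hw->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		speed = hw->port_info->phy.link_info.link_speed;
 */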
472
473 /**
474 * ice_fill_tx_timer_and_fc_thresh
475 * @hw: pointer to the HW struct
476 * @cmd: pointer to MAC cfg structure
477 *
478 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
479 * descriptor
480 */
481 static void
482 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
483 struct ice_aqc_set_mac_cfg *cmd)
484 {
485 u16 fc_thres_val, tx_timer_val;
486 u32 val;
487
488 /* We read back the transmit timer and FC threshold value of
489 * LFC. Thus, we will use index =
490 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
491 *
492 * Also, because we are operating on transmit timer and FC
493 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
494 */
495 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
496
497 /* Retrieve the transmit timer */
498 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
499 tx_timer_val = val &
500 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
501 cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
502
503 /* Retrieve the FC threshold */
504 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
505 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
506
507 cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
508 }
509
510 /**
511 * ice_aq_set_mac_cfg
512 * @hw: pointer to the HW struct
513 * @max_frame_size: Maximum Frame Size to be supported
514 * @cd: pointer to command details structure or NULL
515 *
516 * Set MAC configuration (0x0603)
517 */
518 enum ice_status
519 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
520 {
521 struct ice_aqc_set_mac_cfg *cmd;
522 struct ice_aq_desc desc;
523
524 cmd = &desc.params.set_mac_cfg;
525
526 if (max_frame_size == 0)
527 return ICE_ERR_PARAM;
528
529 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
530
531 cmd->max_frame_size = cpu_to_le16(max_frame_size);
532
533 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
534
535 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
536 }
537
538 /**
539 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
540 * @hw: pointer to the HW struct
541 */
542 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
543 {
544 struct ice_switch_info *sw;
545 enum ice_status status;
546
547 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
548 sizeof(*hw->switch_info), GFP_KERNEL);
549 sw = hw->switch_info;
550
551 if (!sw)
552 return ICE_ERR_NO_MEMORY;
553
554 INIT_LIST_HEAD(&sw->vsi_list_map_head);
555
556 status = ice_init_def_sw_recp(hw);
557 if (status) {
558 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
559 return status;
560 }
561 return 0;
562 }
563
564 /**
565 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
566 * @hw: pointer to the HW struct
567 */
568 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
569 {
570 struct ice_switch_info *sw = hw->switch_info;
571 struct ice_vsi_list_map_info *v_pos_map;
572 struct ice_vsi_list_map_info *v_tmp_map;
573 struct ice_sw_recipe *recps;
574 u8 i;
575
576 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
577 list_entry) {
578 list_del(&v_pos_map->list_entry);
579 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
580 }
581 recps = hw->switch_info->recp_list;
582 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
583 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
584
585 recps[i].root_rid = i;
586 mutex_destroy(&recps[i].filt_rule_lock);
587 list_for_each_entry_safe(lst_itr, tmp_entry,
588 &recps[i].filt_rules, list_entry) {
589 list_del(&lst_itr->list_entry);
590 devm_kfree(ice_hw_to_dev(hw), lst_itr);
591 }
592 }
593 ice_rm_all_sw_replay_rule_info(hw);
594 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
595 devm_kfree(ice_hw_to_dev(hw), sw);
596 }
597
598 /**
599 * ice_get_fw_log_cfg - get FW logging configuration
600 * @hw: pointer to the HW struct
601 */
602 static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
603 {
604 struct ice_aq_desc desc;
605 enum ice_status status;
606 __le16 *config;
607 u16 size;
608
609 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
610 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
611 if (!config)
612 return ICE_ERR_NO_MEMORY;
613
614 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
615
616 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
617 if (!status) {
618 u16 i;
619
620 /* Save FW logging information into the HW structure */
621 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
622 u16 v, m, flgs;
623
624 v = le16_to_cpu(config[i]);
625 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
626 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
627
628 if (m < ICE_AQC_FW_LOG_ID_MAX)
629 hw->fw_log.evnts[m].cur = flgs;
630 }
631 }
632
633 devm_kfree(ice_hw_to_dev(hw), config);
634
635 return status;
636 }
637
638 /**
639 * ice_cfg_fw_log - configure FW logging
640 * @hw: pointer to the HW struct
641 * @enable: enable certain FW logging events if true, disable all if false
642 *
643 * This function enables/disables the FW logging via Rx CQ events and a UART
644 * port based on predetermined configurations. FW logging via the Rx CQ can be
645 * enabled/disabled for individual PFs. However, FW logging via the UART can
646 * only be enabled/disabled for all PFs on the same device.
647 *
648 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
649 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
650 * before initializing the device.
651 *
652 * When re/configuring FW logging, callers need to update the "cfg" elements of
653 * the hw->fw_log.evnts array with the desired logging event configurations for
654 * modules of interest. When disabling FW logging completely, the callers can
655 * just pass false in the "enable" parameter. On completion, the function will
656 * update the "cur" element of the hw->fw_log.evnts array with the resulting
657 * logging event configurations of the modules that are being re/configured. FW
658 * logging modules that are not part of a reconfiguration operation retain their
659 * previous states.
660 *
661 * Before resetting the device, it is recommended that the driver disables FW
662 * logging before shutting down the control queue. When disabling FW logging
663 * ("enable" = false), the latest configurations of FW logging events stored in
664 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
665 * a device reset.
666 *
667 * When enabling FW logging to emit log messages via the Rx CQ during the
668 * device's initialization phase, a mechanism alternative to interrupt handlers
669 * needs to be used to extract FW log messages from the Rx CQ periodically and
670 * to prevent the Rx CQ from being full and stalling other types of control
671 * messages from FW to SW. Interrupts are typically disabled during the device's
672 * initialization phase.
673 */
674 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
675 {
676 struct ice_aqc_fw_logging *cmd;
677 enum ice_status status = 0;
678 u16 i, chgs = 0, len = 0;
679 struct ice_aq_desc desc;
680 __le16 *data = NULL;
681 u8 actv_evnts = 0;
682 void *buf = NULL;
683
684 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
685 return 0;
686
687 /* Disable FW logging only when the control queue is still responsive */
688 if (!enable &&
689 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
690 return 0;
691
692 /* Get current FW log settings */
693 status = ice_get_fw_log_cfg(hw);
694 if (status)
695 return status;
696
697 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
698 cmd = &desc.params.fw_logging;
699
700 /* Indicate which controls are valid */
701 if (hw->fw_log.cq_en)
702 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
703
704 if (hw->fw_log.uart_en)
705 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
706
707 if (enable) {
708 /* Fill in an array of entries with FW logging modules and
709 * logging events being reconfigured.
710 */
711 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
712 u16 val;
713
714 /* Keep track of enabled event types */
715 actv_evnts |= hw->fw_log.evnts[i].cfg;
716
717 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
718 continue;
719
720 if (!data) {
721 data = devm_kcalloc(ice_hw_to_dev(hw),
722 ICE_AQC_FW_LOG_ID_MAX,
723 sizeof(*data),
724 GFP_KERNEL);
725 if (!data)
726 return ICE_ERR_NO_MEMORY;
727 }
728
729 val = i << ICE_AQC_FW_LOG_ID_S;
730 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
731 data[chgs++] = cpu_to_le16(val);
732 }
733
734 /* Only enable FW logging if at least one module is specified.
735 * If FW logging is currently enabled but all modules are not
736 * enabled to emit log messages, disable FW logging altogether.
737 */
738 if (actv_evnts) {
739 /* Leave if there is effectively no change */
740 if (!chgs)
741 goto out;
742
743 if (hw->fw_log.cq_en)
744 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
745
746 if (hw->fw_log.uart_en)
747 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
748
749 buf = data;
750 len = sizeof(*data) * chgs;
751 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
752 }
753 }
754
755 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
756 if (!status) {
757 /* Update the current configuration to reflect events enabled.
758 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
759 * logging mode is enabled for the device. They do not reflect
760 * actual modules being enabled to emit log messages. So, their
761 * values remain unchanged even when all modules are disabled.
762 */
763 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
764
765 hw->fw_log.actv_evnts = actv_evnts;
766 for (i = 0; i < cnt; i++) {
767 u16 v, m;
768
769 if (!enable) {
770 /* When disabling all FW logging events as part
771 * of device's de-initialization, the original
772 * configurations are retained, and can be used
773 * to reconfigure FW logging later if the device
774 * is re-initialized.
775 */
776 hw->fw_log.evnts[i].cur = 0;
777 continue;
778 }
779
780 v = le16_to_cpu(data[i]);
781 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
782 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
783 }
784 }
785
786 out:
787 if (data)
788 devm_kfree(ice_hw_to_dev(hw), data);
789
790 return status;
791 }
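/* Illustrative usage sketch (not part of the driver): as described in the
 * comment above ice_cfg_fw_log(), a caller enables FW logging by setting the
 * enable bits and the per-module "cfg" masks in hw->fw_log before device
 * initialization. The module index and event mask below are hypothetical.
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module_id].cfg = event_mask;
 *	status = ice_cfg_fw_log(hw, true);
 */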
792
793 /**
794 * ice_output_fw_log
795 * @hw: pointer to the HW struct
796 * @desc: pointer to the AQ message descriptor
797 * @buf: pointer to the buffer accompanying the AQ message
798 *
799 * Formats a FW Log message and outputs it via the standard driver logs.
800 */
801 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
802 {
803 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
804 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
805 le16_to_cpu(desc->datalen));
806 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
807 }
808
809 /**
810 * ice_get_itr_intrl_gran
811 * @hw: pointer to the HW struct
812 *
813 * Determines the ITR/INTRL granularities based on the maximum aggregate
814 * bandwidth according to the device's configuration during power-on.
815 */
816 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
817 {
818 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
819 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
820 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
821
822 switch (max_agg_bw) {
823 case ICE_MAX_AGG_BW_200G:
824 case ICE_MAX_AGG_BW_100G:
825 case ICE_MAX_AGG_BW_50G:
826 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
827 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
828 break;
829 case ICE_MAX_AGG_BW_25G:
830 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
831 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
832 break;
833 }
834 }
835
836 /**
837 * ice_init_hw - main hardware initialization routine
838 * @hw: pointer to the hardware structure
839 */
840 enum ice_status ice_init_hw(struct ice_hw *hw)
841 {
842 struct ice_aqc_get_phy_caps_data *pcaps;
843 enum ice_status status;
844 u16 mac_buf_len;
845 void *mac_buf;
846
847 /* Set MAC type based on DeviceID */
848 status = ice_set_mac_type(hw);
849 if (status)
850 return status;
851
852 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
853 PF_FUNC_RID_FUNC_NUM_M) >>
854 PF_FUNC_RID_FUNC_NUM_S;
855
856 status = ice_reset(hw, ICE_RESET_PFR);
857 if (status)
858 return status;
859
860 ice_get_itr_intrl_gran(hw);
861
862 status = ice_create_all_ctrlq(hw);
863 if (status)
864 goto err_unroll_cqinit;
865
866 /* Enable FW logging. Not fatal if this fails. */
867 status = ice_cfg_fw_log(hw, true);
868 if (status)
869 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
870
871 status = ice_clear_pf_cfg(hw);
872 if (status)
873 goto err_unroll_cqinit;
874
875 /* Set bit to enable Flow Director filters */
876 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
877 INIT_LIST_HEAD(&hw->fdir_list_head);
878
879 ice_clear_pxe_mode(hw);
880
881 status = ice_init_nvm(hw);
882 if (status)
883 goto err_unroll_cqinit;
884
885 status = ice_get_caps(hw);
886 if (status)
887 goto err_unroll_cqinit;
888
889 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
890 sizeof(*hw->port_info), GFP_KERNEL);
891 if (!hw->port_info) {
892 status = ICE_ERR_NO_MEMORY;
893 goto err_unroll_cqinit;
894 }
895
896 /* set the back pointer to HW */
897 hw->port_info->hw = hw;
898
899 /* Initialize port_info struct with switch configuration data */
900 status = ice_get_initial_sw_cfg(hw);
901 if (status)
902 goto err_unroll_alloc;
903
904 hw->evb_veb = true;
905
906 /* Query the allocated resources for Tx scheduler */
907 status = ice_sched_query_res_alloc(hw);
908 if (status) {
909 ice_debug(hw, ICE_DBG_SCHED,
910 "Failed to get scheduler allocated resources\n");
911 goto err_unroll_alloc;
912 }
913
914 /* Initialize port_info struct with scheduler data */
915 status = ice_sched_init_port(hw->port_info);
916 if (status)
917 goto err_unroll_sched;
918
919 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
920 if (!pcaps) {
921 status = ICE_ERR_NO_MEMORY;
922 goto err_unroll_sched;
923 }
924
925 /* Initialize port_info struct with PHY capabilities */
926 status = ice_aq_get_phy_caps(hw->port_info, false,
927 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
928 devm_kfree(ice_hw_to_dev(hw), pcaps);
929 if (status)
930 goto err_unroll_sched;
931
932 /* Initialize port_info struct with link information */
933 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
934 if (status)
935 goto err_unroll_sched;
936
937 /* need a valid SW entry point to build a Tx tree */
938 if (!hw->sw_entry_point_layer) {
939 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
940 status = ICE_ERR_CFG;
941 goto err_unroll_sched;
942 }
943 INIT_LIST_HEAD(&hw->agg_list);
944 /* Initialize max burst size */
945 if (!hw->max_burst_size)
946 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
947
948 status = ice_init_fltr_mgmt_struct(hw);
949 if (status)
950 goto err_unroll_sched;
951
952 /* Get MAC information */
953 /* A single port can report up to two (LAN and WoL) addresses */
954 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
955 sizeof(struct ice_aqc_manage_mac_read_resp),
956 GFP_KERNEL);
957 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
958
959 if (!mac_buf) {
960 status = ICE_ERR_NO_MEMORY;
961 goto err_unroll_fltr_mgmt_struct;
962 }
963
964 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
965 devm_kfree(ice_hw_to_dev(hw), mac_buf);
966
967 if (status)
968 goto err_unroll_fltr_mgmt_struct;
969 /* enable jumbo frame support at MAC level */
970 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
971 if (status)
972 goto err_unroll_fltr_mgmt_struct;
973 /* Obtain counter base index which would be used by flow director */
974 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
975 if (status)
976 goto err_unroll_fltr_mgmt_struct;
977 status = ice_init_hw_tbls(hw);
978 if (status)
979 goto err_unroll_fltr_mgmt_struct;
980 mutex_init(&hw->tnl_lock);
981 return 0;
982
983 err_unroll_fltr_mgmt_struct:
984 ice_cleanup_fltr_mgmt_struct(hw);
985 err_unroll_sched:
986 ice_sched_cleanup_all(hw);
987 err_unroll_alloc:
988 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
989 err_unroll_cqinit:
990 ice_destroy_all_ctrlq(hw);
991 return status;
992 }
993
994 /**
995 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
996 * @hw: pointer to the hardware structure
997 *
998 * This should be called only during nominal operation, not as a result of
999 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
1000 * applicable initializations if it fails for any reason.
1001 */
1002 void ice_deinit_hw(struct ice_hw *hw)
1003 {
1004 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1005 ice_cleanup_fltr_mgmt_struct(hw);
1006
1007 ice_sched_cleanup_all(hw);
1008 ice_sched_clear_agg(hw);
1009 ice_free_seg(hw);
1010 ice_free_hw_tbls(hw);
1011 mutex_destroy(&hw->tnl_lock);
1012
1013 if (hw->port_info) {
1014 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1015 hw->port_info = NULL;
1016 }
1017
1018 /* Attempt to disable FW logging before shutting down control queues */
1019 ice_cfg_fw_log(hw, false);
1020 ice_destroy_all_ctrlq(hw);
1021
1022 /* Clear VSI contexts if not already cleared */
1023 ice_clear_all_vsi_ctx(hw);
1024 }
1025
1026 /**
1027 * ice_check_reset - Check to see if a global reset is complete
1028 * @hw: pointer to the hardware structure
1029 */
1030 enum ice_status ice_check_reset(struct ice_hw *hw)
1031 {
1032 u32 cnt, reg = 0, grst_timeout, uld_mask;
1033
1034 /* Poll for Device Active state in case a recent CORER, GLOBR,
1035 * or EMPR has occurred. The grst delay value is in 100ms units.
1036 * Add 1sec for outstanding AQ commands that can take a long time.
1037 */
1038 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1039 GLGEN_RSTCTL_GRSTDEL_S) + 10;
1040
1041 for (cnt = 0; cnt < grst_timeout; cnt++) {
1042 mdelay(100);
1043 reg = rd32(hw, GLGEN_RSTAT);
1044 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1045 break;
1046 }
1047
1048 if (cnt == grst_timeout) {
1049 ice_debug(hw, ICE_DBG_INIT,
1050 "Global reset polling failed to complete.\n");
1051 return ICE_ERR_RESET_FAILED;
1052 }
1053
1054 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1055 GLNVM_ULD_PCIER_DONE_1_M |\
1056 GLNVM_ULD_CORER_DONE_M |\
1057 GLNVM_ULD_GLOBR_DONE_M |\
1058 GLNVM_ULD_POR_DONE_M |\
1059 GLNVM_ULD_POR_DONE_1_M |\
1060 GLNVM_ULD_PCIER_DONE_2_M)
1061
1062 uld_mask = ICE_RESET_DONE_MASK;
1063
1064 /* Device is Active; check Global Reset processes are done */
1065 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1066 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1067 if (reg == uld_mask) {
1068 ice_debug(hw, ICE_DBG_INIT,
1069 "Global reset processes done. %d\n", cnt);
1070 break;
1071 }
1072 mdelay(10);
1073 }
1074
1075 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1076 ice_debug(hw, ICE_DBG_INIT,
1077 "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1078 reg);
1079 return ICE_ERR_RESET_FAILED;
1080 }
1081
1082 return 0;
1083 }
1084
1085 /**
1086 * ice_pf_reset - Reset the PF
1087 * @hw: pointer to the hardware structure
1088 *
1089 * If a global reset has been triggered, this function checks
1090 * for its completion and then issues the PF reset
1091 */
1092 static enum ice_status ice_pf_reset(struct ice_hw *hw)
1093 {
1094 u32 cnt, reg;
1095
1096 /* If at function entry a global reset was already in progress, i.e.
1097 * state is not 'device active' or any of the reset done bits are not
1098 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1099 * global reset is done.
1100 */
1101 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1102 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1103 /* poll on global reset currently in progress until done */
1104 if (ice_check_reset(hw))
1105 return ICE_ERR_RESET_FAILED;
1106
1107 return 0;
1108 }
1109
1110 /* Reset the PF */
1111 reg = rd32(hw, PFGEN_CTRL);
1112
1113 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1114
1115 /* Wait for the PFR to complete. The wait time is the global config lock
1116 * timeout plus the PFR timeout which will account for a possible reset
1117 * that is occurring during a download package operation.
1118 */
1119 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1120 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1121 reg = rd32(hw, PFGEN_CTRL);
1122 if (!(reg & PFGEN_CTRL_PFSWR_M))
1123 break;
1124
1125 mdelay(1);
1126 }
1127
1128 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1129 ice_debug(hw, ICE_DBG_INIT,
1130 "PF reset polling failed to complete.\n");
1131 return ICE_ERR_RESET_FAILED;
1132 }
1133
1134 return 0;
1135 }
1136
1137 /**
1138 * ice_reset - Perform different types of reset
1139 * @hw: pointer to the hardware structure
1140 * @req: reset request
1141 *
1142 * This function triggers a reset as specified by the req parameter.
1143 *
1144 * Note:
1145 * If anything other than a PF reset is triggered, PXE mode is restored.
1146 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1147 * interface has been restored in the rebuild flow.
1148 */
1149 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1150 {
1151 u32 val = 0;
1152
1153 switch (req) {
1154 case ICE_RESET_PFR:
1155 return ice_pf_reset(hw);
1156 case ICE_RESET_CORER:
1157 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1158 val = GLGEN_RTRIG_CORER_M;
1159 break;
1160 case ICE_RESET_GLOBR:
1161 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1162 val = GLGEN_RTRIG_GLOBR_M;
1163 break;
1164 default:
1165 return ICE_ERR_PARAM;
1166 }
1167
1168 val |= rd32(hw, GLGEN_RTRIG);
1169 wr32(hw, GLGEN_RTRIG, val);
1170 ice_flush(hw);
1171
1172 /* wait for the FW to be ready */
1173 return ice_check_reset(hw);
1174 }
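/* Illustrative usage sketch (not part of the driver): requesting a CoreR.
 * Per the note above, any reset other than PFR restores PXE mode, so the
 * rebuild flow must call ice_clear_pxe_mode() again once the AQ interface
 * has been restored.
 *
 *	status = ice_reset(hw, ICE_RESET_CORER);
 *	if (!status) {
 *		// ... recreate control queues in the rebuild flow ...
 *		ice_clear_pxe_mode(hw);
 *	}
 */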
1175
1176 /**
1177 * ice_copy_rxq_ctx_to_hw
1178 * @hw: pointer to the hardware structure
1179 * @ice_rxq_ctx: pointer to the rxq context
1180 * @rxq_index: the index of the Rx queue
1181 *
1182 * Copies rxq context from dense structure to HW register space
1183 */
1184 static enum ice_status
1185 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1186 {
1187 u8 i;
1188
1189 if (!ice_rxq_ctx)
1190 return ICE_ERR_BAD_PTR;
1191
1192 if (rxq_index > QRX_CTRL_MAX_INDEX)
1193 return ICE_ERR_PARAM;
1194
1195 /* Copy each dword separately to HW */
1196 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1197 wr32(hw, QRX_CONTEXT(i, rxq_index),
1198 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1199
1200 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1201 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1202 }
1203
1204 return 0;
1205 }
1206
1207 /* LAN Rx Queue Context */
1208 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1209 /* Field Width LSB */
1210 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1211 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1212 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1213 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1214 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1215 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1216 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1217 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1218 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1219 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1220 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1221 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1222 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1223 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1224 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1225 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1226 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1227 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1228 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1229 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1230 { 0 }
1231 };
1232
1233 /**
1234 * ice_write_rxq_ctx
1235 * @hw: pointer to the hardware structure
1236 * @rlan_ctx: pointer to the rxq context
1237 * @rxq_index: the index of the Rx queue
1238 *
1239 * Converts the rxq context from a sparse to a dense structure, writes it to
1240 * HW register space, and enables the hardware to prefetch descriptors
1241 * instead of fetching them only on demand.
1242 */
1243 enum ice_status
1244 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1245 u32 rxq_index)
1246 {
1247 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1248
1249 if (!rlan_ctx)
1250 return ICE_ERR_BAD_PTR;
1251
1252 rlan_ctx->prefena = 1;
1253
1254 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1255 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1256 }
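/* Illustrative usage sketch (not part of the driver): programming an Rx queue
 * context. The ring variables, queue index, and the 7-bit shifts (assumed
 * 128-byte address/buffer units) are hypothetical placeholders; real values
 * come from the ring setup code.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
 */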
1257
1258 /* LAN Tx Queue Context */
1259 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1260 /* Field Width LSB */
1261 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1262 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1263 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1264 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1265 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1266 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1267 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1268 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1269 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1270 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1271 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1272 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1273 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1274 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1275 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1276 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1277 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1278 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1279 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1280 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1281 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1282 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1283 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1284 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1285 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1286 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1287 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1288 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1289 { 0 }
1290 };
1291
1292 /* FW Admin Queue command wrappers */
1293
1294 /* Software lock/mutex that is meant to be held while the Global Config Lock
1295 * in firmware is acquired by the software to prevent most (but not all) types
1296 * of AQ commands from being sent to FW
1297 */
1298 DEFINE_MUTEX(ice_global_cfg_lock_sw);
1299
1300 /**
1301 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1302 * @hw: pointer to the HW struct
1303 * @desc: descriptor describing the command
1304 * @buf: buffer to use for indirect commands (NULL for direct commands)
1305 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1306 * @cd: pointer to command details structure
1307 *
1308 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1309 */
1310 enum ice_status
1311 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1312 u16 buf_size, struct ice_sq_cd *cd)
1313 {
1314 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1315 bool lock_acquired = false;
1316 enum ice_status status;
1317
1318 /* When a package download is in process (i.e. when the firmware's
1319 * Global Configuration Lock resource is held), only the Download
1320 * Package, Get Version, Get Package Info List and Release Resource
1321 * (with resource ID set to Global Config Lock) AdminQ commands are
1322 * allowed; all others must block until the package download completes
1323 * and the Global Config Lock is released. See also
1324 * ice_acquire_global_cfg_lock().
1325 */
1326 switch (le16_to_cpu(desc->opcode)) {
1327 case ice_aqc_opc_download_pkg:
1328 case ice_aqc_opc_get_pkg_info_list:
1329 case ice_aqc_opc_get_ver:
1330 break;
1331 case ice_aqc_opc_release_res:
1332 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1333 break;
1334 fallthrough;
1335 default:
1336 mutex_lock(&ice_global_cfg_lock_sw);
1337 lock_acquired = true;
1338 break;
1339 }
1340
1341 status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1342 if (lock_acquired)
1343 mutex_unlock(&ice_global_cfg_lock_sw);
1344
1345 return status;
1346 }
1347
1348 /**
1349 * ice_aq_get_fw_ver
1350 * @hw: pointer to the HW struct
1351 * @cd: pointer to command details structure or NULL
1352 *
1353 * Get the firmware version (0x0001) from the admin queue commands
1354 */
1355 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1356 {
1357 struct ice_aqc_get_ver *resp;
1358 struct ice_aq_desc desc;
1359 enum ice_status status;
1360
1361 resp = &desc.params.get_ver;
1362
1363 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1364
1365 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1366
1367 if (!status) {
1368 hw->fw_branch = resp->fw_branch;
1369 hw->fw_maj_ver = resp->fw_major;
1370 hw->fw_min_ver = resp->fw_minor;
1371 hw->fw_patch = resp->fw_patch;
1372 hw->fw_build = le32_to_cpu(resp->fw_build);
1373 hw->api_branch = resp->api_branch;
1374 hw->api_maj_ver = resp->api_major;
1375 hw->api_min_ver = resp->api_minor;
1376 hw->api_patch = resp->api_patch;
1377 }
1378
1379 return status;
1380 }
1381
1382 /**
1383 * ice_aq_send_driver_ver
1384 * @hw: pointer to the HW struct
1385 * @dv: driver's major, minor version
1386 * @cd: pointer to command details structure or NULL
1387 *
1388 * Send the driver version (0x0002) to the firmware
1389 */
1390 enum ice_status
1391 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1392 struct ice_sq_cd *cd)
1393 {
1394 struct ice_aqc_driver_ver *cmd;
1395 struct ice_aq_desc desc;
1396 u16 len;
1397
1398 cmd = &desc.params.driver_ver;
1399
1400 if (!dv)
1401 return ICE_ERR_PARAM;
1402
1403 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1404
1405 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1406 cmd->major_ver = dv->major_ver;
1407 cmd->minor_ver = dv->minor_ver;
1408 cmd->build_ver = dv->build_ver;
1409 cmd->subbuild_ver = dv->subbuild_ver;
1410
1411 len = 0;
1412 while (len < sizeof(dv->driver_string) &&
1413 isascii(dv->driver_string[len]) && dv->driver_string[len])
1414 len++;
1415
1416 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1417 }
1418
1419 /**
1420 * ice_aq_q_shutdown
1421 * @hw: pointer to the HW struct
1422 * @unloading: is the driver unloading itself
1423 *
1424 * Tell the Firmware that we're shutting down the AdminQ and whether
1425 * or not the driver is unloading as well (0x0003).
1426 */
1427 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1428 {
1429 struct ice_aqc_q_shutdown *cmd;
1430 struct ice_aq_desc desc;
1431
1432 cmd = &desc.params.q_shutdown;
1433
1434 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1435
1436 if (unloading)
1437 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1438
1439 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1440 }
1441
1442 /**
1443 * ice_aq_req_res
1444 * @hw: pointer to the HW struct
1445 * @res: resource ID
1446 * @access: access type
1447 * @sdp_number: resource number
1448 * @timeout: the maximum time in ms that the driver may hold the resource
1449 * @cd: pointer to command details structure or NULL
1450 *
1451 * Requests common resource using the admin queue commands (0x0008).
1452 * When attempting to acquire the Global Config Lock, the driver can
1453 * learn of three states:
1454 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1455 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1456 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1457 * successfully downloaded the package; the driver does
1458 * not have to download the package and can continue
1459 * loading
1460 *
1461 * Note that if the caller is in an acquire lock, perform action, release lock
1462 * phase of operation, it is possible that the FW may detect a timeout and issue
1463 * a CORER. In this case, the driver will receive a CORER interrupt and will
1464 * have to determine its cause. The calling thread that is handling this flow
1465 * will likely get an error propagated back to it indicating the Download
1466 * Package, Update Package or the Release Resource AQ commands timed out.
1467 */
1468 static enum ice_status
1469 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1470 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1471 struct ice_sq_cd *cd)
1472 {
1473 struct ice_aqc_req_res *cmd_resp;
1474 struct ice_aq_desc desc;
1475 enum ice_status status;
1476
1477 cmd_resp = &desc.params.res_owner;
1478
1479 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1480
1481 cmd_resp->res_id = cpu_to_le16(res);
1482 cmd_resp->access_type = cpu_to_le16(access);
1483 cmd_resp->res_number = cpu_to_le32(sdp_number);
1484 cmd_resp->timeout = cpu_to_le32(*timeout);
1485 *timeout = 0;
1486
1487 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1488
1489 /* The completion specifies the maximum time in ms that the driver
1490 * may hold the resource in the Timeout field.
1491 */
1492
1493 /* Global config lock response utilizes an additional status field.
1494 *
1495 * If the Global config lock resource is held by some other driver, the
1496 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1497 * and the timeout field indicates the maximum time the current owner
1498 * of the resource has to free it.
1499 */
1500 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1501 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1502 *timeout = le32_to_cpu(cmd_resp->timeout);
1503 return 0;
1504 } else if (le16_to_cpu(cmd_resp->status) ==
1505 ICE_AQ_RES_GLBL_IN_PROG) {
1506 *timeout = le32_to_cpu(cmd_resp->timeout);
1507 return ICE_ERR_AQ_ERROR;
1508 } else if (le16_to_cpu(cmd_resp->status) ==
1509 ICE_AQ_RES_GLBL_DONE) {
1510 return ICE_ERR_AQ_NO_WORK;
1511 }
1512
1513 /* invalid FW response, force a timeout immediately */
1514 *timeout = 0;
1515 return ICE_ERR_AQ_ERROR;
1516 }
1517
1518 /* If the resource is held by some other driver, the command completes
1519 * with a busy return value and the timeout field indicates the maximum
1520 * time the current owner of the resource has to free it.
1521 */
1522 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1523 *timeout = le32_to_cpu(cmd_resp->timeout);
1524
1525 return status;
1526 }
1527
1528 /**
1529 * ice_aq_release_res
1530 * @hw: pointer to the HW struct
1531 * @res: resource ID
1532 * @sdp_number: resource number
1533 * @cd: pointer to command details structure or NULL
1534 *
1535 * release common resource using the admin queue commands (0x0009)
1536 */
1537 static enum ice_status
1538 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1539 struct ice_sq_cd *cd)
1540 {
1541 struct ice_aqc_req_res *cmd;
1542 struct ice_aq_desc desc;
1543
1544 cmd = &desc.params.res_owner;
1545
1546 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1547
1548 cmd->res_id = cpu_to_le16(res);
1549 cmd->res_number = cpu_to_le32(sdp_number);
1550
1551 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1552 }
1553
1554 /**
1555 * ice_acquire_res
1556 * @hw: pointer to the HW structure
1557 * @res: resource ID
1558 * @access: access type (read or write)
1559 * @timeout: timeout in milliseconds
1560 *
1561 * This function will attempt to acquire the ownership of a resource.
1562 */
1563 enum ice_status
1564 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1565 enum ice_aq_res_access_type access, u32 timeout)
1566 {
1567 #define ICE_RES_POLLING_DELAY_MS 10
1568 u32 delay = ICE_RES_POLLING_DELAY_MS;
1569 u32 time_left = timeout;
1570 enum ice_status status;
1571
1572 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1573
1574 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1575 * previously acquired the resource and performed any necessary updates;
1576 * in this case the caller does not obtain the resource and has no
1577 * further work to do.
1578 */
1579 if (status == ICE_ERR_AQ_NO_WORK)
1580 goto ice_acquire_res_exit;
1581
1582 if (status)
1583 ice_debug(hw, ICE_DBG_RES,
1584 "resource %d acquire type %d failed.\n", res, access);
1585
1586 /* If necessary, poll until the current lock owner times out */
1587 timeout = time_left;
1588 while (status && timeout && time_left) {
1589 mdelay(delay);
1590 timeout = (timeout > delay) ? timeout - delay : 0;
1591 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1592
1593 if (status == ICE_ERR_AQ_NO_WORK)
1594 /* lock free, but no work to do */
1595 break;
1596
1597 if (!status)
1598 /* lock acquired */
1599 break;
1600 }
1601 if (status && status != ICE_ERR_AQ_NO_WORK)
1602 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1603
1604 ice_acquire_res_exit:
1605 if (status == ICE_ERR_AQ_NO_WORK) {
1606 if (access == ICE_RES_WRITE)
1607 ice_debug(hw, ICE_DBG_RES,
1608 "resource indicates no work to do.\n");
1609 else
1610 ice_debug(hw, ICE_DBG_RES,
1611 "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1612 }
1613 return status;
1614 }
1615
1616 /**
1617 * ice_release_res
1618 * @hw: pointer to the HW structure
1619 * @res: resource ID
1620 *
1621 * This function will release a resource using the proper Admin Command.
1622 */
1623 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1624 {
1625 enum ice_status status;
1626 u32 total_delay = 0;
1627
1628 status = ice_aq_release_res(hw, res, 0, NULL);
1629
1630 /* there are some rare cases when trying to release the resource
1631 * results in an admin queue timeout, so handle them correctly
1632 */
1633 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1634 (total_delay < hw->adminq.sq_cmd_timeout)) {
1635 mdelay(1);
1636 status = ice_aq_release_res(hw, res, 0, NULL);
1637 total_delay++;
1638 }
1639 }
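/* Illustrative usage sketch (not part of the driver): the acquire/use/release
 * pattern for the Global Config Lock described above ice_aq_req_res(). The
 * 5000 ms timeout is a hypothetical value.
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, 5000);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return 0;	// another PF already downloaded the package
 *	if (status)
 *		return status;
 *	// ... download the package here ...
 *	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 */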
1640
1641 /**
1642 * ice_aq_alloc_free_res - command to allocate/free resources
1643 * @hw: pointer to the HW struct
1644 * @num_entries: number of resource entries in buffer
1645 * @buf: Indirect buffer to hold data parameters and response
1646 * @buf_size: size of buffer for indirect commands
1647 * @opc: pass in the command opcode
1648 * @cd: pointer to command details structure or NULL
1649 *
1650 * Helper function to allocate/free resources using the admin queue commands
1651 */
1652 enum ice_status
1653 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1654 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1655 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1656 {
1657 struct ice_aqc_alloc_free_res_cmd *cmd;
1658 struct ice_aq_desc desc;
1659
1660 cmd = &desc.params.sw_res_ctrl;
1661
1662 if (!buf)
1663 return ICE_ERR_PARAM;
1664
1665 if (buf_size < (num_entries * sizeof(buf->elem[0])))
1666 return ICE_ERR_PARAM;
1667
1668 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1669
1670 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1671
1672 cmd->num_entries = cpu_to_le16(num_entries);
1673
1674 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1675 }
1676
1677 /**
1678 * ice_alloc_hw_res - allocate resource
1679 * @hw: pointer to the HW struct
1680 * @type: type of resource
1681 * @num: number of resources to allocate
1682 * @btm: allocate from bottom
1683 * @res: pointer to array that will receive the resources
1684 */
1685 enum ice_status
1686 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1687 {
1688 struct ice_aqc_alloc_free_res_elem *buf;
1689 enum ice_status status;
1690 u16 buf_len;
1691
1692 buf_len = struct_size(buf, elem, num);
1693 buf = kzalloc(buf_len, GFP_KERNEL);
1694 if (!buf)
1695 return ICE_ERR_NO_MEMORY;
1696
1697 /* Prepare buffer to allocate resource. */
1698 buf->num_elems = cpu_to_le16(num);
1699 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1700 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1701 if (btm)
1702 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1703
1704 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1705 ice_aqc_opc_alloc_res, NULL);
1706 if (status)
1707 goto ice_alloc_res_exit;
1708
1709 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1710
1711 ice_alloc_res_exit:
1712 kfree(buf);
1713 return status;
1714 }
1715
1716 /**
1717 * ice_free_hw_res - free allocated HW resource
1718 * @hw: pointer to the HW struct
1719 * @type: type of resource to free
1720 * @num: number of resources
1721 * @res: pointer to array that contains the resources to free
1722 */
1723 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1724 {
1725 struct ice_aqc_alloc_free_res_elem *buf;
1726 enum ice_status status;
1727 u16 buf_len;
1728
1729 buf_len = struct_size(buf, elem, num);
1730 buf = kzalloc(buf_len, GFP_KERNEL);
1731 if (!buf)
1732 return ICE_ERR_NO_MEMORY;
1733
1734 /* Prepare buffer to free resource. */
1735 buf->num_elems = cpu_to_le16(num);
1736 buf->res_type = cpu_to_le16(type);
1737 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1738
1739 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1740 ice_aqc_opc_free_res, NULL);
1741 if (status)
1742 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1743
1744 kfree(buf);
1745 return status;
1746 }
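
 /* Usage sketch (illustrative only): allocating and later freeing a block of
  * HW resources with the helpers above, where res_type stands in for one of
  * the ICE_AQC_RES_TYPE_* values:
  *
  *	u16 res_ids[2];
  *	enum ice_status status;
  *
  *	status = ice_alloc_hw_res(hw, res_type, 2, false, res_ids);
  *	if (!status)
  *		ice_free_hw_res(hw, res_type, 2, res_ids);
  */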
1747
1748 /**
1749 * ice_get_num_per_func - determine number of resources per PF
1750 * @hw: pointer to the HW structure
1751 * @max: value to be evenly split between each PF
1752 *
1753 * Determine the number of valid functions by going through the bitmap returned
1754 * from parsing capabilities and use this to calculate the number of resources
1755 * per PF based on the max value passed in.
1756 */
1757 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1758 {
1759 u8 funcs;
1760
1761 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1762 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1763 ICE_CAPS_VALID_FUNCS_M);
1764
1765 if (!funcs)
1766 return 0;
1767
1768 return max / funcs;
1769 }
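
 /* Worked example (illustrative): with valid_functions = 0xFF (8 enabled PFs)
  * and max = 768 device-wide VSIs, each PF is guaranteed 768 / 8 = 96 VSIs.
  */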
1770
1771 /**
1772 * ice_parse_common_caps - parse common device/function capabilities
1773 * @hw: pointer to the HW struct
1774 * @caps: pointer to common capabilities structure
1775 * @elem: the capability element to parse
1776 * @prefix: message prefix for tracing capabilities
1777 *
1778 * Given a capability element, extract relevant details into the common
1779 * capability structure.
1780 *
1781 * Returns: true if the capability matches one of the common capability ids,
1782 * false otherwise.
1783 */
1784 static bool
1785 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1786 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1787 {
1788 u32 logical_id = le32_to_cpu(elem->logical_id);
1789 u32 phys_id = le32_to_cpu(elem->phys_id);
1790 u32 number = le32_to_cpu(elem->number);
1791 u16 cap = le16_to_cpu(elem->cap);
1792 bool found = true;
1793
1794 switch (cap) {
1795 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1796 caps->valid_functions = number;
1797 ice_debug(hw, ICE_DBG_INIT,
1798 "%s: valid_functions (bitmap) = %d\n", prefix,
1799 caps->valid_functions);
1800 break;
1801 case ICE_AQC_CAPS_SRIOV:
1802 caps->sr_iov_1_1 = (number == 1);
1803 ice_debug(hw, ICE_DBG_INIT,
1804 "%s: sr_iov_1_1 = %d\n", prefix,
1805 caps->sr_iov_1_1);
1806 break;
1807 case ICE_AQC_CAPS_DCB:
1808 caps->dcb = (number == 1);
1809 caps->active_tc_bitmap = logical_id;
1810 caps->maxtc = phys_id;
1811 ice_debug(hw, ICE_DBG_INIT,
1812 "%s: dcb = %d\n", prefix, caps->dcb);
1813 ice_debug(hw, ICE_DBG_INIT,
1814 "%s: active_tc_bitmap = %d\n", prefix,
1815 caps->active_tc_bitmap);
1816 ice_debug(hw, ICE_DBG_INIT,
1817 "%s: maxtc = %d\n", prefix, caps->maxtc);
1818 break;
1819 case ICE_AQC_CAPS_RSS:
1820 caps->rss_table_size = number;
1821 caps->rss_table_entry_width = logical_id;
1822 ice_debug(hw, ICE_DBG_INIT,
1823 "%s: rss_table_size = %d\n", prefix,
1824 caps->rss_table_size);
1825 ice_debug(hw, ICE_DBG_INIT,
1826 "%s: rss_table_entry_width = %d\n", prefix,
1827 caps->rss_table_entry_width);
1828 break;
1829 case ICE_AQC_CAPS_RXQS:
1830 caps->num_rxq = number;
1831 caps->rxq_first_id = phys_id;
1832 ice_debug(hw, ICE_DBG_INIT,
1833 "%s: num_rxq = %d\n", prefix,
1834 caps->num_rxq);
1835 ice_debug(hw, ICE_DBG_INIT,
1836 "%s: rxq_first_id = %d\n", prefix,
1837 caps->rxq_first_id);
1838 break;
1839 case ICE_AQC_CAPS_TXQS:
1840 caps->num_txq = number;
1841 caps->txq_first_id = phys_id;
1842 ice_debug(hw, ICE_DBG_INIT,
1843 "%s: num_txq = %d\n", prefix,
1844 caps->num_txq);
1845 ice_debug(hw, ICE_DBG_INIT,
1846 "%s: txq_first_id = %d\n", prefix,
1847 caps->txq_first_id);
1848 break;
1849 case ICE_AQC_CAPS_MSIX:
1850 caps->num_msix_vectors = number;
1851 caps->msix_vector_first_id = phys_id;
1852 ice_debug(hw, ICE_DBG_INIT,
1853 "%s: num_msix_vectors = %d\n", prefix,
1854 caps->num_msix_vectors);
1855 ice_debug(hw, ICE_DBG_INIT,
1856 "%s: msix_vector_first_id = %d\n", prefix,
1857 caps->msix_vector_first_id);
1858 break;
1859 case ICE_AQC_CAPS_PENDING_NVM_VER:
1860 caps->nvm_update_pending_nvm = true;
1861 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
1862 break;
1863 case ICE_AQC_CAPS_PENDING_OROM_VER:
1864 caps->nvm_update_pending_orom = true;
1865 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
1866 break;
1867 case ICE_AQC_CAPS_PENDING_NET_VER:
1868 caps->nvm_update_pending_netlist = true;
1869 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
1870 break;
1871 case ICE_AQC_CAPS_NVM_MGMT:
1872 caps->nvm_unified_update =
1873 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1874 true : false;
1875 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1876 caps->nvm_unified_update);
1877 break;
1878 case ICE_AQC_CAPS_MAX_MTU:
1879 caps->max_mtu = number;
1880 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1881 prefix, caps->max_mtu);
1882 break;
1883 default:
1884 /* Not one of the recognized common capabilities */
1885 found = false;
1886 }
1887
1888 return found;
1889 }
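
 /* Example (illustrative): an element with cap == ICE_AQC_CAPS_MAX_MTU and
  * number == 9728 simply results in caps->max_mtu = 9728; an element whose
  * cap value is not handled above returns false so the caller can report it
  * as an unknown capability.
  */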
1890
1891 /**
1892 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
1893 * @hw: pointer to the HW structure
1894 * @caps: pointer to capabilities structure to fix
1895 *
1896 * Re-calculate the capabilities that are dependent on the number of physical
1897 * ports; i.e. some features are not supported or function differently on
1898 * devices with more than 4 ports.
1899 */
1900 static void
1901 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
1902 {
1903 /* This assumes device capabilities are always scanned before function
1904 * capabilities during the initialization flow.
1905 */
1906 if (hw->dev_caps.num_funcs > 4) {
1907 /* Max 4 TCs per port */
1908 caps->maxtc = 4;
1909 ice_debug(hw, ICE_DBG_INIT,
1910 "reducing maxtc to %d (based on #ports)\n",
1911 caps->maxtc);
1912 }
1913 }
1914
1915 /**
1916 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
1917 * @hw: pointer to the HW struct
1918 * @func_p: pointer to function capabilities structure
1919 * @cap: pointer to the capability element to parse
1920 *
1921 * Extract function capabilities for ICE_AQC_CAPS_VF.
1922 */
1923 static void
1924 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1925 struct ice_aqc_list_caps_elem *cap)
1926 {
1927 u32 logical_id = le32_to_cpu(cap->logical_id);
1928 u32 number = le32_to_cpu(cap->number);
1929
1930 func_p->num_allocd_vfs = number;
1931 func_p->vf_base_id = logical_id;
1932 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
1933 func_p->num_allocd_vfs);
1934 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
1935 func_p->vf_base_id);
1936 }
1937
1938 /**
1939 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
1940 * @hw: pointer to the HW struct
1941 * @func_p: pointer to function capabilities structure
1942 * @cap: pointer to the capability element to parse
1943 *
1944 * Extract function capabilities for ICE_AQC_CAPS_VSI.
1945 */
1946 static void
1947 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1948 struct ice_aqc_list_caps_elem *cap)
1949 {
1950 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
1951 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
1952 le32_to_cpu(cap->number));
1953 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
1954 func_p->guar_num_vsi);
1955 }
1956
1957 /**
1958 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
1959 * @hw: pointer to the HW struct
1960 * @func_p: pointer to function capabilities structure
1961 *
1962 * Extract function capabilities for ICE_AQC_CAPS_FD.
1963 */
1964 static void
1965 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
1966 {
1967 u32 reg_val, val;
1968
1969 reg_val = rd32(hw, GLQF_FD_SIZE);
1970 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1971 GLQF_FD_SIZE_FD_GSIZE_S;
1972 func_p->fd_fltr_guar =
1973 ice_get_num_per_func(hw, val);
1974 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1975 GLQF_FD_SIZE_FD_BSIZE_S;
1976 func_p->fd_fltr_best_effort = val;
1977
1978 ice_debug(hw, ICE_DBG_INIT,
1979 "func caps: fd_fltr_guar = %d\n",
1980 func_p->fd_fltr_guar);
1981 ice_debug(hw, ICE_DBG_INIT,
1982 "func caps: fd_fltr_best_effort = %d\n",
1983 func_p->fd_fltr_best_effort);
1984 }
1985
1986 /**
1987 * ice_parse_func_caps - Parse function capabilities
1988 * @hw: pointer to the HW struct
1989 * @func_p: pointer to function capabilities structure
1990 * @buf: buffer containing the function capability records
1991 * @cap_count: the number of capabilities
1992 *
1993 * Helper function to parse function (0x000A) capabilities list. For
1994 * capabilities shared between device and function, this relies on
1995 * ice_parse_common_caps.
1996 *
1997 * Loop through the list of provided capabilities and extract the relevant
1998 * data into the function capabilities structure.
1999 */
2000 static void
2001 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2002 void *buf, u32 cap_count)
2003 {
2004 struct ice_aqc_list_caps_elem *cap_resp;
2005 u32 i;
2006
2007 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2008
2009 memset(func_p, 0, sizeof(*func_p));
2010
2011 for (i = 0; i < cap_count; i++) {
2012 u16 cap = le16_to_cpu(cap_resp[i].cap);
2013 bool found;
2014
2015 found = ice_parse_common_caps(hw, &func_p->common_cap,
2016 &cap_resp[i], "func caps");
2017
2018 switch (cap) {
2019 case ICE_AQC_CAPS_VF:
2020 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2021 break;
2022 case ICE_AQC_CAPS_VSI:
2023 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2024 break;
2025 case ICE_AQC_CAPS_FD:
2026 ice_parse_fdir_func_caps(hw, func_p);
2027 break;
2028 default:
2029 /* Don't list common capabilities as unknown */
2030 if (!found)
2031 ice_debug(hw, ICE_DBG_INIT,
2032 "func caps: unknown capability[%d]: 0x%x\n",
2033 i, cap);
2034 break;
2035 }
2036 }
2037
2038 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2039 }
2040
2041 /**
2042 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2043 * @hw: pointer to the HW struct
2044 * @dev_p: pointer to device capabilities structure
2045 * @cap: capability element to parse
2046 *
2047 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2048 */
2049 static void
2050 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2051 struct ice_aqc_list_caps_elem *cap)
2052 {
2053 u32 number = le32_to_cpu(cap->number);
2054
2055 dev_p->num_funcs = hweight32(number);
2056 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2057 dev_p->num_funcs);
2058 }
2059
2060 /**
2061 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2062 * @hw: pointer to the HW struct
2063 * @dev_p: pointer to device capabilities structure
2064 * @cap: capability element to parse
2065 *
2066 * Parse ICE_AQC_CAPS_VF for device capabilities.
2067 */
2068 static void
2069 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2070 struct ice_aqc_list_caps_elem *cap)
2071 {
2072 u32 number = le32_to_cpu(cap->number);
2073
2074 dev_p->num_vfs_exposed = number;
2075 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2076 dev_p->num_vfs_exposed);
2077 }
2078
2079 /**
2080 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2081 * @hw: pointer to the HW struct
2082 * @dev_p: pointer to device capabilities structure
2083 * @cap: capability element to parse
2084 *
2085 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2086 */
2087 static void
2088 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2089 struct ice_aqc_list_caps_elem *cap)
2090 {
2091 u32 number = le32_to_cpu(cap->number);
2092
2093 dev_p->num_vsi_allocd_to_host = number;
2094 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2095 dev_p->num_vsi_allocd_to_host);
2096 }
2097
2098 /**
2099 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2100 * @hw: pointer to the HW struct
2101 * @dev_p: pointer to device capabilities structure
2102 * @cap: capability element to parse
2103 *
2104 * Parse ICE_AQC_CAPS_FD for device capabilities.
2105 */
2106 static void
2107 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2108 struct ice_aqc_list_caps_elem *cap)
2109 {
2110 u32 number = le32_to_cpu(cap->number);
2111
2112 dev_p->num_flow_director_fltr = number;
2113 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2114 dev_p->num_flow_director_fltr);
2115 }
2116
2117 /**
2118 * ice_parse_dev_caps - Parse device capabilities
2119 * @hw: pointer to the HW struct
2120 * @dev_p: pointer to device capabilities structure
2121 * @buf: buffer containing the device capability records
2122 * @cap_count: the number of capabilities
2123 *
2124 * Helper function to parse the device (0x000B) capabilities list. For
2125 * capabilities shared between device and function, this relies on
2126 * ice_parse_common_caps.
2127 *
2128 * Loop through the list of provided capabilities and extract the relevant
2129 * data into the device capabilities structure.
2130 */
2131 static void
2132 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2133 void *buf, u32 cap_count)
2134 {
2135 struct ice_aqc_list_caps_elem *cap_resp;
2136 u32 i;
2137
2138 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2139
2140 memset(dev_p, 0, sizeof(*dev_p));
2141
2142 for (i = 0; i < cap_count; i++) {
2143 u16 cap = le16_to_cpu(cap_resp[i].cap);
2144 bool found;
2145
2146 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2147 &cap_resp[i], "dev caps");
2148
2149 switch (cap) {
2150 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2151 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2152 break;
2153 case ICE_AQC_CAPS_VF:
2154 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2155 break;
2156 case ICE_AQC_CAPS_VSI:
2157 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2158 break;
2159 case ICE_AQC_CAPS_FD:
2160 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2161 break;
2162 default:
2163 /* Don't list common capabilities as unknown */
2164 if (!found)
2165 ice_debug(hw, ICE_DBG_INIT,
2166 "dev caps: unknown capability[%d]: 0x%x\n",
2167 i, cap);
2168 break;
2169 }
2170 }
2171
2172 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2173 }
2174
2175 /**
2176 * ice_aq_list_caps - query function/device capabilities
2177 * @hw: pointer to the HW struct
2178 * @buf: a buffer to hold the capabilities
2179 * @buf_size: size of the buffer
2180 * @cap_count: if not NULL, set to the number of capabilities reported
2181 * @opc: capabilities type to discover, device or function
2182 * @cd: pointer to command details structure or NULL
2183 *
2184 * Get the function (0x000A) or device (0x000B) capabilities description from
2185 * firmware and store it in the buffer.
2186 *
2187 * If the cap_count pointer is not NULL, then it is set to the number of
2188 * capabilities firmware will report. Note that if the buffer size is too
2189 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2190 * cap_count will still be updated in this case. It is recommended that the
2191 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2192 * firmware could return) to avoid this.
2193 */
2194 enum ice_status
2195 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2196 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2197 {
2198 struct ice_aqc_list_caps *cmd;
2199 struct ice_aq_desc desc;
2200 enum ice_status status;
2201
2202 cmd = &desc.params.get_cap;
2203
2204 if (opc != ice_aqc_opc_list_func_caps &&
2205 opc != ice_aqc_opc_list_dev_caps)
2206 return ICE_ERR_PARAM;
2207
2208 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2209 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2210
2211 if (cap_count)
2212 *cap_count = le32_to_cpu(cmd->count);
2213
2214 return status;
2215 }
2216
2217 /**
2218 * ice_discover_dev_caps - Read and extract device capabilities
2219 * @hw: pointer to the hardware structure
2220 * @dev_caps: pointer to device capabilities structure
2221 *
2222 * Read the device capabilities and extract them into the dev_caps structure
2223 * for later use.
2224 */
2225 enum ice_status
2226 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2227 {
2228 enum ice_status status;
2229 u32 cap_count = 0;
2230 void *cbuf;
2231
2232 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2233 if (!cbuf)
2234 return ICE_ERR_NO_MEMORY;
2235
2236 /* Although the driver doesn't know the number of capabilities the
2237 * device will return, we can simply send a 4KB buffer, the maximum
2238 * possible size that firmware can return.
2239 */
2240 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2241
2242 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2243 ice_aqc_opc_list_dev_caps, NULL);
2244 if (!status)
2245 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2246 kfree(cbuf);
2247
2248 return status;
2249 }
2250
2251 /**
2252 * ice_discover_func_caps - Read and extract function capabilities
2253 * @hw: pointer to the hardware structure
2254 * @func_caps: pointer to function capabilities structure
2255 *
2256 * Read the function capabilities and extract them into the func_caps structure
2257 * for later use.
2258 */
2259 static enum ice_status
2260 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2261 {
2262 enum ice_status status;
2263 u32 cap_count = 0;
2264 void *cbuf;
2265
2266 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2267 if (!cbuf)
2268 return ICE_ERR_NO_MEMORY;
2269
2270 /* Although the driver doesn't know the number of capabilities the
2271 * device will return, we can simply send a 4KB buffer, the maximum
2272 * possible size that firmware can return.
2273 */
2274 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2275
2276 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2277 ice_aqc_opc_list_func_caps, NULL);
2278 if (!status)
2279 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2280 kfree(cbuf);
2281
2282 return status;
2283 }
2284
2285 /**
2286 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2287 * @hw: pointer to the hardware structure
2288 */
2289 void ice_set_safe_mode_caps(struct ice_hw *hw)
2290 {
2291 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2292 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2293 struct ice_hw_common_caps cached_caps;
2294 u32 num_funcs;
2295
2296 /* cache some func_caps values that should be restored after memset */
2297 cached_caps = func_caps->common_cap;
2298
2299 /* unset func capabilities */
2300 memset(func_caps, 0, sizeof(*func_caps));
2301
2302 #define ICE_RESTORE_FUNC_CAP(name) \
2303 func_caps->common_cap.name = cached_caps.name
2304
2305 /* restore cached values */
2306 ICE_RESTORE_FUNC_CAP(valid_functions);
2307 ICE_RESTORE_FUNC_CAP(txq_first_id);
2308 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2309 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2310 ICE_RESTORE_FUNC_CAP(max_mtu);
2311 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2312 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2313 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2314 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2315
2316 /* one Tx and one Rx queue in safe mode */
2317 func_caps->common_cap.num_rxq = 1;
2318 func_caps->common_cap.num_txq = 1;
2319
2320 /* two MSIX vectors, one for traffic and one for misc causes */
2321 func_caps->common_cap.num_msix_vectors = 2;
2322 func_caps->guar_num_vsi = 1;
2323
2324 /* cache some dev_caps values that should be restored after memset */
2325 cached_caps = dev_caps->common_cap;
2326 num_funcs = dev_caps->num_funcs;
2327
2328 /* unset dev capabilities */
2329 memset(dev_caps, 0, sizeof(*dev_caps));
2330
2331 #define ICE_RESTORE_DEV_CAP(name) \
2332 dev_caps->common_cap.name = cached_caps.name
2333
2334 /* restore cached values */
2335 ICE_RESTORE_DEV_CAP(valid_functions);
2336 ICE_RESTORE_DEV_CAP(txq_first_id);
2337 ICE_RESTORE_DEV_CAP(rxq_first_id);
2338 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2339 ICE_RESTORE_DEV_CAP(max_mtu);
2340 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2341 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2342 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2343 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2344 dev_caps->num_funcs = num_funcs;
2345
2346 /* one Tx and one Rx queue per function in safe mode */
2347 dev_caps->common_cap.num_rxq = num_funcs;
2348 dev_caps->common_cap.num_txq = num_funcs;
2349
2350 /* two MSIX vectors per function */
2351 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2352 }
2353
2354 /**
2355 * ice_get_caps - get info about the HW
2356 * @hw: pointer to the hardware structure
2357 */
2358 enum ice_status ice_get_caps(struct ice_hw *hw)
2359 {
2360 enum ice_status status;
2361
2362 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2363 if (status)
2364 return status;
2365
2366 return ice_discover_func_caps(hw, &hw->func_caps);
2367 }
2368
2369 /**
2370 * ice_aq_manage_mac_write - manage MAC address write command
2371 * @hw: pointer to the HW struct
2372 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2373 * @flags: flags to control write behavior
2374 * @cd: pointer to command details structure or NULL
2375 *
2376 * This function is used to write MAC address to the NVM (0x0108).
2377 */
2378 enum ice_status
2379 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2380 struct ice_sq_cd *cd)
2381 {
2382 struct ice_aqc_manage_mac_write *cmd;
2383 struct ice_aq_desc desc;
2384
2385 cmd = &desc.params.mac_write;
2386 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2387
2388 cmd->flags = flags;
2389 ether_addr_copy(cmd->mac_addr, mac_addr);
2390
2391 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2392 }
2393
2394 /**
2395 * ice_aq_clear_pxe_mode
2396 * @hw: pointer to the HW struct
2397 *
2398 * Tell the firmware that the driver is taking over from PXE (0x0110).
2399 */
2400 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2401 {
2402 struct ice_aq_desc desc;
2403
2404 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2405 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2406
2407 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2408 }
2409
2410 /**
2411 * ice_clear_pxe_mode - clear PXE operations mode
2412 * @hw: pointer to the HW struct
2413 *
2414 * Make sure all PXE mode settings are cleared, including things
2415 * like descriptor fetch/write-back mode.
2416 */
2417 void ice_clear_pxe_mode(struct ice_hw *hw)
2418 {
2419 if (ice_check_sq_alive(hw, &hw->adminq))
2420 ice_aq_clear_pxe_mode(hw);
2421 }
2422
2423 /**
2424 * ice_get_link_speed_based_on_phy_type - returns link speed
2425 * @phy_type_low: lower part of phy_type
2426 * @phy_type_high: higher part of phy_type
2427 *
2428 * This helper function will convert an entry in PHY type structure
2429 * [phy_type_low, phy_type_high] to its corresponding link speed.
2430 * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
2431 * should be set, as this function converts a single PHY type to its
2432 * speed.
2433 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2434 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2435 */
2436 static u16
2437 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2438 {
2439 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2440 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2441
2442 switch (phy_type_low) {
2443 case ICE_PHY_TYPE_LOW_100BASE_TX:
2444 case ICE_PHY_TYPE_LOW_100M_SGMII:
2445 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2446 break;
2447 case ICE_PHY_TYPE_LOW_1000BASE_T:
2448 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2449 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2450 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2451 case ICE_PHY_TYPE_LOW_1G_SGMII:
2452 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2453 break;
2454 case ICE_PHY_TYPE_LOW_2500BASE_T:
2455 case ICE_PHY_TYPE_LOW_2500BASE_X:
2456 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2457 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2458 break;
2459 case ICE_PHY_TYPE_LOW_5GBASE_T:
2460 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2461 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2462 break;
2463 case ICE_PHY_TYPE_LOW_10GBASE_T:
2464 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2465 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2466 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2467 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2468 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2469 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2470 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2471 break;
2472 case ICE_PHY_TYPE_LOW_25GBASE_T:
2473 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2474 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2475 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2476 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2477 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2478 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2479 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2480 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2481 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2482 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2483 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2484 break;
2485 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2486 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2487 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2488 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2489 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2490 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2491 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2492 break;
2493 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2494 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2495 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2496 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2497 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2498 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2499 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2500 case ICE_PHY_TYPE_LOW_50G_AUI2:
2501 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2502 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2503 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2504 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2505 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2506 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2507 case ICE_PHY_TYPE_LOW_50G_AUI1:
2508 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2509 break;
2510 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2511 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2512 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2513 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2514 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2515 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2516 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2517 case ICE_PHY_TYPE_LOW_100G_AUI4:
2518 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2519 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2520 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2521 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2522 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2523 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2524 break;
2525 default:
2526 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2527 break;
2528 }
2529
2530 switch (phy_type_high) {
2531 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2532 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2533 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2534 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2535 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2536 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2537 break;
2538 default:
2539 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2540 break;
2541 }
2542
2543 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2544 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2545 return ICE_AQ_LINK_SPEED_UNKNOWN;
2546 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2547 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2548 return ICE_AQ_LINK_SPEED_UNKNOWN;
2549 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2550 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2551 return speed_phy_type_low;
2552 else
2553 return speed_phy_type_high;
2554 }
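
 /* Worked example (illustrative): calling this with
  * phy_type_low == ICE_PHY_TYPE_LOW_25GBASE_CR and phy_type_high == 0
  * returns ICE_AQ_LINK_SPEED_25GB; passing a value with more than one bit
  * set (or no bits set) returns ICE_AQ_LINK_SPEED_UNKNOWN.
  */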
2555
2556 /**
2557 * ice_update_phy_type
2558 * @phy_type_low: pointer to the lower part of phy_type
2559 * @phy_type_high: pointer to the higher part of phy_type
2560 * @link_speeds_bitmap: targeted link speeds bitmap
2561 *
2562 * Note: For the link_speeds_bitmap layout, refer to
2563 * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
2564 * link_speeds_bitmap that includes multiple speeds.
2565 *
2566 * Each entry in the [phy_type_low, phy_type_high] structure represents a
2567 * certain link speed. This helper function turns on the bits in
2568 * [phy_type_low, phy_type_high] that correspond to the speeds set in the
2569 * link_speeds_bitmap input parameter.
2570 */
2571 void
2572 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2573 u16 link_speeds_bitmap)
2574 {
2575 u64 pt_high;
2576 u64 pt_low;
2577 int index;
2578 u16 speed;
2579
2580 /* We first check with low part of phy_type */
2581 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2582 pt_low = BIT_ULL(index);
2583 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2584
2585 if (link_speeds_bitmap & speed)
2586 *phy_type_low |= BIT_ULL(index);
2587 }
2588
2589 /* We then check with high part of phy_type */
2590 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2591 pt_high = BIT_ULL(index);
2592 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2593
2594 if (link_speeds_bitmap & speed)
2595 *phy_type_high |= BIT_ULL(index);
2596 }
2597 }
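
 /* Usage sketch (illustrative only): building the PHY type masks for a user
  * requested set of speeds before programming them with Set PHY Config:
  *
  *	u64 phy_low = 0, phy_high = 0;
  *
  *	ice_update_phy_type(&phy_low, &phy_high,
  *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
  *	cfg.phy_type_low = cpu_to_le64(phy_low);
  *	cfg.phy_type_high = cpu_to_le64(phy_high);
  */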
2598
2599 /**
2600 * ice_aq_set_phy_cfg
2601 * @hw: pointer to the HW struct
2602 * @pi: port info structure of the interested logical port
2603 * @cfg: structure with PHY configuration data to be set
2604 * @cd: pointer to command details structure or NULL
2605 *
2606 * Set the various PHY configuration parameters supported on the Port.
2607 * One or more of the Set PHY config parameters may be ignored in an MFP
2608 * mode as the PF may not have the privilege to set some of the PHY Config
2609 * parameters. This status will be indicated by the command response (0x0601).
2610 */
2611 enum ice_status
2612 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2613 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2614 {
2615 struct ice_aq_desc desc;
2616 enum ice_status status;
2617
2618 if (!cfg)
2619 return ICE_ERR_PARAM;
2620
2621 /* Ensure that only valid bits of cfg->caps can be turned on. */
2622 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2623 ice_debug(hw, ICE_DBG_PHY,
2624 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2625 cfg->caps);
2626
2627 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2628 }
2629
2630 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2631 desc.params.set_phy.lport_num = pi->lport;
2632 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2633
2634 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2635 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2636 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2637 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2638 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2639 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2640 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2641 cfg->low_power_ctrl_an);
2642 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2643 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2644 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2645 cfg->link_fec_opt);
2646
2647 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2648 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2649 status = 0;
2650
2651 if (!status)
2652 pi->phy.curr_user_phy_cfg = *cfg;
2653
2654 return status;
2655 }
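
 /* Usage sketch (illustrative only): the usual flow is to read the current
  * abilities, convert them to a config, tweak the fields of interest, and
  * write the result back. pcaps and cfg stand in for caller-owned buffers:
  *
  *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
  *				     pcaps, NULL);
  *	if (!status) {
  *		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
  *		cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
  *		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
  *	}
  */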
2656
2657 /**
2658 * ice_update_link_info - update status of the HW network link
2659 * @pi: port info structure of the interested logical port
2660 */
2661 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2662 {
2663 struct ice_link_status *li;
2664 enum ice_status status;
2665
2666 if (!pi)
2667 return ICE_ERR_PARAM;
2668
2669 li = &pi->phy.link_info;
2670
2671 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2672 if (status)
2673 return status;
2674
2675 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2676 struct ice_aqc_get_phy_caps_data *pcaps;
2677 struct ice_hw *hw;
2678
2679 hw = pi->hw;
2680 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2681 GFP_KERNEL);
2682 if (!pcaps)
2683 return ICE_ERR_NO_MEMORY;
2684
2685 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2686 pcaps, NULL);
2687
2688 devm_kfree(ice_hw_to_dev(hw), pcaps);
2689 }
2690
2691 return status;
2692 }
2693
2694 /**
2695 * ice_cache_phy_user_req
2696 * @pi: port information structure
2697 * @cache_data: PHY logging data
2698 * @cache_mode: PHY logging mode
2699 *
2700 * Log the user request on (FC, FEC, SPEED) for later use.
2701 */
2702 static void
2703 ice_cache_phy_user_req(struct ice_port_info *pi,
2704 struct ice_phy_cache_mode_data cache_data,
2705 enum ice_phy_cache_mode cache_mode)
2706 {
2707 if (!pi)
2708 return;
2709
2710 switch (cache_mode) {
2711 case ICE_FC_MODE:
2712 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2713 break;
2714 case ICE_SPEED_MODE:
2715 pi->phy.curr_user_speed_req =
2716 cache_data.data.curr_user_speed_req;
2717 break;
2718 case ICE_FEC_MODE:
2719 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2720 break;
2721 default:
2722 break;
2723 }
2724 }
2725
2726 /**
2727 * ice_caps_to_fc_mode
2728 * @caps: PHY capabilities
2729 *
2730 * Convert PHY FC capabilities to ice FC mode
2731 */
2732 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2733 {
2734 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2735 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2736 return ICE_FC_FULL;
2737
2738 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2739 return ICE_FC_TX_PAUSE;
2740
2741 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2742 return ICE_FC_RX_PAUSE;
2743
2744 return ICE_FC_NONE;
2745 }
2746
2747 /**
2748 * ice_caps_to_fec_mode
2749 * @caps: PHY capabilities
2750 * @fec_options: Link FEC options
2751 *
2752 * Convert PHY FEC capabilities to ice FEC mode
2753 */
2754 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2755 {
2756 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2757 return ICE_FEC_AUTO;
2758
2759 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2760 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2761 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2762 ICE_AQC_PHY_FEC_25G_KR_REQ))
2763 return ICE_FEC_BASER;
2764
2765 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2766 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2767 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2768 return ICE_FEC_RS;
2769
2770 return ICE_FEC_NONE;
2771 }
2772
2773 /**
2774 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2775 * @pi: port information structure
2776 * @cfg: PHY configuration data to set FC mode
2777 * @req_mode: FC mode to configure
2778 */
2779 enum ice_status
2780 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2781 enum ice_fc_mode req_mode)
2782 {
2783 struct ice_phy_cache_mode_data cache_data;
2784 u8 pause_mask = 0x0;
2785
2786 if (!pi || !cfg)
2787 return ICE_ERR_BAD_PTR;
2788
2789 switch (req_mode) {
2790 case ICE_FC_FULL:
2791 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2792 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2793 break;
2794 case ICE_FC_RX_PAUSE:
2795 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2796 break;
2797 case ICE_FC_TX_PAUSE:
2798 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2799 break;
2800 default:
2801 break;
2802 }
2803
2804 /* clear the old pause settings */
2805 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2806 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2807
2808 /* set the new capabilities */
2809 cfg->caps |= pause_mask;
2810
2811 /* Cache user FC request */
2812 cache_data.data.curr_user_fc_req = req_mode;
2813 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2814
2815 return 0;
2816 }
2817
2818 /**
2819 * ice_set_fc
2820 * @pi: port information structure
2821 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2822 * @ena_auto_link_update: enable automatic link update
2823 *
2824 * Set the requested flow control mode.
2825 */
2826 enum ice_status
2827 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2828 {
2829 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2830 struct ice_aqc_get_phy_caps_data *pcaps;
2831 enum ice_status status;
2832 struct ice_hw *hw;
2833
2834 if (!pi || !aq_failures)
2835 return ICE_ERR_BAD_PTR;
2836
2837 *aq_failures = 0;
2838 hw = pi->hw;
2839
2840 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2841 if (!pcaps)
2842 return ICE_ERR_NO_MEMORY;
2843
2844 /* Get the current PHY config */
2845 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2846 NULL);
2847 if (status) {
2848 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2849 goto out;
2850 }
2851
2852 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2853
2854 /* Configure the set PHY data */
2855 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2856 if (status)
2857 goto out;
2858
2859 /* If the capabilities have changed, then set the new config */
2860 if (cfg.caps != pcaps->caps) {
2861 int retry_count, retry_max = 10;
2862
2863 /* Auto restart link so settings take effect */
2864 if (ena_auto_link_update)
2865 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2866
2867 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2868 if (status) {
2869 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2870 goto out;
2871 }
2872
2873 /* Update the link info
2874 * It sometimes takes a really long time for link to
2875 * come back from the atomic reset. Thus, we wait a
2876 * little bit.
2877 */
2878 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2879 status = ice_update_link_info(pi);
2880
2881 if (!status)
2882 break;
2883
2884 mdelay(100);
2885 }
2886
2887 if (status)
2888 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2889 }
2890
2891 out:
2892 devm_kfree(ice_hw_to_dev(hw), pcaps);
2893 return status;
2894 }
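
 /* Usage sketch (illustrative only): an ethtool-style pause configuration
  * path sets the requested mode and lets ice_set_fc apply it:
  *
  *	u8 aq_failures = 0;
  *
  *	pi->fc.req_mode = ICE_FC_FULL;
  *	status = ice_set_fc(pi, &aq_failures, true);
  *	if (status)
  *		... inspect aq_failures to see which step failed ...
  */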
2895
2896 /**
2897 * ice_phy_caps_equals_cfg
2898 * @phy_caps: PHY capabilities
2899 * @phy_cfg: PHY configuration
2900 *
2901 * Helper function to determine if the PHY capabilities match the PHY
2902 * configuration
2903 */
2904 bool
2905 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2906 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2907 {
2908 u8 caps_mask, cfg_mask;
2909
2910 if (!phy_caps || !phy_cfg)
2911 return false;
2912
2913 /* These bits are not common between capabilities and configuration.
2914 * Do not use them to determine equality.
2915 */
2916 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2917 ICE_AQC_GET_PHY_EN_MOD_QUAL);
2918 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2919
2920 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2921 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2922 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2923 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2924 phy_caps->eee_cap != phy_cfg->eee_cap ||
2925 phy_caps->eeer_value != phy_cfg->eeer_value ||
2926 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2927 return false;
2928
2929 return true;
2930 }
2931
2932 /**
2933 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2934 * @pi: port information structure
2935 * @caps: PHY ability structure to copy data from
2936 * @cfg: PHY configuration structure to copy data to
2937 *
2938 * Helper function to copy AQC PHY get ability data to PHY set configuration
2939 * data structure
2940 */
2941 void
2942 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2943 struct ice_aqc_get_phy_caps_data *caps,
2944 struct ice_aqc_set_phy_cfg_data *cfg)
2945 {
2946 if (!pi || !caps || !cfg)
2947 return;
2948
2949 memset(cfg, 0, sizeof(*cfg));
2950 cfg->phy_type_low = caps->phy_type_low;
2951 cfg->phy_type_high = caps->phy_type_high;
2952 cfg->caps = caps->caps;
2953 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2954 cfg->eee_cap = caps->eee_cap;
2955 cfg->eeer_value = caps->eeer_value;
2956 cfg->link_fec_opt = caps->link_fec_options;
2957 cfg->module_compliance_enforcement =
2958 caps->module_compliance_enforcement;
2959
2960 if (ice_fw_supports_link_override(pi->hw)) {
2961 struct ice_link_default_override_tlv tlv;
2962
2963 if (ice_get_link_default_override(&tlv, pi))
2964 return;
2965
2966 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2967 cfg->module_compliance_enforcement |=
2968 ICE_LINK_OVERRIDE_STRICT_MODE;
2969 }
2970 }
2971
2972 /**
2973 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2974 * @pi: port information structure
2975 * @cfg: PHY configuration data to set FEC mode
2976 * @fec: FEC mode to configure
2977 */
2978 enum ice_status
2979 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2980 enum ice_fec_mode fec)
2981 {
2982 struct ice_aqc_get_phy_caps_data *pcaps;
2983 enum ice_status status;
2984
2985 if (!pi || !cfg)
2986 return ICE_ERR_BAD_PTR;
2987
2988 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2989 if (!pcaps)
2990 return ICE_ERR_NO_MEMORY;
2991
2992 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
2993 NULL);
2994 if (status)
2995 goto out;
2996
2997 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2998 cfg->link_fec_opt = pcaps->link_fec_options;
2999
3000 switch (fec) {
3001 case ICE_FEC_BASER:
3002 /* Clear RS bits, and AND BASE-R ability
3003 * bits and OR request bits.
3004 */
3005 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3006 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3007 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3008 ICE_AQC_PHY_FEC_25G_KR_REQ;
3009 break;
3010 case ICE_FEC_RS:
3011 /* Clear BASE-R bits, and AND RS ability
3012 * bits and OR request bits.
3013 */
3014 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3015 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3016 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3017 break;
3018 case ICE_FEC_NONE:
3019 /* Clear all FEC option bits. */
3020 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3021 break;
3022 case ICE_FEC_AUTO:
3023 /* AND auto FEC bit, and all caps bits. */
3024 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3025 cfg->link_fec_opt |= pcaps->link_fec_options;
3026 break;
3027 default:
3028 status = ICE_ERR_PARAM;
3029 break;
3030 }
3031
3032 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3033 struct ice_link_default_override_tlv tlv;
3034
3035 if (ice_get_link_default_override(&tlv, pi))
3036 goto out;
3037
3038 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3039 (tlv.options & ICE_LINK_OVERRIDE_EN))
3040 cfg->link_fec_opt = tlv.fec_options;
3041 }
3042
3043 out:
3044 kfree(pcaps);
3045
3046 return status;
3047 }
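
 /* Usage sketch (illustrative only): requesting RS-FEC follows the same
  * get/copy/modify/set pattern used for flow control above:
  *
  *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
  *	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
  *	if (!status)
  *		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
  */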
3048
3049 /**
3050 * ice_get_link_status - get status of the HW network link
3051 * @pi: port information structure
3052 * @link_up: pointer to bool (true/false = linkup/linkdown)
3053 *
3054 * Variable link_up is true if link is up, false if link is down.
3055 * The variable link_up is invalid if the return status is non-zero. As a
3056 * result of this call, link status reporting becomes enabled.
3057 */
3058 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3059 {
3060 struct ice_phy_info *phy_info;
3061 enum ice_status status = 0;
3062
3063 if (!pi || !link_up)
3064 return ICE_ERR_PARAM;
3065
3066 phy_info = &pi->phy;
3067
3068 if (phy_info->get_link_info) {
3069 status = ice_update_link_info(pi);
3070
3071 if (status)
3072 ice_debug(pi->hw, ICE_DBG_LINK,
3073 "get link status error, status = %d\n",
3074 status);
3075 }
3076
3077 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3078
3079 return status;
3080 }
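
 /* Usage sketch (illustrative only):
  *
  *	bool link_up;
  *
  *	if (!ice_get_link_status(pi, &link_up) && link_up)
  *		... link is up; speed is in pi->phy.link_info.link_speed ...
  */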
3081
3082 /**
3083 * ice_aq_set_link_restart_an
3084 * @pi: pointer to the port information structure
3085 * @ena_link: if true: enable link, if false: disable link
3086 * @cd: pointer to command details structure or NULL
3087 *
3088 * Sets up the link and restarts the Auto-Negotiation over the link.
3089 */
3090 enum ice_status
3091 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3092 struct ice_sq_cd *cd)
3093 {
3094 struct ice_aqc_restart_an *cmd;
3095 struct ice_aq_desc desc;
3096
3097 cmd = &desc.params.restart_an;
3098
3099 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3100
3101 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3102 cmd->lport_num = pi->lport;
3103 if (ena_link)
3104 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3105 else
3106 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3107
3108 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3109 }
3110
3111 /**
3112 * ice_aq_set_event_mask
3113 * @hw: pointer to the HW struct
3114 * @port_num: port number of the physical function
3115 * @mask: event mask to be set
3116 * @cd: pointer to command details structure or NULL
3117 *
3118 * Set event mask (0x0613)
3119 */
3120 enum ice_status
3121 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3122 struct ice_sq_cd *cd)
3123 {
3124 struct ice_aqc_set_event_mask *cmd;
3125 struct ice_aq_desc desc;
3126
3127 cmd = &desc.params.set_event_mask;
3128
3129 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3130
3131 cmd->lport_num = port_num;
3132
3133 cmd->event_mask = cpu_to_le16(mask);
3134 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3135 }
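
 /* Usage sketch (illustrative only): a set bit in the mask suppresses that
  * event, so a driver typically masks everything except the events it wants
  * to receive (the event bit names here are assumptions for the example):
  *
  *	u16 mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN |
  *			   ICE_AQ_LINK_EVENT_MEDIA_NA));
  *
  *	status = ice_aq_set_event_mask(hw, pi->lport, mask, NULL);
  */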
3136
3137 /**
3138 * ice_aq_set_mac_loopback
3139 * @hw: pointer to the HW struct
3140 * @ena_lpbk: Enable or Disable loopback
3141 * @cd: pointer to command details structure or NULL
3142 *
3143 * Enable/disable loopback on a given port
3144 */
3145 enum ice_status
3146 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3147 {
3148 struct ice_aqc_set_mac_lb *cmd;
3149 struct ice_aq_desc desc;
3150
3151 cmd = &desc.params.set_mac_lb;
3152
3153 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3154 if (ena_lpbk)
3155 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3156
3157 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3158 }
3159
3160 /**
3161 * ice_aq_set_port_id_led
3162 * @pi: pointer to the port information
3163 * @is_orig_mode: is this LED set to original mode (by the net-list)
3164 * @cd: pointer to command details structure or NULL
3165 *
3166 * Set LED value for the given port (0x06e9)
3167 */
3168 enum ice_status
3169 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3170 struct ice_sq_cd *cd)
3171 {
3172 struct ice_aqc_set_port_id_led *cmd;
3173 struct ice_hw *hw = pi->hw;
3174 struct ice_aq_desc desc;
3175
3176 cmd = &desc.params.set_port_id_led;
3177
3178 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3179
3180 if (is_orig_mode)
3181 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3182 else
3183 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3184
3185 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3186 }
3187
3188 /**
3189 * ice_aq_sff_eeprom
3190 * @hw: pointer to the HW struct
3191 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3192 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3193 * @mem_addr: I2C offset. Lower 8 bits for address, upper 8 bits zero padding.
3194 * @page: QSFP page
3195 * @set_page: set or ignore the page
3196 * @data: pointer to data buffer to be read/written to the I2C device.
3197 * @length: 1-16 for read, 1 for write.
3198 * @write: 0 for read, 1 for write.
3199 * @cd: pointer to command details structure or NULL
3200 *
3201 * Read/Write SFF EEPROM (0x06EE)
3202 */
3203 enum ice_status
3204 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3205 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3206 bool write, struct ice_sq_cd *cd)
3207 {
3208 struct ice_aqc_sff_eeprom *cmd;
3209 struct ice_aq_desc desc;
3210 enum ice_status status;
3211
3212 if (!data || (mem_addr & 0xff00))
3213 return ICE_ERR_PARAM;
3214
3215 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3216 cmd = &desc.params.read_write_sff_param;
3217 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3218 cmd->lport_num = (u8)(lport & 0xff);
3219 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3220 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3221 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3222 ((set_page <<
3223 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3224 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3225 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3226 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3227 if (write)
3228 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3229
3230 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3231 return status;
3232 }
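
 /* Usage sketch (illustrative only): reading the first 8 bytes of a module's
  * EEPROM lower page on the default topology address (0xA0), current lport:
  *
  *	u8 data[8];
  *
  *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0x00, 0, data,
  *				   sizeof(data), false, NULL);
  */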
3233
3234 /**
3235 * __ice_aq_get_set_rss_lut
3236 * @hw: pointer to the hardware structure
3237 * @vsi_id: VSI FW index
3238 * @lut_type: LUT table type
3239 * @lut: pointer to the LUT buffer provided by the caller
3240 * @lut_size: size of the LUT buffer
3241 * @glob_lut_idx: global LUT index
3242 * @set: set true to set the table, false to get the table
3243 *
3244 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3245 */
3246 static enum ice_status
3247 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3248 u16 lut_size, u8 glob_lut_idx, bool set)
3249 {
3250 struct ice_aqc_get_set_rss_lut *cmd_resp;
3251 struct ice_aq_desc desc;
3252 enum ice_status status;
3253 u16 flags = 0;
3254
3255 cmd_resp = &desc.params.get_set_rss_lut;
3256
3257 if (set) {
3258 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3259 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3260 } else {
3261 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3262 }
3263
3264 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3265 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3266 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3267 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3268
3269 switch (lut_type) {
3270 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3271 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3272 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3273 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3274 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3275 break;
3276 default:
3277 status = ICE_ERR_PARAM;
3278 goto ice_aq_get_set_rss_lut_exit;
3279 }
3280
3281 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3282 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3283 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3284
3285 if (!set)
3286 goto ice_aq_get_set_rss_lut_send;
3287 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3288 if (!set)
3289 goto ice_aq_get_set_rss_lut_send;
3290 } else {
3291 goto ice_aq_get_set_rss_lut_send;
3292 }
3293
3294 /* LUT size is only valid for Global and PF table types */
3295 switch (lut_size) {
3296 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3297 break;
3298 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3299 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3300 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3301 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3302 break;
3303 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3304 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3305 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3306 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3307 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3308 break;
3309 }
3310 fallthrough;
3311 default:
3312 status = ICE_ERR_PARAM;
3313 goto ice_aq_get_set_rss_lut_exit;
3314 }
3315
3316 ice_aq_get_set_rss_lut_send:
3317 cmd_resp->flags = cpu_to_le16(flags);
3318 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3319
3320 ice_aq_get_set_rss_lut_exit:
3321 return status;
3322 }
3323
3324 /**
3325 * ice_aq_get_rss_lut
3326 * @hw: pointer to the hardware structure
3327 * @vsi_handle: software VSI handle
3328 * @lut_type: LUT table type
3329 * @lut: pointer to the LUT buffer provided by the caller
3330 * @lut_size: size of the LUT buffer
3331 *
3332 * get the RSS lookup table, PF or VSI type
3333 */
3334 enum ice_status
3335 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3336 u8 *lut, u16 lut_size)
3337 {
3338 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3339 return ICE_ERR_PARAM;
3340
3341 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3342 lut_type, lut, lut_size, 0, false);
3343 }
3344
3345 /**
3346 * ice_aq_set_rss_lut
3347 * @hw: pointer to the hardware structure
3348 * @vsi_handle: software VSI handle
3349 * @lut_type: LUT table type
3350 * @lut: pointer to the LUT buffer provided by the caller
3351 * @lut_size: size of the LUT buffer
3352 *
3353 * set the RSS lookup table, PF or VSI type
3354 */
3355 enum ice_status
3356 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3357 u8 *lut, u16 lut_size)
3358 {
3359 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3360 return ICE_ERR_PARAM;
3361
3362 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3363 lut_type, lut, lut_size, 0, true);
3364 }
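/* Illustrative usage sketch (not part of the driver): program a 512-entry
 * PF RSS LUT that spreads traffic round-robin over eight queues. The "hw",
 * the VSI handle and the queue count are assumptions for this example; the
 * size macro is used as the entry count, matching the lut_size check in
 * __ice_aq_get_set_rss_lut() above.
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	enum ice_status status;
 *	u16 i;
 *
 *	for (i = 0; i < ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512; i++)
 *		lut[i] = i % 8;
 *
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *				    lut, sizeof(lut));
 */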
3365
3366 /**
3367 * __ice_aq_get_set_rss_key
3368 * @hw: pointer to the HW struct
3369 * @vsi_id: VSI FW index
3370 * @key: pointer to key info struct
3371 * @set: set true to set the key, false to get the key
3372 *
3373 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3374 */
3375 static enum ice_status
3376 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3377 struct ice_aqc_get_set_rss_keys *key,
3378 bool set)
3379 {
3380 struct ice_aqc_get_set_rss_key *cmd_resp;
3381 u16 key_size = sizeof(*key);
3382 struct ice_aq_desc desc;
3383
3384 cmd_resp = &desc.params.get_set_rss_key;
3385
3386 if (set) {
3387 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3388 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3389 } else {
3390 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3391 }
3392
3393 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3394 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3395 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3396 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3397
3398 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3399 }
3400
3401 /**
3402 * ice_aq_get_rss_key
3403 * @hw: pointer to the HW struct
3404 * @vsi_handle: software VSI handle
3405 * @key: pointer to key info struct
3406 *
3407 * get the RSS key per VSI
3408 */
3409 enum ice_status
3410 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3411 struct ice_aqc_get_set_rss_keys *key)
3412 {
3413 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3414 return ICE_ERR_PARAM;
3415
3416 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3417 key, false);
3418 }
3419
3420 /**
3421 * ice_aq_set_rss_key
3422 * @hw: pointer to the HW struct
3423 * @vsi_handle: software VSI handle
3424 * @keys: pointer to key info struct
3425 *
3426 * set the RSS key per VSI
3427 */
3428 enum ice_status
3429 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3430 struct ice_aqc_get_set_rss_keys *keys)
3431 {
3432 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3433 return ICE_ERR_PARAM;
3434
3435 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3436 keys, true);
3437 }
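/* Illustrative usage sketch (not part of the driver): a get/set round trip
 * of the per-VSI RSS key. The "hw" and VSI handle are assumptions for this
 * example; the key material inside struct ice_aqc_get_set_rss_keys is passed
 * through unchanged.
 *
 *	struct ice_aqc_get_set_rss_keys keys = {};
 *	enum ice_status status;
 *
 *	status = ice_aq_get_rss_key(hw, vsi_handle, &keys);
 *	if (!status)
 *		status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */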
3438
3439 /**
3440 * ice_aq_add_lan_txq
3441 * @hw: pointer to the hardware structure
3442 * @num_qgrps: Number of added queue groups
3443 * @qg_list: list of queue groups to be added
3444 * @buf_size: size of buffer for indirect command
3445 * @cd: pointer to command details structure or NULL
3446 *
3447 * Add Tx LAN queue (0x0C30)
3448 *
3449 * NOTE:
3450 * Prior to calling add Tx LAN queue, initialize the following as part of
3451 * the Tx queue context: Completion queue ID (if the queue uses a
3452 * Completion queue), Quanta profile, Cache profile and Packet shaper
3453 * profile.
3454 *
3455 * After the add Tx LAN queue AQ command completes, interrupts should be
3456 * associated with specific queues. Association of a Tx queue to a
3457 * Doorbell queue is not part of the Add LAN Tx queue
3458 * flow.
3459 */
3460 static enum ice_status
3461 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3462 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3463 struct ice_sq_cd *cd)
3464 {
3465 struct ice_aqc_add_tx_qgrp *list;
3466 struct ice_aqc_add_txqs *cmd;
3467 struct ice_aq_desc desc;
3468 u16 i, sum_size = 0;
3469
3470 cmd = &desc.params.add_txqs;
3471
3472 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3473
3474 if (!qg_list)
3475 return ICE_ERR_PARAM;
3476
3477 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3478 return ICE_ERR_PARAM;
3479
3480 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3481 sum_size += struct_size(list, txqs, list->num_txqs);
3482 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3483 list->num_txqs);
3484 }
3485
3486 if (buf_size != sum_size)
3487 return ICE_ERR_PARAM;
3488
3489 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3490
3491 cmd->num_qgrps = num_qgrps;
3492
3493 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3494 }
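/* Illustrative sizing sketch (not part of the driver): allocate the indirect
 * buffer for one queue group carrying one Tx queue, which is the only shape
 * ice_ena_vsi_txq() below submits. struct_size() accounts for the flexible
 * txqs[] array, so buf_size equals the sum that ice_aq_add_lan_txq()
 * recomputes before sending the command.
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *	u16 buf_size = struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf = kzalloc(buf_size, GFP_KERNEL);
 *	if (!qg_buf)
 *		return ICE_ERR_NO_MEMORY;
 *	qg_buf->num_txqs = 1;
 */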
3495
3496 /**
3497 * ice_aq_dis_lan_txq
3498 * @hw: pointer to the hardware structure
3499 * @num_qgrps: number of groups in the list
3500 * @qg_list: the list of groups to disable
3501 * @buf_size: the total size of the qg_list buffer in bytes
3502 * @rst_src: if called due to reset, specifies the reset source
3503 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3504 * @cd: pointer to command details structure or NULL
3505 *
3506 * Disable LAN Tx queue (0x0C31)
3507 */
3508 static enum ice_status
3509 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3510 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3511 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3512 struct ice_sq_cd *cd)
3513 {
3514 struct ice_aqc_dis_txq_item *item;
3515 struct ice_aqc_dis_txqs *cmd;
3516 struct ice_aq_desc desc;
3517 enum ice_status status;
3518 u16 i, sz = 0;
3519
3520 cmd = &desc.params.dis_txqs;
3521 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3522
3523 /* qg_list can be NULL only in VM/VF reset flow */
3524 if (!qg_list && !rst_src)
3525 return ICE_ERR_PARAM;
3526
3527 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3528 return ICE_ERR_PARAM;
3529
3530 cmd->num_entries = num_qgrps;
3531
3532 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3533 ICE_AQC_Q_DIS_TIMEOUT_M);
3534
3535 switch (rst_src) {
3536 case ICE_VM_RESET:
3537 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3538 cmd->vmvf_and_timeout |=
3539 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3540 break;
3541 case ICE_VF_RESET:
3542 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3543 /* In this case, FW expects vmvf_num to be absolute VF ID */
3544 cmd->vmvf_and_timeout |=
3545 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3546 ICE_AQC_Q_DIS_VMVF_NUM_M);
3547 break;
3548 case ICE_NO_RESET:
3549 default:
3550 break;
3551 }
3552
3553 /* flush pipe on time out */
3554 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3555 /* If no queue group info, we are in a reset flow. Issue the AQ */
3556 if (!qg_list)
3557 goto do_aq;
3558
3559 /* set RD bit to indicate that command buffer is provided by the driver
3560 * and it needs to be read by the firmware
3561 */
3562 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3563
3564 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3565 u16 item_size = struct_size(item, q_id, item->num_qs);
3566
3567 /* If the num of queues is even, add 2 bytes of padding */
3568 if ((item->num_qs % 2) == 0)
3569 item_size += 2;
3570
3571 sz += item_size;
3572
3573 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3574 }
3575
3576 if (buf_size != sz)
3577 return ICE_ERR_PARAM;
3578
3579 do_aq:
3580 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3581 if (status) {
3582 if (!qg_list)
3583 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3584 vmvf_num, hw->adminq.sq_last_status);
3585 else
3586 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3587 le16_to_cpu(qg_list[0].q_id[0]),
3588 hw->adminq.sq_last_status);
3589 }
3590 return status;
3591 }
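/* Worked example of the sizing rule above (illustrative values): a group
 * that disables two queues occupies
 *
 *	struct_size(item, q_id, 2) + 2
 *
 * bytes of the caller's buffer, because num_qs is even and two bytes of
 * padding follow the __le16 queue IDs. A single-queue group needs exactly
 * struct_size(item, q_id, 1) bytes with no padding, which is what
 * ice_dis_vsi_txq() below allocates.
 */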
3592
3593 /* End of FW Admin Queue command wrappers */
3594
3595 /**
3596 * ice_write_byte - write a byte to a packed context structure
3597 * @src_ctx: the context structure to read from
3598 * @dest_ctx: the context to be written to
3599 * @ce_info: a description of the struct to be filled
3600 */
3601 static void
3602 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3603 {
3604 u8 src_byte, dest_byte, mask;
3605 u8 *from, *dest;
3606 u16 shift_width;
3607
3608 /* copy from the next struct field */
3609 from = src_ctx + ce_info->offset;
3610
3611 /* prepare the bits and mask */
3612 shift_width = ce_info->lsb % 8;
3613 mask = (u8)(BIT(ce_info->width) - 1);
3614
3615 src_byte = *from;
3616 src_byte &= mask;
3617
3618 /* shift to correct alignment */
3619 mask <<= shift_width;
3620 src_byte <<= shift_width;
3621
3622 /* get the current bits from the target bit string */
3623 dest = dest_ctx + (ce_info->lsb / 8);
3624
3625 memcpy(&dest_byte, dest, sizeof(dest_byte));
3626
3627 dest_byte &= ~mask; /* get the bits not changing */
3628 dest_byte |= src_byte; /* add in the new bits */
3629
3630 /* put it all back */
3631 memcpy(dest, &dest_byte, sizeof(dest_byte));
3632 }
3633
3634 /**
3635 * ice_write_word - write a word to a packed context structure
3636 * @src_ctx: the context structure to read from
3637 * @dest_ctx: the context to be written to
3638 * @ce_info: a description of the struct to be filled
3639 */
3640 static void
3641 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3642 {
3643 u16 src_word, mask;
3644 __le16 dest_word;
3645 u8 *from, *dest;
3646 u16 shift_width;
3647
3648 /* copy from the next struct field */
3649 from = src_ctx + ce_info->offset;
3650
3651 /* prepare the bits and mask */
3652 shift_width = ce_info->lsb % 8;
3653 mask = BIT(ce_info->width) - 1;
3654
3655 /* don't swizzle the bits until after the mask because the mask bits
3656 * will be in a different bit position on big endian machines
3657 */
3658 src_word = *(u16 *)from;
3659 src_word &= mask;
3660
3661 /* shift to correct alignment */
3662 mask <<= shift_width;
3663 src_word <<= shift_width;
3664
3665 /* get the current bits from the target bit string */
3666 dest = dest_ctx + (ce_info->lsb / 8);
3667
3668 memcpy(&dest_word, dest, sizeof(dest_word));
3669
3670 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
3671 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
3672
3673 /* put it all back */
3674 memcpy(dest, &dest_word, sizeof(dest_word));
3675 }
3676
3677 /**
3678 * ice_write_dword - write a dword to a packed context structure
3679 * @src_ctx: the context structure to read from
3680 * @dest_ctx: the context to be written to
3681 * @ce_info: a description of the struct to be filled
3682 */
3683 static void
3684 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3685 {
3686 u32 src_dword, mask;
3687 __le32 dest_dword;
3688 u8 *from, *dest;
3689 u16 shift_width;
3690
3691 /* copy from the next struct field */
3692 from = src_ctx + ce_info->offset;
3693
3694 /* prepare the bits and mask */
3695 shift_width = ce_info->lsb % 8;
3696
3697 /* if the field width is exactly 32 on an x86 machine, then the shift
3698 * operation will not work because the SHL instruction's count is masked
3699 * to 5 bits, so the shift would do nothing
3700 */
3701 if (ce_info->width < 32)
3702 mask = BIT(ce_info->width) - 1;
3703 else
3704 mask = (u32)~0;
3705
3706 /* don't swizzle the bits until after the mask because the mask bits
3707 * will be in a different bit position on big endian machines
3708 */
3709 src_dword = *(u32 *)from;
3710 src_dword &= mask;
3711
3712 /* shift to correct alignment */
3713 mask <<= shift_width;
3714 src_dword <<= shift_width;
3715
3716 /* get the current bits from the target bit string */
3717 dest = dest_ctx + (ce_info->lsb / 8);
3718
3719 memcpy(&dest_dword, dest, sizeof(dest_dword));
3720
3721 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
3722 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
3723
3724 /* put it all back */
3725 memcpy(dest, &dest_dword, sizeof(dest_dword));
3726 }
3727
3728 /**
3729 * ice_write_qword - write a qword to a packed context structure
3730 * @src_ctx: the context structure to read from
3731 * @dest_ctx: the context to be written to
3732 * @ce_info: a description of the struct to be filled
3733 */
3734 static void
3735 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3736 {
3737 u64 src_qword, mask;
3738 __le64 dest_qword;
3739 u8 *from, *dest;
3740 u16 shift_width;
3741
3742 /* copy from the next struct field */
3743 from = src_ctx + ce_info->offset;
3744
3745 /* prepare the bits and mask */
3746 shift_width = ce_info->lsb % 8;
3747
3748 /* if the field width is exactly 64 on an x86 machine, then the shift
3749 * operation will not work because the SHL instruction's count is masked
3750 * to 6 bits, so the shift would do nothing
3751 */
3752 if (ce_info->width < 64)
3753 mask = BIT_ULL(ce_info->width) - 1;
3754 else
3755 mask = (u64)~0;
3756
3757 /* don't swizzle the bits until after the mask because the mask bits
3758 * will be in a different bit position on big endian machines
3759 */
3760 src_qword = *(u64 *)from;
3761 src_qword &= mask;
3762
3763 /* shift to correct alignment */
3764 mask <<= shift_width;
3765 src_qword <<= shift_width;
3766
3767 /* get the current bits from the target bit string */
3768 dest = dest_ctx + (ce_info->lsb / 8);
3769
3770 memcpy(&dest_qword, dest, sizeof(dest_qword));
3771
3772 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
3773 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
3774
3775 /* put it all back */
3776 memcpy(dest, &dest_qword, sizeof(dest_qword));
3777 }
3778
3779 /**
3780 * ice_set_ctx - set context bits in packed structure
3781 * @hw: pointer to the hardware structure
3782 * @src_ctx: pointer to a generic non-packed context structure
3783 * @dest_ctx: pointer to memory for the packed structure
3784 * @ce_info: a description of the structure to be transformed
3785 */
3786 enum ice_status
3787 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3788 const struct ice_ctx_ele *ce_info)
3789 {
3790 int f;
3791
3792 for (f = 0; ce_info[f].width; f++) {
3793 /* We have to deal with each element of the FW response
3794 * using the correct size so that we are correct regardless
3795 * of the endianness of the machine.
3796 */
3797 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3798 ice_debug(hw, ICE_DBG_QCTX,
3799 "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3800 f, ce_info[f].width, ce_info[f].size_of);
3801 continue;
3802 }
3803 switch (ce_info[f].size_of) {
3804 case sizeof(u8):
3805 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3806 break;
3807 case sizeof(u16):
3808 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3809 break;
3810 case sizeof(u32):
3811 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3812 break;
3813 case sizeof(u64):
3814 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3815 break;
3816 default:
3817 return ICE_ERR_INVAL_SIZE;
3818 }
3819 }
3820
3821 return 0;
3822 }
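/* Illustrative sketch (not part of the driver): how a ce_info table drives
 * ice_set_ctx(). The unpacked structure and the bit placements below are
 * hypothetical; real tables for the hardware contexts live in the driver
 * headers. Each entry names the source field (offset/size_of) and its
 * destination bit position (lsb/width) in the packed buffer, and a
 * zero-width entry terminates the table, matching the loop condition above.
 *
 *	struct example_ctx {
 *		u16 q_len;
 *		u8 pf_num;
 *	};
 *
 *	static const struct ice_ctx_ele example_ctx_info[] = {
 *		{ .offset = offsetof(struct example_ctx, q_len),
 *		  .size_of = sizeof(u16), .lsb = 0, .width = 13 },
 *		{ .offset = offsetof(struct example_ctx, pf_num),
 *		  .size_of = sizeof(u8), .lsb = 16, .width = 3 },
 *		{ 0 }
 *	};
 *
 *	struct example_ctx ctx = { .q_len = 128, .pf_num = 2 };
 *	u8 packed[4] = {};
 *	enum ice_status status;
 *
 *	status = ice_set_ctx(hw, (u8 *)&ctx, packed, example_ctx_info);
 */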
3823
3824 /**
3825 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3826 * @hw: pointer to the HW struct
3827 * @vsi_handle: software VSI handle
3828 * @tc: TC number
3829 * @q_handle: software queue handle
3830 */
3831 struct ice_q_ctx *
3832 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3833 {
3834 struct ice_vsi_ctx *vsi;
3835 struct ice_q_ctx *q_ctx;
3836
3837 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3838 if (!vsi)
3839 return NULL;
3840 if (q_handle >= vsi->num_lan_q_entries[tc])
3841 return NULL;
3842 if (!vsi->lan_q_ctx[tc])
3843 return NULL;
3844 q_ctx = vsi->lan_q_ctx[tc];
3845 return &q_ctx[q_handle];
3846 }
3847
3848 /**
3849 * ice_ena_vsi_txq
3850 * @pi: port information structure
3851 * @vsi_handle: software VSI handle
3852 * @tc: TC number
3853 * @q_handle: software queue handle
3854 * @num_qgrps: Number of added queue groups
3855 * @buf: list of queue groups to be added
3856 * @buf_size: size of buffer for indirect command
3857 * @cd: pointer to command details structure or NULL
3858 *
3859 * This function adds one LAN queue
3860 */
3861 enum ice_status
3862 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3863 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3864 struct ice_sq_cd *cd)
3865 {
3866 struct ice_aqc_txsched_elem_data node = { 0 };
3867 struct ice_sched_node *parent;
3868 struct ice_q_ctx *q_ctx;
3869 enum ice_status status;
3870 struct ice_hw *hw;
3871
3872 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3873 return ICE_ERR_CFG;
3874
3875 if (num_qgrps > 1 || buf->num_txqs > 1)
3876 return ICE_ERR_MAX_LIMIT;
3877
3878 hw = pi->hw;
3879
3880 if (!ice_is_vsi_valid(hw, vsi_handle))
3881 return ICE_ERR_PARAM;
3882
3883 mutex_lock(&pi->sched_lock);
3884
3885 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3886 if (!q_ctx) {
3887 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3888 q_handle);
3889 status = ICE_ERR_PARAM;
3890 goto ena_txq_exit;
3891 }
3892
3893 /* find a parent node */
3894 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3895 ICE_SCHED_NODE_OWNER_LAN);
3896 if (!parent) {
3897 status = ICE_ERR_PARAM;
3898 goto ena_txq_exit;
3899 }
3900
3901 buf->parent_teid = parent->info.node_teid;
3902 node.parent_teid = parent->info.node_teid;
3903 /* Mark the values in the "generic" section as valid. The default
3904 * value in the "generic" section is zero. This means that:
3905 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3906 * - 0 priority among siblings, indicated by Bit 1-3.
3907 * - WFQ, indicated by Bit 4.
3908 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3909 * Bit 5-6.
3910 * - Bit 7 is reserved.
3911 * Without setting the generic section as valid in valid_sections, the
3912 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3913 */
3914 buf->txqs[0].info.valid_sections =
3915 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
3916 ICE_AQC_ELEM_VALID_EIR;
3917 buf->txqs[0].info.generic = 0;
3918 buf->txqs[0].info.cir_bw.bw_profile_idx =
3919 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3920 buf->txqs[0].info.cir_bw.bw_alloc =
3921 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3922 buf->txqs[0].info.eir_bw.bw_profile_idx =
3923 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3924 buf->txqs[0].info.eir_bw.bw_alloc =
3925 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
3926
3927 /* add the LAN queue */
3928 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3929 if (status) {
3930 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3931 le16_to_cpu(buf->txqs[0].txq_id),
3932 hw->adminq.sq_last_status);
3933 goto ena_txq_exit;
3934 }
3935
3936 node.node_teid = buf->txqs[0].q_teid;
3937 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3938 q_ctx->q_handle = q_handle;
3939 q_ctx->q_teid = le32_to_cpu(node.node_teid);
3940
3941 /* add a leaf node into scheduler tree queue layer */
3942 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3943 if (!status)
3944 status = ice_sched_replay_q_bw(pi, q_ctx);
3945
3946 ena_txq_exit:
3947 mutex_unlock(&pi->sched_lock);
3948 return status;
3949 }
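/* Illustrative usage sketch (not part of the driver): enable one LAN Tx
 * queue on TC 0. The "pi", VSI handle, software queue handle, hardware queue
 * ID and the qg_buf/buf_size pair (sized for a single queue as in the sketch
 * above ice_aq_add_lan_txq()) are assumptions for this example, and the
 * packed Tx queue context is assumed to have been written into
 * qg_buf->txqs[0] beforehand.
 *
 *	qg_buf->num_txqs = 1;
 *	qg_buf->txqs[0].txq_id = cpu_to_le16(hw_q_id);
 *
 *	status = ice_ena_vsi_txq(pi, vsi_handle, 0, q_handle, 1, qg_buf,
 *				 buf_size, NULL);
 *	if (!status)
 *		q_teid = le32_to_cpu(qg_buf->txqs[0].q_teid);
 */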
3950
3951 /**
3952 * ice_dis_vsi_txq
3953 * @pi: port information structure
3954 * @vsi_handle: software VSI handle
3955 * @tc: TC number
3956 * @num_queues: number of queues
3957 * @q_handles: pointer to software queue handle array
3958 * @q_ids: pointer to the q_id array
3959 * @q_teids: pointer to queue node teids
3960 * @rst_src: if called due to reset, specifies the reset source
3961 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3962 * @cd: pointer to command details structure or NULL
3963 *
3964 * This function removes queues and their corresponding nodes in SW DB
3965 */
3966 enum ice_status
3967 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3968 u16 *q_handles, u16 *q_ids, u32 *q_teids,
3969 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3970 struct ice_sq_cd *cd)
3971 {
3972 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3973 struct ice_aqc_dis_txq_item *qg_list;
3974 struct ice_q_ctx *q_ctx;
3975 struct ice_hw *hw;
3976 u16 i, buf_size;
3977
3978 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3979 return ICE_ERR_CFG;
3980
3981 hw = pi->hw;
3982
3983 if (!num_queues) {
3984 /* if the queue is already disabled but the disable queue command
3985 * still has to be sent to complete the VF reset, call
3986 * ice_aq_dis_lan_txq without any queue information
3987 */
3988 if (rst_src)
3989 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
3990 vmvf_num, NULL);
3991 return ICE_ERR_CFG;
3992 }
3993
3994 buf_size = struct_size(qg_list, q_id, 1);
3995 qg_list = kzalloc(buf_size, GFP_KERNEL);
3996 if (!qg_list)
3997 return ICE_ERR_NO_MEMORY;
3998
3999 mutex_lock(&pi->sched_lock);
4000
4001 for (i = 0; i < num_queues; i++) {
4002 struct ice_sched_node *node;
4003
4004 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4005 if (!node)
4006 continue;
4007 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4008 if (!q_ctx) {
4009 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4010 q_handles[i]);
4011 continue;
4012 }
4013 if (q_ctx->q_handle != q_handles[i]) {
4014 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4015 q_ctx->q_handle, q_handles[i]);
4016 continue;
4017 }
4018 qg_list->parent_teid = node->info.parent_teid;
4019 qg_list->num_qs = 1;
4020 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4021 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4022 vmvf_num, cd);
4023
4024 if (status)
4025 break;
4026 ice_free_sched_node(pi, node);
4027 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4028 }
4029 mutex_unlock(&pi->sched_lock);
4030 kfree(qg_list);
4031 return status;
4032 }
4033
4034 /**
4035 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4036 * @pi: port information structure
4037 * @vsi_handle: software VSI handle
4038 * @tc_bitmap: TC bitmap
4039 * @maxqs: max queues array per TC
4040 * @owner: LAN or RDMA
4041 *
4042 * This function adds/updates the VSI queues per TC.
4043 */
4044 static enum ice_status
4045 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4046 u16 *maxqs, u8 owner)
4047 {
4048 enum ice_status status = 0;
4049 u8 i;
4050
4051 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4052 return ICE_ERR_CFG;
4053
4054 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4055 return ICE_ERR_PARAM;
4056
4057 mutex_lock(&pi->sched_lock);
4058
4059 ice_for_each_traffic_class(i) {
4060 /* configuration is possible only if TC node is present */
4061 if (!ice_sched_get_tc_node(pi, i))
4062 continue;
4063
4064 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4065 ice_is_tc_ena(tc_bitmap, i));
4066 if (status)
4067 break;
4068 }
4069
4070 mutex_unlock(&pi->sched_lock);
4071 return status;
4072 }
4073
4074 /**
4075 * ice_cfg_vsi_lan - configure VSI LAN queues
4076 * @pi: port information structure
4077 * @vsi_handle: software VSI handle
4078 * @tc_bitmap: TC bitmap
4079 * @max_lanqs: max LAN queues array per TC
4080 *
4081 * This function adds/updates the VSI LAN queues per TC.
4082 */
4083 enum ice_status
4084 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4085 u16 *max_lanqs)
4086 {
4087 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4088 ICE_SCHED_NODE_OWNER_LAN);
4089 }
4090
4091 /**
4092 * ice_replay_pre_init - replay pre initialization
4093 * @hw: pointer to the HW struct
4094 *
4095 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4096 */
4097 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4098 {
4099 struct ice_switch_info *sw = hw->switch_info;
4100 u8 i;
4101
4102 /* Delete old entries from replay filter list head if there is any */
4103 ice_rm_all_sw_replay_rule_info(hw);
4104 /* At the start of replay, move entries into the replay_rules list;
4105 * this allows rule entries to be added back to the filt_rules list,
4106 * which is the operational list.
4107 */
4108 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4109 list_replace_init(&sw->recp_list[i].filt_rules,
4110 &sw->recp_list[i].filt_replay_rules);
4111
4112 return 0;
4113 }
4114
4115 /**
4116 * ice_replay_vsi - replay VSI configuration
4117 * @hw: pointer to the HW struct
4118 * @vsi_handle: driver VSI handle
4119 *
4120 * Restore all VSI configuration after reset. This function must be called
4121 * for the main VSI first.
4122 */
4123 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4124 {
4125 enum ice_status status;
4126
4127 if (!ice_is_vsi_valid(hw, vsi_handle))
4128 return ICE_ERR_PARAM;
4129
4130 /* Replay pre-initialization if there is any */
4131 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4132 status = ice_replay_pre_init(hw);
4133 if (status)
4134 return status;
4135 }
4136 /* Replay per VSI all RSS configurations */
4137 status = ice_replay_rss_cfg(hw, vsi_handle);
4138 if (status)
4139 return status;
4140 /* Replay per VSI all filters */
4141 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4142 return status;
4143 }
4144
4145 /**
4146 * ice_replay_post - post replay configuration cleanup
4147 * @hw: pointer to the HW struct
4148 *
4149 * Post replay cleanup.
4150 */
4151 void ice_replay_post(struct ice_hw *hw)
4152 {
4153 /* Delete old entries from replay filter list head */
4154 ice_rm_all_sw_replay_rule_info(hw);
4155 }
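/* Illustrative sketch (not part of the driver): the expected replay order
 * after a reset. The main VSI is replayed first (which runs
 * ice_replay_pre_init() internally), every other valid VSI follows, and
 * ice_replay_post() releases the saved replay rules. The handle loop bound
 * "num_vsi_handles" and the error handling are assumptions for this example.
 *
 *	enum ice_status status;
 *	u16 handle;
 *
 *	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	for (handle = 0; !status && handle < num_vsi_handles; handle++) {
 *		if (handle == ICE_MAIN_VSI_HANDLE ||
 *		    !ice_is_vsi_valid(hw, handle))
 *			continue;
 *		status = ice_replay_vsi(hw, handle);
 *	}
 *	ice_replay_post(hw);
 */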
4156
4157 /**
4158 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4159 * @hw: ptr to the hardware info
4160 * @reg: offset of 64 bit HW register to read from
4161 * @prev_stat_loaded: bool to specify if previous stats are loaded
4162 * @prev_stat: ptr to previous loaded stat value
4163 * @cur_stat: ptr to current stat value
4164 */
4165 void
4166 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4167 u64 *prev_stat, u64 *cur_stat)
4168 {
4169 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4170
4171 /* device stats are not reset at PFR; they likely will not be zeroed
4172 * when the driver starts. Thus, save the value from the first read
4173 * without adding to the statistic value so that we report stats which
4174 * count up from zero.
4175 */
4176 if (!prev_stat_loaded) {
4177 *prev_stat = new_data;
4178 return;
4179 }
4180
4181 /* Calculate the difference between the new and old values, and then
4182 * add it to the software stat value.
4183 */
4184 if (new_data >= *prev_stat)
4185 *cur_stat += new_data - *prev_stat;
4186 else
4187 /* to manage the potential roll-over */
4188 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4189
4190 /* Update the previously stored value to prepare for next read */
4191 *prev_stat = new_data;
4192 }
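/* Worked example of the roll-over handling above (illustrative values): if
 * the previous 40-bit reading was 0xFFFFFFFFF0 and the counter has since
 * wrapped to 0x10, then new_data < *prev_stat and the increment becomes
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, i.e. 32 counts, exactly the
 * distance travelled through the wrap.
 */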
4193
4194 /**
4195 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4196 * @hw: ptr to the hardware info
4197 * @reg: offset of HW register to read from
4198 * @prev_stat_loaded: bool to specify if previous stats are loaded
4199 * @prev_stat: ptr to previous loaded stat value
4200 * @cur_stat: ptr to current stat value
4201 */
4202 void
4203 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4204 u64 *prev_stat, u64 *cur_stat)
4205 {
4206 u32 new_data;
4207
4208 new_data = rd32(hw, reg);
4209
4210 /* device stats are not reset at PFR; they likely will not be zeroed
4211 * when the driver starts. Thus, save the value from the first read
4212 * without adding to the statistic value so that we report stats which
4213 * count up from zero.
4214 */
4215 if (!prev_stat_loaded) {
4216 *prev_stat = new_data;
4217 return;
4218 }
4219
4220 /* Calculate the difference between the new and old values, and then
4221 * add it to the software stat value.
4222 */
4223 if (new_data >= *prev_stat)
4224 *cur_stat += new_data - *prev_stat;
4225 else
4226 /* to manage the potential roll-over */
4227 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4228
4229 /* Update the previously stored value to prepare for next read */
4230 *prev_stat = new_data;
4231 }
4232
4233 /**
4234 * ice_sched_query_elem - query element information from HW
4235 * @hw: pointer to the HW struct
4236 * @node_teid: node TEID to be queried
4237 * @buf: buffer to element information
4238 *
4239 * This function queries HW element information
4240 */
4241 enum ice_status
4242 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4243 struct ice_aqc_txsched_elem_data *buf)
4244 {
4245 u16 buf_size, num_elem_ret = 0;
4246 enum ice_status status;
4247
4248 buf_size = sizeof(*buf);
4249 memset(buf, 0, buf_size);
4250 buf->node_teid = cpu_to_le32(node_teid);
4251 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4252 NULL);
4253 if (status || num_elem_ret != 1)
4254 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4255 return status;
4256 }
4257
4258 /**
4259 * ice_fw_supports_link_override
4260 * @hw: pointer to the hardware structure
4261 *
4262 * Checks if the firmware supports link override
4263 */
4264 bool ice_fw_supports_link_override(struct ice_hw *hw)
4265 {
4266 /* Currently, only supported for E810 devices */
4267 if (hw->mac_type != ICE_MAC_E810)
4268 return false;
4269
4270 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4271 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4272 return true;
4273 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4274 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4275 return true;
4276 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4277 return true;
4278 }
4279
4280 return false;
4281 }
4282
4283 /**
4284 * ice_get_link_default_override
4285 * @ldo: pointer to the link default override struct
4286 * @pi: pointer to the port info struct
4287 *
4288 * Gets the link default override for a port
4289 */
4290 enum ice_status
4291 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4292 struct ice_port_info *pi)
4293 {
4294 u16 i, tlv, tlv_len, tlv_start, buf, offset;
4295 struct ice_hw *hw = pi->hw;
4296 enum ice_status status;
4297
4298 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4299 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4300 if (status) {
4301 ice_debug(hw, ICE_DBG_INIT,
4302 "Failed to read link override TLV.\n");
4303 return status;
4304 }
4305
4306 /* Each port has its own config; calculate for our port */
4307 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4308 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4309
4310 /* link options first */
4311 status = ice_read_sr_word(hw, tlv_start, &buf);
4312 if (status) {
4313 ice_debug(hw, ICE_DBG_INIT,
4314 "Failed to read override link options.\n");
4315 return status;
4316 }
4317 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4318 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4319 ICE_LINK_OVERRIDE_PHY_CFG_S;
4320
4321 /* link PHY config */
4322 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4323 status = ice_read_sr_word(hw, offset, &buf);
4324 if (status) {
4325 ice_debug(hw, ICE_DBG_INIT,
4326 "Failed to read override phy config.\n");
4327 return status;
4328 }
4329 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4330
4331 /* PHY types low */
4332 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4333 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4334 status = ice_read_sr_word(hw, (offset + i), &buf);
4335 if (status) {
4336 ice_debug(hw, ICE_DBG_INIT,
4337 "Failed to read override phy types (low).\n");
4338 return status;
4339 }
4340 /* shift 16 bits at a time to fill 64 bits */
4341 ldo->phy_type_low |= ((u64)buf << (i * 16));
4342 }
4343
4344 /* PHY types high */
4345 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4346 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4347 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4348 status = ice_read_sr_word(hw, (offset + i), &buf);
4349 if (status) {
4350 ice_debug(hw, ICE_DBG_INIT,
4351 "Failed to read override phy types (high).\n");
4352 return status;
4353 }
4354 /* shift 16 bits at a time to fill 64 bits */
4355 ldo->phy_type_high |= ((u64)buf << (i * 16));
4356 }
4357
4358 return status;
4359 }
4360
4361 /**
4362 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4363 * @caps: get PHY capability data
4364 */
4365 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4366 {
4367 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4368 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4369 ICE_AQC_PHY_AN_EN_CLAUSE73 |
4370 ICE_AQC_PHY_AN_EN_CLAUSE37))
4371 return true;
4372
4373 return false;
4374 }
4375
4376 /**
4377 * ice_aq_set_lldp_mib - Set the LLDP MIB
4378 * @hw: pointer to the HW struct
4379 * @mib_type: Local, Remote or both Local and Remote MIBs
4380 * @buf: pointer to the caller-supplied buffer to store the MIB block
4381 * @buf_size: size of the buffer (in bytes)
4382 * @cd: pointer to command details structure or NULL
4383 *
4384 * Set the LLDP MIB. (0x0A08)
4385 */
4386 enum ice_status
4387 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4388 struct ice_sq_cd *cd)
4389 {
4390 struct ice_aqc_lldp_set_local_mib *cmd;
4391 struct ice_aq_desc desc;
4392
4393 cmd = &desc.params.lldp_set_mib;
4394
4395 if (buf_size == 0 || !buf)
4396 return ICE_ERR_PARAM;
4397
4398 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4399
4400 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
4401 desc.datalen = cpu_to_le16(buf_size);
4402
4403 cmd->type = mib_type;
4404 cmd->length = cpu_to_le16(buf_size);
4405
4406 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4407 }
4408