// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_sched.h"

/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * to the SW DB.
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* coverity[suspicious_sizeof] */
	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
				      sizeof(*root), GFP_KERNEL);
	if (!root->children) {
		devm_kfree(ice_hw_to_dev(hw), root);
		return ICE_ERR_NO_MEMORY;
	}

	memcpy(&root->info, info, sizeof(*info));
	pi->root = root;
	return 0;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}
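
/*
 * Illustrative sketch (not part of the driver): a caller that already holds
 * pi->sched_lock could resolve a firmware TEID to its SW DB node like so.
 * The teid value here is hypothetical.
 *
 *	struct ice_sched_node *node;
 *
 *	mutex_lock(&pi->sched_lock);
 *	node = ice_sched_find_node_by_teid(pi->root, teid);
 *	if (node)
 *		ice_debug(pi->hw, ICE_DBG_SCHED, "found node at layer %u\n",
 *			  node->tx_sched_layer);
 *	mutex_unlock(&pi->sched_lock);
 */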

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc)
 */
static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = le16_to_cpu(cmd->num_elem_resp);

	return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node to the SW DB.
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data elem;
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     le32_to_cpu(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
			  le32_to_cpu(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
	if (status)
		return status;

	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return ICE_ERR_NO_MEMORY;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		node->children = devm_kcalloc(ice_hw_to_dev(hw),
					      hw->max_children[layer],
					      sizeof(*node), GFP_KERNEL);
		if (!node->children) {
			devm_kfree(ice_hw_to_dev(hw), node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	node->info = elem;
	return 0;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function removes nodes from HW
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	buf_size = struct_size(buf, teid, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}
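
/*
 * Illustrative sketch (not part of the driver): callers typically look up the
 * TC branch first and then walk or extend it, for example when configuring a
 * VSI on a traffic class. The tc value below is hypothetical.
 *
 *	struct ice_sched_node *tc_node;
 *
 *	tc_node = ice_sched_get_tc_node(pi, tc);
 *	if (!tc_node)
 *		return ICE_ERR_CFG;
 */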

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);

		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}

/**
 * ice_aq_move_sched_elems - move scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to move
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_movd: returns total number of groups moved
 * @cd: pointer to command details structure or NULL
 *
 * Move scheduling elements (0x0408)
 */
static enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
			struct ice_aqc_move_elem *buf, u16 buf_size,
			u16 *grps_movd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_movd, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;
	__le32 *buf;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
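
/*
 * Illustrative sketch (not part of the driver): a caller could suspend a small
 * set of nodes in one AQ call and later resume them by flipping the flag.
 * The two-entry TEID array below is hypothetical.
 *
 *	u32 teids[2] = { first_teid, second_teid };
 *	enum ice_status status;
 *
 *	status = ice_sched_suspend_resume_elems(hw, 2, teids, true);
 *	if (status)
 *		return status;
 */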

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
						      new_numqs,
						      sizeof(*q_ctx),
						      GFP_KERNEL);
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return 0;
	}
	/* if the number of queues is increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return 0;
}

/**
 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate RDMA queue contexts */
	if (!vsi_ctx->rdma_q_ctx[tc]) {
		vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
						       new_numqs,
						       sizeof(*q_ctx),
						       GFP_KERNEL);
		if (!vsi_ctx->rdma_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
		return 0;
	}
	/* if the number of queues is increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
		vsi_ctx->rdma_q_ctx[tc] = q_ctx;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
	}
	return 0;
}

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
	struct ice_aqc_rl_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.rl_profile;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_profiles = cpu_to_le16(num_profiles);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_processed)
		*num_processed = le16_to_cpu(cmd->num_processed);
	return status;
}

/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
		      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
				 buf, buf_size, num_profiles_added, cd);
}

/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			 u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
				 num_profiles, buf, buf_size,
				 num_profiles_removed, cd);
}

/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is not referenced anymore, it removes profile ID with
 * its associated parameters from HW DB, and locally. The caller needs to
 * hold scheduler lock.
 */
static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
			 struct ice_aqc_rl_profile_info *rl_info)
{
	struct ice_aqc_rl_profile_elem *buf;
	u16 num_profiles_removed;
	enum ice_status status;
	u16 num_profiles = 1;

	if (rl_info->prof_id_ref != 0)
		return ICE_ERR_IN_USE;

	/* Safe to remove profile ID */
	buf = &rl_info->profile;
	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
					  &num_profiles_removed, NULL);
	if (status || num_profiles_removed != num_profiles)
		return ICE_ERR_CFG;

	/* Delete stale entry now */
	list_del(&rl_info->list_entry);
	devm_kfree(ice_hw_to_dev(hw), rl_info);
	return status;
}

/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profiles from HW as well as from SW DB.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln], list_entry) {
			struct ice_hw *hw = pi->hw;
			enum ice_status status;

			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
				/* On error, free mem required */
				list_del(&rl_prof_elem->list_entry);
				devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
			}
		}
	}
}

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes the aggregator list and frees up the aggregator
 * related memory previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
		list_del(&agg_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), agg_info);
	}
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;
	/* remove RL profiles related lists */
	ice_sched_clear_rl_prof(pi);
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
		hw->layer_info = NULL;
	}

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of the first node
 *
 * This function adds nodes to HW as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	size_t buf_size;
	u32 teid;

	buf_size = struct_size(buf, generic, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		devm_kfree(ice_hw_to_dev(hw), buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * Add nodes into specific HW layer.
 */
static enum ice_status
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
				struct ice_sched_node *tc_node,
				struct ice_sched_node *parent, u8 layer,
				u16 num_nodes, u32 *first_node_teid,
				u16 *num_nodes_added)
{
	u16 max_child_nodes;

	*num_nodes_added = 0;

	if (!num_nodes)
		return 0;

	if (!parent || layer < pi->hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;
		return ICE_ERR_MAX_LIMIT;
	}

	return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				   num_nodes_added, first_node_teid);
}

/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes = num_nodes;
	enum ice_status status = 0;

	*num_nodes_added = 0;
	while (*num_nodes_added < num_nodes) {
		u16 max_child_nodes, num_added = 0;
		/* cppcheck-suppress unusedVariable */
		u32 temp;

		status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
							 layer, new_num_nodes,
							 first_teid_ptr,
							 &num_added);
		if (!status)
			*num_nodes_added += num_added;
		/* added more nodes than requested ? */
		if (*num_nodes_added > num_nodes) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
				  *num_nodes_added);
			status = ICE_ERR_CFG;
			break;
		}
		/* break if all the nodes are added successfully */
		if (!status && (*num_nodes_added == num_nodes))
			break;
		/* break if the error is not max limit */
		if (status && status != ICE_ERR_MAX_LIMIT)
			break;
		/* Exceeded the max children */
		max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
		} else {
			/* This parent is full, try the next sibling */
			parent = parent->sibling;
			/* Don't modify the first node TEID memory if the
			 * first node was added already in the above call.
			 * Instead send some temp memory for all other
			 * recursive calls.
			 */
			if (num_added)
				first_teid_ptr = &temp;

			new_num_nodes = num_nodes - *num_nodes_added;
		}
	}
	return status;
}

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}
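
/*
 * Worked example (illustrative, assuming ICE_VSI_LAYER_OFFSET is 3 as defined
 * in ice_sched.h): with the usual 9-layer tree the VSI layer is 9 - 3 = 6,
 * matching the table above; with 5 or fewer layers the check fails and
 * sw_entry_point_layer is used instead.
 */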

/**
 * ice_sched_get_agg_layer - get the current aggregator layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current aggregator layer number
 */
static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
	/* Num Layers       aggregator layer
	 *     9               4
	 *     7 or less       sw_entry_point_layer
	 */
	/* calculate the aggregator layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}

/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}

/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}

/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources and the default topology created by firmware, and to store that
 * information in the SW DB.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&pi->rl_prof_list[i]);

err_init_port:
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = 0;
	__le16 max_sibl;
	u16 i;

	if (hw->layer_info)
		return status;

	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		le16_to_cpu(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = le16_to_cpu(max_sibl);
	}

	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
				      (hw->num_tx_sched_layers *
				       sizeof(*hw->layer_info)),
				      GFP_KERNEL);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
 * @hw: pointer to the HW struct
 *
 * Determine the PSM clock frequency and store in HW struct
 */
void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
{
	u32 val, clk_src;

	val = rd32(hw, GLGEN_CLKSTAT_SRC);
	clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
		GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;

#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
#define PSM_CLK_SRC_446_MHZ 0x2
#define PSM_CLK_SRC_390_MHZ 0x3

	switch (clk_src) {
	case PSM_CLK_SRC_367_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_416_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_446_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_390_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
		break;
	default:
		ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
			  clk_src);
		/* fall back to a safe default */
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
	}
}

/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */
static bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}

/**
 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 * @pi: port information structure
 * @vsi_node: pointer to the VSI node
 * @qgrp_node: first queue group node identified for scanning
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * qgrp_node and its siblings for the queue group with the fewest number
 * of queues currently assigned.
 */
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
			struct ice_sched_node *vsi_node,
			struct ice_sched_node *qgrp_node, u8 owner)
{
	struct ice_sched_node *min_qgrp;
	u8 min_children;

	if (!qgrp_node)
		return qgrp_node;
	min_children = qgrp_node->num_children;
	if (!min_children)
		return qgrp_node;
	min_qgrp = qgrp_node;
	/* scan all queue groups until we find a node which has fewer than the
	 * minimum number of children. This way all queue group nodes get an
	 * equal number of shares and stay active. The bandwidth will be
	 * equally distributed across all queues.
	 */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < min_children &&
			    qgrp_node->owner == owner) {
				/* replace the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children */
				if (!min_children)
					break;
			}
		qgrp_node = qgrp_node->sibling;
	}
	return min_qgrp;
}

/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node;
	struct ice_vsi_ctx *vsi_ctx;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate VSI ID */
	if (!vsi_node)
		return NULL;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

	/* Select the best queue group */
	return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
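
/*
 * Illustrative sketch (not part of the driver): a Tx queue setup path could
 * pick a parent for a new LAN queue roughly like this, under pi->sched_lock.
 * The tc value below is hypothetical.
 *
 *	struct ice_sched_node *parent;
 *
 *	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
 *					    ICE_SCHED_NODE_OWNER_LAN);
 *	if (!parent)
 *		return ICE_ERR_CFG;
 */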

/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI ID from a given
 * TC branch
 */
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u16 vsi_handle)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	node = ice_sched_get_first_node(pi, tc_node, vsi_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->vsi_handle == vsi_handle)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @agg_id: aggregator ID
 *
 * This function retrieves an aggregator node for a given aggregator ID from
 * a given TC branch
 */
static struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u32 agg_id)
{
	struct ice_sched_node *node;
	struct ice_hw *hw = pi->hw;
	u8 agg_layer;

	if (!hw)
		return NULL;
	agg_layer = ice_sched_get_agg_layer(hw);
	node = ice_sched_get_first_node(pi, tc_node, agg_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->agg_id == agg_id)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from queue group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIV_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}
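
/*
 * Worked example (illustrative, with hypothetical max_children values): for
 * 256 queues and 8 children allowed at the queue group layer, that layer
 * needs DIV_ROUND_UP(256, 8) = 32 nodes; if the next layer up also allows 8
 * children, it needs DIV_ROUND_UP(32, 8) = 4 nodes, and so on up to (but not
 * including) the VSI layer.
 */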

/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that need to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to the tree. It gets called for
 * LAN and RDMA separately.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return 0;
}
1661
1662 /**
1663 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1664 * @pi: pointer to the port info structure
1665 * @tc_node: pointer to TC node
1666 * @num_nodes: pointer to num nodes array
1667 *
1668 * This function calculates the number of supported nodes needed to add this
1669 * VSI into the Tx tree, including the VSI, its parent and the intermediate
1670 * nodes in the layers below
1671 */
1672 static void
1673 ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
1674 struct ice_sched_node *tc_node, u16 *num_nodes)
1675 {
1676 struct ice_sched_node *node;
1677 u8 vsil;
1678 int i;
1679
1680 vsil = ice_sched_get_vsi_layer(pi->hw);
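/* Walk from the VSI layer up toward the SW entry point layer and decide,
 * per layer, whether an extra node must be allocated for this VSI
 */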
1681 for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
1682 /* Add a node at this layer if the TC has no children yet;
1683 * the VSI layer always needs at least one node
1684 */
1685 if (!tc_node->num_children || i == vsil) {
1686 num_nodes[i]++;
1687 } else {
1688 /* If the intermediate nodes have all reached their max
1689 * number of children then add a new one.
1690 */
1691 node = ice_sched_get_first_node(pi, tc_node, (u8)i);
1692 /* scan all the siblings */
1693 while (node) {
1694 if (node->num_children < pi->hw->max_children[i])
1695 break;
1696 node = node->sibling;
1697 }
1698
1699 /* The tree has an intermediate node with room for this
1700 * new VSI, so no need to count supported nodes for the
1701 * remaining layers.
1702 */
1703 if (node)
1704 break;
1705 /* all the nodes are full, allocate a new one */
1706 num_nodes[i]++;
1707 }
1708 }
1709
1710 /**
1711 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1712 * @pi: port information structure
1713 * @vsi_handle: software VSI handle
1714 * @tc_node: pointer to TC node
1715 * @num_nodes: pointer to num nodes array
1716 *
1717 * This function adds the VSI supported nodes into the Tx tree, including the
1718 * VSI, its parent and the intermediate nodes in the layers below
1719 */
1720 static enum ice_status
1721 ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1722 struct ice_sched_node *tc_node, u16 *num_nodes)
1723 {
1724 struct ice_sched_node *parent = tc_node;
1725 enum ice_status status;
1726 u32 first_node_teid;
1727 u16 num_added = 0;
1728 u8 i, vsil;
1729
1730 if (!pi)
1731 return ICE_ERR_PARAM;
1732
1733 vsil = ice_sched_get_vsi_layer(pi->hw);
1734 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1735 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1736 i, num_nodes[i],
1737 &first_node_teid,
1738 &num_added);
1739 if (status || num_nodes[i] != num_added)
1740 return ICE_ERR_CFG;
1741
1742 /* The newly added node can be a new parent for the next
1743 * layer nodes
1744 */
1745 if (num_added)
1746 parent = ice_sched_find_node_by_teid(tc_node,
1747 first_node_teid);
1748 else
1749 parent = parent->children[0];
1750
1751 if (!parent)
1752 return ICE_ERR_CFG;
1753
1754 if (i == vsil)
1755 parent->vsi_handle = vsi_handle;
1756 }
1757
1758 return 0;
1759 }
1760
1761 /**
1762 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1763 * @pi: port information structure
1764 * @vsi_handle: software VSI handle
1765 * @tc: TC number
1766 *
1767 * This function adds a new VSI into scheduler tree
1768 */
1769 static enum ice_status
1770 ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1771 {
1772 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1773 struct ice_sched_node *tc_node;
1774
1775 tc_node = ice_sched_get_tc_node(pi, tc);
1776 if (!tc_node)
1777 return ICE_ERR_PARAM;
1778
1779 /* calculate number of supported nodes needed for this VSI */
1780 ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
1781
1782 /* add VSI supported nodes to TC subtree */
1783 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1784 num_nodes);
1785 }
1786
1787 /**
1788 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1789 * @pi: port information structure
1790 * @vsi_handle: software VSI handle
1791 * @tc: TC number
1792 * @new_numqs: new number of max queues
1793 * @owner: owner of this subtree
1794 *
1795 * This function updates the VSI child nodes based on the number of queues
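 *
 * The child-node tree is only ever grown here: if the new queue count is not
 * larger than the previous one, no nodes are added or removed.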
1796 */
1797 static enum ice_status
1798 ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1799 u8 tc, u16 new_numqs, u8 owner)
1800 {
1801 u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1802 struct ice_sched_node *vsi_node;
1803 struct ice_sched_node *tc_node;
1804 struct ice_vsi_ctx *vsi_ctx;
1805 enum ice_status status = 0;
1806 struct ice_hw *hw = pi->hw;
1807 u16 prev_numqs;
1808
1809 tc_node = ice_sched_get_tc_node(pi, tc);
1810 if (!tc_node)
1811 return ICE_ERR_CFG;
1812
1813 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1814 if (!vsi_node)
1815 return ICE_ERR_CFG;
1816
1817 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1818 if (!vsi_ctx)
1819 return ICE_ERR_PARAM;
1820
1821 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1822 prev_numqs = vsi_ctx->sched.max_lanq[tc];
1823 else
1824 prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
1825 /* num queues are not changed or less than the previous number */
1826 if (new_numqs <= prev_numqs)
1827 return status;
1828 if (owner == ICE_SCHED_NODE_OWNER_LAN) {
1829 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1830 if (status)
1831 return status;
1832 } else {
1833 status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
1834 if (status)
1835 return status;
1836 }
1837
1838 if (new_numqs)
1839 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1840 /* Keep the max number of queue configuration all the time. Update the
1841 * tree only if number of queues > previous number of queues. This may
1842 * leave some extra nodes in the tree if number of queues < previous
1843 * number but that wouldn't harm anything. Removing those extra nodes
1844 * may complicate the code if those nodes are part of SRL or
1845 * individually rate limited.
1846 */
1847 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
1848 new_num_nodes, owner);
1849 if (status)
1850 return status;
1851 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1852 vsi_ctx->sched.max_lanq[tc] = new_numqs;
1853 else
1854 vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
1855
1856 return 0;
1857 }
1858
1859 /**
1860 * ice_sched_cfg_vsi - configure the new/existing VSI
1861 * @pi: port information structure
1862 * @vsi_handle: software VSI handle
1863 * @tc: TC number
1864 * @maxqs: max number of queues
1865 * @owner: LAN or RDMA
1866 * @enable: TC enabled or disabled
1867 *
1868 * This function adds/updates VSI nodes based on the number of queues. If TC is
1869 * enabled and the VSI is in a suspended state then resume the VSI. If TC is
1870 * disabled then suspend the VSI if it is not already suspended.
1871 */
1872 enum ice_status
1873 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1874 u8 owner, bool enable)
1875 {
1876 struct ice_sched_node *vsi_node, *tc_node;
1877 struct ice_vsi_ctx *vsi_ctx;
1878 enum ice_status status = 0;
1879 struct ice_hw *hw = pi->hw;
1880
1881 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1882 tc_node = ice_sched_get_tc_node(pi, tc);
1883 if (!tc_node)
1884 return ICE_ERR_PARAM;
1885 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1886 if (!vsi_ctx)
1887 return ICE_ERR_PARAM;
1888 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1889
1890 /* suspend the VSI if TC is not enabled */
1891 if (!enable) {
1892 if (vsi_node && vsi_node->in_use) {
1893 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1894
1895 status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1896 true);
1897 if (!status)
1898 vsi_node->in_use = false;
1899 }
1900 return status;
1901 }
1902
1903 /* TC is enabled, if it is a new VSI then add it to the tree */
1904 if (!vsi_node) {
1905 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1906 if (status)
1907 return status;
1908
1909 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1910 if (!vsi_node)
1911 return ICE_ERR_CFG;
1912
1913 vsi_ctx->sched.vsi_node[tc] = vsi_node;
1914 vsi_node->in_use = true;
1915 /* invalidate the max queues whenever the VSI gets added for the
1916 * first time into the scheduler tree (boot or after reset). We
1917 * need to recreate the child nodes all the time in these cases.
1918 */
1919 vsi_ctx->sched.max_lanq[tc] = 0;
1920 vsi_ctx->sched.max_rdmaq[tc] = 0;
1921 }
1922
1923 /* update the VSI child nodes */
1924 status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1925 owner);
1926 if (status)
1927 return status;
1928
1929 /* TC is enabled, resume the VSI if it is in the suspend state */
1930 if (!vsi_node->in_use) {
1931 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1932
1933 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1934 if (!status)
1935 vsi_node->in_use = true;
1936 }
1937
1938 return status;
1939 }
1940
1941 /**
1942 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
1943 * @pi: port information structure
1944 * @vsi_handle: software VSI handle
1945 *
1946 * This function removes a single aggregator VSI info entry from the
1947 * aggregator list.
1948 */
1949 static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1950 {
1951 struct ice_sched_agg_info *agg_info;
1952 struct ice_sched_agg_info *atmp;
1953
1954 list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
1955 list_entry) {
1956 struct ice_sched_agg_vsi_info *agg_vsi_info;
1957 struct ice_sched_agg_vsi_info *vtmp;
1958
1959 list_for_each_entry_safe(agg_vsi_info, vtmp,
1960 &agg_info->agg_vsi_list, list_entry)
1961 if (agg_vsi_info->vsi_handle == vsi_handle) {
1962 list_del(&agg_vsi_info->list_entry);
1963 devm_kfree(ice_hw_to_dev(pi->hw),
1964 agg_vsi_info);
1965 return;
1966 }
1967 }
1968 }
1969
1970 /**
1971 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
1972 * @node: pointer to the sub-tree node
1973 *
1974 * This function checks for a leaf node presence in a given sub-tree node.
1975 */
1976 static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
1977 {
1978 u8 i;
1979
1980 for (i = 0; i < node->num_children; i++)
1981 if (ice_sched_is_leaf_node_present(node->children[i]))
1982 return true;
1983 /* check for a leaf node */
1984 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
1985 }
1986
1987 /**
1988 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
1989 * @pi: port information structure
1990 * @vsi_handle: software VSI handle
1991 * @owner: LAN or RDMA
1992 *
1993 * This function removes the VSI and its LAN or RDMA children nodes from the
1994 * scheduler tree.
1995 */
1996 static enum ice_status
1997 ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
1998 {
1999 enum ice_status status = ICE_ERR_PARAM;
2000 struct ice_vsi_ctx *vsi_ctx;
2001 u8 i;
2002
2003 ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
2004 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2005 return status;
2006 mutex_lock(&pi->sched_lock);
2007 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
2008 if (!vsi_ctx)
2009 goto exit_sched_rm_vsi_cfg;
2010
2011 ice_for_each_traffic_class(i) {
2012 struct ice_sched_node *vsi_node, *tc_node;
2013 u8 j = 0;
2014
2015 tc_node = ice_sched_get_tc_node(pi, i);
2016 if (!tc_node)
2017 continue;
2018
2019 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
2020 if (!vsi_node)
2021 continue;
2022
2023 if (ice_sched_is_leaf_node_present(vsi_node)) {
2024 ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
2025 status = ICE_ERR_IN_USE;
2026 goto exit_sched_rm_vsi_cfg;
2027 }
2028 while (j < vsi_node->num_children) {
2029 if (vsi_node->children[j]->owner == owner) {
2030 ice_free_sched_node(pi, vsi_node->children[j]);
2031
2032 /* reset the counter again since the num
2033 * children will be updated after node removal
2034 */
2035 j = 0;
2036 } else {
2037 j++;
2038 }
2039 }
2040 /* remove the VSI if it has no children */
2041 if (!vsi_node->num_children) {
2042 ice_free_sched_node(pi, vsi_node);
2043 vsi_ctx->sched.vsi_node[i] = NULL;
2044
2045 /* clean up aggregator related VSI info if any */
2046 ice_sched_rm_agg_vsi_info(pi, vsi_handle);
2047 }
2048 if (owner == ICE_SCHED_NODE_OWNER_LAN)
2049 vsi_ctx->sched.max_lanq[i] = 0;
2050 else
2051 vsi_ctx->sched.max_rdmaq[i] = 0;
2052 }
2053 status = 0;
2054
2055 exit_sched_rm_vsi_cfg:
2056 mutex_unlock(&pi->sched_lock);
2057 return status;
2058 }
2059
2060 /**
2061 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
2062 * @pi: port information structure
2063 * @vsi_handle: software VSI handle
2064 *
2065 * This function clears the VSI and its LAN children nodes from scheduler tree
2066 * for all TCs.
2067 */
2068 enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
2069 {
2070 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
2071 }
2072
2073 /**
2074 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
2075 * @pi: port information structure
2076 * @vsi_handle: software VSI handle
2077 *
2078 * This function clears the VSI and its RDMA children nodes from scheduler tree
2079 * for all TCs.
2080 */
2081 enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
2082 {
2083 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
2084 }
2085
2086 /**
2087 * ice_get_agg_info - get the aggregator info
2088 * @hw: pointer to the hardware structure
2089 * @agg_id: aggregator ID
2090 *
2091 * This function validates the aggregator ID. It returns the aggregator info
2092 * if the aggregator ID is present in the list, otherwise it returns NULL.
2093 */
2094 static struct ice_sched_agg_info *
2095 ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
2096 {
2097 struct ice_sched_agg_info *agg_info;
2098
2099 list_for_each_entry(agg_info, &hw->agg_list, list_entry)
2100 if (agg_info->agg_id == agg_id)
2101 return agg_info;
2102
2103 return NULL;
2104 }
2105
2106 /**
2107 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
2108 * @hw: pointer to the HW struct
2109 * @node: pointer to a child node
2110 * @num_nodes: num nodes count array
2111 *
2112 * This function walks through the aggregator subtree to find a free parent
2113 * node
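 * (i.e. a node at the VSI-parent layer that still has room for another
 * child). Layers that already have a node with free space get their
 * num_nodes[] count cleared so that no new node is allocated there.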
2114 */
2115 static struct ice_sched_node *
2116 ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
2117 u16 *num_nodes)
2118 {
2119 u8 l = node->tx_sched_layer;
2120 u8 vsil, i;
2121
2122 vsil = ice_sched_get_vsi_layer(hw);
2123
2124 /* Is it the VSI parent layer? */
2125 if (l == vsil - 1)
2126 return (node->num_children < hw->max_children[l]) ? node : NULL;
2127
2128 /* We have intermediate nodes. Let's walk through the subtree. If the
2129 * intermediate node has space to add a new node then clear the count
2130 */
2131 if (node->num_children < hw->max_children[l])
2132 num_nodes[l] = 0;
2133 /* The below recursive call is intentional and wouldn't go more than
2134 * 2 or 3 iterations.
2135 */
2136
2137 for (i = 0; i < node->num_children; i++) {
2138 struct ice_sched_node *parent;
2139
2140 parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
2141 num_nodes);
2142 if (parent)
2143 return parent;
2144 }
2145
2146 return NULL;
2147 }
2148
2149 /**
2150 * ice_sched_update_parent - update the new parent in SW DB
2151 * @new_parent: pointer to a new parent node
2152 * @node: pointer to a child node
2153 *
2154 * This function removes the child from the old parent and adds it to a new
2155 * parent
2156 */
2157 static void
2158 ice_sched_update_parent(struct ice_sched_node *new_parent,
2159 struct ice_sched_node *node)
2160 {
2161 struct ice_sched_node *old_parent;
2162 u8 i, j;
2163
2164 old_parent = node->parent;
2165
2166 /* update the old parent children */
2167 for (i = 0; i < old_parent->num_children; i++)
2168 if (old_parent->children[i] == node) {
2169 for (j = i + 1; j < old_parent->num_children; j++)
2170 old_parent->children[j - 1] =
2171 old_parent->children[j];
2172 old_parent->num_children--;
2173 break;
2174 }
2175
2176 /* now move the node to a new parent */
2177 new_parent->children[new_parent->num_children++] = node;
2178 node->parent = new_parent;
2179 node->info.parent_teid = new_parent->info.node_teid;
2180 }
2181
2182 /**
2183 * ice_sched_move_nodes - move child nodes to a given parent
2184 * @pi: port information structure
2185 * @parent: pointer to parent node
2186 * @num_items: number of child nodes to be moved
2187 * @list: pointer to child node teids
2188 *
2189 * This function moves the child nodes to a given parent.
2190 */
2191 static enum ice_status
2192 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
2193 u16 num_items, u32 *list)
2194 {
2195 struct ice_aqc_move_elem *buf;
2196 struct ice_sched_node *node;
2197 enum ice_status status = 0;
2198 u16 i, grps_movd = 0;
2199 struct ice_hw *hw;
2200 u16 buf_len;
2201
2202 hw = pi->hw;
2203
2204 if (!parent || !num_items)
2205 return ICE_ERR_PARAM;
2206
2207 /* Does the parent have enough space? */
2208 if (parent->num_children + num_items >
2209 hw->max_children[parent->tx_sched_layer])
2210 return ICE_ERR_AQ_FULL;
2211
2212 buf_len = struct_size(buf, teid, 1);
2213 buf = kzalloc(buf_len, GFP_KERNEL);
2214 if (!buf)
2215 return ICE_ERR_NO_MEMORY;
2216
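/* Move the nodes one TEID per admin queue command, mirroring each move
 * in the SW DB by re-parenting the node under its new parent
 */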
2217 for (i = 0; i < num_items; i++) {
2218 node = ice_sched_find_node_by_teid(pi->root, list[i]);
2219 if (!node) {
2220 status = ICE_ERR_PARAM;
2221 goto move_err_exit;
2222 }
2223
2224 buf->hdr.src_parent_teid = node->info.parent_teid;
2225 buf->hdr.dest_parent_teid = parent->info.node_teid;
2226 buf->teid[0] = node->info.node_teid;
2227 buf->hdr.num_elems = cpu_to_le16(1);
2228 status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
2229 &grps_movd, NULL);
2230 if (status && grps_movd != 1) {
2231 status = ICE_ERR_CFG;
2232 goto move_err_exit;
2233 }
2234
2235 /* update the SW DB */
2236 ice_sched_update_parent(parent, node);
2237 }
2238
2239 move_err_exit:
2240 kfree(buf);
2241 return status;
2242 }
2243
2244 /**
2245 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
2246 * @pi: port information structure
2247 * @vsi_handle: software VSI handle
2248 * @agg_id: aggregator ID
2249 * @tc: TC number
2250 *
2251 * This function moves a VSI to an aggregator node or its subtree.
2252 * Intermediate nodes may be created if required.
2253 */
2254 static enum ice_status
2255 ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
2256 u8 tc)
2257 {
2258 struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
2259 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
2260 u32 first_node_teid, vsi_teid;
2261 enum ice_status status;
2262 u16 num_nodes_added;
2263 u8 aggl, vsil, i;
2264
2265 tc_node = ice_sched_get_tc_node(pi, tc);
2266 if (!tc_node)
2267 return ICE_ERR_CFG;
2268
2269 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2270 if (!agg_node)
2271 return ICE_ERR_DOES_NOT_EXIST;
2272
2273 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
2274 if (!vsi_node)
2275 return ICE_ERR_DOES_NOT_EXIST;
2276
2277 /* Is this VSI already part of given aggregator? */
2278 if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
2279 return 0;
2280
2281 aggl = ice_sched_get_agg_layer(pi->hw);
2282 vsil = ice_sched_get_vsi_layer(pi->hw);
2283
2284 /* set intermediate node count to 1 between aggregator and VSI layers */
2285 for (i = aggl + 1; i < vsil; i++)
2286 num_nodes[i] = 1;
2287
2288 /* Check if the aggregator subtree has any free node to add the VSI */
2289 for (i = 0; i < agg_node->num_children; i++) {
2290 parent = ice_sched_get_free_vsi_parent(pi->hw,
2291 agg_node->children[i],
2292 num_nodes);
2293 if (parent)
2294 goto move_nodes;
2295 }
2296
2297 /* add new nodes */
2298 parent = agg_node;
2299 for (i = aggl + 1; i < vsil; i++) {
2300 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
2301 num_nodes[i],
2302 &first_node_teid,
2303 &num_nodes_added);
2304 if (status || num_nodes[i] != num_nodes_added)
2305 return ICE_ERR_CFG;
2306
2307 /* The newly added node can be a new parent for the next
2308 * layer nodes
2309 */
2310 if (num_nodes_added)
2311 parent = ice_sched_find_node_by_teid(tc_node,
2312 first_node_teid);
2313 else
2314 parent = parent->children[0];
2315
2316 if (!parent)
2317 return ICE_ERR_CFG;
2318 }
2319
2320 move_nodes:
2321 vsi_teid = le32_to_cpu(vsi_node->info.node_teid);
2322 return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
2323 }
2324
2325 /**
2326 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
2327 * @pi: port information structure
2328 * @agg_info: aggregator info
2329 * @tc: traffic class number
2330 * @rm_vsi_info: remove the aggregator VSI info entry if true
2331 *
2332 * This function moves all the VSI(s) to the default aggregator and deletes
2333 * the aggregator VSI info based on the passed-in boolean parameter
2334 * rm_vsi_info. The caller must hold the scheduler lock.
2335 */
2336 static enum ice_status
2337 ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
2338 struct ice_sched_agg_info *agg_info, u8 tc,
2339 bool rm_vsi_info)
2340 {
2341 struct ice_sched_agg_vsi_info *agg_vsi_info;
2342 struct ice_sched_agg_vsi_info *tmp;
2343 enum ice_status status = 0;
2344
2345 list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
2346 list_entry) {
2347 u16 vsi_handle = agg_vsi_info->vsi_handle;
2348
2349 /* Move VSI to default aggregator */
2350 if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
2351 continue;
2352
2353 status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
2354 ICE_DFLT_AGG_ID, tc);
2355 if (status)
2356 break;
2357
2358 clear_bit(tc, agg_vsi_info->tc_bitmap);
2359 if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
2360 list_del(&agg_vsi_info->list_entry);
2361 devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info);
2362 }
2363 }
2364
2365 return status;
2366 }
2367
2368 /**
2369 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
2370 * @pi: port information structure
2371 * @node: node pointer
2372 *
2373 * This function checks whether the aggregator is attached with any VSI or not.
2374 */
2375 static bool
2376 ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
2377 {
2378 u8 vsil, i;
2379
2380 vsil = ice_sched_get_vsi_layer(pi->hw);
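/* Recurse through the intermediate layers; once the VSI-parent layer is
 * reached, any child means a VSI is still attached to this aggregator
 */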
2381 if (node->tx_sched_layer < vsil - 1) {
2382 for (i = 0; i < node->num_children; i++)
2383 if (ice_sched_is_agg_inuse(pi, node->children[i]))
2384 return true;
2385 return false;
2386 } else {
2387 return node->num_children ? true : false;
2388 }
2389 }
2390
2391 /**
2392 * ice_sched_rm_agg_cfg - remove the aggregator node
2393 * @pi: port information structure
2394 * @agg_id: aggregator ID
2395 * @tc: TC number
2396 *
2397 * This function removes the aggregator node and intermediate nodes if any
2398 * from the given TC
2399 */
2400 static enum ice_status
2401 ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2402 {
2403 struct ice_sched_node *tc_node, *agg_node;
2404 struct ice_hw *hw = pi->hw;
2405
2406 tc_node = ice_sched_get_tc_node(pi, tc);
2407 if (!tc_node)
2408 return ICE_ERR_CFG;
2409
2410 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2411 if (!agg_node)
2412 return ICE_ERR_DOES_NOT_EXIST;
2413
2414 /* Can't remove the aggregator node if it has children */
2415 if (ice_sched_is_agg_inuse(pi, agg_node))
2416 return ICE_ERR_IN_USE;
2417
2418 /* need to remove the whole subtree if aggregator node is the
2419 * only child.
2420 */
2421 while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
2422 struct ice_sched_node *parent = agg_node->parent;
2423
2424 if (!parent)
2425 return ICE_ERR_CFG;
2426
2427 if (parent->num_children > 1)
2428 break;
2429
2430 agg_node = parent;
2431 }
2432
2433 ice_free_sched_node(pi, agg_node);
2434 return 0;
2435 }
2436
2437 /**
2438 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
2439 * @pi: port information structure
2440 * @agg_info: aggregator ID
2441 * @tc: TC number
2442 * @rm_vsi_info: remove the aggregator VSI info entries if true
2443 *
2444 * This function removes the aggregator reference to the VSI of the given TC.
2445 * It removes the aggregator configuration completely for the requested TC.
2446 * The caller needs to hold the scheduler lock.
2447 */
2448 static enum ice_status
2449 ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
2450 u8 tc, bool rm_vsi_info)
2451 {
2452 enum ice_status status = 0;
2453
2454 /* If nothing to remove - return success */
2455 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2456 goto exit_rm_agg_cfg_tc;
2457
2458 status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
2459 if (status)
2460 goto exit_rm_agg_cfg_tc;
2461
2462 /* Delete aggregator node(s) */
2463 status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
2464 if (status)
2465 goto exit_rm_agg_cfg_tc;
2466
2467 clear_bit(tc, agg_info->tc_bitmap);
2468 exit_rm_agg_cfg_tc:
2469 return status;
2470 }
2471
2472 /**
2473 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
2474 * @pi: port information structure
2475 * @agg_id: aggregator ID
2476 * @tc_bitmap: 8 bits TC bitmap
2477 *
2478 * Save aggregator TC bitmap. This function needs to be called with scheduler
2479 * lock held.
2480 */
2481 static enum ice_status
2482 ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
2483 unsigned long *tc_bitmap)
2484 {
2485 struct ice_sched_agg_info *agg_info;
2486
2487 agg_info = ice_get_agg_info(pi->hw, agg_id);
2488 if (!agg_info)
2489 return ICE_ERR_PARAM;
2490 bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
2491 ICE_MAX_TRAFFIC_CLASS);
2492 return 0;
2493 }
2494
2495 /**
2496 * ice_sched_add_agg_cfg - create an aggregator node
2497 * @pi: port information structure
2498 * @agg_id: aggregator ID
2499 * @tc: TC number
2500 *
2501 * This function creates an aggregator node and intermediate nodes if required
2502 * for the given TC
2503 */
2504 static enum ice_status
2505 ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2506 {
2507 struct ice_sched_node *parent, *agg_node, *tc_node;
2508 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
2509 enum ice_status status = 0;
2510 struct ice_hw *hw = pi->hw;
2511 u32 first_node_teid;
2512 u16 num_nodes_added;
2513 u8 i, aggl;
2514
2515 tc_node = ice_sched_get_tc_node(pi, tc);
2516 if (!tc_node)
2517 return ICE_ERR_CFG;
2518
2519 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2520 /* Does the aggregator node already exist? */
2521 if (agg_node)
2522 return status;
2523
2524 aggl = ice_sched_get_agg_layer(hw);
2525
2526 /* need one node in Agg layer */
2527 num_nodes[aggl] = 1;
2528
2529 /* Check whether the intermediate nodes have space to add the
2530 * new aggregator. If they are full, then SW needs to allocate a new
2531 * intermediate node on those layers
2532 */
2533 for (i = hw->sw_entry_point_layer; i < aggl; i++) {
2534 parent = ice_sched_get_first_node(pi, tc_node, i);
2535
2536 /* scan all the siblings */
2537 while (parent) {
2538 if (parent->num_children < hw->max_children[i])
2539 break;
2540 parent = parent->sibling;
2541 }
2542
2543 /* all the nodes are full, reserve one for this layer */
2544 if (!parent)
2545 num_nodes[i]++;
2546 }
2547
2548 /* add the aggregator node */
2549 parent = tc_node;
2550 for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
2551 if (!parent)
2552 return ICE_ERR_CFG;
2553
2554 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
2555 num_nodes[i],
2556 &first_node_teid,
2557 &num_nodes_added);
2558 if (status || num_nodes[i] != num_nodes_added)
2559 return ICE_ERR_CFG;
2560
2561 /* The newly added node can be a new parent for the next
2562 * layer nodes
2563 */
2564 if (num_nodes_added) {
2565 parent = ice_sched_find_node_by_teid(tc_node,
2566 first_node_teid);
2567 /* register aggregator ID with the aggregator node */
2568 if (parent && i == aggl)
2569 parent->agg_id = agg_id;
2570 } else {
2571 parent = parent->children[0];
2572 }
2573 }
2574
2575 return 0;
2576 }
2577
2578 /**
2579 * ice_sched_cfg_agg - configure aggregator node
2580 * @pi: port information structure
2581 * @agg_id: aggregator ID
2582 * @agg_type: aggregator type queue, VSI, or aggregator group
2583 * @tc_bitmap: bits TC bitmap
2584 *
2585 * It registers a unique aggregator node into scheduler services. It
2586 * allows a user to register with a unique ID to track its resources.
2587 * The aggregator type determines if this is a queue group, VSI group
2588 * or aggregator group. It then creates the aggregator node(s) for requested
2589 * TC(s) or removes an existing aggregator node including its configuration
2590 * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
2591 * resources and remove aggregator ID.
2592 * This function needs to be called with scheduler lock held.
2593 */
2594 static enum ice_status
2595 ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
2596 enum ice_agg_type agg_type, unsigned long *tc_bitmap)
2597 {
2598 struct ice_sched_agg_info *agg_info;
2599 enum ice_status status = 0;
2600 struct ice_hw *hw = pi->hw;
2601 u8 tc;
2602
2603 agg_info = ice_get_agg_info(hw, agg_id);
2604 if (!agg_info) {
2605 /* Create new entry for new aggregator ID */
2606 agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
2607 GFP_KERNEL);
2608 if (!agg_info)
2609 return ICE_ERR_NO_MEMORY;
2610
2611 agg_info->agg_id = agg_id;
2612 agg_info->agg_type = agg_type;
2613 agg_info->tc_bitmap[0] = 0;
2614
2615 /* Initialize the aggregator VSI list head */
2616 INIT_LIST_HEAD(&agg_info->agg_vsi_list);
2617
2618 /* Add new entry in aggregator list */
2619 list_add(&agg_info->list_entry, &hw->agg_list);
2620 }
2621 /* Create aggregator node(s) for requested TC(s) */
2622 ice_for_each_traffic_class(tc) {
2623 if (!ice_is_tc_ena(*tc_bitmap, tc)) {
2624 /* Delete aggregator cfg TC if it exists previously */
2625 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
2626 if (status)
2627 break;
2628 continue;
2629 }
2630
2631 /* Check if aggregator node for TC already exists */
2632 if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2633 continue;
2634
2635 /* Create new aggregator node for TC */
2636 status = ice_sched_add_agg_cfg(pi, agg_id, tc);
2637 if (status)
2638 break;
2639
2640 /* Save aggregator node's TC information */
2641 set_bit(tc, agg_info->tc_bitmap);
2642 }
2643
2644 return status;
2645 }
2646
2647 /**
2648 * ice_cfg_agg - config aggregator node
2649 * @pi: port information structure
2650 * @agg_id: aggregator ID
2651 * @agg_type: aggregator type queue, VSI, or aggregator group
2652 * @tc_bitmap: bits TC bitmap
2653 *
2654 * This function configures aggregator node(s).
2655 */
2656 enum ice_status
2657 ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
2658 u8 tc_bitmap)
2659 {
2660 unsigned long bitmap = tc_bitmap;
2661 enum ice_status status;
2662
2663 mutex_lock(&pi->sched_lock);
2664 status = ice_sched_cfg_agg(pi, agg_id, agg_type,
2665 (unsigned long *)&bitmap);
2666 if (!status)
2667 status = ice_save_agg_tc_bitmap(pi, agg_id,
2668 (unsigned long *)&bitmap);
2669 mutex_unlock(&pi->sched_lock);
2670 return status;
2671 }
2672
2673 /**
2674 * ice_get_agg_vsi_info - get the aggregator VSI info
2675 * @agg_info: aggregator info
2676 * @vsi_handle: software VSI handle
2677 *
2678 * The function returns aggregator VSI info based on VSI handle. This function
2679 * needs to be called with scheduler lock held.
2680 */
2681 static struct ice_sched_agg_vsi_info *
2682 ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
2683 {
2684 struct ice_sched_agg_vsi_info *agg_vsi_info;
2685
2686 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry)
2687 if (agg_vsi_info->vsi_handle == vsi_handle)
2688 return agg_vsi_info;
2689
2690 return NULL;
2691 }
2692
2693 /**
2694 * ice_get_vsi_agg_info - get the aggregator info of VSI
2695 * @hw: pointer to the hardware structure
2696 * @vsi_handle: software VSI handle
2697 *
2698 * The function returns the aggregator info of the VSI represented by
2699 * vsi_handle, i.e. when the VSI has an aggregator other than the default one.
2700 * This function needs to be called with the scheduler lock held.
2701 */
2702 static struct ice_sched_agg_info *
2703 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
2704 {
2705 struct ice_sched_agg_info *agg_info;
2706
2707 list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
2708 struct ice_sched_agg_vsi_info *agg_vsi_info;
2709
2710 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2711 if (agg_vsi_info)
2712 return agg_info;
2713 }
2714 return NULL;
2715 }
2716
2717 /**
2718 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
2719 * @pi: port information structure
2720 * @agg_id: aggregator ID
2721 * @vsi_handle: software VSI handle
2722 * @tc_bitmap: TC bitmap of enabled TC(s)
2723 *
2724 * Save the VSI to aggregator TC bitmap. This function needs to be called with
2725 * the scheduler lock held.
2726 */
2727 static enum ice_status
2728 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2729 unsigned long *tc_bitmap)
2730 {
2731 struct ice_sched_agg_vsi_info *agg_vsi_info;
2732 struct ice_sched_agg_info *agg_info;
2733
2734 agg_info = ice_get_agg_info(pi->hw, agg_id);
2735 if (!agg_info)
2736 return ICE_ERR_PARAM;
2737 /* check if the entry already exists */
2738 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2739 if (!agg_vsi_info)
2740 return ICE_ERR_PARAM;
2741 bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
2742 ICE_MAX_TRAFFIC_CLASS);
2743 return 0;
2744 }
2745
2746 /**
2747 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
2748 * @pi: port information structure
2749 * @agg_id: aggregator ID
2750 * @vsi_handle: software VSI handle
2751 * @tc_bitmap: TC bitmap of enabled TC(s)
2752 *
2753 * This function moves a VSI to a new or default aggregator node. If the VSI
2754 * is already associated with the aggregator node then no operation is
2755 * performed on the tree. This function needs to be called with scheduler lock held.
2756 */
2757 static enum ice_status
2758 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
2759 u16 vsi_handle, unsigned long *tc_bitmap)
2760 {
2761 struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
2762 struct ice_sched_agg_info *agg_info, *old_agg_info;
2763 enum ice_status status = 0;
2764 struct ice_hw *hw = pi->hw;
2765 u8 tc;
2766
2767 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2768 return ICE_ERR_PARAM;
2769 agg_info = ice_get_agg_info(hw, agg_id);
2770 if (!agg_info)
2771 return ICE_ERR_PARAM;
2772 /* If the VSI is already part of another aggregator then update
2773 * its VSI info list
2774 */
2775 old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
2776 if (old_agg_info && old_agg_info != agg_info) {
2777 struct ice_sched_agg_vsi_info *vtmp;
2778
2779 list_for_each_entry_safe(iter, vtmp,
2780 &old_agg_info->agg_vsi_list,
2781 list_entry)
2782 if (iter->vsi_handle == vsi_handle) {
2783 old_agg_vsi_info = iter;
2784 break;
2785 }
2786 }
2787
2788 /* check if the entry already exists */
2789 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2790 if (!agg_vsi_info) {
2791 /* Create new entry for VSI under aggregator list */
2792 agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
2793 sizeof(*agg_vsi_info), GFP_KERNEL);
2794 if (!agg_vsi_info)
2795 return ICE_ERR_PARAM;
2796
2797 /* add VSI ID into the aggregator list */
2798 agg_vsi_info->vsi_handle = vsi_handle;
2799 list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
2800 }
2801 /* Move VSI node to new aggregator node for requested TC(s) */
2802 ice_for_each_traffic_class(tc) {
2803 if (!ice_is_tc_ena(*tc_bitmap, tc))
2804 continue;
2805
2806 /* Move VSI to new aggregator */
2807 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
2808 if (status)
2809 break;
2810
2811 set_bit(tc, agg_vsi_info->tc_bitmap);
2812 if (old_agg_vsi_info)
2813 clear_bit(tc, old_agg_vsi_info->tc_bitmap);
2814 }
2815 if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
2816 list_del(&old_agg_vsi_info->list_entry);
2817 devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info);
2818 }
2819 return status;
2820 }
2821
2822 /**
2823 * ice_sched_rm_unused_rl_prof - remove unused RL profile
2824 * @pi: port information structure
2825 *
2826 * This function removes unused rate limit profiles from the HW and
2827 * SW DB. The caller needs to hold scheduler lock.
2828 */
2829 static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
2830 {
2831 u16 ln;
2832
2833 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
2834 struct ice_aqc_rl_profile_info *rl_prof_elem;
2835 struct ice_aqc_rl_profile_info *rl_prof_tmp;
2836
2837 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
2838 &pi->rl_prof_list[ln], list_entry) {
2839 if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
2840 ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
2841 }
2842 }
2843 }
2844
2845 /**
2846 * ice_sched_update_elem - update element
2847 * @hw: pointer to the HW struct
2848 * @node: pointer to node
2849 * @info: node info to update
2850 *
2851 * Update the HW DB and the local SW DB of the node. Update the scheduling
2852 * parameters of the node from the argument info data buffer (info->data buf)
2853 * and return success, or an error on config sched element failure. The caller
2854 * needs to hold the scheduler lock.
2855 */
2856 static enum ice_status
2857 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
2858 struct ice_aqc_txsched_elem_data *info)
2859 {
2860 struct ice_aqc_txsched_elem_data buf;
2861 enum ice_status status;
2862 u16 elem_cfgd = 0;
2863 u16 num_elems = 1;
2864
2865 buf = *info;
2866 /* Parent TEID is reserved field in this aq call */
2867 buf.parent_teid = 0;
2868 /* Element type is reserved field in this aq call */
2869 buf.data.elem_type = 0;
2870 /* Flags is reserved field in this aq call */
2871 buf.data.flags = 0;
2872
2873 /* Update HW DB */
2874 /* Configure element node */
2875 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
2876 &elem_cfgd, NULL);
2877 if (status || elem_cfgd != num_elems) {
2878 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
2879 return ICE_ERR_CFG;
2880 }
2881
2882 /* Config success case */
2883 /* Now update local SW DB */
2884 /* Only copy the data portion of info buffer */
2885 node->info.data = info->data;
2886 return status;
2887 }
2888
2889 /**
2890 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
2891 * @hw: pointer to the HW struct
2892 * @node: sched node to configure
2893 * @rl_type: rate limit type CIR, EIR, or shared
2894 * @bw_alloc: BW weight/allocation
2895 *
2896 * This function configures node element's BW allocation.
2897 */
2898 static enum ice_status
2899 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
2900 enum ice_rl_type rl_type, u16 bw_alloc)
2901 {
2902 struct ice_aqc_txsched_elem_data buf;
2903 struct ice_aqc_txsched_elem *data;
2904
2905 buf = node->info;
2906 data = &buf.data;
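/* Update only the CIR or EIR weight/allocation section, mark it valid,
 * and write the element back through ice_sched_update_elem()
 */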
2907 if (rl_type == ICE_MIN_BW) {
2908 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
2909 data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
2910 } else if (rl_type == ICE_MAX_BW) {
2911 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2912 data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
2913 } else {
2914 return ICE_ERR_PARAM;
2915 }
2916
2917 /* Configure element */
2918 return ice_sched_update_elem(hw, node, &buf);
2919 }
2920
2921 /**
2922 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
2923 * @pi: port information structure
2924 * @agg_id: aggregator ID
2925 * @vsi_handle: software VSI handle
2926 * @tc_bitmap: TC bitmap of enabled TC(s)
2927 *
2928 * Move or associate VSI to a new or default aggregator node.
2929 */
2930 enum ice_status
2931 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2932 u8 tc_bitmap)
2933 {
2934 unsigned long bitmap = tc_bitmap;
2935 enum ice_status status;
2936
2937 mutex_lock(&pi->sched_lock);
2938 status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
2939 (unsigned long *)&bitmap);
2940 if (!status)
2941 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
2942 (unsigned long *)&bitmap);
2943 mutex_unlock(&pi->sched_lock);
2944 return status;
2945 }
2946
2947 /**
2948 * ice_set_clear_cir_bw - set or clear CIR BW
2949 * @bw_t_info: bandwidth type information structure
2950 * @bw: bandwidth in Kbps - Kilo bits per sec
2951 *
2952 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
2953 */
2954 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2955 {
2956 if (bw == ICE_SCHED_DFLT_BW) {
2957 clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
2958 bw_t_info->cir_bw.bw = 0;
2959 } else {
2960 /* Save type of BW information */
2961 set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
2962 bw_t_info->cir_bw.bw = bw;
2963 }
2964 }
2965
2966 /**
2967 * ice_set_clear_eir_bw - set or clear EIR BW
2968 * @bw_t_info: bandwidth type information structure
2969 * @bw: bandwidth in Kbps - Kilo bits per sec
2970 *
2971 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
2972 */
2973 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2974 {
2975 if (bw == ICE_SCHED_DFLT_BW) {
2976 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2977 bw_t_info->eir_bw.bw = 0;
2978 } else {
2979 /* EIR BW and Shared BW profiles are mutually exclusive and
2980 * hence only one of them may be set for any given element.
2981 * First clear earlier saved shared BW information.
2982 */
2983 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2984 bw_t_info->shared_bw = 0;
2985 /* save EIR BW information */
2986 set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2987 bw_t_info->eir_bw.bw = bw;
2988 }
2989 }
2990
2991 /**
2992 * ice_set_clear_shared_bw - set or clear shared BW
2993 * @bw_t_info: bandwidth type information structure
2994 * @bw: bandwidth in Kbps - Kilo bits per sec
2995 *
2996 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
2997 */
2998 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2999 {
3000 if (bw == ICE_SCHED_DFLT_BW) {
3001 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3002 bw_t_info->shared_bw = 0;
3003 } else {
3004 /* EIR BW and Shared BW profiles are mutually exclusive and
3005 * hence only one of them may be set for any given element.
3006 * First clear earlier saved EIR BW information.
3007 */
3008 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3009 bw_t_info->eir_bw.bw = 0;
3010 /* save shared BW information */
3011 set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3012 bw_t_info->shared_bw = bw;
3013 }
3014 }
3015
3016 /**
3017 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
3018 * @hw: pointer to the HW struct
3019 * @bw: bandwidth in Kbps
3020 *
3021 * This function calculates the wakeup parameter of RL profile.
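 *
 * The wakeup value approximates psm_clk_freq divided by the requested rate in
 * bytes per second: when the integer part fits in 6 bits it is stored in bits
 * 9-14 with a 9-bit fraction in bits 0-8, otherwise bit 15 is set and the
 * integer value is stored directly.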
3022 */
3023 static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
3024 {
3025 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
3026 s32 wakeup_f_int;
3027 u16 wakeup = 0;
3028
3029 /* Get the wakeup integer value */
3030 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
3031 wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec);
3032 if (wakeup_int > 63) {
3033 wakeup = (u16)((1 << 15) | wakeup_int);
3034 } else {
3035 /* Calculate the fractional value to 4 decimal places by
3036 * converting the integer value to a fixed-point multiplier
3037 */
3038 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
3039 wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
3040 hw->psm_clk_freq, bytes_per_sec);
3041
3042 /* Get Fraction value */
3043 wakeup_f = wakeup_a - wakeup_b;
3044
3045 /* Round up the Fractional value via Ceil(Fractional value) */
3046 if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
3047 wakeup_f += 1;
3048
3049 wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
3050 ICE_RL_PROF_MULTIPLIER);
3051 wakeup |= (u16)(wakeup_int << 9);
3052 wakeup |= (u16)(0x1ff & wakeup_f_int);
3053 }
3054
3055 return wakeup;
3056 }
3057
3058 /**
3059 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
3060 * @hw: pointer to the HW struct
3061 * @bw: bandwidth in Kbps
3062 * @profile: profile parameters to return
3063 *
3064 * This function converts the BW to profile structure format.
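 *
 * It walks the power-of-two timeslot encodings and picks the first one whose
 * computed rate-limit multiplier exceeds ICE_RL_PROF_ACCURACY_BYTES, then
 * fills in the multiplier, wakeup and encode fields of the profile.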
3065 */
3066 static enum ice_status
3067 ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
3068 struct ice_aqc_rl_profile_elem *profile)
3069 {
3070 enum ice_status status = ICE_ERR_PARAM;
3071 s64 bytes_per_sec, ts_rate, mv_tmp;
3072 bool found = false;
3073 s32 encode = 0;
3074 s64 mv = 0;
3075 s32 i;
3076
3077 /* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
3078 if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
3079 return status;
3080
3081 /* Bytes per second from Kbps */
3082 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
3083
3084 /* encode is 6 bits, but only 5 bits are really useful */
3085 for (i = 0; i < 64; i++) {
3086 u64 pow_result = BIT_ULL(i);
3087
3088 ts_rate = div64_long((s64)hw->psm_clk_freq,
3089 pow_result * ICE_RL_PROF_TS_MULTIPLIER);
3090 if (ts_rate <= 0)
3091 continue;
3092
3093 /* Multiplier value */
3094 mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
3095 ts_rate);
3096
3097 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
3098 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
3099
3100 /* First multiplier value greater than the given
3101 * accuracy bytes
3102 */
3103 if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
3104 encode = i;
3105 found = true;
3106 break;
3107 }
3108 }
3109 if (found) {
3110 u16 wm;
3111
3112 wm = ice_sched_calc_wakeup(hw, bw);
3113 profile->rl_multiply = cpu_to_le16(mv);
3114 profile->wake_up_calc = cpu_to_le16(wm);
3115 profile->rl_encode = cpu_to_le16(encode);
3116 status = 0;
3117 } else {
3118 status = ICE_ERR_DOES_NOT_EXIST;
3119 }
3120
3121 return status;
3122 }
3123
3124 /**
3125 * ice_sched_add_rl_profile - add RL profile
3126 * @pi: port information structure
3127 * @rl_type: type of rate limit BW - min, max, or shared
3128 * @bw: bandwidth in Kbps - Kilo bits per sec
3129 * @layer_num: specifies in which layer to create profile
3130 *
3131 * This function first checks the existing list for corresponding BW
3132 * parameter. If it exists, it returns the associated profile otherwise
3133 * it creates a new rate limit profile for requested BW, and adds it to
3134 * the HW DB and local list. It returns the new profile or null on error.
3135 * The caller needs to hold the scheduler lock.
3136 */
3137 static struct ice_aqc_rl_profile_info *
3138 ice_sched_add_rl_profile(struct ice_port_info *pi,
3139 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
3140 {
3141 struct ice_aqc_rl_profile_info *rl_prof_elem;
3142 u16 profiles_added = 0, num_profiles = 1;
3143 struct ice_aqc_rl_profile_elem *buf;
3144 enum ice_status status;
3145 struct ice_hw *hw;
3146 u8 profile_type;
3147
3148 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
3149 return NULL;
3150 switch (rl_type) {
3151 case ICE_MIN_BW:
3152 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
3153 break;
3154 case ICE_MAX_BW:
3155 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
3156 break;
3157 case ICE_SHARED_BW:
3158 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
3159 break;
3160 default:
3161 return NULL;
3162 }
3163
3164 if (!pi)
3165 return NULL;
3166 hw = pi->hw;
3167 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
3168 list_entry)
3169 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
3170 profile_type && rl_prof_elem->bw == bw)
3171 /* Return existing profile ID info */
3172 return rl_prof_elem;
3173
3174 /* Create new profile ID */
3175 rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
3176 GFP_KERNEL);
3177
3178 if (!rl_prof_elem)
3179 return NULL;
3180
3181 status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
3182 if (status)
3183 goto exit_add_rl_prof;
3184
3185 rl_prof_elem->bw = bw;
3186 /* layer_num is zero relative, and fw expects level from 1 to 9 */
3187 rl_prof_elem->profile.level = layer_num + 1;
3188 rl_prof_elem->profile.flags = profile_type;
3189 rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
3190
3191 /* Create new entry in HW DB */
3192 buf = &rl_prof_elem->profile;
3193 status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
3194 &profiles_added, NULL);
3195 if (status || profiles_added != num_profiles)
3196 goto exit_add_rl_prof;
3197
3198 /* Good entry - add in the list */
3199 rl_prof_elem->prof_id_ref = 0;
3200 list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
3201 return rl_prof_elem;
3202
3203 exit_add_rl_prof:
3204 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
3205 return NULL;
3206 }
3207
3208 /**
3209 * ice_sched_cfg_node_bw_lmt - configure node sched params
3210 * @hw: pointer to the HW struct
3211 * @node: sched node to configure
3212 * @rl_type: rate limit type CIR, EIR, or shared
3213 * @rl_prof_id: rate limit profile ID
3214 *
3215 * This function configures node element's BW limit.
3216 */
3217 static enum ice_status
3218 ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
3219 enum ice_rl_type rl_type, u16 rl_prof_id)
3220 {
3221 struct ice_aqc_txsched_elem_data buf;
3222 struct ice_aqc_txsched_elem *data;
3223
3224 buf = node->info;
3225 data = &buf.data;
3226 switch (rl_type) {
3227 case ICE_MIN_BW:
3228 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
3229 data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
3230 break;
3231 case ICE_MAX_BW:
3232 /* EIR BW and Shared BW profiles are mutually exclusive and
3233 * hence only one of them may be set for any given element
3234 */
3235 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
3236 return ICE_ERR_CFG;
3237 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
3238 data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
3239 break;
3240 case ICE_SHARED_BW:
3241 /* Check for removing shared BW */
3242 if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
3243 /* remove shared profile */
3244 data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
3245 data->srl_id = 0; /* clear SRL field */
3246
3247 /* enable back EIR to default profile */
3248 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
3249 data->eir_bw.bw_profile_idx =
3250 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3251 break;
3252 }
3253 /* EIR BW and Shared BW profiles are mutually exclusive and
3254 * hence only one of them may be set for any given element
3255 */
3256 if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
3257 (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
3258 ICE_SCHED_DFLT_RL_PROF_ID))
3259 return ICE_ERR_CFG;
3260 /* EIR BW is set to default, disable it */
3261 data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
3262 /* Okay to enable shared BW now */
3263 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
3264 data->srl_id = cpu_to_le16(rl_prof_id);
3265 break;
3266 default:
3267 /* Unknown rate limit type */
3268 return ICE_ERR_PARAM;
3269 }
3270
3271 /* Configure element */
3272 return ice_sched_update_elem(hw, node, &buf);
3273 }
3274
3275 /**
3276 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
3277 * @node: sched node
3278 * @rl_type: rate limit type
3279 *
3280 * If existing profile matches, it returns the corresponding rate
3281 * limit profile ID, otherwise it returns an invalid ID as error.
3282 */
3283 static u16
3284 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
3285 enum ice_rl_type rl_type)
3286 {
3287 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
3288 struct ice_aqc_txsched_elem *data;
3289
3290 data = &node->info.data;
3291 switch (rl_type) {
3292 case ICE_MIN_BW:
3293 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
3294 rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
3295 break;
3296 case ICE_MAX_BW:
3297 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
3298 rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
3299 break;
3300 case ICE_SHARED_BW:
3301 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
3302 rl_prof_id = le16_to_cpu(data->srl_id);
3303 break;
3304 default:
3305 break;
3306 }
3307
3308 return rl_prof_id;
3309 }
3310
3311 /**
3312 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
3313 * @pi: port information structure
3314 * @rl_type: type of rate limit BW - min, max, or shared
3315 * @layer_index: layer index
3316 *
3317 * This function returns requested profile creation layer.
3318 */
3319 static u8
3320 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
3321 u8 layer_index)
3322 {
3323 struct ice_hw *hw = pi->hw;
3324
3325 if (layer_index >= hw->num_tx_sched_layers)
3326 return ICE_SCHED_INVAL_LAYER_NUM;
3327 switch (rl_type) {
3328 case ICE_MIN_BW:
3329 if (hw->layer_info[layer_index].max_cir_rl_profiles)
3330 return layer_index;
3331 break;
3332 case ICE_MAX_BW:
3333 if (hw->layer_info[layer_index].max_eir_rl_profiles)
3334 return layer_index;
3335 break;
3336 case ICE_SHARED_BW:
3337 /* if current layer doesn't support SRL profile creation
3338 * then try a layer up or down.
3339 */
3340 if (hw->layer_info[layer_index].max_srl_profiles)
3341 return layer_index;
3342 else if (layer_index < hw->num_tx_sched_layers - 1 &&
3343 hw->layer_info[layer_index + 1].max_srl_profiles)
3344 return layer_index + 1;
3345 else if (layer_index > 0 &&
3346 hw->layer_info[layer_index - 1].max_srl_profiles)
3347 return layer_index - 1;
3348 break;
3349 default:
3350 break;
3351 }
3352 return ICE_SCHED_INVAL_LAYER_NUM;
3353 }
3354
3355 /**
3356 * ice_sched_get_srl_node - get shared rate limit node
3357 * @node: tree node
3358 * @srl_layer: shared rate limit layer
3359 *
3360 * This function returns the SRL node to be used for shared rate limiting.
3361 * The caller needs to hold scheduler lock.
3362 */
3363 static struct ice_sched_node *
3364 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
3365 {
3366 if (srl_layer > node->tx_sched_layer)
3367 return node->children[0];
3368 else if (srl_layer < node->tx_sched_layer)
3369 /* A node can't be created without a parent, so every node
3370 * except the root always has a valid parent.
3371 */
3372 return node->parent;
3373 else
3374 return node;
3375 }
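/* Illustrative example (hypothetical numbers): if the node sits at
 * tx_sched_layer 5 and the selected SRL layer is 4, the node's parent is
 * used; if the SRL layer were 6, the node's first child would be used;
 * when the layers match, the node itself is returned.
 */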
3376
3377 /**
3378 * ice_sched_rm_rl_profile - remove RL profile ID
3379 * @pi: port information structure
3380 * @layer_num: layer number where profiles are saved
3381 * @profile_type: profile type like EIR, CIR, or SRL
3382 * @profile_id: profile ID to remove
3383 *
3384 * This function removes the rate limit profile of type 'profile_type' with
3385 * ID 'profile_id' from layer 'layer_num'. The caller needs to hold the
3386 * scheduler lock.
3387 */
3388 static enum ice_status
3389 ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
3390 u16 profile_id)
3391 {
3392 struct ice_aqc_rl_profile_info *rl_prof_elem;
3393 enum ice_status status = 0;
3394
3395 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
3396 return ICE_ERR_PARAM;
3397 /* Check the existing list for RL profile */
3398 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
3399 list_entry)
3400 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
3401 profile_type &&
3402 le16_to_cpu(rl_prof_elem->profile.profile_id) ==
3403 profile_id) {
3404 if (rl_prof_elem->prof_id_ref)
3405 rl_prof_elem->prof_id_ref--;
3406
3407 /* Remove old profile ID from database */
3408 status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
3409 if (status && status != ICE_ERR_IN_USE)
3410 ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
3411 break;
3412 }
3413 if (status == ICE_ERR_IN_USE)
3414 status = 0;
3415 return status;
3416 }
3417
3418 /**
3419 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
3420 * @pi: port information structure
3421 * @node: pointer to node structure
3422 * @rl_type: rate limit type min, max, or shared
3423 * @layer_num: layer number where RL profiles are saved
3424 *
3425 * This function configures node element's BW rate limit profile ID of
3426 * type CIR, EIR, or SRL to default. This function needs to be called
3427 * with the scheduler lock held.
3428 */
3429 static enum ice_status
3430 ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
3431 struct ice_sched_node *node,
3432 enum ice_rl_type rl_type, u8 layer_num)
3433 {
3434 enum ice_status status;
3435 struct ice_hw *hw;
3436 u8 profile_type;
3437 u16 rl_prof_id;
3438 u16 old_id;
3439
3440 hw = pi->hw;
3441 switch (rl_type) {
3442 case ICE_MIN_BW:
3443 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
3444 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
3445 break;
3446 case ICE_MAX_BW:
3447 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
3448 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
3449 break;
3450 case ICE_SHARED_BW:
3451 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
3452 /* No SRL is configured for default case */
3453 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
3454 break;
3455 default:
3456 return ICE_ERR_PARAM;
3457 }
3458 /* Save existing RL prof ID for later clean up */
3459 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
3460 /* Configure BW scheduling parameters */
3461 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
3462 if (status)
3463 return status;
3464
3465 /* Remove stale RL profile ID */
3466 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
3467 old_id == ICE_SCHED_INVAL_PROF_ID)
3468 return 0;
3469
3470 return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
3471 }
3472
3473 /**
3474 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
3475 * @pi: port information structure
3476 * @node: pointer to node structure
3477 * @layer_num: layer number where rate limit profiles are saved
3478 * @rl_type: rate limit type min, max, or shared
3479 * @bw: bandwidth value
3480 *
3481 * This function configures the node element's bandwidth for SRL or EIR
3482 * exclusively. EIR BW and Shared BW profiles are mutually exclusive, hence
3483 * only one of them may be set for any given element. This function needs to
3484 * be called with the scheduler lock held.
3485 */
3486 static enum ice_status
3487 ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
3488 struct ice_sched_node *node,
3489 u8 layer_num, enum ice_rl_type rl_type, u32 bw)
3490 {
3491 if (rl_type == ICE_SHARED_BW) {
3492 /* An SRL node is passed in this case; it may be a different node */
3493 if (bw == ICE_SCHED_DFLT_BW)
3494 /* The SRL is being removed; ice_sched_cfg_node_bw_lmt()
3495 * re-enables EIR with its default profile. EIR is not set
3496 * in this case, so no additional action is required.
3497 */
3498 return 0;
3499
3500 /* SRL being configured, set EIR to default here.
3501 * ice_sched_cfg_node_bw_lmt() disables EIR when it
3502 * configures SRL
3503 */
3504 return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
3505 layer_num);
3506 } else if (rl_type == ICE_MAX_BW &&
3507 node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
3508 /* Remove the shared profile. Setting the shared BW to its
3509 * default removes the shared profile from the node.
3510 */
3511 return ice_sched_set_node_bw_dflt(pi, node,
3512 ICE_SHARED_BW,
3513 layer_num);
3514 }
3515 return 0;
3516 }
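/* Descriptive note on the exclusivity handling above:
 *  - configuring SRL with a real BW first resets the node's EIR to default;
 *  - removing SRL (bw == ICE_SCHED_DFLT_BW) needs no EIR action, since
 *    ice_sched_cfg_node_bw_lmt() restores EIR as part of SRL removal;
 *  - configuring EIR on a node that currently has a shared profile first
 *    removes that shared profile.
 */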
3517
3518 /**
3519 * ice_sched_set_node_bw - set node's bandwidth
3520 * @pi: port information structure
3521 * @node: tree node
3522 * @rl_type: rate limit type min, max, or shared
3523 * @bw: bandwidth in Kbps - Kilo bits per sec
3524 * @layer_num: layer number
3525 *
3526 * This function adds a new profile for the requested BW, configures the
3527 * node's RL profile ID of type CIR, EIR, or SRL, and removes the old profile
3528 * ID from the local database. The caller needs to hold the scheduler lock.
3529 */
3530 static enum ice_status
3531 ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
3532 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
3533 {
3534 struct ice_aqc_rl_profile_info *rl_prof_info;
3535 enum ice_status status = ICE_ERR_PARAM;
3536 struct ice_hw *hw = pi->hw;
3537 u16 old_id, rl_prof_id;
3538
3539 rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
3540 if (!rl_prof_info)
3541 return status;
3542
3543 rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
3544
3545 /* Save existing RL prof ID for later clean up */
3546 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
3547 /* Configure BW scheduling parameters */
3548 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
3549 if (status)
3550 return status;
3551
3552 /* New changes have been applied */
3553 /* Increment the profile ID reference count */
3554 rl_prof_info->prof_id_ref++;
3555
3556 /* Check for old ID removal */
3557 if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
3558 old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
3559 return 0;
3560
3561 return ice_sched_rm_rl_profile(pi, layer_num,
3562 rl_prof_info->profile.flags &
3563 ICE_AQC_RL_PROFILE_TYPE_M, old_id);
3564 }
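/* Descriptive note: profile lifetime is reference counted. The newly
 * selected profile's prof_id_ref is incremented above, and the previously
 * used profile (old_id) is handed to ice_sched_rm_rl_profile(), which drops
 * its reference and only deletes the profile from HW/SW once it is no longer
 * referenced (ICE_ERR_IN_USE is treated as success there).
 */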
3565
3566 /**
3567 * ice_sched_set_node_bw_lmt - set node's BW limit
3568 * @pi: port information structure
3569 * @node: tree node
3570 * @rl_type: rate limit type min, max, or shared
3571 * @bw: bandwidth in Kbps - Kilo bits per sec
3572 *
3573 * It updates the node's BW limit parameters, such as the BW RL profile ID of
3574 * type CIR, EIR, or SRL. The caller needs to hold the scheduler lock.
3575 */
3576 static enum ice_status
3577 ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
3578 enum ice_rl_type rl_type, u32 bw)
3579 {
3580 struct ice_sched_node *cfg_node = node;
3581 enum ice_status status;
3582
3583 struct ice_hw *hw;
3584 u8 layer_num;
3585
3586 if (!pi)
3587 return ICE_ERR_PARAM;
3588 hw = pi->hw;
3589 /* Remove unused RL profile IDs from HW and SW DB */
3590 ice_sched_rm_unused_rl_prof(pi);
3591 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
3592 node->tx_sched_layer);
3593 if (layer_num >= hw->num_tx_sched_layers)
3594 return ICE_ERR_PARAM;
3595
3596 if (rl_type == ICE_SHARED_BW) {
3597 /* SRL node may be different */
3598 cfg_node = ice_sched_get_srl_node(node, layer_num);
3599 if (!cfg_node)
3600 return ICE_ERR_CFG;
3601 }
3602 /* EIR BW and Shared BW profiles are mutually exclusive and
3603 * hence only one of them may be set for any given element
3604 */
3605 status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
3606 bw);
3607 if (status)
3608 return status;
3609 if (bw == ICE_SCHED_DFLT_BW)
3610 return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
3611 layer_num);
3612 return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
3613 }
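/* Descriptive note on the overall flow of ice_sched_set_node_bw_lmt():
 * garbage-collect unused RL profiles, pick the layer that can host the
 * profile for this rl_type, switch to the SRL node when shared BW is being
 * configured, enforce EIR/SRL exclusivity, and finally either restore the
 * default profile or attach a newly added profile for the requested BW.
 */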
3614
3615 /**
3616 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
3617 * @pi: port information structure
3618 * @node: pointer to node structure
3619 * @rl_type: rate limit type min, max, or shared
3620 *
3621 * This function configures node element's BW rate limit profile ID of
3622 * type CIR, EIR, or SRL to default. This function needs to be called
3623 * with the scheduler lock held.
3624 */
3625 static enum ice_status
3626 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
3627 struct ice_sched_node *node,
3628 enum ice_rl_type rl_type)
3629 {
3630 return ice_sched_set_node_bw_lmt(pi, node, rl_type,
3631 ICE_SCHED_DFLT_BW);
3632 }
3633
3634 /**
3635 * ice_sched_validate_srl_node - Check node for SRL applicability
3636 * @node: sched node to configure
3637 * @sel_layer: selected SRL layer
3638 *
3639 * This function checks if the SRL can be applied to a selected layer node on
3640 * behalf of the requested node (first argument). This function needs to be
3641 * called with scheduler lock held.
3642 */
3643 static enum ice_status
3644 ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
3645 {
3646 /* SRL profiles are not available on all layers. Check if the
3647 * SRL profile can be applied to a node above or below the
3648 * requested node. SRL configuration is possible only if the
3649 * selected layer's node has a single child.
3650 */
3651 if (sel_layer == node->tx_sched_layer ||
3652 ((sel_layer == node->tx_sched_layer + 1) &&
3653 node->num_children == 1) ||
3654 ((sel_layer == node->tx_sched_layer - 1) &&
3655 (node->parent && node->parent->num_children == 1)))
3656 return 0;
3657
3658 return ICE_ERR_CFG;
3659 }
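/* Illustrative example (hypothetical layout): if the requested node is at
 * layer 5 and the SRL layer selected for it is 4, validation succeeds only
 * when the node's parent (the layer 4 node) has exactly one child, so the
 * shared limit applied at the parent cannot silently throttle siblings.
 */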
3660
3661 /**
3662 * ice_sched_save_q_bw - save queue node's BW information
3663 * @q_ctx: queue context structure
3664 * @rl_type: rate limit type min, max, or shared
3665 * @bw: bandwidth in Kbps - Kilo bits per sec
3666 *
3667 * Save BW information of queue type node for post replay use.
3668 */
3669 static enum ice_status
3670 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
3671 {
3672 switch (rl_type) {
3673 case ICE_MIN_BW:
3674 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
3675 break;
3676 case ICE_MAX_BW:
3677 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
3678 break;
3679 case ICE_SHARED_BW:
3680 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
3681 break;
3682 default:
3683 return ICE_ERR_PARAM;
3684 }
3685 return 0;
3686 }
3687
3688 /**
3689 * ice_sched_set_q_bw_lmt - sets queue BW limit
3690 * @pi: port information structure
3691 * @vsi_handle: sw VSI handle
3692 * @tc: traffic class
3693 * @q_handle: software queue handle
3694 * @rl_type: min, max, or shared
3695 * @bw: bandwidth in Kbps
3696 *
3697 * This function sets BW limit of queue scheduling node.
3698 */
3699 static enum ice_status
3700 ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3701 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
3702 {
3703 enum ice_status status = ICE_ERR_PARAM;
3704 struct ice_sched_node *node;
3705 struct ice_q_ctx *q_ctx;
3706
3707 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3708 return ICE_ERR_PARAM;
3709 mutex_lock(&pi->sched_lock);
3710 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
3711 if (!q_ctx)
3712 goto exit_q_bw_lmt;
3713 node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
3714 if (!node) {
3715 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
3716 goto exit_q_bw_lmt;
3717 }
3718
3719 /* Return error if it is not a leaf node */
3720 if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
3721 goto exit_q_bw_lmt;
3722
3723 /* SRL bandwidth layer selection */
3724 if (rl_type == ICE_SHARED_BW) {
3725 u8 sel_layer; /* selected layer */
3726
3727 sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
3728 node->tx_sched_layer);
3729 if (sel_layer >= pi->hw->num_tx_sched_layers) {
3730 status = ICE_ERR_PARAM;
3731 goto exit_q_bw_lmt;
3732 }
3733 status = ice_sched_validate_srl_node(node, sel_layer);
3734 if (status)
3735 goto exit_q_bw_lmt;
3736 }
3737
3738 if (bw == ICE_SCHED_DFLT_BW)
3739 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
3740 else
3741 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
3742
3743 if (!status)
3744 status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
3745
3746 exit_q_bw_lmt:
3747 mutex_unlock(&pi->sched_lock);
3748 return status;
3749 }
3750
3751 /**
3752 * ice_cfg_q_bw_lmt - configure queue BW limit
3753 * @pi: port information structure
3754 * @vsi_handle: sw VSI handle
3755 * @tc: traffic class
3756 * @q_handle: software queue handle
3757 * @rl_type: min, max, or shared
3758 * @bw: bandwidth in Kbps
3759 *
3760 * This function configures BW limit of queue scheduling node.
3761 */
3762 enum ice_status
3763 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3764 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
3765 {
3766 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
3767 bw);
3768 }
3769
3770 /**
3771 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
3772 * @pi: port information structure
3773 * @vsi_handle: sw VSI handle
3774 * @tc: traffic class
3775 * @q_handle: software queue handle
3776 * @rl_type: min, max, or shared
3777 *
3778 * This function configures BW default limit of queue scheduling node.
3779 */
3780 enum ice_status
3781 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3782 u16 q_handle, enum ice_rl_type rl_type)
3783 {
3784 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
3785 ICE_SCHED_DFLT_BW);
3786 }
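/* Usage sketch (illustrative only; vsi_handle and q_handle are whatever
 * software handles the caller used when adding the queue): a caller that
 * wants to cap a LAN queue on TC 0 at roughly 100 Mbps could do
 *
 *	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, q_handle,
 *				  ICE_MAX_BW, 100000);
 *
 * since bw is expressed in Kbps, and later restore the default with
 *
 *	status = ice_cfg_q_bw_dflt_lmt(pi, vsi_handle, 0, q_handle,
 *				       ICE_MAX_BW);
 */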
3787
3788 /**
3789 * ice_cfg_rl_burst_size - Set burst size value
3790 * @hw: pointer to the HW struct
3791 * @bytes: burst size in bytes
3792 *
3793 * This function configures the burst size to the requested new value. The new
3794 * burst size value is used for future rate limit calls. It doesn't change
3795 * existing or previously created RL profiles.
3796 */
3797 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
3798 {
3799 u16 burst_size_to_prog;
3800
3801 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
3802 bytes > ICE_MAX_BURST_SIZE_ALLOWED)
3803 return ICE_ERR_PARAM;
3804 if (ice_round_to_num(bytes, 64) <=
3805 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
3806 /* 64 byte granularity case */
3807 /* Disable MSB granularity bit */
3808 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
3809 /* round number to nearest 64 byte granularity */
3810 bytes = ice_round_to_num(bytes, 64);
3811 /* The value is in 64 byte chunks */
3812 burst_size_to_prog |= (u16)(bytes / 64);
3813 } else {
3814 /* k bytes granularity case */
3815 /* Enable MSB granularity bit */
3816 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
3817 /* round number to nearest 1024 granularity */
3818 bytes = ice_round_to_num(bytes, 1024);
3819 /* check rounding doesn't go beyond allowed */
3820 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
3821 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
3822 /* The value is in k bytes */
3823 burst_size_to_prog |= (u16)(bytes / 1024);
3824 }
3825 hw->max_burst_size = burst_size_to_prog;
3826 return 0;
3827 }
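/* Worked example for the encoding above (illustrative, assuming the values
 * pass the ICE_MIN/MAX_BURST_SIZE_ALLOWED range check): a request of 4096
 * bytes is an exact multiple of 64, so if it falls in the 64 byte granularity
 * path it is programmed as ICE_64_BYTE_GRANULARITY | (4096 / 64). A larger
 * request that lands in the 1K path is rounded to a multiple of 1024, capped
 * at ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY, and programmed as
 * ICE_KBYTE_GRANULARITY | (bytes / 1024).
 */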
3828
3829 /**
3830 * ice_sched_replay_node_prio - re-configure node priority
3831 * @hw: pointer to the HW struct
3832 * @node: sched node to configure
3833 * @priority: priority value
3834 *
3835 * This function configures node element's priority value. It
3836 * needs to be called with scheduler lock held.
3837 */
3838 static enum ice_status
3839 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
3840 u8 priority)
3841 {
3842 struct ice_aqc_txsched_elem_data buf;
3843 struct ice_aqc_txsched_elem *data;
3844 enum ice_status status;
3845
3846 buf = node->info;
3847 data = &buf.data;
3848 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
3849 data->generic = priority;
3850
3851 /* Configure element */
3852 status = ice_sched_update_elem(hw, node, &buf);
3853 return status;
3854 }
3855
3856 /**
3857 * ice_sched_replay_node_bw - replay node(s) BW
3858 * @hw: pointer to the HW struct
3859 * @node: sched node to configure
3860 * @bw_t_info: BW type information
3861 *
3862 * This function restores node's BW from bw_t_info. The caller needs
3863 * to hold the scheduler lock.
3864 */
3865 static enum ice_status
3866 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
3867 struct ice_bw_type_info *bw_t_info)
3868 {
3869 struct ice_port_info *pi = hw->port_info;
3870 enum ice_status status = ICE_ERR_PARAM;
3871 u16 bw_alloc;
3872
3873 if (!node)
3874 return status;
3875 if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
3876 return 0;
3877 if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
3878 status = ice_sched_replay_node_prio(hw, node,
3879 bw_t_info->generic);
3880 if (status)
3881 return status;
3882 }
3883 if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
3884 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
3885 bw_t_info->cir_bw.bw);
3886 if (status)
3887 return status;
3888 }
3889 if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
3890 bw_alloc = bw_t_info->cir_bw.bw_alloc;
3891 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
3892 bw_alloc);
3893 if (status)
3894 return status;
3895 }
3896 if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
3897 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
3898 bw_t_info->eir_bw.bw);
3899 if (status)
3900 return status;
3901 }
3902 if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
3903 bw_alloc = bw_t_info->eir_bw.bw_alloc;
3904 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
3905 bw_alloc);
3906 if (status)
3907 return status;
3908 }
3909 if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
3910 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
3911 bw_t_info->shared_bw);
3912 return status;
3913 }
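/* Descriptive note: the replay order above is priority, then CIR limit and
 * CIR weight, then EIR limit and EIR weight, then the shared limit; only the
 * types flagged in bw_t_info->bw_t_bitmap are touched, and the first failure
 * aborts the replay.
 */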
3914
3915 /**
3916 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
3917 * @pi: port info struct
3918 * @tc_bitmap: 8 bits TC bitmap to check
3919 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
3920 *
3921 * This function returns the bitmap of enabled TCs in ena_tc_bitmap. Some TCs
3922 * may be missing after a reset, so only TCs whose nodes exist are reported.
3923 * This function needs to be called with the scheduler lock held.
3924 */
3925 static void
3926 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi,
3927 unsigned long *tc_bitmap,
3928 unsigned long *ena_tc_bitmap)
3929 {
3930 u8 tc;
3931
3932 /* Some TC(s) may be missing after reset, adjust for replay */
3933 ice_for_each_traffic_class(tc)
3934 if (ice_is_tc_ena(*tc_bitmap, tc) &&
3935 (ice_sched_get_tc_node(pi, tc)))
3936 set_bit(tc, ena_tc_bitmap);
3937 }
3938
3939 /**
3940 * ice_sched_replay_agg - recreate aggregator node(s)
3941 * @hw: pointer to the HW struct
3942 *
3943 * This function recreates aggregator type nodes which were not replayed
3944 * earlier. It also replays aggregator BW information. These aggregator nodes
3945 * are not associated with VSI type nodes yet.
3946 */
3947 void ice_sched_replay_agg(struct ice_hw *hw)
3948 {
3949 struct ice_port_info *pi = hw->port_info;
3950 struct ice_sched_agg_info *agg_info;
3951
3952 mutex_lock(&pi->sched_lock);
3953 list_for_each_entry(agg_info, &hw->agg_list, list_entry)
3954 /* replay aggregator (re-create aggregator node) */
3955 if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
3956 ICE_MAX_TRAFFIC_CLASS)) {
3957 DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
3958 enum ice_status status;
3959
3960 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
3961 ice_sched_get_ena_tc_bitmap(pi,
3962 agg_info->replay_tc_bitmap,
3963 replay_bitmap);
3964 status = ice_sched_cfg_agg(hw->port_info,
3965 agg_info->agg_id,
3966 ICE_AGG_TYPE_AGG,
3967 replay_bitmap);
3968 if (status) {
3969 dev_info(ice_hw_to_dev(hw),
3970 "Replay agg id[%d] failed\n",
3971 agg_info->agg_id);
3972 /* Move on to next one */
3973 continue;
3974 }
3975 }
3976 mutex_unlock(&pi->sched_lock);
3977 }
3978
3979 /**
3980 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
3981 * @hw: pointer to the HW struct
3982 *
3983 * This function initializes the aggregator(s) TC bitmap to zero. This is a
3984 * required pre-init step for replaying aggregators.
3985 */
3986 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
3987 {
3988 struct ice_port_info *pi = hw->port_info;
3989 struct ice_sched_agg_info *agg_info;
3990
3991 mutex_lock(&pi->sched_lock);
3992 list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
3993 struct ice_sched_agg_vsi_info *agg_vsi_info;
3994
3995 agg_info->tc_bitmap[0] = 0;
3996 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list,
3997 list_entry)
3998 agg_vsi_info->tc_bitmap[0] = 0;
3999 }
4000 mutex_unlock(&pi->sched_lock);
4001 }
4002
4003 /**
4004 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
4005 * @hw: pointer to the HW struct
4006 * @vsi_handle: software VSI handle
4007 *
4008 * This function replays aggregator node, VSI to aggregator type nodes, and
4009 * their node bandwidth information. This function needs to be called with
4010 * scheduler lock held.
4011 */
4012 static enum ice_status
4013 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
4014 {
4015 DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
4016 struct ice_sched_agg_vsi_info *agg_vsi_info;
4017 struct ice_port_info *pi = hw->port_info;
4018 struct ice_sched_agg_info *agg_info;
4019 enum ice_status status;
4020
4021 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
4022 if (!ice_is_vsi_valid(hw, vsi_handle))
4023 return ICE_ERR_PARAM;
4024 agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
4025 if (!agg_info)
4026 return 0; /* Not present in list - default Agg case */
4027 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
4028 if (!agg_vsi_info)
4029 return 0; /* Not present in list - default Agg case */
4030 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
4031 replay_bitmap);
4032 /* Replay aggregator node associated to vsi_handle */
4033 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
4034 ICE_AGG_TYPE_AGG, replay_bitmap);
4035 if (status)
4036 return status;
4037
4038 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
4039 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
4040 replay_bitmap);
4041 /* Move this VSI (vsi_handle) to above aggregator */
4042 return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
4043 replay_bitmap);
4044 }
4045
4046 /**
4047 * ice_replay_vsi_agg - replay VSI to aggregator node
4048 * @hw: pointer to the HW struct
4049 * @vsi_handle: software VSI handle
4050 *
4051 * This function replays association of VSI to aggregator type nodes, and
4052 * node bandwidth information.
4053 */
4054 enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
4055 {
4056 struct ice_port_info *pi = hw->port_info;
4057 enum ice_status status;
4058
4059 mutex_lock(&pi->sched_lock);
4060 status = ice_sched_replay_vsi_agg(hw, vsi_handle);
4061 mutex_unlock(&pi->sched_lock);
4062 return status;
4063 }
4064
4065 /**
4066 * ice_sched_replay_q_bw - replay queue type node BW
4067 * @pi: port information structure
4068 * @q_ctx: queue context structure
4069 *
4070 * This function replays queue type node bandwidth. This function needs to be
4071 * called with scheduler lock held.
4072 */
4073 enum ice_status
4074 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
4075 {
4076 struct ice_sched_node *q_node;
4077
4078 /* Following also checks the presence of node in tree */
4079 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
4080 if (!q_node)
4081 return ICE_ERR_PARAM;
4082 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
4083 }
4084