#include "ice_sched.h"
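
/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * to the SW DB.
 */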
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;
	u16 max_children;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* children is an array of pointers, so size each element accordingly */
	max_children = le16_to_cpu(hw->layer_info[0].max_children);
	root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
				      sizeof(*root->children), GFP_KERNEL);
	if (!root->children) {
		devm_kfree(ice_hw_to_dev(hw), root);
		return ICE_ERR_NO_MEMORY;
	}

	memcpy(&root->info, info, sizeof(*info));
	pi->root = root;
	return 0;
}
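
/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */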
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}
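
/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node to the SW DB.
 */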
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	struct ice_hw *hw;
	u16 max_children;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     le32_to_cpu(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Parent Node not found for parent_teid=0x%x\n",
			  le32_to_cpu(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return ICE_ERR_NO_MEMORY;
	max_children = le16_to_cpu(hw->layer_info[layer].max_children);
	if (max_children) {
		/* children is an array of pointers, size elements accordingly */
		node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
					      sizeof(*node->children),
					      GFP_KERNEL);
		if (!node->children) {
			devm_kfree(ice_hw_to_dev(hw), node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	memcpy(&node->info, info, sizeof(*info));
	return 0;
}
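
/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the hw struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */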
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_move_delete_elem *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.add_move_delete_elem;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_grps_req = cpu_to_le16(grps_req);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && grps_del)
		*grps_del = le16_to_cpu(cmd->num_grps_updated);

	return status;
}
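
/**
 * ice_sched_remove_elems - remove nodes from hw
 * @hw: pointer to the hw struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function removes nodes from hw
 */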
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;
	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);
	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
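
/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @hw: pointer to the hw struct
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */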
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
			 u8 layer)
{
	u8 i;

	if (layer < hw->sw_entry_point_layer)
		return NULL;
	for (i = 0; i < parent->num_children; i++) {
		struct ice_sched_node *node = parent->children[i];

		if (node) {
			if (node->tx_sched_layer == layer)
				return node;
			/* this recursion is intentional; its depth is
			 * bounded by the number of tree layers
			 */
			return ice_sched_get_first_node(hw, node, layer);
		}
	}
	return NULL;
}
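
/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */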
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}
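
/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */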
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node.
	 * The parent array is updated below by removing the node from
	 * this array.
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
		if (status)
			ice_debug(hw, ICE_DBG_SCHED,
				  "remove element failed %d\n", status);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p, *tc_node;

		/* update the parent's child list */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* search for previous sibling that points to this node and
		 * remove the reference
		 */
		tc_node = ice_sched_get_tc_node(pi, node->tc_num);
		if (!tc_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Invalid TC number %d\n", node->tc_num);
			goto err_exit;
		}
		p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}
	}
err_exit:
	/* the children array may not have been allocated */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}
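
/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x0400)
 */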
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}
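
/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the hw struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */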
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_move_delete_elem *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.add_move_delete_elem;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_grps_req = cpu_to_le16(grps_req);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && grps_added)
		*grps_added = le16_to_cpu(cmd->num_grps_updated);

	return status;
}
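
/**
 * ice_suspend_resume_elems - suspend or resume scheduler elements
 * @hw: pointer to the hw struct
 * @elems_req: number of elements to suspend/resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended/resumed
 * @cd: pointer to command details structure or NULL
 * @cmd_code: command code for suspend or resume
 *
 * suspend/resume scheduler elements
 */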
static enum ice_status
ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd,
			 enum ice_adminq_opc cmd_code)
{
	struct ice_aqc_get_cfg_elem *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_update_elem;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_ret)
		*elems_ret = le16_to_cpu(cmd->num_elem_resp);
	return status;
}
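
/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the hw struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */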
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
			   struct ice_aqc_suspend_resume_elem *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
					cd, ice_aqc_opc_suspend_sched_elems);
}
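
/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the hw struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * Resume scheduling elements (0x040A)
 */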
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
			  struct ice_aqc_suspend_resume_elem *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
					cd, ice_aqc_opc_resume_sched_elems);
}
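
/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the hw struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */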
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
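
/**
 * ice_sched_suspend_resume_elems - suspend or resume hw nodes
 * @hw: pointer to the hw struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes hw nodes
 */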
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	struct ice_aqc_suspend_resume_elem *buf;
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
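
/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */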
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_vsi_info *vsi_elem;
	struct ice_sched_agg_info *atmp;
	struct ice_sched_vsi_info *tmp;
	struct ice_hw *hw;

	if (!pi)
		return;

	hw = pi->hw;

	/* remove the VSI entries belonging to each aggregator */
	list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
	}

	/* remove the VSI list */
	list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
				 list_entry) {
		list_del(&vsi_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_elem);
	}

	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}
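
/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */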
static void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}
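
/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the hw struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */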
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw || !hw->port_info)
		return;

	if (hw->layer_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
		/* clear the dangling pointer so that a later
		 * ice_sched_query_res_alloc() re-allocates layer_info
		 * instead of reusing freed memory
		 */
		hw->layer_info = NULL;
	}

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}
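
/**
 * ice_sched_create_vsi_info_entry - create an empty new VSI entry
 * @pi: port information structure
 * @vsi_id: VSI Id
 *
 * This function creates a new VSI entry and adds it to list
 */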
static struct ice_sched_vsi_info *
ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
{
	struct ice_sched_vsi_info *vsi_elem;

	if (!pi)
		return NULL;

	vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
				GFP_KERNEL);
	if (!vsi_elem)
		return NULL;

	list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
	vsi_elem->vsi_id = vsi_id;
	return vsi_elem;
}
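
/**
 * ice_sched_add_elems - add nodes to hw and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the teid of first node
 *
 * This function adds nodes to hw as well as to SW DB for a given layer
 */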
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			ICE_SCHED_DFLT_RL_PROF_ID;
		buf->generic[i].data.eir_bw.bw_profile_idx =
			ICE_SCHED_DFLT_RL_PROF_ID;
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
		devm_kfree(ice_hw_to_dev(hw), buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);

		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to the previous node's sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(hw, tc_node, layer);

		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		if (i == 0)
			*first_node_teid = teid;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
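
/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node teid
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */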
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes =
		le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);

	/* current number of children + required nodes exceed max children? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize parent's space first */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node teid memory if the first node
		 * was added already above. Instead send some temp memory for
		 * all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling */
		parent = parent->sibling;
		/* this recursion is intentional and bounded: every call
		 * either fills a parent completely or runs out of siblings
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}
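
/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the hw struct
 *
 * This function returns the current queue group layer number
 */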
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* queue group layer is a fixed offset from the total number of layers */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}
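
/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the hw struct
 *
 * This function returns the current VSI layer number
 */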
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Calculate the VSI layer from the total number of layers; trees
	 * that are too shallow fall back to the SW entry point layer.
	 */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}
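
/**
 * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer
 * @pi: pointer to the port info struct
 * @layer: layer number
 *
 * This function calculates the number of nodes present in the scheduler tree
 * including all the branches for a given layer
 */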
static u16
ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)
{
	struct ice_hw *hw;
	u16 num_nodes = 0;
	u8 i;

	if (!pi)
		return num_nodes;

	hw = pi->hw;

	/* Calculate the number of nodes for all TCs */
	for (i = 0; i < pi->root->num_children; i++) {
		struct ice_sched_node *tc_node, *node;

		tc_node = pi->root->children[i];

		/* Get the first node */
		node = ice_sched_get_first_node(hw, tc_node, layer);
		if (!node)
			continue;

		/* count the siblings */
		while (node) {
			num_nodes++;
			node = node->sibling;
		}
	}

	return num_nodes;
}
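
/**
 * ice_sched_validate_for_max_nodes - validate nodes per layer limit
 * @pi: port information structure
 * @new_num_nodes_per_layer: pointer to the new number of nodes array
 *
 * This function checks the allowed number of nodes per layer against the
 * nodes that are already present plus the new nodes being requested
 */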
static enum ice_status
ice_sched_validate_for_max_nodes(struct ice_port_info *pi,
				 u16 *new_num_nodes_per_layer)
{
	struct ice_hw *hw = pi->hw;
	u8 i, qg_layer;
	u16 num_nodes;

	qg_layer = ice_sched_get_qgrp_layer(hw);

	/* walk through all the layers from SW entry point to qgroup layer */
	for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {
		num_nodes = ice_sched_get_num_nodes_per_layer(pi, i);
		if (num_nodes + new_num_nodes_per_layer[i] >
		    le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "max nodes reached for layer = %d\n", i);
			return ICE_ERR_CFG;
		}
	}
	return 0;
}
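
/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */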
static void
ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}
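
/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */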
static void
ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}
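
/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB.
 */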
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,
			   sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf,
				      sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches must be between 1 and ICE_TXSCHED_MAX_BRANCHES */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems must be between 1 and ICE_AQC_TOPO_MAX_LEVEL_NUM */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* Cache the last node TEID; if the last element is a leaf node,
	 * use the node just above it instead.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the nodes */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	INIT_LIST_HEAD(&pi->agg_list);
	INIT_LIST_HEAD(&pi->vsi_info_list);

err_init_port:
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
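
/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */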
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = 0;

	if (hw->layer_info)
		return status;

	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		le16_to_cpu(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
				      (hw->num_tx_sched_layers *
				       sizeof(*hw->layer_info)),
				      GFP_KERNEL);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
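
/**
 * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id
 * @pi: port information structure
 * @vsi_id: vsi id
 *
 * This function retrieves the vsi list entry for the given vsi id
 */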
static struct ice_sched_vsi_info *
ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
{
	struct ice_sched_vsi_info *list_elem;

	if (!pi)
		return NULL;

	list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
		if (list_elem->vsi_id == vsi_id)
			return list_elem;
	return NULL;
}
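
/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the hw struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */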
static bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional; its depth is bounded
		 * by the number of tree layers
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}
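
/**
 * ice_sched_get_free_qparent - Get a free lan or rdma q group node
 * @pi: port information structure
 * @vsi_id: vsi id
 * @tc: branch number
 * @owner: lan or rdma
 *
 * This function retrieves a free lan or rdma q group node
 */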
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node = NULL;
	struct ice_sched_vsi_info *list_elem;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);

	list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
	if (!list_elem)
		goto lan_q_exit;

	vsi_node = list_elem->vsi_node[tc];

	/* validate invalid VSI id */
	if (!vsi_node)
		goto lan_q_exit;

	/* get the first q group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

lan_q_exit:
	return qgrp_node;
}
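
/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI id
 * @hw: pointer to the hw struct
 * @tc_node: pointer to the TC node
 * @vsi_id: VSI id
 *
 * This function retrieves a VSI node for a given VSI id from a given
 * TC branch
 */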
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
		       u16 vsi_id)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(hw);
	node = ice_sched_get_first_node(hw, tc_node, vsi_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->vsi_id == vsi_id)
			return node;
		node = node->sibling;
	}

	return node;
}
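
/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the hw struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */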
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from q group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		u16 max_children = le16_to_cpu(hw->layer_info[i].max_children);

		/* round to the next integer if there is a remainder */
		num = DIV_ROUND_UP(num, max_children);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}
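
/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_id: VSI id
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (lan or rdma)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * lan and rdma separately.
 */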
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	status = ice_sched_validate_for_max_nodes(pi, num_nodes);
	if (status)
		return status;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return ICE_ERR_CFG;
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return 0;
}
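
/**
 * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
 * @pi: port information structure
 * @vsi_node: pointer to the VSI node
 * @num_nodes: pointer to the num nodes that needs to be removed per layer
 * @owner: node owner (lan or rdma)
 *
 * This function removes the VSI child nodes from the tree. It gets called for
 * lan and rdma separately.
 */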
static void
ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
			     struct ice_sched_node *vsi_node, u16 *num_nodes,
			     u8 owner)
{
	struct ice_sched_node *node, *next;
	u8 i, qgl, vsil;
	u16 num;

	qgl = ice_sched_get_qgrp_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	for (i = qgl; i > vsil; i--) {
		num = num_nodes[i];
		node = ice_sched_get_first_node(pi->hw, vsi_node, i);
		while (node && num) {
			next = node->sibling;
			if (node->owner == owner && !node->num_children) {
				ice_free_sched_node(pi, node);
				num--;
			}
			node = next;
		}
	}
}
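
/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @hw: pointer to the hw struct
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */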
static void
ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u16 max_child;
	u8 i, vsil;

	vsil = ice_sched_get_vsi_layer(hw);
	for (i = vsil; i >= hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(hw, tc_node, i);
			max_child = le16_to_cpu(hw->layer_info[i].max_children);

			/* scan all the siblings */
			while (node) {
				if (node->num_children < max_child)
					break;
				node = node->sibling;
			}

			/* all the nodes are full, allocate a new one */
			if (!node)
				num_nodes[i]++;
		}
}
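
/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */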
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	status = ice_sched_validate_for_max_nodes(pi, num_nodes);
	if (status)
		return status;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* update the VSI id for the final node */
		if (i == vsil)
			parent->vsi_id = vsi_id;
	}
	return 0;
}
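
/**
 * ice_sched_add_vsi_to_topo - add a new VSI into tree
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */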
static enum ice_status
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
{
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);

	/* add vsi supported nodes to tc subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
}
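
/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues
 */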
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
				 u16 new_numqs, u8 owner)
{
	u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_sched_vsi_info *vsi;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;
	u8 i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		return ICE_ERR_CFG;

	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi->max_lanq[tc];
	else
		return ICE_ERR_PARAM;

	/* num queues are not changed */
	if (prev_numqs == new_numqs)
		return status;

	/* calculate number of nodes based on prev/new number of qs */
	if (prev_numqs)
		ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);

	if (prev_numqs > new_numqs) {
		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
			new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];

		ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
					     owner);
	} else {
		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
			new_num_nodes[i] -= prev_num_nodes[i];

		status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
						       new_num_nodes, owner);
		if (status)
			return status;
	}

	vsi->max_lanq[tc] = new_numqs;

	return status;
}
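
/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: lan or rdma
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is in an active state.
 */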
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_sched_vsi_info *vsi;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		return ICE_ERR_NO_MEMORY;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);

	/* suspend the VSI if tc is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = le32_to_cpu(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
		if (status)
			return status;
		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
		if (!vsi_node)
			return ICE_ERR_CFG;
		vsi->vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = le32_to_cpu(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}
1659