1
2
3
4
5#include <stddef.h>
6#include <string.h>
7#include <stdbool.h>
8
9#include <rte_alarm.h>
10#include <rte_malloc.h>
11#include <rte_errno.h>
12#include <rte_cycles.h>
13#include <rte_compat.h>
14
15#include "eth_bond_private.h"
16
17static void bond_mode_8023ad_ext_periodic_cb(void *arg);
18#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
19
20#define MODE4_DEBUG(fmt, ...) \
21 rte_log(RTE_LOG_DEBUG, bond_logtype, \
22 "%6u [Port %u: %s] " fmt, \
23 bond_dbg_get_time_diff_ms(), slave_id, \
24 __func__, ##__VA_ARGS__)
25
26static uint64_t start_time;
27
/*
 * Milliseconds elapsed since the first call; the first invocation
 * latches the current TSC as time zero. Debug-build helper used by the
 * MODE4_DEBUG() timestamp prefix.
 *
 * NOTE(review): (now - start_time) * 1000 can overflow uint64_t after a
 * very long uptime - acceptable for debug logging, verify if reused.
 */
static unsigned
bond_dbg_get_time_diff_ms(void)
{
	uint64_t now;

	now = rte_rdtsc();
	/* First call defines the reference point. */
	if (start_time == 0)
		start_time = now;

	return ((now - start_time) * 1000) / rte_get_tsc_hz();
}
39
/*
 * Debug helper: dump every field of a LACPDU. The actor/partner state
 * bytes are rendered as space-separated symbolic flag names and the
 * system addresses as colon-separated hex MAC strings.
 */
static void
bond_print_lacp(struct lacpdu *l)
{
	char a_address[18];	/* "XX:XX:XX:XX:XX:XX" + NUL */
	char p_address[18];
	char a_state[256] = { 0 };
	char p_state[256] = { 0 };

	/* Flag names, ordered by bit position (bit 0 = ACT .. bit 7 = EXP). */
	static const char * const state_labels[] = {
		"ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
	};

	int a_len = 0;
	int p_len = 0;
	uint8_t i;
	uint8_t *addr;

	addr = l->actor.port_params.system.addr_bytes;
	snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
		addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	addr = l->partner.port_params.system.addr_bytes;
	snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
		addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/* Append the label of every set bit in each state byte. */
	for (i = 0; i < 8; i++) {
		if ((l->actor.state >> i) & 1) {
			a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
				state_labels[i]);
		}

		if ((l->partner.state >> i) & 1) {
			p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
				state_labels[i]);
		}
	}

	/* Trim the trailing separator, if any label was written. */
	if (a_len && a_state[a_len-1] == ' ')
		a_state[a_len-1] = '\0';

	if (p_len && p_state[p_len-1] == ' ')
		p_state[p_len-1] = '\0';

	RTE_BOND_LOG(DEBUG,
		"LACP: {\n"
		"  subtype= %02X\n"
		"  ver_num=%02X\n"
		"  actor={ tlv=%02X, len=%02X\n"
		"    pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
		"       state={ %s }\n"
		"  }\n"
		"  partner={ tlv=%02X, len=%02X\n"
		"    pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
		"       state={ %s }\n"
		"  }\n"
		"  collector={info=%02X, length=%02X, max_delay=%04X\n, "
		"type_term=%02X, terminator_length = %02X }",
		l->subtype,
		l->version_number,
		l->actor.tlv_type_info,
		l->actor.info_length,
		l->actor.port_params.system_priority,
		a_address,
		l->actor.port_params.key,
		l->actor.port_params.port_priority,
		l->actor.port_params.port_number,
		a_state,
		l->partner.tlv_type_info,
		l->partner.info_length,
		l->partner.port_params.system_priority,
		p_address,
		l->partner.port_params.key,
		l->partner.port_params.port_priority,
		l->partner.port_params.port_number,
		p_state,
		l->tlv_type_collector_info,
		l->collector_info_length,
		l->collector_max_delay,
		l->tlv_type_terminator,
		l->terminator_length);

}
122
123#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
124#else
125#define BOND_PRINT_LACP(lacpdu) do { } while (0)
126#define MODE4_DEBUG(fmt, ...) do { } while (0)
127#endif
128
/* Destination MAC of every transmitted LACPDU: the IEEE 802.3 Slow
 * Protocols multicast address 01:80:C2:00:00:02. */
static const struct rte_ether_addr lacp_mac_addr = {
	.addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
};

/* Per-slave 802.3ad state, indexed by DPDK ethdev port id. */
struct port bond_mode_8023ad_ports[RTE_MAX_ETHPORTS];
134
/*
 * Stop a timer. The value zero is reserved to mean "not running"
 * (see timer_is_stopped()).
 */
static void
timer_cancel(uint64_t *timer)
{
	timer[0] = 0;
}
140
/*
 * Arm a timer to expire @timeout TSC cycles from now.
 */
static void
timer_set(uint64_t *timer, uint64_t timeout)
{
	uint64_t now = rte_rdtsc();

	*timer = now + timeout;
}
146
147
/*
 * Make a timer expire immediately: timer_is_expired() compares with a
 * strict '<' against the current TSC, so storing "now" guarantees the
 * very next check reports expiry.
 */
static void
timer_force_expired(uint64_t *timer)
{
	uint64_t now = rte_rdtsc();

	*timer = now;
}
153
/* A timer holding the reserved value zero is stopped. */
static bool
timer_is_stopped(uint64_t *timer)
{
	return timer[0] == 0;
}
159
/*
 * True once the current TSC has passed the timer's deadline.
 * Note: a stopped timer (value 0) also reads as expired.
 */
static bool
timer_is_expired(uint64_t *timer)
{
	uint64_t now = rte_rdtsc();

	return now > *timer;
}
165
166
/* Running = armed (non-zero) and not yet past its deadline. */
static bool
timer_is_running(uint64_t *timer)
{
	if (timer_is_stopped(timer))
		return false;

	return !timer_is_expired(timer);
}
172
173static void
174set_warning_flags(struct port *port, uint16_t flags)
175{
176 int retval;
177 uint16_t old;
178 uint16_t new_flag = 0;
179
180 do {
181 old = port->warnings_to_show;
182 new_flag = old | flags;
183 retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
184 } while (unlikely(retval == 0));
185}
186
187static void
188show_warnings(uint16_t slave_id)
189{
190 struct port *port = &bond_mode_8023ad_ports[slave_id];
191 uint8_t warnings;
192
193 do {
194 warnings = port->warnings_to_show;
195 } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);
196
197 if (!warnings)
198 return;
199
200 if (!timer_is_expired(&port->warning_timer))
201 return;
202
203
204 timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
205 rte_get_tsc_hz() / 1000);
206
207 if (warnings & WRN_RX_QUEUE_FULL) {
208 RTE_BOND_LOG(DEBUG,
209 "Slave %u: failed to enqueue LACP packet into RX ring.\n"
210 "Receive and transmit functions must be invoked on bonded"
211 "interface at least 10 times per second or LACP will notwork correctly",
212 slave_id);
213 }
214
215 if (warnings & WRN_TX_QUEUE_FULL) {
216 RTE_BOND_LOG(DEBUG,
217 "Slave %u: failed to enqueue LACP packet into TX ring.\n"
218 "Receive and transmit functions must be invoked on bonded"
219 "interface at least 10 times per second or LACP will not work correctly",
220 slave_id);
221 }
222
223 if (warnings & WRN_RX_MARKER_TO_FAST)
224 RTE_BOND_LOG(INFO, "Slave %u: marker to early - ignoring.",
225 slave_id);
226
227 if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
228 RTE_BOND_LOG(INFO,
229 "Slave %u: ignoring unknown slow protocol frame type",
230 slave_id);
231 }
232
233 if (warnings & WRN_UNKNOWN_MARKER_TYPE)
234 RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type",
235 slave_id);
236
237 if (warnings & WRN_NOT_LACP_CAPABLE)
238 MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
239}
240
/*
 * Load a "defaulted" partner view (802.1AX recordDefault): assume the
 * partner is an active LACP speaker and mark the actor DEFAULTED.
 *
 * NOTE(review): only partner_state is reset here; callers needing the
 * partner_admin parameters copy them into port->partner themselves
 * (see rx_machine()'s expiry path).
 */
static void
record_default(struct port *port)
{
	/* Only the LACP_ACTIVE bit is assumed for a defaulted partner. */
	port->partner_state = STATE_LACP_ACTIVE;
	ACTOR_STATE_SET(port, DEFAULTED);
}
250
251
252
253
254
255
256
257
258
/*
 * Receive state machine (IEEE 802.1AX 5.4.12).
 *
 * Processes one received LACPDU (@lacp) for @slave_id, or runs the
 * timer-driven transitions when @lacp is NULL. Updates the recorded
 * partner information, the partner SYNCHRONIZATION bit, the selection
 * status and the current_while timeout.
 */
static void
rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
		struct lacpdu *lacp)
{
	struct port *agg, *port = &bond_mode_8023ad_ports[slave_id];
	uint64_t timeout;

	if (SM_FLAG(port, BEGIN)) {
		/* Initialization: drop selection, load defaulted partner. */
		MODE4_DEBUG("-> INITIALIZE\n");
		SM_FLAG_CLR(port, MOVED);
		port->selected = UNSELECTED;

		record_default(port);

		ACTOR_STATE_CLR(port, EXPIRED);
		timer_cancel(&port->current_while_timer);

		/* A defaulted partner is considered out of sync. */
		PARTNER_STATE_CLR(port, SYNCHRONIZATION);

		/* LACP-disabled ports cannot take part in aggregation. */
		if (!SM_FLAG(port, LACP_ENABLED))
			PARTNER_STATE_CLR(port, AGGREGATION);
		else
			PARTNER_STATE_SET(port, AGGREGATION);
	}

	if (!SM_FLAG(port, LACP_ENABLED)) {
		/* PORT_DISABLED: reset once, then idle. A stopped
		 * current_while timer marks "already reset". */
		if (!timer_is_stopped(&port->current_while_timer)) {
			port->selected = UNSELECTED;
			record_default(port);
			PARTNER_STATE_CLR(port, AGGREGATION);
			ACTOR_STATE_CLR(port, EXPIRED);
			timer_cancel(&port->current_while_timer);
		}
		return;
	}

	if (lacp) {
		MODE4_DEBUG("LACP -> CURRENT\n");
		BOND_PRINT_LACP(lacp);

		/* If the partner's identity or aggregation capability
		 * changed, this port must be re-selected. */
		if (!ACTOR_STATE(port, DEFAULTED) &&
			(ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
			|| memcmp(&port->partner, &lacp->actor.port_params,
				sizeof(port->partner)) != 0)) {
			MODE4_DEBUG("selected <- UNSELECTED\n");
			port->selected = UNSELECTED;
		}

		/* Record the partner info carried in the frame's actor TLV. */
		memcpy(&port->partner, &lacp->actor.port_params,
			sizeof(struct port_params));
		port->partner_state = lacp->actor.state;

		/* Live information received: no longer using defaults. */
		ACTOR_STATE_CLR(port, DEFAULTED);

		/* Does the partner's view of us match our actual actor info?
		 * The system address is taken from our aggregator. */
		agg = &bond_mode_8023ad_ports[port->aggregator_port_id];
		bool match = port->actor.system_priority ==
			lacp->partner.port_params.system_priority &&
			rte_is_same_ether_addr(&agg->actor.system,
			&lacp->partner.port_params.system) &&
			port->actor.port_priority ==
			lacp->partner.port_params.port_priority &&
			port->actor.port_number ==
			lacp->partner.port_params.port_number;

		/* Transmit an update if the partner has our identity or any
		 * of these state bits wrong. */
		uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
			STATE_SYNCHRONIZATION | STATE_AGGREGATION;

		if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
				match == false) {
			SM_FLAG_SET(port, NTT);
		}

		/* Partner counts as in sync when it sees us correctly, or
		 * when it is individual (non-aggregatable) while we are not. */
		if (match == true && ACTOR_STATE(port, AGGREGATION) ==
				PARTNER_STATE(port, AGGREGATION))
			PARTNER_STATE_SET(port, SYNCHRONIZATION);
		else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
				AGGREGATION))
			PARTNER_STATE_SET(port, SYNCHRONIZATION);
		else
			PARTNER_STATE_CLR(port, SYNCHRONIZATION);

		if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
			timeout = internals->mode4.short_timeout;
		else
			timeout = internals->mode4.long_timeout;

		/* Restart current_while: recorded partner info stays valid
		 * until this expires. */
		timer_set(&port->current_while_timer, timeout);
		ACTOR_STATE_CLR(port, EXPIRED);
		SM_FLAG_CLR(port, EXPIRED);
		return;
	}

	/* No frame this round: handle current_while expiry in two stages. */
	if (!timer_is_running(&port->current_while_timer)) {
		if (SM_FLAG(port, EXPIRED)) {
			/* Second expiry: give up on the partner and fall back
			 * to the administrative defaults. */
			port->selected = UNSELECTED;
			memcpy(&port->partner, &port->partner_admin,
				sizeof(struct port_params));
			record_default(port);
			ACTOR_STATE_CLR(port, EXPIRED);
			timer_cancel(&port->current_while_timer);
		} else {
			/* First expiry: enter EXPIRED, request fast updates
			 * from the partner and wait one short timeout. */
			SM_FLAG_SET(port, EXPIRED);
			ACTOR_STATE_SET(port, EXPIRED);
			PARTNER_STATE_CLR(port, SYNCHRONIZATION);
			PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
			timer_set(&port->current_while_timer,
				internals->mode4.short_timeout);
		}
	}
}
383
384
385
386
387
388
389
390
391
392static void
393periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
394{
395 struct port *port = &bond_mode_8023ad_ports[slave_id];
396
397 uint64_t timeout;
398 uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
399 PARTNER_STATE(port, LACP_ACTIVE);
400
401 uint8_t is_partner_fast, was_partner_fast;
402
403 if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
404 timer_cancel(&port->periodic_timer);
405 timer_force_expired(&port->tx_machine_timer);
406 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
407
408 MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
409 SM_FLAG(port, BEGIN) ? "begind " : "",
410 SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
411 active ? "LACP active " : "LACP pasive ");
412 return;
413 }
414
415 is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
416 was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);
417
418
419
420 if (!timer_is_stopped(&port->periodic_timer)) {
421 if (timer_is_expired(&port->periodic_timer)) {
422 SM_FLAG_SET(port, NTT);
423 } else if (is_partner_fast != was_partner_fast) {
424
425
426
427
428 if (is_partner_fast)
429 SM_FLAG_SET(port, NTT);
430 } else
431 return;
432 }
433
434
435 if (is_partner_fast) {
436 timeout = internals->mode4.fast_periodic_timeout;
437 SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
438 } else {
439 timeout = internals->mode4.slow_periodic_timeout;
440 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
441 }
442
443 timer_set(&port->periodic_timer, timeout);
444}
445
446
447
448
449
450
451
452
453
/*
 * Mux state machine (IEEE 802.1AX 5.4.15): DETACHED -> WAITING ->
 * ATTACHED -> COLLECTING -> DISTRIBUTING, driven by the selection
 * status and the partner's SYNCHRONIZATION/COLLECTING bits.
 */
static void
mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
	struct port *port = &bond_mode_8023ad_ports[slave_id];

	/* Bits that must be clear while detached from the aggregator. */
	const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
		STATE_COLLECTING;

	/* Enter DETACHED on (re)init, when deselected, or when a STANDBY
	 * port still carries attached-state bits. */
	if (SM_FLAG(port, BEGIN) ||
			port->selected == UNSELECTED || (port->selected == STANDBY &&
				(port->actor_state & state_mask) != 0)) {
		/* Detach: stop syncing, collecting and distributing. */
		port->actor_state &= ~state_mask;

		/* Announce the change unless we were already detached. */
		if (SM_FLAG(port, BEGIN) ||
				!timer_is_stopped(&port->wait_while_timer)) {
			SM_FLAG_SET(port, NTT);
			MODE4_DEBUG("-> DETACHED\n");
		}
		timer_cancel(&port->wait_while_timer);
	}

	if (timer_is_stopped(&port->wait_while_timer)) {
		/* DETACHED: start the aggregate-wait delay once selected. */
		if (port->selected == SELECTED || port->selected == STANDBY) {
			timer_set(&port->wait_while_timer,
				internals->mode4.aggregate_wait_timeout);

			MODE4_DEBUG("DETACHED -> WAITING\n");
		}
		/* Still DETACHED (or just entered WAITING); done for now. */
		return;
	}

	/* WAITING: hold until the wait_while delay elapses. */
	if (!timer_is_expired(&port->wait_while_timer))
		return;

	if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
		!PARTNER_STATE(port, SYNCHRONIZATION)) {
		/* Partner dropped out of sync: stop moving traffic and fall
		 * back to (re-entering) ATTACHED. */
		ACTOR_STATE_CLR(port, DISTRIBUTING);
		ACTOR_STATE_CLR(port, COLLECTING);
		/* Clearing our own sync bit forces the ATTACHED entry below. */
		ACTOR_STATE_CLR(port, SYNCHRONIZATION);
		MODE4_DEBUG("Out of sync -> ATTACHED\n");
	}

	if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
		/* Entering ATTACHED: traffic bits must already be clear. */
		RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
			STATE_DISTRIBUTING)) == 0);

		ACTOR_STATE_SET(port, SYNCHRONIZATION);
		SM_FLAG_SET(port, NTT);
		MODE4_DEBUG("ATTACHED Entered\n");
	} else if (!ACTOR_STATE(port, COLLECTING)) {
		/* ATTACHED: start collecting once the partner is in sync. */
		if (PARTNER_STATE(port, SYNCHRONIZATION)) {
			MODE4_DEBUG("ATTACHED -> COLLECTING\n");
			ACTOR_STATE_SET(port, COLLECTING);
			SM_FLAG_SET(port, NTT);
		}
	} else if (ACTOR_STATE(port, COLLECTING)) {
		/* COLLECTING or DISTRIBUTING: follow the partner's
		 * COLLECTING bit up and down. */
		if (!ACTOR_STATE(port, DISTRIBUTING)) {
			if (PARTNER_STATE(port, COLLECTING)) {
				ACTOR_STATE_SET(port, DISTRIBUTING);
				SM_FLAG_SET(port, NTT);
				MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
				RTE_BOND_LOG(INFO,
					"Bond %u: slave id %u distributing started.",
					internals->port_id, slave_id);
			}
		} else {
			if (!PARTNER_STATE(port, COLLECTING)) {
				ACTOR_STATE_CLR(port, DISTRIBUTING);
				SM_FLAG_SET(port, NTT);
				MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
				RTE_BOND_LOG(INFO,
					"Bond %u: slave id %u distributing stopped.",
					internals->port_id, slave_id);
			}
		}
	}
}
548
549
550
551
552
553
554
555
556
/*
 * Transmit state machine: when NTT is set and the per-port TX rate
 * limiter allows it, build a LACPDU describing our actor/partner view
 * and hand it either to the software tx_ring (drained by the bonding TX
 * burst function) or directly to the slave's dedicated TX queue.
 */
static void
tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
	struct port *agg, *port = &bond_mode_8023ad_ports[slave_id];

	struct rte_mbuf *lacp_pkt = NULL;
	struct lacpdu_header *hdr;
	struct lacpdu *lacpdu;

	/* A stopped periodic timer (NO_PERIODIC state) cancels any pending
	 * transmission request. */
	if (timer_is_stopped(&port->periodic_timer))
		SM_FLAG_CLR(port, NTT);

	if (!SM_FLAG(port, NTT))
		return;

	/* Rate limit: at most one LACPDU per tx_period_timeout. */
	if (!timer_is_expired(&port->tx_machine_timer))
		return;

	lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
	if (lacp_pkt == NULL) {
		RTE_BOND_LOG(ERR, "Failed to allocate LACP packet from pool");
		return;
	}

	lacp_pkt->data_len = sizeof(*hdr);
	lacp_pkt->pkt_len = sizeof(*hdr);

	hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);

	/* Ethernet header: slow-protocols multicast, slave's own MAC. */
	rte_ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
	rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
	hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);

	lacpdu = &hdr->lacpdu;
	memset(lacpdu, 0, sizeof(*lacpdu));

	/* LACPDU header. */
	lacpdu->subtype = SLOW_SUBTYPE_LACP;
	lacpdu->version_number = 1;

	/* Actor TLV: our parameters, with the system address taken from
	 * the aggregator this port is attached to. */
	lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
	lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
	memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
			sizeof(port->actor));
	agg = &bond_mode_8023ad_ports[port->aggregator_port_id];
	rte_ether_addr_copy(&agg->actor.system,
			&hdr->lacpdu.actor.port_params.system);
	lacpdu->actor.state = port->actor_state;

	/* Partner TLV: echo back what we recorded from the partner. */
	lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
	lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
	memcpy(&lacpdu->partner.port_params, &port->partner,
			sizeof(struct port_params));
	lacpdu->partner.state = port->partner_state;

	/* Collector and terminator TLVs. */
	lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
	lacpdu->collector_info_length = 0x10;
	lacpdu->collector_max_delay = 0;

	lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
	lacpdu->terminator_length = 0;

	MODE4_DEBUG("Sending LACP frame\n");
	BOND_PRINT_LACP(lacpdu);

	if (internals->mode4.dedicated_queues.enabled == 0) {
		int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
		if (retval != 0) {
			/* Ring full: drop the frame and warn; NTT stays set
			 * so the send is retried next round. */
			rte_pktmbuf_free(lacp_pkt);
			set_warning_flags(port, WRN_TX_QUEUE_FULL);
			return;
		}
	} else {
		uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
				internals->mode4.dedicated_queues.tx_qid,
				&lacp_pkt, 1);
		if (pkts_sent != 1) {
			rte_pktmbuf_free(lacp_pkt);
			set_warning_flags(port, WRN_TX_QUEUE_FULL);
			return;
		}
	}

	/* Success: restart the rate limiter and clear the request. */
	timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
	SM_FLAG_CLR(port, NTT);
}
653
/*
 * Return the index of the largest element of @a (first occurrence wins
 * on ties). Returns (uint16_t)-1 when @n is not positive.
 */
static uint16_t
max_index(uint64_t *a, int n)
{
	int idx, best_idx = 0;
	uint64_t best;

	if (n <= 0)
		return -1;

	best = a[0];
	for (idx = 1; idx < n; ++idx) {
		if (a[idx] > best) {
			best = a[idx];
			best_idx = idx;
		}
	}

	return best_idx;
}
672
673
674
675
676
677
678
/*
 * Selection logic: pick the aggregator for @slave_id.
 *
 * Scans active slaves that act as their own aggregator, finds those
 * compatible with this port, then chooses per the configured policy:
 * AGG_COUNT (most member ports), AGG_BANDWIDTH (highest summed link
 * speed) or AGG_STABLE/default (first compatible aggregator). Finally
 * marks the port SELECTED so the mux machine can attach it.
 */
static void
selection_logic(struct bond_dev_private *internals, uint16_t slave_id)
{
	struct port *agg, *port;
	uint16_t slaves_count, new_agg_id, i, j = 0;
	uint16_t *slaves;
	uint64_t agg_bandwidth[RTE_MAX_ETHPORTS] = {0};
	uint64_t agg_count[RTE_MAX_ETHPORTS] = {0};
	uint16_t default_slave = 0;
	struct rte_eth_link link_info;
	uint16_t agg_new_idx = 0;
	int ret;

	slaves = internals->active_slaves;
	slaves_count = internals->active_slave_count;
	port = &bond_mode_8023ad_ports[slave_id];

	/* Tally per-aggregator port counts and bandwidth, and find
	 * aggregators compatible with this port. */
	for (i = 0; i < slaves_count; ++i) {
		agg = &bond_mode_8023ad_ports[slaves[i]];

		/* Skip ports that are not their own aggregator. */
		if (agg->aggregator_port_id != slaves[i])
			continue;

		ret = rte_eth_link_get_nowait(slaves[i], &link_info);
		if (ret < 0) {
			RTE_BOND_LOG(ERR,
				"Slave (port %u) link get failed: %s\n",
				slaves[i], rte_strerror(-ret));
			continue;
		}
		agg_count[i] += 1;
		agg_bandwidth[i] += link_info.link_speed;

		/* Compatible: same actor key, identical partner identity
		 * (priority/system/key), partner system known (non-zero)
		 * and the link full duplex. */
		if ((agg->actor.key == port->actor.key &&
			agg->partner.system_priority == port->partner.system_priority &&
			rte_is_same_ether_addr(&agg->partner.system,
					&port->partner.system) == 1
			&& (agg->partner.key == port->partner.key)) &&
			rte_is_zero_ether_addr(&port->partner.system) != 1 &&
			(agg->actor.key &
				rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {

			/* Remember the first compatible aggregator. */
			if (j == 0)
				default_slave = i;
			j++;
		}
	}

	switch (internals->mode4.agg_selection) {
	case AGG_COUNT:
		agg_new_idx = max_index(agg_count, slaves_count);
		new_agg_id = slaves[agg_new_idx];
		break;
	case AGG_BANDWIDTH:
		agg_new_idx = max_index(agg_bandwidth, slaves_count);
		new_agg_id = slaves[agg_new_idx];
		break;
	case AGG_STABLE:
		/* NOTE(review): when no compatible aggregator was found
		 * (default_slave == slaves_count is never set above; j == 0
		 * leaves default_slave at 0), this indexes slaves[] with
		 * slave_id, which is a port id rather than a position in the
		 * active-slave array - verify against upstream intent. */
		if (default_slave == slaves_count)
			new_agg_id = slaves[slave_id];
		else
			new_agg_id = slaves[default_slave];
		break;
	default:
		if (default_slave == slaves_count)
			new_agg_id = slaves[slave_id];
		else
			new_agg_id = slaves[default_slave];
		break;
	}

	if (new_agg_id != port->aggregator_port_id) {
		port->aggregator_port_id = new_agg_id;

		MODE4_DEBUG("-> SELECTED: ID=%3u\n"
			"\t%s aggregator ID=%3u\n",
			port->aggregator_port_id,
			port->aggregator_port_id == slave_id ?
				"aggregator not found, using default" : "aggregator found",
			port->aggregator_port_id);
	}

	port->selected = SELECTED;
}
766
767
768static uint16_t
769link_speed_key(uint16_t speed) {
770 uint16_t key_speed;
771
772 switch (speed) {
773 case ETH_SPEED_NUM_NONE:
774 key_speed = 0x00;
775 break;
776 case ETH_SPEED_NUM_10M:
777 key_speed = BOND_LINK_SPEED_KEY_10M;
778 break;
779 case ETH_SPEED_NUM_100M:
780 key_speed = BOND_LINK_SPEED_KEY_100M;
781 break;
782 case ETH_SPEED_NUM_1G:
783 key_speed = BOND_LINK_SPEED_KEY_1000M;
784 break;
785 case ETH_SPEED_NUM_10G:
786 key_speed = BOND_LINK_SPEED_KEY_10G;
787 break;
788 case ETH_SPEED_NUM_20G:
789 key_speed = BOND_LINK_SPEED_KEY_20G;
790 break;
791 case ETH_SPEED_NUM_40G:
792 key_speed = BOND_LINK_SPEED_KEY_40G;
793 break;
794 default:
795
796 key_speed = 0xFFFF;
797 }
798
799 return key_speed;
800}
801
802static void
803rx_machine_update(struct bond_dev_private *internals, uint16_t slave_id,
804 struct rte_mbuf *lacp_pkt) {
805 struct lacpdu_header *lacp;
806 struct lacpdu_actor_partner_params *partner;
807
808 if (lacp_pkt != NULL) {
809 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
810 RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
811
812 partner = &lacp->lacpdu.partner;
813 if (rte_is_zero_ether_addr(&partner->port_params.system) ||
814 rte_is_same_ether_addr(&partner->port_params.system,
815 &internals->mode4.mac_addr)) {
816
817
818
819 rx_machine(internals, slave_id, &lacp->lacpdu);
820 }
821 rte_pktmbuf_free(lacp_pkt);
822 } else
823 rx_machine(internals, slave_id, NULL);
824}
825
/*
 * Mode-4 periodic callback, re-armed every update_timeout_us via
 * rte_eal_alarm_set(). For each active slave it refreshes the actor key
 * from current link state, pulls at most one pending LACPDU and runs
 * all 802.3ad state machines.
 */
static void
bond_mode_8023ad_periodic_cb(void *arg)
{
	struct rte_eth_dev *bond_dev = arg;
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct port *port;
	struct rte_eth_link link_info;
	struct rte_ether_addr slave_addr;
	struct rte_mbuf *lacp_pkt = NULL;
	uint16_t slave_id;
	uint16_t i;

	/* Pass 1: derive each slave's actor key (speed/duplex) and system
	 * address; raise NTT when either changed. */
	for (i = 0; i < internals->active_slave_count; i++) {
		uint16_t key;
		int ret;

		slave_id = internals->active_slaves[i];
		ret = rte_eth_link_get_nowait(slave_id, &link_info);
		if (ret < 0) {
			RTE_BOND_LOG(ERR,
				"Slave (port %u) link get failed: %s\n",
				slave_id, rte_strerror(-ret));
		}

		if (ret >= 0 && link_info.link_status != 0) {
			key = link_speed_key(link_info.link_speed) << 1;
			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
				key |= BOND_LINK_FULL_DUPLEX_KEY;
		} else {
			/* Link down or state unknown: not aggregatable. */
			key = 0;
		}

		rte_eth_macaddr_get(slave_id, &slave_addr);
		port = &bond_mode_8023ad_ports[slave_id];

		key = rte_cpu_to_be_16(key);
		if (key != port->actor.key) {
			/* Half-duplex links cannot run LACP. */
			if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
				set_warning_flags(port, WRN_NOT_LACP_CAPABLE);

			port->actor.key = key;
			SM_FLAG_SET(port, NTT);
		}

		if (!rte_is_same_ether_addr(&port->actor.system, &slave_addr)) {
			rte_ether_addr_copy(&slave_addr, &port->actor.system);
			/* Aggregator address changes appear in LACPDUs. */
			if (port->aggregator_port_id == slave_id)
				SM_FLAG_SET(port, NTT);
		}
	}

	/* Pass 2: run the state machines for every LACP-capable slave. */
	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		port = &bond_mode_8023ad_ports[slave_id];

		if ((port->actor.key &
				rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
			/* Not full duplex: restart the machines and make sure
			 * no traffic flows over this slave. */
			SM_FLAG_SET(port, BEGIN);

			if (SM_FLAG(port, LACP_ENABLED)) {
				SM_FLAG_CLR(port, LACP_ENABLED);
				ACTOR_STATE_CLR(port, DISTRIBUTING);
				ACTOR_STATE_CLR(port, COLLECTING);
			}

			continue;
		}

		SM_FLAG_SET(port, LACP_ENABLED);

		if (internals->mode4.dedicated_queues.enabled == 0) {
			/* LACPDUs are delivered through the software rx_ring
			 * filled by the bonding RX burst function. */
			int retval = rte_ring_dequeue(port->rx_ring,
					(void **)&lacp_pkt);

			if (retval != 0)
				lacp_pkt = NULL;

			rx_machine_update(internals, slave_id, lacp_pkt);
		} else {
			/* Dedicated queue: poll the slave's slow-protocol RX
			 * queue directly. */
			uint16_t rx_count = rte_eth_rx_burst(slave_id,
					internals->mode4.dedicated_queues.rx_qid,
					&lacp_pkt, 1);

			if (rx_count == 1)
				bond_mode_8023ad_handle_slow_pkt(internals,
						slave_id, lacp_pkt);
			else
				rx_machine_update(internals, slave_id, NULL);
		}

		periodic_machine(internals, slave_id);
		mux_machine(internals, slave_id);
		tx_machine(internals, slave_id);
		selection_logic(internals, slave_id);

		SM_FLAG_CLR(port, BEGIN);
		show_warnings(slave_id);
	}

	/* Re-arm this callback. */
	rte_eal_alarm_set(internals->mode4.update_timeout_us,
			bond_mode_8023ad_periodic_cb, arg);
}
937
/*
 * Make @slave_id able to receive frames sent to the slow-protocols
 * multicast address: try allmulticast first, then fall back to
 * promiscuous mode. Records which mode was forced in forced_rx_flags so
 * bond_mode_8023ad_unregister_lacp_mac() can undo it.
 *
 * Returns 0 on success, -1 when neither mode could be enabled.
 */
static int
bond_mode_8023ad_register_lacp_mac(uint16_t slave_id)
{
	int ret;

	ret = rte_eth_allmulticast_enable(slave_id);
	if (ret != 0) {
		/* Log and fall through: the get() below decides the outcome. */
		RTE_BOND_LOG(ERR,
			"failed to enable allmulti mode for port %u: %s",
			slave_id, rte_strerror(-ret));
	}
	if (rte_eth_allmulticast_get(slave_id)) {
		RTE_BOND_LOG(DEBUG, "forced allmulti for port %u",
			     slave_id);
		bond_mode_8023ad_ports[slave_id].forced_rx_flags =
				BOND_8023AD_FORCED_ALLMULTI;
		return 0;
	}

	/* Allmulticast unavailable: fall back to promiscuous mode. */
	ret = rte_eth_promiscuous_enable(slave_id);
	if (ret != 0) {
		RTE_BOND_LOG(ERR,
			"failed to enable promiscuous mode for port %u: %s",
			slave_id, rte_strerror(-ret));
	}
	if (rte_eth_promiscuous_get(slave_id)) {
		RTE_BOND_LOG(DEBUG, "forced promiscuous for port %u",
			     slave_id);
		bond_mode_8023ad_ports[slave_id].forced_rx_flags =
				BOND_8023AD_FORCED_PROMISC;
		return 0;
	}

	return -1;
}
973
/*
 * Undo whatever RX mode bond_mode_8023ad_register_lacp_mac() forced on
 * @slave_id. A no-op when nothing was forced (default case).
 */
static void
bond_mode_8023ad_unregister_lacp_mac(uint16_t slave_id)
{
	int ret;

	switch (bond_mode_8023ad_ports[slave_id].forced_rx_flags) {
	case BOND_8023AD_FORCED_ALLMULTI:
		RTE_BOND_LOG(DEBUG, "unset allmulti for port %u", slave_id);
		ret = rte_eth_allmulticast_disable(slave_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"failed to disable allmulti mode for port %u: %s",
				slave_id, rte_strerror(-ret));
		break;

	case BOND_8023AD_FORCED_PROMISC:
		RTE_BOND_LOG(DEBUG, "unset promisc for port %u", slave_id);
		ret = rte_eth_promiscuous_disable(slave_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"failed to disable promiscuous mode for port %u: %s",
				slave_id, rte_strerror(-ret));
		break;

	default:
		break;
	}
}
1002
/*
 * Initialize 802.3ad state for a newly activated slave: default
 * actor/partner parameters, forced multicast reception, and - on first
 * activation only - the per-slave mbuf pool and LACP RX/TX rings.
 *
 * Panics if the mbuf pool or either ring cannot be created.
 */
void
bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
		uint16_t slave_id)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;

	struct port *port = &bond_mode_8023ad_ports[slave_id];
	struct port_params initial = {
		.system = { { 0 } },	/* Filled in by the periodic callback. */
		.system_priority = rte_cpu_to_be_16(0xFFFF),
		.key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
		.port_priority = rte_cpu_to_be_16(0x00FF),
		.port_number = 0,	/* Overridden below from slave_id. */
	};

	char mem_name[RTE_ETH_NAME_MAX_LEN];
	int socket_id;
	unsigned element_size;
	uint32_t total_tx_desc;
	struct bond_tx_queue *bd_tx_q;
	uint16_t q_id;

	/* The slave must not already be on the active list. */
	RTE_ASSERT(find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, slave_id) == internals->active_slave_count);
	RTE_SET_USED(internals);

	memcpy(&port->actor, &initial, sizeof(struct port_params));

	/* Port numbers are 1-based on the wire. */
	port->actor.port_number = rte_cpu_to_be_16(slave_id + 1);

	memcpy(&port->partner, &initial, sizeof(struct port_params));
	memcpy(&port->partner_admin, &initial, sizeof(struct port_params));

	/* Start as an active, aggregatable port using defaulted partner
	 * info, with every state machine in BEGIN. */
	port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
	port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
	port->sm_flags = SM_FLAGS_BEGIN;

	/* Every port is initially its own aggregator. */
	port->aggregator_port_id = slave_id;

	if (bond_mode_8023ad_register_lacp_mac(slave_id) < 0) {
		RTE_BOND_LOG(WARNING, "slave %u is most likely broken and won't receive LACP packets",
			     slave_id);
	}

	timer_cancel(&port->warning_timer);

	/* Resources already exist if this slave was activated before. */
	if (port->mbuf_pool != NULL)
		return;

	RTE_ASSERT(port->rx_ring == NULL);
	RTE_ASSERT(port->tx_ring == NULL);

	socket_id = rte_eth_dev_socket_id(slave_id);
	if (socket_id == -1)
		socket_id = rte_socket_id();

	element_size = sizeof(struct slow_protocol_frame) +
				RTE_PKTMBUF_HEADROOM;

	/* Size the pool for the LACP TX ring plus the descriptors of every
	 * TX queue of the bonded device. */
	total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
	for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id];
		total_tx_desc += bd_tx_q->nb_tx_desc;
	}

	snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
	port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
		RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
			32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
		0, element_size, socket_id);

	/* Failure to allocate any of these resources is unrecoverable. */
	if (port->mbuf_pool == NULL) {
		rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
			slave_id, mem_name, rte_strerror(rte_errno));
	}

	snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
	port->rx_ring = rte_ring_create(mem_name,
			rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);

	if (port->rx_ring == NULL) {
		rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
			mem_name, rte_strerror(rte_errno));
	}

	/* TX ring is sized with one extra slot. */
	snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
	port->tx_ring = rte_ring_create(mem_name,
			rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);

	if (port->tx_ring == NULL) {
		rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
			mem_name, rte_strerror(rte_errno));
	}
}
1106
/*
 * Tear down 802.3ad state when a slave leaves the active list: detach
 * it from aggregation, restore defaulted partner info, undo forced
 * multicast reception and drain any queued LACP mbufs.
 *
 * Always returns 0.
 */
int
bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
		uint16_t slave_id)
{
	void *pkt = NULL;
	struct port *port = NULL;
	uint8_t old_partner_state;

	port = &bond_mode_8023ad_ports[slave_id];

	ACTOR_STATE_CLR(port, AGGREGATION);
	port->selected = UNSELECTED;

	old_partner_state = port->partner_state;
	record_default(port);

	bond_mode_8023ad_unregister_lacp_mac(slave_id);

	/* If record_default() did not change the partner's timeout mode,
	 * the current_while timer no longer applies; stop it. */
	if (!((old_partner_state ^ port->partner_state) &
			STATE_LACP_SHORT_TIMEOUT))
		timer_cancel(&port->current_while_timer);

	PARTNER_STATE_CLR(port, AGGREGATION);
	ACTOR_STATE_CLR(port, EXPIRED);

	/* Release any LACP frames still queued for this slave. */
	while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
		rte_pktmbuf_free((struct rte_mbuf *)pkt);

	while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
		rte_pktmbuf_free((struct rte_mbuf *)pkt);
	return 0;
}
1141
/*
 * Propagate slave MAC address changes into the recorded 802.3ad actor
 * system addresses. The mode-4 alarm is stopped while updating and
 * restarted only when the bonded device is running.
 */
void
bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct rte_ether_addr slave_addr;
	struct port *slave, *agg_slave;
	uint16_t slave_id, i, j;

	bond_mode_8023ad_stop(bond_dev);

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		slave = &bond_mode_8023ad_ports[slave_id];
		rte_eth_macaddr_get(slave_id, &slave_addr);

		/* Nothing to do when the recorded address still matches. */
		if (rte_is_same_ether_addr(&slave_addr, &slave->actor.system))
			continue;

		rte_ether_addr_copy(&slave_addr, &slave->actor.system);

		/* Only an aggregator's system address appears in LACPDUs
		 * (see tx_machine()), so only then must members retransmit. */
		if (slave->aggregator_port_id != slave_id)
			continue;

		/* Raise NTT on every port attached to this aggregator. */
		for (j = 0; j < internals->active_slave_count; j++) {
			agg_slave = &bond_mode_8023ad_ports[internals->active_slaves[j]];
			if (agg_slave->aggregator_port_id == slave_id)
				SM_FLAG_SET(agg_slave, NTT);
		}
	}

	if (bond_dev->data->dev_started)
		bond_mode_8023ad_start(bond_dev);
}
1176
/*
 * Export the current mode-4 configuration into @conf, converting the
 * internal TSC-tick timeouts back to milliseconds.
 */
static void
bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_bond_8023ad_conf *conf)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct mode8023ad_private *mode4 = &internals->mode4;
	uint64_t ms_ticks = rte_get_tsc_hz() / 1000;	/* TSC ticks per ms */

	conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
	conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
	conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
	conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
	conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
	conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
	/* update_timeout is stored in microseconds, not TSC ticks. */
	conf->update_timeout_ms = mode4->update_timeout_us / 1000;
	conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
	conf->slowrx_cb = mode4->slowrx_cb;
	conf->agg_selection = mode4->agg_selection;
}
1196
1197static void
1198bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
1199{
1200 conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
1201 conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
1202 conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
1203 conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
1204 conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
1205 conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
1206 conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
1207 conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
1208 conf->slowrx_cb = NULL;
1209 conf->agg_selection = AGG_STABLE;
1210}
1211
1212static void
1213bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
1214 struct rte_eth_bond_8023ad_conf *conf)
1215{
1216 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1217
1218 mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
1219 mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
1220 mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
1221 mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
1222 mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
1223 mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
1224 mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
1225 mode4->update_timeout_us = conf->update_timeout_ms * 1000;
1226
1227 mode4->dedicated_queues.enabled = 0;
1228 mode4->dedicated_queues.rx_qid = UINT16_MAX;
1229 mode4->dedicated_queues.tx_qid = UINT16_MAX;
1230}
1231
1232void
1233bond_mode_8023ad_setup(struct rte_eth_dev *dev,
1234 struct rte_eth_bond_8023ad_conf *conf)
1235{
1236 struct rte_eth_bond_8023ad_conf def_conf;
1237 struct bond_dev_private *internals = dev->data->dev_private;
1238 struct mode8023ad_private *mode4 = &internals->mode4;
1239
1240 if (conf == NULL) {
1241 conf = &def_conf;
1242 bond_mode_8023ad_conf_get_default(conf);
1243 }
1244
1245 bond_mode_8023ad_stop(dev);
1246 bond_mode_8023ad_conf_assign(mode4, conf);
1247 mode4->slowrx_cb = conf->slowrx_cb;
1248 mode4->agg_selection = AGG_STABLE;
1249
1250 if (dev->data->dev_started)
1251 bond_mode_8023ad_start(dev);
1252}
1253
1254int
1255bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
1256{
1257 struct bond_dev_private *internals = bond_dev->data->dev_private;
1258 uint16_t i;
1259
1260 for (i = 0; i < internals->active_slave_count; i++)
1261 bond_mode_8023ad_activate_slave(bond_dev,
1262 internals->active_slaves[i]);
1263
1264 return 0;
1265}
1266
1267int
1268bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
1269{
1270 struct bond_dev_private *internals = bond_dev->data->dev_private;
1271 struct mode8023ad_private *mode4 = &internals->mode4;
1272 static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
1273
1274 rte_eth_macaddr_get(internals->port_id, &mode4->mac_addr);
1275 if (mode4->slowrx_cb)
1276 return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
1277 bond_dev);
1278
1279 return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev);
1280}
1281
1282void
1283bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
1284{
1285 struct bond_dev_private *internals = bond_dev->data->dev_private;
1286 struct mode8023ad_private *mode4 = &internals->mode4;
1287
1288 if (mode4->slowrx_cb) {
1289 rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb,
1290 bond_dev);
1291 return;
1292 }
1293 rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
1294}
1295
/*
 * Process one slow-protocol frame (LACP or Marker) received on @slave_id.
 *
 * Marker Information frames are answered in place: the mbuf is rewritten
 * into a Marker Response and queued (or transmitted directly when dedicated
 * queues are enabled).  LACP frames are handed to the Rx state machine,
 * either immediately or via the slave's rx_ring.  On any error the mbuf is
 * freed and a warning flag is latched on the port — ownership of @pkt is
 * always taken.
 */
void
bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
		uint16_t slave_id, struct rte_mbuf *pkt)
{
	struct mode8023ad_private *mode4 = &internals->mode4;
	struct port *port = &bond_mode_8023ad_ports[slave_id];
	struct marker_header *m_hdr;
	uint64_t marker_timer, old_marker_timer;
	int retval;
	uint8_t wrn, subtype;

	/* Slow-protocol subtype distinguishes LACP from Marker frames. */
	subtype = rte_pktmbuf_mtod(pkt,
			struct slow_protocol_frame *)->slow_protocol.subtype;

	if (subtype == SLOW_SUBTYPE_MARKER) {
		m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);

		/* Only Marker Information TLVs can be answered.
		 * NOTE(review): likely() marks the error path as the expected
		 * case, which looks inverted — confirm the hint is intended.
		 */
		if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
			wrn = WRN_UNKNOWN_MARKER_TYPE;
			goto free_out;
		}

		/* Rate-limit marker handling: atomically re-arm the Rx marker
		 * timer, retrying the CAS if it was updated concurrently.
		 */
		do {
			old_marker_timer = port->rx_marker_timer;
			if (!timer_is_expired(&old_marker_timer)) {
				/* Previous marker interval not yet over. */
				wrn = WRN_RX_MARKER_TO_FAST;
				goto free_out;
			}

			timer_set(&marker_timer, mode4->rx_marker_timeout);
			retval = rte_atomic64_cmpset(&port->rx_marker_timer,
				old_marker_timer, marker_timer);
		} while (unlikely(retval == 0));

		/* Convert the request into a response in place and stamp the
		 * slave's own MAC as the source address.
		 */
		m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
		rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);

		if (internals->mode4.dedicated_queues.enabled == 0) {
			if (rte_ring_enqueue(port->tx_ring, pkt) != 0) {
				/* Could not queue the response: clear the
				 * marker timer and record the drop.
				 */
				port->rx_marker_timer = 0;
				wrn = WRN_TX_QUEUE_FULL;
				goto free_out;
			}
		} else {
			/* Dedicated Tx queue: send the response directly. */
			uint16_t tx_count = rte_eth_tx_burst(slave_id,
					internals->mode4.dedicated_queues.tx_qid,
					&pkt, 1);
			if (tx_count != 1) {
				/* Transmit failed: clear the marker timer and
				 * record the drop.
				 */
				port->rx_marker_timer = 0;
				wrn = WRN_TX_QUEUE_FULL;
				goto free_out;
			}
		}
	} else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
		if (internals->mode4.dedicated_queues.enabled == 0) {
			if (rte_ring_enqueue(port->rx_ring, pkt) != 0) {
				/* Rx ring full: drop the LACPDU. */
				wrn = WRN_RX_QUEUE_FULL;
				goto free_out;
			}
		} else
			/* Dedicated queues: run the Rx machine right away. */
			rx_machine_update(internals, slave_id, pkt);
	} else {
		wrn = WRN_UNKNOWN_SLOW_TYPE;
		goto free_out;
	}

	return;

free_out:
	/* Record why the frame was dropped, then release it. */
	set_warning_flags(port, wrn);
	rte_pktmbuf_free(pkt);
}
1375
1376int
1377rte_eth_bond_8023ad_conf_get(uint16_t port_id,
1378 struct rte_eth_bond_8023ad_conf *conf)
1379{
1380 struct rte_eth_dev *bond_dev;
1381
1382 if (valid_bonded_port_id(port_id) != 0)
1383 return -EINVAL;
1384
1385 if (conf == NULL)
1386 return -EINVAL;
1387
1388 bond_dev = &rte_eth_devices[port_id];
1389 bond_mode_8023ad_conf_get(bond_dev, conf);
1390 return 0;
1391}
1392
1393int
1394rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
1395 enum rte_bond_8023ad_agg_selection agg_selection)
1396{
1397 struct rte_eth_dev *bond_dev;
1398 struct bond_dev_private *internals;
1399 struct mode8023ad_private *mode4;
1400
1401 if (valid_bonded_port_id(port_id) != 0)
1402 return -EINVAL;
1403
1404 bond_dev = &rte_eth_devices[port_id];
1405 internals = bond_dev->data->dev_private;
1406
1407 if (internals->mode != 4)
1408 return -EINVAL;
1409
1410 mode4 = &internals->mode4;
1411 if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH
1412 || agg_selection == AGG_STABLE)
1413 mode4->agg_selection = agg_selection;
1414 return 0;
1415}
1416
1417int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id)
1418{
1419 struct rte_eth_dev *bond_dev;
1420 struct bond_dev_private *internals;
1421 struct mode8023ad_private *mode4;
1422
1423 if (valid_bonded_port_id(port_id) != 0)
1424 return -EINVAL;
1425
1426 bond_dev = &rte_eth_devices[port_id];
1427 internals = bond_dev->data->dev_private;
1428
1429 if (internals->mode != 4)
1430 return -EINVAL;
1431 mode4 = &internals->mode4;
1432
1433 return mode4->agg_selection;
1434}
1435
1436
1437
1438static int
1439bond_8023ad_setup_validate(uint16_t port_id,
1440 struct rte_eth_bond_8023ad_conf *conf)
1441{
1442 if (valid_bonded_port_id(port_id) != 0)
1443 return -EINVAL;
1444
1445 if (conf != NULL) {
1446
1447 if (conf->slow_periodic_ms == 0 ||
1448 conf->fast_periodic_ms >= conf->slow_periodic_ms ||
1449 conf->long_timeout_ms == 0 ||
1450 conf->short_timeout_ms >= conf->long_timeout_ms ||
1451 conf->aggregate_wait_timeout_ms == 0 ||
1452 conf->tx_period_ms == 0 ||
1453 conf->rx_marker_period_ms == 0 ||
1454 conf->update_timeout_ms == 0) {
1455 RTE_BOND_LOG(ERR, "given mode 4 configuration is invalid");
1456 return -EINVAL;
1457 }
1458 }
1459
1460 return 0;
1461}
1462
1463
1464int
1465rte_eth_bond_8023ad_setup(uint16_t port_id,
1466 struct rte_eth_bond_8023ad_conf *conf)
1467{
1468 struct rte_eth_dev *bond_dev;
1469 int err;
1470
1471 err = bond_8023ad_setup_validate(port_id, conf);
1472 if (err != 0)
1473 return err;
1474
1475 bond_dev = &rte_eth_devices[port_id];
1476 bond_mode_8023ad_setup(bond_dev, conf);
1477
1478 return 0;
1479}
1480
1481
1482
1483
1484
1485int
1486rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
1487 struct rte_eth_bond_8023ad_slave_info *info)
1488{
1489 struct rte_eth_dev *bond_dev;
1490 struct bond_dev_private *internals;
1491 struct port *port;
1492
1493 if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
1494 rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1495 return -EINVAL;
1496
1497 bond_dev = &rte_eth_devices[port_id];
1498
1499 internals = bond_dev->data->dev_private;
1500 if (find_slave_by_id(internals->active_slaves,
1501 internals->active_slave_count, slave_id) ==
1502 internals->active_slave_count)
1503 return -EINVAL;
1504
1505 port = &bond_mode_8023ad_ports[slave_id];
1506 info->selected = port->selected;
1507
1508 info->actor_state = port->actor_state;
1509 rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
1510
1511 info->partner_state = port->partner_state;
1512 rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
1513
1514 info->agg_port_id = port->aggregator_port_id;
1515 return 0;
1516}
1517
1518static int
1519bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id)
1520{
1521 struct rte_eth_dev *bond_dev;
1522 struct bond_dev_private *internals;
1523 struct mode8023ad_private *mode4;
1524
1525 if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1526 return -EINVAL;
1527
1528 bond_dev = &rte_eth_devices[port_id];
1529
1530 if (!bond_dev->data->dev_started)
1531 return -EINVAL;
1532
1533 internals = bond_dev->data->dev_private;
1534 if (find_slave_by_id(internals->active_slaves,
1535 internals->active_slave_count, slave_id) ==
1536 internals->active_slave_count)
1537 return -EINVAL;
1538
1539 mode4 = &internals->mode4;
1540 if (mode4->slowrx_cb == NULL)
1541 return -EINVAL;
1542
1543 return 0;
1544}
1545
1546int
1547rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
1548 int enabled)
1549{
1550 struct port *port;
1551 int res;
1552
1553 res = bond_8023ad_ext_validate(port_id, slave_id);
1554 if (res != 0)
1555 return res;
1556
1557 port = &bond_mode_8023ad_ports[slave_id];
1558
1559 if (enabled)
1560 ACTOR_STATE_SET(port, COLLECTING);
1561 else
1562 ACTOR_STATE_CLR(port, COLLECTING);
1563
1564 return 0;
1565}
1566
1567int
1568rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
1569 int enabled)
1570{
1571 struct port *port;
1572 int res;
1573
1574 res = bond_8023ad_ext_validate(port_id, slave_id);
1575 if (res != 0)
1576 return res;
1577
1578 port = &bond_mode_8023ad_ports[slave_id];
1579
1580 if (enabled)
1581 ACTOR_STATE_SET(port, DISTRIBUTING);
1582 else
1583 ACTOR_STATE_CLR(port, DISTRIBUTING);
1584
1585 return 0;
1586}
1587
1588int
1589rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
1590{
1591 struct port *port;
1592 int err;
1593
1594 err = bond_8023ad_ext_validate(port_id, slave_id);
1595 if (err != 0)
1596 return err;
1597
1598 port = &bond_mode_8023ad_ports[slave_id];
1599 return ACTOR_STATE(port, DISTRIBUTING);
1600}
1601
1602int
1603rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
1604{
1605 struct port *port;
1606 int err;
1607
1608 err = bond_8023ad_ext_validate(port_id, slave_id);
1609 if (err != 0)
1610 return err;
1611
1612 port = &bond_mode_8023ad_ports[slave_id];
1613 return ACTOR_STATE(port, COLLECTING);
1614}
1615
1616int
1617rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
1618 struct rte_mbuf *lacp_pkt)
1619{
1620 struct port *port;
1621 int res;
1622
1623 res = bond_8023ad_ext_validate(port_id, slave_id);
1624 if (res != 0)
1625 return res;
1626
1627 port = &bond_mode_8023ad_ports[slave_id];
1628
1629 if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
1630 return -EINVAL;
1631
1632 struct lacpdu_header *lacp;
1633
1634
1635 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
1636 if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP)
1637 return -EINVAL;
1638
1639 MODE4_DEBUG("sending LACP frame\n");
1640
1641 return rte_ring_enqueue(port->tx_ring, lacp_pkt);
1642}
1643
1644static void
1645bond_mode_8023ad_ext_periodic_cb(void *arg)
1646{
1647 struct rte_eth_dev *bond_dev = arg;
1648 struct bond_dev_private *internals = bond_dev->data->dev_private;
1649 struct mode8023ad_private *mode4 = &internals->mode4;
1650 struct port *port;
1651 void *pkt = NULL;
1652 uint16_t i, slave_id;
1653
1654 for (i = 0; i < internals->active_slave_count; i++) {
1655 slave_id = internals->active_slaves[i];
1656 port = &bond_mode_8023ad_ports[slave_id];
1657
1658 if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
1659 struct rte_mbuf *lacp_pkt = pkt;
1660 struct lacpdu_header *lacp;
1661
1662 lacp = rte_pktmbuf_mtod(lacp_pkt,
1663 struct lacpdu_header *);
1664 RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
1665
1666
1667
1668
1669 mode4->slowrx_cb(slave_id, lacp_pkt);
1670 }
1671 }
1672
1673 rte_eal_alarm_set(internals->mode4.update_timeout_us,
1674 bond_mode_8023ad_ext_periodic_cb, arg);
1675}
1676
1677int
1678rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port)
1679{
1680 int retval = 0;
1681 struct rte_eth_dev *dev;
1682 struct bond_dev_private *internals;
1683
1684 if (valid_bonded_port_id(port) != 0)
1685 return -EINVAL;
1686
1687 dev = &rte_eth_devices[port];
1688 internals = dev->data->dev_private;
1689
1690 if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
1691 return -1;
1692
1693
1694 if (dev->data->dev_started)
1695 return -1;
1696
1697 internals->mode4.dedicated_queues.enabled = 1;
1698
1699 bond_ethdev_mode_set(dev, internals->mode);
1700 return retval;
1701}
1702
1703int
1704rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port)
1705{
1706 int retval = 0;
1707 struct rte_eth_dev *dev;
1708 struct bond_dev_private *internals;
1709
1710 if (valid_bonded_port_id(port) != 0)
1711 return -EINVAL;
1712
1713 dev = &rte_eth_devices[port];
1714 internals = dev->data->dev_private;
1715
1716
1717 if (dev->data->dev_started)
1718 return -1;
1719
1720 internals->mode4.dedicated_queues.enabled = 0;
1721
1722 bond_ethdev_mode_set(dev, internals->mode);
1723
1724 return retval;
1725}
1726