1
2
3
4
5
6#ifndef _LINUX_IF_TEAM_H_
7#define _LINUX_IF_TEAM_H_
8
9#include <linux/netpoll.h>
10#include <net/sch_generic.h>
11#include <linux/types.h>
12#include <uapi/linux/if_team.h>
13
/* Per-CPU packet/byte counters for a team device; one instance per possible
 * CPU (see the __percpu pcpu_stats pointer in struct team).
 */
struct team_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 rx_multicast;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;	/* consistency of the u64 counters above on 32-bit arches */
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_nohandler;
};
25
26struct team;
27
28struct team_port {
29 struct net_device *dev;
30 struct hlist_node hlist;
31 struct list_head list;
32 struct team *team;
33 int index;
34
35 bool linkup;
36
37 struct {
38 bool linkup;
39 u32 speed;
40 u8 duplex;
41 } state;
42
43
44 struct {
45 bool linkup;
46 bool linkup_enabled;
47 } user;
48
49
50 bool changed;
51 bool removed;
52
53
54
55
56
57 struct {
58 unsigned char dev_addr[MAX_ADDR_LEN];
59 unsigned int mtu;
60 } orig;
61
62#ifdef CONFIG_NET_POLL_CONTROLLER
63 struct netpoll *np;
64#endif
65
66 s32 priority;
67 u16 queue_id;
68 struct list_head qom_list;
69 struct rcu_head rcu;
70 long mode_priv[0];
71};
72
73static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
74{
75 return rcu_dereference(dev->rx_handler_data);
76}
77
78static inline bool team_port_enabled(struct team_port *port)
79{
80 return port->index != -1;
81}
82
83static inline bool team_port_txable(struct team_port *port)
84{
85 return port->linkup && team_port_enabled(port);
86}
87
88static inline bool team_port_dev_txable(const struct net_device *port_dev)
89{
90 struct team_port *port;
91 bool txable;
92
93 rcu_read_lock();
94 port = team_port_get_rcu(port_dev);
95 txable = port ? team_port_txable(port) : false;
96 rcu_read_unlock();
97
98 return txable;
99}
100
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Hand @skb to the port's netpoll instance, if one is attached. */
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	netpoll_send_skb(np, skb);
}
#else
/* Netpoll support compiled out: nothing to do. */
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif
116
/* Hooks a team mode implements; installed into team->ops when a mode is
 * selected (see struct team_mode).
 */
struct team_mode_ops {
	int (*init)(struct team *team);
	void (*exit)(struct team *team);
	/* rx path handler; decides what the rx_handler returns for this skb */
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	/* tx path handler; return value presumably indicates whether the skb
	 * was consumed — TODO confirm against the mode implementations */
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_enabled)(struct team *team, struct team_port *port);
	void (*port_disabled)(struct team *team, struct team_port *port);
};
130
/* Helpers provided by the team core for mode implementations, matching the
 * port_enter and port_change_dev_addr ops above.
 */
extern int team_modeop_port_enter(struct team *team, struct team_port *port);
extern void team_modeop_port_change_dev_addr(struct team *team,
					     struct team_port *port);
134
/* Value type of a team option; selects which member of the
 * team_gsetter_ctx data union is used.
 */
enum team_option_type {
	TEAM_OPTION_TYPE_U32,
	TEAM_OPTION_TYPE_STRING,
	TEAM_OPTION_TYPE_BINARY,
	TEAM_OPTION_TYPE_BOOL,
	TEAM_OPTION_TYPE_S32,
};
142
/* Identifies one instance of an option: which array element and, for
 * per-port options, which port it belongs to (NULL otherwise — presumably;
 * confirm against the option core).
 */
struct team_option_inst_info {
	u32 array_index;
	struct team_port *port;
};
147
/* Context passed to option getter/setter callbacks. Which union member is
 * valid is determined by the option's team_option_type.
 */
struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;	/* TEAM_OPTION_TYPE_BINARY payload */
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info;	/* which option instance is accessed */
};
161
/* Definition of one configurable team option, registered via
 * team_options_register().
 */
struct team_option {
	struct list_head list;		/* node in team->option_list */
	const char *name;
	bool per_port;			/* one instance per port instead of per team */
	unsigned int array_size;	/* >0 means the option is an array of values */
	enum team_option_type type;
	int (*init)(struct team *team, struct team_option_inst_info *info);
	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
172
/* Mark an option instance changed / check all options for pending changes
 * (implemented by the team core).
 */
extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);
175
/* Descriptor of a team mode module, registered via team_mode_register(). */
struct team_mode {
	const char *kind;		/* mode name, e.g. used in "team-mode-<kind>" alias */
	struct module *owner;
	size_t priv_size;		/* extra per-team private storage for this mode */
	size_t port_priv_size;		/* per-port private storage (team_port mode_priv) */
	const struct team_mode_ops *ops;
	enum netdev_lag_tx_type lag_tx_type;
};
184
/* Enabled-ports hash table size: 2^4 = 16 buckets (see en_port_hlist). */
#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

/* Fixed-size mode-private storage embedded in struct team (mode_priv). */
#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
190
/* Main state of one team master device. */
struct team {
	struct net_device *dev;		/* the team master netdevice */
	struct team_pcpu_stats __percpu *pcpu_stats;

	struct mutex lock;		/* used for overall locking, e.g. port lists write */

	/*
	 * List of enabled ports and their count
	 */
	int en_port_count;
	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];

	struct list_head port_list;	/* list of all ports */

	struct list_head option_list;
	struct list_head option_inst_list;	/* list of option instances */

	const struct team_mode *mode;
	struct team_mode_ops ops;	/* copy of mode->ops, dispatched on hot paths */
	bool user_carrier_enabled;
	bool queue_override_enabled;
	struct list_head *qom_lists;	/* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	/* Periodic peer-notification work: remaining count and interval. */
	struct {
		unsigned int count;
		unsigned int interval;	/* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	/* Periodic multicast-rejoin work: remaining count and interval. */
	struct {
		unsigned int count;
		unsigned int interval;	/* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	long mode_priv[TEAM_MODE_PRIV_LONGS];	/* mode-private scratch area */
};
228
229static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
230 struct sk_buff *skb)
231{
232 BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
233 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
234 skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
235
236 skb->dev = port->dev;
237 if (unlikely(netpoll_tx_running(team->dev))) {
238 team_netpoll_send_skb(port, skb);
239 return 0;
240 }
241 return dev_queue_xmit(skb);
242}
243
244static inline struct hlist_head *team_port_index_hash(struct team *team,
245 int port_index)
246{
247 return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
248}
249
250static inline struct team_port *team_get_port_by_index(struct team *team,
251 int port_index)
252{
253 struct team_port *port;
254 struct hlist_head *head = team_port_index_hash(team, port_index);
255
256 hlist_for_each_entry(port, head, hlist)
257 if (port->index == port_index)
258 return port;
259 return NULL;
260}
261
262static inline int team_num_to_port_index(struct team *team, unsigned int num)
263{
264 int en_port_count = READ_ONCE(team->en_port_count);
265
266 if (unlikely(!en_port_count))
267 return 0;
268 return num % en_port_count;
269}
270
271static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
272 int port_index)
273{
274 struct team_port *port;
275 struct hlist_head *head = team_port_index_hash(team, port_index);
276
277 hlist_for_each_entry_rcu(port, head, hlist)
278 if (port->index == port_index)
279 return port;
280 return NULL;
281}
282
/* Find the first txable port (enabled and link up), starting at @port and
 * wrapping around the RCU-protected port list: returns @port itself when it
 * is txable, otherwise the next txable port in list order, or NULL when no
 * port is txable. Caller must hold rcu_read_lock().
 */
static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	if (likely(team_port_txable(port)))
		return port;
	cur = port;
	/* First pass: from the entry after @port to the end of the list. */
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	/* Wrap-around pass: from the list head back up to (excluding) @port. */
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}
302
/* Registration API for options and modes, implemented by the team core. */
extern int team_options_register(struct team *team,
				 const struct team_option *option,
				 size_t option_count);
extern void team_options_unregister(struct team *team,
				    const struct team_option *option,
				    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);
311
/* Default queue counts for a newly created team device. */
#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16

/* Module alias so requests for "team-mode-<kind>" autoload the mode module. */
#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)
316
317#endif
318