1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#ifndef __MLX5E_REP_H__
34#define __MLX5E_REP_H__
35
36#include <net/ip_tunnels.h>
37#include <linux/rhashtable.h>
38#include <linux/mutex.h>
39#include "eswitch.h"
40#include "en.h"
41#include "lib/port_tun.h"
42
43#ifdef CONFIG_MLX5_ESWITCH
44extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep;
45
struct mlx5e_neigh_update_table {
	struct rhashtable neigh_ht;
	/* Keep the neigh hash entries on a list in addition to the hash
	 * table (neigh_ht) so they can be iterated easily, e.g. by the
	 * periodic neigh_stats_work below.
	 */
	struct list_head neigh_list;
	/* protects lookup/insert/remove on the table and list */
	struct mutex encap_lock;
	struct notifier_block netevent_nb;
	struct delayed_work neigh_stats_work;
	/* NOTE(review): presumably the minimal interval (in jiffies) between
	 * neigh stats updates — confirm at the work-queueing site.
	 */
	unsigned long min_interval;
};
59
60struct mlx5_tc_ct_priv;
61struct mlx5e_rep_bond;
62struct mlx5e_tc_tun_encap;
63struct mlx5e_post_act;
64
struct mlx5_rep_uplink_priv {
	/* TC flows DB. NOTE(review): presumably instantiated by the uplink
	 * representor and shared with the VF representors — confirm in the
	 * rep init path.
	 */
	struct rhashtable tc_ht;

	/* Private data for indirect TC block callbacks registered on
	 * higher-level devices (e.g. tunnel devices); used to look up the
	 * callback private data on bind/unbind events.
	 */
	struct list_head tc_indr_block_priv_list;

	struct mlx5_tun_entropy tun_entropy;

	/* protects unready_flows */
	struct mutex unready_flows_lock;
	struct list_head unready_flows;
	struct work_struct reoffload_flows_work;

	/* NOTE(review): presumably maps tunnel info to a unique id — confirm */
	struct mapping_ctx *tunnel_mapping;
	/* NOTE(review): presumably maps tunnel encap options to a unique id */
	struct mapping_ctx *tunnel_enc_opts_mapping;

	struct mlx5e_post_act *post_act;
	struct mlx5_tc_ct_priv *ct_priv;
	struct mlx5e_tc_psample *tc_psample;

	/* eswitch vports bonding support */
	struct mlx5e_rep_bond *bond;

	/* TC tunnel encapsulation private data */
	struct mlx5e_tc_tun_encap *encap;
};
102
/* Per-representor netdev private data (attached via rep_data[REP_ETH].priv). */
struct mlx5e_rep_priv {
	struct mlx5_eswitch_rep *rep;
	struct mlx5e_neigh_update_table neigh_update;
	struct net_device *netdev;
	struct mlx5_flow_table *root_ft;
	struct mlx5_flow_handle *vport_rx_rule;
	struct list_head vport_sqs_list;
	/* NOTE(review): presumably only meaningful on the uplink rep —
	 * confirm with the users of uplink_priv.
	 */
	struct mlx5_rep_uplink_priv uplink_priv;
	struct rtnl_link_stats64 prev_vf_vport_stats;
};
113
/* Return the mlx5e rep private data stored in the eswitch rep's REP_ETH slot. */
static inline
struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
{
	return rep->rep_data[REP_ETH].priv;
}
119
/* Neighbour lookup key: destination IP (IPv4 or IPv6) plus address family. */
struct mlx5e_neigh {
	union {
		__be32 v4;
		struct in6_addr v6;
	} dst_ip;
	int family;
};
127
struct mlx5e_neigh_hash_entry {
	struct rhash_head rhash_node;
	struct mlx5e_neigh m_neigh;
	struct mlx5e_priv *priv;
	struct net_device *neigh_dev;

	/* Keep the entry on a per-representor list in addition to the hash
	 * table so all neigh entries can be iterated easily, e.g. for the
	 * stats work.
	 */
	struct list_head neigh_list;

	/* protects encap_list */
	spinlock_t encap_list_lock;
	/* encap entries sharing this neighbour */
	struct list_head encap_list;

	/* NOTE(review): refcount presumably keeps the entry alive while it is
	 * referenced from notification/lookup paths so it is only deleted at
	 * refcount zero — confirm at the put/release sites.
	 */
	refcount_t refcnt;

	/* NOTE(review): presumably the last time offloaded traffic was
	 * reported as passing through this neigh's flows — confirm where the
	 * stats work updates it.
	 */
	unsigned long reported_lastuse;

	struct rcu_head rcu;
};
159
/* Bits for mlx5e_encap_entry::flags. NOTE(review): VALID presumably means the
 * encap entry was successfully offloaded to HW — confirm at the set sites.
 */
enum {
	MLX5_ENCAP_ENTRY_VALID = BIT(0),
	MLX5_REFORMAT_DECAP = BIT(1),
	MLX5_ENCAP_ENTRY_NO_ROUTE = BIT(2),
};
166
/* Hash key identifying a decap entry: an Ethernet header. */
struct mlx5e_decap_key {
	struct ethhdr key;
};
170
struct mlx5e_decap_entry {
	struct mlx5e_decap_key key;
	struct list_head flows;      /* flows attached to this decap entry */
	struct hlist_node hlist;
	refcount_t refcnt;
	/* completed once compl_result/pkt_reformat are resolved */
	struct completion res_ready;
	int compl_result;
	struct mlx5_pkt_reformat *pkt_reformat;
	struct rcu_head rcu;         /* entry is freed after an RCU grace period */
};
181
struct mlx5e_encap_entry {
	/* attached neigh hash entry */
	struct mlx5e_neigh_hash_entry *nhe;
	/* node on nhe->encap_list (encaps sharing the same neigh) */
	struct list_head encap_list;

	/* NOTE(review): presumably a node in an eswitch-wide hash of all
	 * encap entries — confirm at the insertion site.
	 */
	struct hlist_node encap_hlist;
	struct list_head flows;
	struct list_head route_list;
	struct mlx5_pkt_reformat *pkt_reformat;
	const struct ip_tunnel_info *tun_info;
	unsigned char h_dest[ETH_ALEN];	/* destination MAC address */

	struct net_device *out_dev;
	int route_dev_ifindex;
	struct mlx5e_tc_tunnel *tunnel;
	int reformat_type;
	u8 flags;			/* MLX5_ENCAP_ENTRY_VALID etc., see flag enum above */
	char *encap_header;
	int encap_size;
	refcount_t refcnt;
	/* completed once compl_result/pkt_reformat are resolved */
	struct completion res_ready;
	int compl_result;
	struct rcu_head rcu;
};
209
/* Tracks one SQ's send-to-vport steering rule(s), kept on vport_sqs_list. */
struct mlx5e_rep_sq {
	struct mlx5_flow_handle *send_to_vport_rule;
	struct mlx5_flow_handle *send_to_vport_rule_peer;
	u32 sqn;		/* SQ number */
	struct list_head list;
};
216
/* Module-wide init/teardown of representor support. */
int mlx5e_rep_init(void);
void mlx5e_rep_cleanup(void);

/* Representor LAG/bonding support. */
int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv);
int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
			   struct net_device *lag_dev);
void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
			    const struct net_device *netdev,
			    const struct net_device *lag_dev);
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup);

/* NOTE(review): presumably backing ndo_has_offload_stats/ndo_get_offload_stats
 * — confirm against the rep netdev_ops.
 */
bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id);
int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				void *sp);

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
/* Install/remove the send-to-vport rules for the priv's SQs. */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);

/* Classify a netdev as a VF or uplink representor. */
bool mlx5e_eswitch_vf_rep(const struct net_device *netdev);
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev);
/* True when netdev is any mlx5e eswitch representor: VF rep or uplink rep. */
static inline bool mlx5e_eswitch_rep(const struct net_device *netdev)
{
	if (mlx5e_eswitch_vf_rep(netdev))
		return true;

	return mlx5e_eswitch_uplink_rep(netdev);
}
245
246#else
/* !CONFIG_MLX5_ESWITCH stubs — representor support compiled out, so every
 * query answers "no rep here" and init/teardown are no-ops.
 * Fix: drop the stray semicolons that followed the mlx5e_rep_init() and
 * mlx5e_rep_cleanup() definitions; a `;` after a function body is not valid
 * ISO C at file scope (extra top-level semicolon, flagged by checkpatch).
 */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
static inline int mlx5e_rep_init(void) { return 0; }
static inline void mlx5e_rep_cleanup(void) {}
static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev,
					       int attr_id) { return false; }
static inline int mlx5e_rep_get_offload_stats(int attr_id,
					      const struct net_device *dev,
					      void *sp) { return -EOPNOTSUPP; }
257#endif
258
259static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv)
260{
261 return (MLX5_ESWITCH_MANAGER(priv->mdev) && priv->ppriv);
262}
263#endif
264