// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#include "ib_rep.h"
#include "srq.h"

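/*
 * Stage profile for IB devices created on top of e-switch vport
 * representors.  The FLOW_DB, NON_DEFAULT_CB and ROCE stages use
 * rep-specific callbacks; stages a representor device does not need
 * are omitted relative to the native device profile.
 */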
static const struct mlx5_ib_profile rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_rep_flow_db_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
};

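/*
 * Load callback for the uplink (vport 0) rep: the IB device already
 * exists and carries its own native profile (rep_if[REP_IB].priv is
 * set to it at registration time), so loading just re-runs that
 * profile's init stages.
 */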
static int
mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *ibdev;

	ibdev = mlx5_ib_rep_to_dev(rep);
	if (!__mlx5_ib_add(ibdev, ibdev->profile))
		return -EINVAL;
	return 0;
}

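/* Tear down all profile stages of the uplink IB device. */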
static void
mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *ibdev;

	ibdev = mlx5_ib_rep_to_dev(rep);
	__mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
}

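/*
 * Load callback for a VF vport rep: allocate a dedicated IB device and
 * bring it up with the reduced rep_profile above.  The device is freed
 * again if the profile fails to initialize.
 */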
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *ibdev;

	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!ibdev)
		return -ENOMEM;

	ibdev->rep = rep;
	ibdev->mdev = dev;
	ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
			       MLX5_CAP_GEN(dev, num_vhca_ports));
	if (!__mlx5_ib_add(ibdev, &rep_profile)) {
		/* Free the freshly allocated device to avoid leaking it */
		ib_dealloc_device(&ibdev->ib_dev);
		return -EINVAL;
	}

	rep->rep_if[REP_IB].priv = ibdev;

	return 0;
}

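/*
 * A rep can be unloaded without ever having been loaded (or after a
 * failed load), in which case priv is still NULL and there is nothing
 * to tear down.
 */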
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *dev;

	if (!rep->rep_if[REP_IB].priv)
		return;

	dev = mlx5_ib_rep_to_dev(rep);
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
	rep->rep_if[REP_IB].priv = NULL;
	ib_dealloc_device(&dev->ib_dev);
}

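/* Callback for the e-switch to look up the IB device behind a rep. */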
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return mlx5_ib_rep_to_dev(rep);
}

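/*
 * Register an IB rep interface for every possible VF vport.  Vport 0
 * is the uplink and is registered separately in
 * mlx5_ib_register_vport_reps().
 */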
static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep_if rep_if = {};

		rep_if.load = mlx5_ib_vport_rep_load;
		rep_if.unload = mlx5_ib_vport_rep_unload;
		rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB);
	}
}

static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
}

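/*
 * Register the uplink (vport 0) rep, backed by the already existing IB
 * device, then the per-VF vport reps.
 */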
void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct mlx5_eswitch_rep_if rep_if = {};

	rep_if.load = mlx5_ib_nic_rep_load;
	rep_if.unload = mlx5_ib_nic_rep_unload;
	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
	rep_if.priv = dev;

	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);

	mlx5_ib_rep_register_vf_vports(dev);
}

void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;

	mlx5_ib_rep_unregister_vf_vports(dev);
	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB);
}

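/*
 * Thin wrappers exposing e-switch core helpers to the rest of the
 * mlx5_ib driver.
 */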
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_mode(esw);
}

struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
}

struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
}

struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
}

struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
{
	return mlx5_eswitch_vport_rep(esw, vport);
}

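/*
 * For a representor device, add an e-switch flow rule that forwards
 * traffic transmitted from the given raw packet QP's SQ to the vport
 * this rep represents.  No-op for non-representor devices.
 */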
int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
			      struct mlx5_ib_sq *sq)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;

	if (!dev->rep)
		return 0;

	flow_rule =
		mlx5_eswitch_add_send_to_vport_rule(esw,
						    dev->rep->vport,
						    sq->base.mqp.qpn);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	sq->flow_rule = flow_rule;

	return 0;
}