// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */
6#include <linux/mlx5/vport.h>
7#include "ib_rep.h"
8#include "srq.h"
9
10static int
11mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
12{
13 struct mlx5_ib_dev *ibdev;
14 int vport_index;
15
16 ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
17 vport_index = rep->vport_index;
18
19 ibdev->port[vport_index].rep = rep;
20 rep->rep_data[REP_IB].priv = ibdev;
21 write_lock(&ibdev->port[vport_index].roce.netdev_lock);
22 ibdev->port[vport_index].roce.netdev =
23 mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
24 write_unlock(&ibdev->port[vport_index].roce.netdev_lock);
25
26 return 0;
27}
28
29static int
30mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
31{
32 int num_ports = mlx5_eswitch_get_total_vports(dev);
33 const struct mlx5_ib_profile *profile;
34 struct mlx5_ib_dev *ibdev;
35 int vport_index;
36
37 if (rep->vport == MLX5_VPORT_UPLINK)
38 profile = &uplink_rep_profile;
39 else
40 return mlx5_ib_set_vport_rep(dev, rep);
41
42 ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
43 if (!ibdev)
44 return -ENOMEM;
45
46 ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
47 GFP_KERNEL);
48 if (!ibdev->port) {
49 ib_dealloc_device(&ibdev->ib_dev);
50 return -ENOMEM;
51 }
52
53 ibdev->is_rep = true;
54 vport_index = rep->vport_index;
55 ibdev->port[vport_index].rep = rep;
56 ibdev->port[vport_index].roce.netdev =
57 mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
58 ibdev->mdev = dev;
59 ibdev->num_ports = num_ports;
60
61 if (!__mlx5_ib_add(ibdev, profile))
62 return -EINVAL;
63
64 rep->rep_data[REP_IB].priv = ibdev;
65
66 return 0;
67}
68
69static void
70mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
71{
72 struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
73 struct mlx5_ib_port *port;
74
75 port = &dev->port[rep->vport_index];
76 write_lock(&port->roce.netdev_lock);
77 port->roce.netdev = NULL;
78 write_unlock(&port->roce.netdev_lock);
79 rep->rep_data[REP_IB].priv = NULL;
80 port->rep = NULL;
81
82 if (rep->vport == MLX5_VPORT_UPLINK)
83 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
84}
85
/* eswitch .get_proto_dev callback: expose the rep's IB device as void *. */
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *ibdev = mlx5_ib_rep_to_dev(rep);

	return ibdev;
}
90
/* Callbacks the eswitch core invokes to manage IB representors. */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5_ib_vport_rep_load,
	.unload = mlx5_ib_vport_rep_unload,
	.get_proto_dev = mlx5_ib_vport_get_proto_dev,
};
96
97void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
98{
99 struct mlx5_eswitch *esw = mdev->priv.eswitch;
100
101 mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
102}
103
104void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
105{
106 struct mlx5_eswitch *esw = mdev->priv.eswitch;
107
108 mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
109}
110
111u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
112{
113 return mlx5_eswitch_mode(esw);
114}
115
116struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
117 u16 vport_num)
118{
119 return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_IB);
120}
121
122struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
123 u16 vport_num)
124{
125 return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
126}
127
128struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
129{
130 return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
131}
132
133struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
134 u16 vport_num)
135{
136 return mlx5_eswitch_vport_rep(esw, vport_num);
137}
138
139struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
140 struct mlx5_ib_sq *sq,
141 u16 port)
142{
143 struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
144 struct mlx5_eswitch_rep *rep;
145
146 if (!dev->is_rep || !port)
147 return NULL;
148
149 if (!dev->port[port - 1].rep)
150 return ERR_PTR(-EINVAL);
151
152 rep = dev->port[port - 1].rep;
153
154 return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
155 sq->base.mqp.qpn);
156}
157