#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "ipoib.h"

#define IB_DEFAULT_Q_KEY   0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);

static const struct net_device_ops mlx5i_netdev_ops = {
	.ndo_open                = mlx5i_open,
	.ndo_stop                = mlx5i_close,
	.ndo_init                = mlx5i_dev_init,
	.ndo_uninit              = mlx5i_dev_cleanup,
	.ndo_change_mtu          = mlx5i_change_mtu,
	.ndo_do_ioctl            = mlx5i_ioctl,
};

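/* Override generic mlx5e NIC params with IPoIB-specific values */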
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
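	/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */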
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);

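	/* RQ size in ipoib by default is 512 (2^MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE) */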
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

	params->lro_en = false;
	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
}

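/* Called directly after IPoIB netdevice was created to initialize SW structs */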
void mlx5i_init(struct mlx5_core_dev *mdev,
		struct net_device *netdev,
		const struct mlx5e_profile *profile,
		void *ppriv)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	u16 max_mtu;

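	/* priv init */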
	priv->mdev        = mdev;
	priv->netdev      = netdev;
	priv->profile     = profile;
	priv->ppriv       = ppriv;
	mutex_init(&priv->state_lock);

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->mtu = max_mtu;

	mlx5e_build_nic_params(mdev, &priv->channels.params,
			       profile->max_nch(mdev), netdev->mtu);
	mlx5i_build_nic_params(mdev, &priv->channels.params);

	mlx5e_timestamp_init(priv);

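	/* netdev init */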
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;
	netdev->hw_features    |= NETIF_F_RXHASH;

	netdev->netdev_ops  = &mlx5i_netdev_ops;
	netdev->ethtool_ops = &mlx5i_ethtool_ops;
}

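/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */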
static void mlx5i_cleanup(struct mlx5e_priv *priv)
{
	/* Do nothing .. */
}

int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5i_priv *ipriv = priv->ppriv;
	struct mlx5_core_qp *qp = &ipriv->qp;
	struct mlx5_qp_context *context;
	int ret;

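	/* Walk the underlay QP through the RST -> INIT -> RTR -> RTS transitions */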
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	context->pri_path.port = 1;
	context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
	context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);

	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
	if (ret) {
		mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
		goto err_qp_modify_to_err;
	}
	memset(context, 0, sizeof(*context));

	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
	if (ret) {
		mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
		goto err_qp_modify_to_err;
	}

	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
	if (ret) {
		mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
		goto err_qp_modify_to_err;
	}

	kfree(context);
	return 0;

err_qp_modify_to_err:
	/* context is already a pointer; do not take its address here */
	mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, context, qp);
	kfree(context);
	return ret;
}

void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_qp_context context;
	int err;

	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
				  &ipriv->qp);
	if (err)
		mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
}

#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

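/* Create the underlay UD QP that carries all IPoIB traffic for this netdev */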
int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
	u32 *in = NULL;
	void *addr_path;
	int ret = 0;
	int inlen;
	void *qpc;

	inlen = MLX5_ST_SZ_BYTES(create_qp_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
		 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

	addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
	MLX5_SET(ads, addr_path, vhca_port_num, 1);
	MLX5_SET(ads, addr_path, grh, 1);

	ret = mlx5_core_create_qp(mdev, qp, in, inlen);
	if (ret) {
		mlx5_core_err(mdev, "Failed creating IPoIB QP err: %d\n", ret);
		goto out;
	}

out:
	kvfree(in);
	return ret;
}

void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
	mlx5_core_destroy_qp(mdev, qp);
}

static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;
	int err;

	err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
	if (err) {
		mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
		goto err_destroy_underlay_qp;
	}

	return 0;

err_destroy_underlay_qp:
	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
	return err;
}

static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
}

static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EINVAL;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		/* ARFS failure is not fatal; just drop the NTUPLE feature */
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	mlx5e_set_inner_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	return 0;

err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
	mlx5e_arfs_destroy_tables(priv);
}

static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5i_create_flow_steering(priv);
	if (err)
		goto err_destroy_direct_tirs;

	return 0;

err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5i_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static const struct mlx5e_profile mlx5i_nic_profile = {
	.init              = mlx5i_init,
	.cleanup           = mlx5i_cleanup,
	.init_tx           = mlx5i_init_tx,
	.cleanup_tx        = mlx5i_cleanup_tx,
	.init_rx           = mlx5i_init_rx,
	.cleanup_rx        = mlx5i_cleanup_rx,
	.enable            = NULL, /* mlx5i_enable */
	.disable           = NULL, /* mlx5i_disable */
	.update_stats      = NULL, /* mlx5i_update_stats */
	.max_nch           = mlx5e_get_max_num_channels,
	.update_carrier    = NULL, /* no HW update in IB link */
	.rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
	.max_tc            = MLX5I_MAX_NUM_TC,
};

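/* mlx5i netdev NDOs */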
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *params;
	int err = 0;

	mutex_lock(&priv->state_lock);

	params = &priv->channels.params;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		params->sw_mtu = new_mtu;
		netdev->mtu = params->sw_mtu;
		goto out;
	}

	new_channels.params = *params;
	new_channels.params.sw_mtu = new_mtu;
	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
	netdev->mtu = new_channels.params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

int mlx5i_dev_init(struct net_device *dev)
{
	struct mlx5e_priv *priv  = mlx5i_epriv(dev);
	struct mlx5i_priv *ipriv = priv->ppriv;

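	/* Set dev address using underlay QP */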
	dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
	dev->dev_addr[2] = (ipriv->qp.qpn >>  8) & 0xff;
	dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;

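	/* Add QPN to net-device mapping to HT */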
	mlx5i_pkey_add_qpn(dev, ipriv->qp.qpn);

	return 0;
}

int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void mlx5i_dev_cleanup(struct net_device *dev)
{
	struct mlx5e_priv *priv  = mlx5i_epriv(dev);
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5i_uninit_underlay_qp(priv);

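	/* Delete QPN to net-device mapping from HT */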
	mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
}

static int mlx5i_open(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;
	int err;

	mutex_lock(&epriv->state_lock);

	set_bit(MLX5E_STATE_OPENED, &epriv->state);

	err = mlx5i_init_underlay_qp(epriv);
	if (err) {
		mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
	if (err) {
		mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
		goto err_reset_qp;
	}

	err = mlx5e_open_channels(epriv, &epriv->channels);
	if (err)
		goto err_remove_fs_underlay_qp;

	mlx5e_refresh_tirs(epriv, false);
	mlx5e_activate_priv_channels(epriv);

	mutex_unlock(&epriv->state_lock);
	return 0;

err_remove_fs_underlay_qp:
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
err_reset_qp:
	mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &epriv->state);
	mutex_unlock(&epriv->state_lock);
	return err;
}

static int mlx5i_close(struct net_device *netdev)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5_core_dev *mdev = epriv->mdev;

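	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close & open failed.
	 */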
	mutex_lock(&epriv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
		goto unlock;

	clear_bit(MLX5E_STATE_OPENED, &epriv->state);

	netif_carrier_off(epriv->netdev);
	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
	mlx5i_uninit_underlay_qp(epriv);
	mlx5e_deactivate_priv_channels(epriv);
	mlx5e_close_channels(&epriv->channels);
unlock:
	mutex_unlock(&epriv->state_lock);
	return 0;
}

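/* IPoIB RDMA netdev callbacks */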
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
			      union ib_gid *gid, u16 lid, int set_qkey,
			      u32 qkey)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev = epriv->mdev;
	struct mlx5i_priv *ipriv = epriv->ppriv;
	int err;

	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn,
		      gid->raw);
	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
	if (err)
		mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
			       ipriv->qp.qpn, gid->raw);

	if (set_qkey) {
		mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
			      netdev->name, qkey);
		ipriv->qkey = qkey;
	}

	return err;
}

static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
			      union ib_gid *gid, u16 lid)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5_core_dev *mdev = epriv->mdev;
	struct mlx5i_priv *ipriv = epriv->ppriv;
	int err;

	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn,
		      gid->raw);

	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
	if (err)
		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
			      ipriv->qp.qpn, gid->raw);

	return err;
}

static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
		      struct ib_ah *address, u32 dqpn)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(dev);
	struct mlx5e_txqsq *sq   = epriv->txq2sq[skb_get_queue_mapping(skb)];
	struct mlx5_ib_ah *mah   = to_mah(address);
	struct mlx5i_priv *ipriv = epriv->ppriv;

	return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
}

static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
{
	struct mlx5i_priv *ipriv = netdev_priv(netdev);

	ipriv->pkey_index = (u16)id;
}

static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
		mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void mlx5_rdma_netdev_free(struct net_device *netdev)
{
	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = priv->ppriv;
	const struct mlx5e_profile *profile = priv->profile;

	mlx5e_detach_netdev(priv);
	profile->cleanup(priv);
	destroy_workqueue(priv->wq);

	if (!ipriv->sub_interface) {
		mlx5i_pkey_qpn_ht_cleanup(netdev);
		mlx5e_destroy_mdev_resources(priv->mdev);
	}
}

struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *))
{
	const struct mlx5e_profile *profile;
	struct net_device *netdev;
	struct mlx5i_priv *ipriv;
	struct mlx5e_priv *epriv;
	struct rdma_netdev *rn;
	bool sub_interface;
	int nch;
	int err;

	if (mlx5i_check_required_hca_cap(mdev)) {
		mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

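	/* Heuristic: the parent interface allocates the shared PD, so a
	 * non-zero PD number means this is a child (pkey) sub-interface.
	 */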
	sub_interface = (mdev->mlx5e_res.pdn != 0);

	if (sub_interface)
		profile = mlx5i_pkey_get_profile();
	else
		profile = &mlx5i_nic_profile;

	nch = profile->max_nch(mdev);

	netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
				  name, NET_NAME_UNKNOWN,
				  setup,
				  nch * MLX5E_MAX_NUM_TC,
				  nch);
	if (!netdev) {
		mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
		return NULL;
	}

	ipriv = netdev_priv(netdev);
	epriv = mlx5i_epriv(netdev);

	epriv->wq = create_singlethread_workqueue("mlx5i");
	if (!epriv->wq)
		goto err_free_netdev;

	ipriv->sub_interface = sub_interface;
	if (!ipriv->sub_interface) {
		err = mlx5i_pkey_qpn_ht_init(netdev);
		if (err) {
			mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
			goto destroy_wq;
		}

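		/* This should only be called once per mdev */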
		err = mlx5e_create_mdev_resources(mdev);
		if (err)
			goto destroy_ht;
	}

	profile->init(mdev, netdev, profile, ipriv);

	mlx5e_attach_netdev(epriv);
	netif_carrier_off(netdev);

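	/* set rdma_netdev func pointers */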
	rn = &ipriv->rn;
	rn->hca  = ibdev;
	rn->send = mlx5i_xmit;
	rn->attach_mcast = mlx5i_attach_mcast;
	rn->detach_mcast = mlx5i_detach_mcast;
	rn->set_id       = mlx5i_set_pkey_index;

	netdev->priv_destructor = mlx5_rdma_netdev_free;
	netdev->needs_free_netdev = 1;

	return netdev;

destroy_ht:
	mlx5i_pkey_qpn_ht_cleanup(netdev);
destroy_wq:
	destroy_workqueue(epriv->wq);
err_free_netdev:
	free_netdev(netdev);

	return NULL;
}
EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);