#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include <net/switchdev.h>
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
                                     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
                                   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
        .ndo_open = lio_vf_rep_open,
        .ndo_stop = lio_vf_rep_stop,
        .ndo_start_xmit = lio_vf_rep_pkt_xmit,
        .ndo_tx_timeout = lio_vf_rep_tx_timeout,
        .ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
        .ndo_get_stats64 = lio_vf_rep_get_stats64,
        .ndo_change_mtu = lio_vf_rep_change_mtu,
};

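/* Completion callback for representor control soft commands: clear the
 * response status word unless the request timed out, then wake up the
 * sender waiting on the context's completion.
 */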
static void
lio_vf_rep_send_sc_complete(struct octeon_device *oct,
                            u32 status, void *ptr)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
        struct lio_vf_rep_sc_ctx *ctx =
                (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
        struct lio_vf_rep_resp *resp =
                (struct lio_vf_rep_resp *)sc->virtrptr;

        if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
                WRITE_ONCE(resp->status, 0);

        complete(&ctx->complete);
}

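/* Send a representor control request to the firmware on IQ 0 and wait, with
 * a timeout, for its completion; any response payload is copied back into
 * @resp.  Returns 0 on success, non-zero on failure.
 */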
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
                             void *req, int req_size,
                             void *resp, int resp_size)
{
        int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
        int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
        struct octeon_soft_command *sc = NULL;
        struct lio_vf_rep_resp *rep_resp;
        struct lio_vf_rep_sc_ctx *ctx;
        void *sc_req;
        int err;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, req_size,
                                          tot_resp_size, ctx_size);
        if (!sc)
                return -ENOMEM;

        ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
        memset(ctx, 0, ctx_size);
        init_completion(&ctx->complete);

        sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
        memcpy(sc_req, req, req_size);

        rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
        memset(rep_resp, 0, tot_resp_size);
        WRITE_ONCE(rep_resp->status, 1);

        sc->iq_no = 0;
        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
        sc->callback = lio_vf_rep_send_sc_complete;
        sc->callback_arg = sc;
        sc->wait_time = LIO_VF_REP_REQ_TMO_MS;

        err = octeon_send_soft_command(oct, sc);
        if (err == IQ_SEND_FAILED)
                goto free_buff;

        wait_for_completion_timeout(&ctx->complete,
                                    msecs_to_jiffies(2 * LIO_VF_REP_REQ_TMO_MS));
        err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
        if (err)
                dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");

        if (resp)
                memcpy(resp, (rep_resp + 1), resp_size);
free_buff:
        octeon_free_soft_command(oct, sc);

        return err;
}

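/* ndo_open: ask the firmware to set the VF representor state to UP, then
 * mark the interface running and start the transmit queue.
 */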
static int
lio_vf_rep_open(struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);

        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "VF_REP open failed with err %d\n", ret);
                return -EIO;
        }

        atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
                                      LIO_IFSTATE_RUNNING));

        netif_carrier_on(ndev);
        netif_start_queue(ndev);

        return 0;
}

static int
lio_vf_rep_stop(struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);

        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "VF_REP dev stop failed with err %d\n", ret);
                return -EIO;
        }

        atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
                                      ~LIO_IFSTATE_RUNNING));

        netif_tx_disable(ndev);
        netif_carrier_off(ndev);

        return 0;
}

static void
lio_vf_rep_tx_timeout(struct net_device *ndev)
{
        netif_trans_update(ndev);

        netif_wake_queue(ndev);
}

static void
lio_vf_rep_get_stats64(struct net_device *dev,
                       struct rtnl_link_stats64 *stats64)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

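        /* The tx/rx swap is intentional: frames transmitted through the
         * representor are received by the VF and vice versa, so the cached
         * per-VF counters are reported mirrored here.
         */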
        stats64->tx_packets = vf_rep->stats.rx_packets;
        stats64->tx_bytes = vf_rep->stats.rx_bytes;
        stats64->tx_dropped = vf_rep->stats.rx_dropped;

        stats64->rx_packets = vf_rep->stats.tx_packets;
        stats64->rx_bytes = vf_rep->stats.tx_bytes;
        stats64->rx_dropped = vf_rep->stats.tx_dropped;
}

static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);
        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "Change MTU failed with err %d\n", ret);
                return -EIO;
        }

        ndev->mtu = new_mtu;

        return 0;
}

static int
lio_vf_rep_phys_port_name(struct net_device *dev,
                          char *buf, size_t len)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
        struct octeon_device *oct = vf_rep->oct;
        int ret;

        ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
                       vf_rep->ifidx - oct->pf_num * 64 - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}

static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
        int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
        int vfid_mask = max_vfs - 1;

        if (ifidx <= oct->pf_num * max_vfs ||
            ifidx >= oct->pf_num * max_vfs + max_vfs)
                return NULL;

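        /* ifidx values are firmware-wide: lio_vf_rep_create() assigns
         * (pf_num * 64) + vf + 1, so masking off the PF portion and
         * subtracting one yields the index into vf_rep_list.
         */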
        vf_id = (ifidx & vfid_mask) - 1;

        return oct->vf_rep_list.ndev[vf_id];
}

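/* Copy a received buffer into the skb handed up the stack.  Small frames are
 * copied entirely into the linear area; larger ones get MIN_SKB_SIZE bytes
 * copied and the remainder attached as a page fragment.
 */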
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
                       struct sk_buff *skb,
                       int len)
{
        if (likely(len > MIN_SKB_SIZE)) {
                struct octeon_skb_page_info *pg_info;
                unsigned char *va;

                pg_info = ((struct octeon_skb_page_info *)(skb->cb));
                if (pg_info->page) {
                        va = page_address(pg_info->page) +
                                pg_info->page_offset;
                        memcpy(skb->data, va, MIN_SKB_SIZE);
                        skb_put(skb, MIN_SKB_SIZE);
                }

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                pg_info->page,
                                pg_info->page_offset + MIN_SKB_SIZE,
                                len - MIN_SKB_SIZE,
                                LIO_RXBUFFER_SZ);
        } else {
                struct octeon_skb_page_info *pg_info =
                        ((struct octeon_skb_page_info *)(skb->cb));

                skb_copy_to_linear_data(skb, page_address(pg_info->page) +
                                        pg_info->page_offset, len);
                skb_put(skb, len);
                put_page(pg_info->page);
        }
}

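/* Dispatch handler for OPCODE_NIC_VF_REP_PKT: take a packet the firmware
 * forwarded on behalf of a VF and inject it into the matching representor
 * netdev via netif_rx().
 */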
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *vf_ndev;
        struct octeon_device *oct;
        union octeon_rh *rh;
        struct sk_buff *skb;
        int i, ifidx;

        oct = lio_get_device(recv_pkt->octeon_id);
        if (!oct)
                goto free_buffers;

        skb = recv_pkt->buffer_ptr[0];
        rh = &recv_pkt->rh;
        ifidx = rh->r.ossp;

        vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
        if (!vf_ndev)
                goto free_buffers;

        vf_rep = netdev_priv(vf_ndev);
        if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
            recv_pkt->buffer_count > 1)
                goto free_buffers;

        skb->dev = vf_ndev;

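        /* Multi-buffer packets are dropped above, so only buffer_size[0]
         * is valid here.
         */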
        lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

        skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
        skb->protocol = eth_type_trans(skb, skb->dev);
        skb->ip_summed = CHECKSUM_NONE;

        netif_rx(skb);

        octeon_free_recv_info(recv_info);

        return 0;

free_buffers:
        for (i = 0; i < recv_pkt->buffer_count; i++)
                recv_buffer_free(recv_pkt->buffer_ptr[i]);

        octeon_free_recv_info(recv_info);

        return 0;
}

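/* TX completion callback: unmap the frame, free the skb and the soft command,
 * and wake the representor queue if the instruction queue has room again.
 */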
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
                                u32 status, void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct sk_buff *skb = sc->ctxptr;
        struct net_device *ndev = skb->dev;
        u32 iq_no = sc->iq_no;

        dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                         sc->datasize, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
        octeon_free_soft_command(oct, sc);

        if (octnet_iq_is_full(oct, iq_no))
                return;

        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
}

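/* ndo_start_xmit: forward a frame received on the representor to its VF by
 * sending it to the firmware over the parent PF netdev's instruction queue.
 */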
static int
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct net_device *parent_ndev = vf_rep->parent_ndev;
        struct octeon_device *oct = vf_rep->oct;
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_soft_command *sc;
        struct lio *parent_lio;
        int status;

        parent_lio = GET_LIO(parent_ndev);

        if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
            skb->len <= 0)
                goto xmit_failed;

        if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
                dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        /* Multiple (fragmented) buffers are not used for vf_rep packets. */
        if (skb_shinfo(skb)->nr_frags != 0) {
                dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
                goto xmit_failed;
        }

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, 0, 0, 0);
        if (!sc) {
                dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
                goto xmit_failed;
        }

        sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
                                     skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
                dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
                octeon_free_soft_command(oct, sc);
                goto xmit_failed;
        }

        sc->virtdptr = skb->data;
        sc->datasize = skb->len;
        sc->ctxptr = skb;
        sc->iq_no = parent_lio->txq;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
                                    vf_rep->ifidx, 0, 0);
        pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
        pki_ih3->tagtype = ORDERED_TAG;

        sc->callback = lio_vf_rep_packet_sent_callback;
        sc->callback_arg = sc;

        status = octeon_send_soft_command(oct, sc);
        if (status == IQ_SEND_FAILED) {
                dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                                 sc->datasize, DMA_TO_DEVICE);
                octeon_free_soft_command(oct, sc);
                goto xmit_failed;
        }

        if (status == IQ_SEND_STOP)
                netif_stop_queue(ndev);

        netif_trans_update(ndev);

        return NETDEV_TX_OK;

xmit_failed:
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

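/* switchdev: report the parent PF's MAC address as the port parent id, so all
 * representors of the same eswitch expose a common
 * SWITCHDEV_ATTR_ID_PORT_PARENT_ID.
 */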
static int
lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
        struct net_device *parent_ndev = vf_rep->parent_ndev;
        struct lio *lio = GET_LIO(parent_ndev);

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = ETH_ALEN;
                ether_addr_copy(attr->u.ppid.id,
                                (void *)&lio->linfo.hw_addr + 2);
                break;

        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
        .switchdev_port_attr_get = lio_vf_rep_attr_get,
};

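/* Delayed work: periodically query per-VF statistics from the firmware and
 * cache them in vf_rep->stats for ndo_get_stats64, then reschedule itself.
 */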
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
        struct lio_vf_rep_stats stats;
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
        rep_cfg.ifidx = vf_rep->ifidx;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
                                           &stats, sizeof(stats));

        if (!ret) {
                octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
                memcpy(&vf_rep->stats, &stats, sizeof(stats));
        }

        schedule_delayed_work(&vf_rep->stats_wk.work,
                              msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

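/* Create one representor netdev per allocated VF and register the dispatch
 * handler for packets the firmware forwards from VFs.  Only effective when
 * the eswitch is in switchdev mode and SR-IOV is enabled.
 */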
int
lio_vf_rep_create(struct octeon_device *oct)
{
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *ndev;
        int i, num_vfs;

        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return 0;

        if (!oct->sriov_info.sriov_enabled)
                return 0;

        num_vfs = oct->sriov_info.num_vfs_alloced;

        oct->vf_rep_list.num_vfs = 0;
        for (i = 0; i < num_vfs; i++) {
                ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

                if (!ndev) {
                        dev_err(&oct->pci_dev->dev,
                                "VF rep device %d creation failed\n", i);
                        goto cleanup;
                }

                ndev->min_mtu = LIO_MIN_MTU_SIZE;
                ndev->max_mtu = LIO_MAX_MTU_SIZE;
                ndev->netdev_ops = &lio_vf_rep_ndev_ops;
                SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);

                vf_rep = netdev_priv(ndev);
                memset(vf_rep, 0, sizeof(*vf_rep));

                vf_rep->ndev = ndev;
                vf_rep->oct = oct;
                vf_rep->parent_ndev = oct->props[0].netdev;
                vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

                eth_hw_addr_random(ndev);

                if (register_netdev(ndev)) {
                        dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");

                        free_netdev(ndev);
                        goto cleanup;
                }

                netif_carrier_off(ndev);

                INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
                                  lio_vf_rep_fetch_stats);
                vf_rep->stats_wk.ctxptr = (void *)vf_rep;
                schedule_delayed_work(&vf_rep->stats_wk.work,
                                      msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
                oct->vf_rep_list.num_vfs++;
                oct->vf_rep_list.ndev[i] = ndev;
        }

        if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
                                        OPCODE_NIC_VF_REP_PKT,
                                        lio_vf_rep_pkt_recv, oct)) {
                dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");

                goto cleanup;
        }

        return 0;

cleanup:
        for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
                ndev = oct->vf_rep_list.ndev[i];
                oct->vf_rep_list.ndev[i] = NULL;
                if (ndev) {
                        vf_rep = netdev_priv(ndev);
                        cancel_delayed_work_sync(&vf_rep->stats_wk.work);
                        unregister_netdev(ndev);
                        free_netdev(ndev);
                }
        }

        oct->vf_rep_list.num_vfs = 0;

        return -1;
}

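/* Tear down all representor netdevs; the reverse of lio_vf_rep_create(). */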
void
lio_vf_rep_destroy(struct octeon_device *oct)
{
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *ndev;
        int i;

        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return;

        if (!oct->sriov_info.sriov_enabled)
                return;

        for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
                ndev = oct->vf_rep_list.ndev[i];
                oct->vf_rep_list.ndev[i] = NULL;
                if (ndev) {
                        vf_rep = netdev_priv(ndev);
                        cancel_delayed_work_sync(&vf_rep->stats_wk.work);
                        netif_tx_disable(ndev);
                        netif_carrier_off(ndev);

                        unregister_netdev(ndev);
                        free_netdev(ndev);
                }
        }

        oct->vf_rep_list.num_vfs = 0;
}

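/* netdevice notifier: when a representor is registered or renamed, push the
 * interface name to the firmware via a LIO_VF_REP_REQ_DEVNAME request.
 */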
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
                        unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct lio_vf_rep_desc *vf_rep;
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_CHANGENAME:
                break;

        default:
                return NOTIFY_DONE;
        }

        if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
                return NOTIFY_DONE;

        vf_rep = netdev_priv(ndev);
        oct = vf_rep->oct;

        if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
                dev_err(&oct->pci_dev->dev,
                        "Device name change sync failed as the size is > %d\n",
                        LIO_IF_NAME_SIZE);
                return NOTIFY_DONE;
        }

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
        rep_cfg.ifidx = vf_rep->ifidx;
        strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);
        if (ret)
                dev_err(&oct->pci_dev->dev,
                        "vf_rep netdev name change failed with err %d\n", ret);

        return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
        .notifier_call = lio_vf_rep_netdev_event,
};

int
lio_vf_rep_modinit(void)
{
        if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
                pr_err("netdev notifier registration failed\n");
                return -EFAULT;
        }

        return 0;
}

void
lio_vf_rep_modexit(void)
{
        if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
                pr_err("netdev notifier unregister failed\n");
}