#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = ib_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

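/*
 * Freeing an AH is deferred: the kref release below only moves it to the
 * dead_ahs list, and the periodic reaper destroys it once the send queue
 * has advanced past ah->last_send (see __ipoib_reap_ah()).
 */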
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

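/*
 * UD receive buffers: when the IB MTU is too large for a single linear
 * buffer (ipoib_ud_need_sg()), each ring entry uses two DMA mappings --
 * a linear head of IPOIB_UD_HEAD_SIZE bytes plus one full page fragment.
 * Otherwise a single IPOIB_UD_BUF_SIZE() mapping is used.
 */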
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
				    DMA_FROM_DEVICE);
		ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
				  DMA_FROM_DEVICE);
	} else
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
}

static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
				   struct sk_buff *skb,
				   unsigned int length)
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
		unsigned int size;

		/*
		 * The linear head always holds the first IPOIB_UD_HEAD_SIZE
		 * bytes; whatever remains of the payload lives in the single
		 * page fragment.
		 */
		skb->tail += IPOIB_UD_HEAD_SIZE;
		skb->len  += length;

		size = length - IPOIB_UD_HEAD_SIZE;

		skb_frag_size_set(frag, size);
		skb->data_len += size;
		skb->truesize += PAGE_SIZE;
	} else
		skb_put(skb, length);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	int tailroom;
	u64 *mapping;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		buf_size = IPOIB_UD_HEAD_SIZE;
		tailroom = 128;	/* reserve some tailroom for IP/TCP headers */
	} else {
		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
		tailroom = 0;
	}

	skb = dev_alloc_skb(buf_size + tailroom + 4);
	if (unlikely(!skb))
		return NULL;

	/*
	 * Incoming packets carry a 40 byte GRH followed by the 4 byte
	 * IPoIB header; reserving 4 bytes here leaves the IP header that
	 * follows them 16 byte aligned.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		struct page *page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		mapping[1] =
			ib_dma_map_page(priv->ca, page,
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
			goto partial_error;
	}

	priv->rx_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

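/*
 * Handle one UD receive completion: a fresh buffer is allocated for the
 * ring slot before the received skb is handed up the stack, and the slot
 * is reposted at the end, so the receive ring never runs dry.
 */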
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);
	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
	    likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

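/*
 * Map an outgoing skb for DMA.  tx_req->mapping[] holds the linear head
 * (if any) in slot 0 followed by one entry per page fragment; on a
 * mapping failure everything mapped so far is unwound and -EIO returned.
 */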
static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   frag->page_offset, skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
				  DMA_TO_DEVICE);
	}
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

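/*
 * Poll up to MAX_SEND_CQE send completions in one pass.  Returns nonzero
 * when the CQ may still hold entries, so callers loop until it returns 0.
 */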
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

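/*
 * NAPI poll routine: receive (and connected-mode) completions are
 * consumed from the receive CQ up to the given budget.  When the budget
 * is not exhausted the CQ is re-armed with IB_CQ_REPORT_MISSED_EVENTS,
 * and polling restarts if completions arrived in the meantime.
 */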
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	napi_schedule(&priv->napi);
}

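/*
 * Send CQ handling is timer driven: ipoib_send_comp_handler() below only
 * kicks poll_timer, and drain_tx_cq() then reaps send completions under
 * the TX lock, re-arming the timer while the queue is still stopped.
 */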
static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		;

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}

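/*
 * Build the gather list for one UD send from the previously mapped head
 * and page fragments, then post either an LSO work request (when an LSO
 * header was supplied) or a plain send.
 */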
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	int i, off;
	struct sk_buff *skb = tx_req->skb;
	skb_frag_t *frags = skb_shinfo(skb)->frags;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u64 *mapping = tx_req->mapping;

	if (skb_headlen(skb)) {
		priv->tx_sge[0].addr   = mapping[0];
		priv->tx_sge[0].length = skb_headlen(skb);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr   = mapping[i + off];
		priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
	}
	priv->tx_wr.num_sge          = nr_frags + off;
	priv->tx_wr.wr_id            = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah         = address;

	if (head) {
		priv->tx_wr.wr.ud.mss    = skb_shinfo(skb)->gso_size;
		priv->tx_wr.wr.ud.header = head;
		priv->tx_wr.wr.ud.hlen   = hlen;
		priv->tx_wr.opcode       = IB_WR_LSO;
	} else
		priv->tx_wr.opcode       = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

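/*
 * For GSO skbs the IPoIB + TCP/IP headers are passed to the HCA
 * separately as the LSO header (phead/hlen below) and pulled off the
 * linear data; non-GSO packets larger than the multicast MTU are
 * dropped and handed to the connected-mode too-long handler.
 */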
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address->ah, qpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			;
}

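/*
 * Reap address handles that are no longer referenced by outstanding
 * sends: an AH on dead_ahs may be destroyed once tx_tail has advanced
 * past the last send queued with it.
 */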
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after the device has been brought down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		;

	local_bh_enable();
}

int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize in
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) dev);

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

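/*
 * Flush levels: LIGHT invalidates cached paths and flushes the multicast
 * group list, NORMAL additionally takes the IB side of the device down,
 * and HEAVY (used e.g. when the P_Key index changes) also stops and
 * reopens the QP.  Child (VLAN) interfaces are flushed first.
 */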
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			if (ipoib_pkey_dev_delay_open(dev))
				return;
		}

		/* restart QP only if P_Key index is changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(dev);
		ipoib_mcast_dev_flush(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key assignment support: if the interface's P_Key is not yet
 * present in the local port's P_Key table, bringing the device up is
 * deferred and the table is polled periodically until the P_Key appears.
 */
void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/*
	 * Look for the interface's P_Key value in the port's P_Key table
	 * and set the IPOIB_PKEY_ASSIGNED flag accordingly.
	 */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}