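/*
 * PIO send-buffer allocation, disarm, and head-of-line (HoL) timer
 * handling for the QLogic InfiniBand (qib) driver.
 */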
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "qib.h"

static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
		 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
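
/*
 * qib_disarm_piobufs - disarm a range of PIO send buffers
 * @dd: the qlogic_ib device
 * @first: the first buffer number to disarm
 * @cnt: how many buffers to disarm
 *
 * Issue a DISARM to the chip for each buffer in [first, first + cnt) and
 * clear any pending need-disarm state for those buffers.
 */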
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned i;
	unsigned last;

	last = first + cnt;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = first; i < last; i++) {
		__clear_bit(i, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
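
/*
 * Called by a user process when it sees the DISARM_BUFS event bit set in
 * its event mask; disarms any of the context's buffers that are still
 * marked as needing a disarm.
 */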
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned i;
	unsigned last;
	unsigned n = 0;

	last = rcd->pio_base + rcd->piocnt;
	/*
	 * No need for uctxt_lock here, since the user has called in to us.
	 * Clear the disarm event bits first, in case more interrupts set
	 * bits while we are disarming.
	 */
	if (rcd->user_event_mask) {
		/*
		 * subctxt_cnt is 0 if not shared, so do base separately,
		 * first, then remaining subctxt, if any
		 */
		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
		for (i = 1; i < rcd->subctxt_cnt; i++)
			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				  &rcd->user_event_mask[i]);
	}
	spin_lock_irq(&dd->pioavail_lock);
	for (i = rcd->pio_base; i < last; i++) {
		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
			n++;
			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
		}
	}
	spin_unlock_irq(&dd->pioavail_lock);
	return 0;
}
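
/* Return the port whose SDMA engine owns send buffer i, or NULL if none. */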
static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		ppd = dd->pport + pidx;
		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return ppd;
	}
	return NULL;
}
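
/*
 * Return 1 if send buffer bufn is in use by a user context; as a side
 * effect, set the DISARM_BUFS event bit for that context (and all of its
 * subcontexts) so the buffers are disarmed again before reuse.
 */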
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&dd->uctxt_lock);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		rcd = dd->rcd[ctxt];
		if (!rcd || bufn < rcd->pio_base ||
		    bufn >= rcd->pio_base + rcd->piocnt)
			continue;
		if (rcd->user_event_mask) {
			int i;

			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&dd->uctxt_lock);

	return ret;
}
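
/*
 * Disarm a set of send buffers.  If a buffer might still be actively
 * written to (by the kernel or a user process), mark it to be disarmed
 * later, once the write has finished.
 *
 * Intended to be called from the IRQ error handler.
 */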
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
			    unsigned cnt)
{
	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
	unsigned i;
	unsigned long flags;

	for (i = 0; i < dd->num_pports; i++)
		pppd[i] = NULL;

	for (i = 0; i < cnt; i++) {
		int which;

		if (!test_bit(i, mask))
			continue;
		/*
		 * If the buffer is owned by the SDMA hardware, defer to
		 * qib_cancel_sends() for that port (done below).
		 */
		ppd = is_sdma_buf(dd, i);
		if (ppd) {
			pppd[ppd->port] = ppd;
			continue;
		}
		/*
		 * If the kernel is writing the buffer or the buffer is
		 * owned by a user process, we can't disarm it yet.
		 */
		spin_lock_irqsave(&dd->pioavail_lock, flags);
		if (test_bit(i, dd->pio_writing) ||
		    (!test_bit(i << 1, dd->pioavailkernel) &&
		     find_ctxt(dd, i))) {
			__set_bit(i, dd->pio_need_disarm);
			which = 0;
		} else {
			which = 1;
			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
		}
		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
	}

	/* do cancel_sends once per port that had sdma piobufs in error */
	for (i = 0; i < dd->num_pports; i++)
		if (pppd[i])
			qib_cancel_sends(pppd[i]);
}
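
/*
 * update_send_bufs - refresh the shadow copy of the PIO availability bits
 * @dd: the qlogic_ib device
 *
 * Called whenever our local (shadow) copy indicates we have run out of
 * send buffers; re-reads the DMA'd copy written by the chip.
 */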
static void update_send_bufs(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = dd->pioavregs;

	/*
	 * For each availability register, update the shadow busy bits of
	 * any buffer whose generation (check) bit differs from the DMA'd
	 * copy.  Only kernel-owned buffers are considered.  The lock is
	 * taken so that concurrent callers cannot produce conflicting
	 * shadow updates; only the shadow is modified here.
	 */
	if (!dd->pioavailregs_dma)
		return;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
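
/*
 * Bookkeeping when no PIO buffers are available: force a shadow update on
 * the next allocation attempt and bump the no-buffers statistic.
 */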
static noinline void no_send_bufs(struct qib_devdata *dd)
{
	dd->upd_pio_shadow = 1;

	/* not atomic, but losing an occasional stat count is acceptable */
	qib_stats.sps_nopiobufs++;
}
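
/*
 * Common code for normal driver PIO send buffer allocation, and reserved
 * allocation.
 *
 * Marks the chosen buffer busy in the shadow and notes that it is being
 * written to.  Returns the chip address of the buffer, or NULL if none is
 * available in the range [first, last].
 */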
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
				  u32 first, u32 last)
{
	unsigned i, j, updated = 0;
	unsigned nbufs;
	unsigned long flags;
	unsigned long *shadow = dd->pioavailshadow;
	u32 __iomem *buf;

	if (!(dd->flags & QIB_PRESENT))
		return NULL;

	nbufs = last - first + 1;
	if (dd->upd_pio_shadow) {
update_shadow:
		/*
		 * If we had no buffers on the last call, start out by doing
		 * the update; continue and do the scan even if no buffers
		 * were updated, to be safe.
		 */
		update_send_bufs(dd);
		updated++;
	}
	i = first;
	/*
	 * The __test_and_set_bit() and __change_bit() below are done as a
	 * pair under the lock, since the pair itself is not atomic.
	 */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	if (dd->last_pio >= first && dd->last_pio <= last)
		i = dd->last_pio + 1;
	if (!first)
		/* adjust to min possible */
		nbufs = last - dd->min_kernel_pio + 1;
	for (j = 0; j < nbufs; j++, i++) {
		if (i > last)
			i = !first ? dd->min_kernel_pio : first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		/* remember that the buffer can be written to now */
		__set_bit(i, dd->pio_writing);
		if (!first && first != last)
			dd->last_pio = i;
		break;
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	if (j == nbufs) {
		if (!updated)
			/*
			 * First time through; shadow exhausted, but there
			 * may be buffers available: update, then rescan.
			 */
			goto update_shadow;
		no_send_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->piobcnt2k)
			buf = (u32 __iomem *)(dd->pio2kbase +
				i * dd->palign);
		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
			buf = (u32 __iomem *)(dd->pio4kbase +
				(i - dd->piobcnt2k) * dd->align4k);
		else
			buf = (u32 __iomem *)(dd->piovl15base +
				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
				dd->align4k);
		if (pbufnum)
			*pbufnum = i;
		dd->upd_pio_shadow = 0;
	}

	return buf;
}
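
/*
 * Record that the caller has finished writing to buffer n, so we don't
 * disarm it while it is being written, and disarm it now if needed.
 */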
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
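
/*
 * qib_chg_pioavailkernel - change which send buffers are available to the kernel
 * @dd: the qlogic_ib device
 * @start: the first send buffer to change
 * @len: the number of send buffers
 * @avail: nonzero if the buffers become available for kernel use, zero otherwise
 * @rcd: context pointer, passed through to the chip-specific txchk_change handler
 */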
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
			    unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
	unsigned long flags;
	unsigned end;
	unsigned ostart = start;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&dd->pioavail_lock, flags);

	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i;

			/*
			 * The BUSY bit will not be set, since user buffers
			 * are disarmed before being handed back to the
			 * kernel; but the generation (check) bit must be
			 * brought up to date in the shadow, since it may
			 * have changed many times while the buffer was
			 * allocated to the user.  The DMA'd copy is
			 * little-endian, so convert to host order first.
			 */
			i = start / BITS_PER_LONG;
			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
				    dd->pioavailshadow);
			dma = (unsigned long)
				le64_to_cpu(dd->pioavailregs_dma[i]);
			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
				      start) % BITS_PER_LONG, &dma))
				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
					  start, dd->pioavailshadow);
			else
				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->pioavailshadow);
			__set_bit(start, dd->pioavailkernel);
			if ((start >> 1) < dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		} else {
			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->pioavailshadow);
			__clear_bit(start, dd->pioavailkernel);
			if ((start >> 1) > dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		}
		start += 2;
	}

	if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
		dd->last_pio = dd->min_kernel_pio - 1;
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	dd->f_txchk_change(dd, ostart, len, avail, rcd);
}
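
/*
 * Flush all sends that might be in the ready-to-send state, as well as any
 * that are in the process of being sent.  Used whenever we need to be sure
 * the send side is flushed, e.g. after a link state change.  User contexts
 * on this port are told (via their event mask) to disarm their buffers
 * again before reusing them.
 */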
void qib_cancel_sends(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	unsigned ctxt;
	unsigned i;
	unsigned last;

	/*
	 * Walk the user contexts on this port, holding uctxt_lock so the
	 * rcd can't change out from under us.  pioavail_lock is taken
	 * separately for marking the buffers; the two locks are never held
	 * at the same time.
	 */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		rcd = dd->rcd[ctxt];
		if (rcd && rcd->ppd == ppd) {
			last = rcd->pio_base + rcd->piocnt;
			if (rcd->user_event_mask) {
				/*
				 * subctxt_cnt is 0 if not shared, so do base
				 * separately, first, then remaining subctxt,
				 * if any
				 */
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[0]);
				for (i = 1; i < rcd->subctxt_cnt; i++)
					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
						&rcd->user_event_mask[i]);
			}
			i = rcd->pio_base;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			spin_lock_irqsave(&dd->pioavail_lock, flags);
			for (; i < last; i++)
				__set_bit(i, dd->pio_need_disarm);
			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
		} else
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	}

	if (!(dd->flags & QIB_HAS_SEND_DMA))
		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
				    QIB_SENDCTRL_FLUSH);
}
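
/*
 * Force an update of the in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.
 */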
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}
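
/*
 * Head-of-line handling when the link is not up: stop pending sends so
 * they don't back up while user processes are suspended.
 */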
void qib_hol_down(struct qib_pportdata *ppd)
{
	/*
	 * Cancel pending sends when the link goes down, unless IB
	 * autonegotiation is in progress (which brings the link down
	 * deliberately).
	 */
	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		qib_cancel_sends(ppd);
}
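
/*
 * Link has gone to INIT (or is coming back up): record the state and arm
 * the HoL timer, which keeps cancelling sends until the link is fully up.
 */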
void qib_hol_init(struct qib_pportdata *ppd)
{
	if (ppd->hol_state != QIB_HOL_INIT) {
		ppd->hol_state = QIB_HOL_INIT;
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}
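
/*
 * Link is up: record the state so the HoL timer stops rearming and sends
 * can proceed normally again.
 */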
void qib_hol_up(struct qib_pportdata *ppd)
{
	ppd->hol_state = QIB_HOL_UP;
}
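
/*
 * This is only called via the hol_timer.
 */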
void qib_hol_event(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	/* If hardware error, etc, skip. */
	if (!(ppd->dd->flags & QIB_INITTED))
		return;

	if (ppd->hol_state != QIB_HOL_UP) {
		/*
		 * Link is still not up: keep cancelling sends and rearm the
		 * timer so we check again after another timeout period.
		 */
		qib_hol_down(ppd);
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}