#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "qib.h"

static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
		 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
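
/**
 * qib_disarm_piobufs - disarm a range of PIO send buffers
 * @dd: the qlogic_ib device
 * @first: the first PIO buffer to disarm
 * @cnt: the number of PIO buffers to disarm
 *
 * Disarm each buffer in the range and clear any deferred-disarm request
 * still pending for it.
 */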
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned i;
	unsigned last;

	last = first + cnt;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = first; i < last; i++) {
		__clear_bit(i, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
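
/*
 * Disarm any of this user context's send buffers that were flagged for a
 * deferred disarm, clearing the context's DISARM_BUFS event bits first so
 * a request raised while we run is not lost.
 */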
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned i;
	unsigned last;
	unsigned n = 0;

	last = rcd->pio_base + rcd->piocnt;
	/*
	 * Clear the event bits before scanning, so a new disarm request
	 * raised while we loop is not accidentally discarded.
	 */
	if (rcd->user_event_mask) {
		/*
		 * subctxt_cnt is 0 if the context is not shared, so handle
		 * the base context first, then any subcontexts.
		 */
		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
		for (i = 1; i < rcd->subctxt_cnt; i++)
			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				  &rcd->user_event_mask[i]);
	}
	spin_lock_irq(&dd->pioavail_lock);
	for (i = rcd->pio_base; i < last; i++) {
		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
			n++;
			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
		}
	}
	spin_unlock_irq(&dd->pioavail_lock);
	return 0;
}

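/*
 * Return the port whose SDMA engine owns send buffer i, or NULL if the
 * buffer is not an SDMA buffer.
 */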
static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		ppd = dd->pport + pidx;
		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return ppd;
	}
	return NULL;
}
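
/*
 * Return 1 if send buffer bufn belongs to a user context; as a side effect,
 * set that context's (and its subcontexts') DISARM_BUFS event bit so the
 * owner knows it must disarm its buffers.
 */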
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&dd->uctxt_lock);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		rcd = dd->rcd[ctxt];
		if (!rcd || bufn < rcd->pio_base ||
		    bufn >= rcd->pio_base + rcd->piocnt)
			continue;
		if (rcd->user_event_mask) {
			int i;

			/*
			 * subctxt_cnt is 0 if the context is not shared, so
			 * handle the base context first, then any
			 * subcontexts.
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&dd->uctxt_lock);

	return ret;
}
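
/*
 * Disarm a set of send buffers.  Buffers owned by the SDMA engine are
 * handled by cancelling sends on the owning port; buffers currently being
 * written, or owned by a user context, are flagged for a deferred disarm;
 * everything else is disarmed immediately.
 */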
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
			    unsigned cnt)
{
	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
	unsigned i;
	unsigned long flags;

	for (i = 0; i < dd->num_pports; i++)
		pppd[i] = NULL;

	for (i = 0; i < cnt; i++) {
		if (!test_bit(i, mask))
			continue;
		/*
		 * If the buffer is owned by the SDMA engine, let
		 * qib_cancel_sends() deal with that port below.
		 */
		ppd = is_sdma_buf(dd, i);
		if (ppd) {
			pppd[ppd->port] = ppd;
			continue;
		}
		/*
		 * If the kernel is currently writing the buffer, or it is
		 * owned by a user context, defer the disarm; otherwise
		 * disarm it right away.
		 */
		spin_lock_irqsave(&dd->pioavail_lock, flags);
		if (test_bit(i, dd->pio_writing) ||
		    (!test_bit(i << 1, dd->pioavailkernel) &&
		     find_ctxt(dd, i))) {
			__set_bit(i, dd->pio_need_disarm);
		} else {
			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
		}
		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
	}

	/* cancel sends once for each port that had SDMA buffers in the set */
	for (i = 0; i < dd->num_pports; i++)
		if (pppd[i])
			qib_cancel_sends(pppd[i]);
}
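
/**
 * update_send_bufs - refresh the shadow copy of the send buffer availability
 * @dd: the qlogic_ib device
 *
 * Called when the local shadow suggests that no send buffers are available.
 */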
static void update_send_bufs(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = dd->pioavregs;

	/*
	 * Bring the shadow busy bits for kernel-owned buffers back in sync
	 * with the chip's DMA'ed availability registers.  The update is
	 * done under pioavail_lock so concurrent callers don't clobber
	 * each other's shadow updates.
	 */
	if (!dd->pioavailregs_dma)
		return;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
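
/*
 * Bookkeeping for the case where no PIO send buffers are available.
 */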
static noinline void no_send_bufs(struct qib_devdata *dd)
{
	dd->upd_pio_shadow = 1;

	/* not atomic, but losing an occasional stat count is acceptable */
	qib_stats.sps_nopiobufs++;
}
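
/*
 * Common code for allocating a send buffer from the range [first, last].
 * Marks the chosen buffer busy in the shadow and returns a pointer to it,
 * along with its number in *pbufnum, or returns NULL if none is available.
 */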
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
				  u32 first, u32 last)
{
	unsigned i, j, updated = 0;
	unsigned nbufs;
	unsigned long flags;
	unsigned long *shadow = dd->pioavailshadow;
	u32 __iomem *buf;

	if (!(dd->flags & QIB_PRESENT))
		return NULL;

	nbufs = last - first + 1;
	if (dd->upd_pio_shadow) {
update_shadow:
		/*
		 * A previous allocation found no buffers, so refresh the
		 * shadow from the chip before (re)scanning.
		 */
		update_send_bufs(dd);
		updated++;
	}
	i = first;
	/*
	 * The shadow is updated with non-atomic bit operations, so the
	 * whole scan runs under pioavail_lock.
	 */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	if (dd->last_pio >= first && dd->last_pio <= last)
		i = dd->last_pio + 1;
	if (!first)
		/* kernel range: don't scan below min_kernel_pio */
		nbufs = last - dd->min_kernel_pio + 1;
	for (j = 0; j < nbufs; j++, i++) {
		if (i > last)
			i = !first ? dd->min_kernel_pio : first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip the generation bit for the buffer we just claimed */
		__change_bit(2 * i, shadow);
		/* mark the buffer as being written; see qib_sendbuf_done() */
		__set_bit(i, dd->pio_writing);
		if (!first && first != last)
			dd->last_pio = i;
		break;
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	if (j == nbufs) {
		if (!updated)
			/*
			 * The first pass found nothing; update the shadow
			 * from the chip and rescan once.
			 */
			goto update_shadow;
		no_send_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->piobcnt2k)
			buf = (u32 __iomem *)(dd->pio2kbase +
				i * dd->palign);
		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
			buf = (u32 __iomem *)(dd->pio4kbase +
				(i - dd->piobcnt2k) * dd->align4k);
		else
			buf = (u32 __iomem *)(dd->piovl15base +
				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
				dd->align4k);
		if (pbufnum)
			*pbufnum = i;
		dd->upd_pio_shadow = 0;
	}

	return buf;
}
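
/*
 * The caller has finished writing send buffer n: clear the "writing" bit
 * and, if a disarm was deferred while the buffer was in use, do it now.
 */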
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
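
/**
 * qib_chg_pioavailkernel - change which send buffers are available to the kernel
 * @dd: the qlogic_ib device
 * @start: the first send buffer to change
 * @len: the number of send buffers to change
 * @avail: nonzero if the buffers become available for kernel use
 * @rcd: the user context involved, passed through to the chip-specific
 *	 txchk update
 */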
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
			    unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
	unsigned long flags;
	unsigned end;
	unsigned ostart = start;

	/* there are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	/* set or clear the busy bit in the shadow */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i;

			/*
			 * Clear the busy bit and copy the generation
			 * (check) bit from the chip's DMA'ed copy into the
			 * shadow, since it may have changed while the
			 * buffer was not kernel-owned.  The DMA array is
			 * little-endian, so convert to host order before
			 * testing the bit.
			 */
			i = start / BITS_PER_LONG;
			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
				    dd->pioavailshadow);
			dma = (unsigned long)
				le64_to_cpu(dd->pioavailregs_dma[i]);
			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
				      start) % BITS_PER_LONG, &dma))
				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
					  start, dd->pioavailshadow);
			else
				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->pioavailshadow);
			__set_bit(start, dd->pioavailkernel);
			if ((start >> 1) < dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		} else {
			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->pioavailshadow);
			__clear_bit(start, dd->pioavailkernel);
			if ((start >> 1) > dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		}
		start += 2;
	}

	if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
		dd->last_pio = dd->min_kernel_pio - 1;
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	dd->f_txchk_change(dd, ostart, len, avail, rcd);
}
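
/*
 * Cancel all pending sends on a port: tell every user context on the port
 * (via the DISARM_BUFS event) to disarm its buffers and flag those buffers
 * for a deferred disarm; then, if the chip has no send DMA engine, disarm
 * and flush everything directly.
 */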
void qib_cancel_sends(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	unsigned ctxt;
	unsigned i;
	unsigned last;

	/*
	 * For each user context on this port, set the DISARM_BUFS event
	 * bit and flag every buffer in the context's range for a deferred
	 * disarm.  uctxt_lock keeps the context from disappearing
	 * underneath us while we do so.
	 */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		rcd = dd->rcd[ctxt];
		if (rcd && rcd->ppd == ppd) {
			last = rcd->pio_base + rcd->piocnt;
			if (rcd->user_event_mask) {
				/*
				 * subctxt_cnt is 0 if the context is not
				 * shared, so handle the base context first,
				 * then any subcontexts.
				 */
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[0]);
				for (i = 1; i < rcd->subctxt_cnt; i++)
					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
						&rcd->user_event_mask[i]);
			}
			i = rcd->pio_base;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			spin_lock_irqsave(&dd->pioavail_lock, flags);
			for (; i < last; i++)
				__set_bit(i, dd->pio_need_disarm);
			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
		} else
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	}

	if (!(dd->flags & QIB_HAS_SEND_DMA))
		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
				    QIB_SENDCTRL_FLUSH);
}
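
/*
 * Force the chip to refresh the in-memory (DMA'ed) copy of the send buffer
 * availability registers.
 */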
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}

void qib_hol_down(struct qib_pportdata *ppd)
{
	/* skip the cancel while IB autonegotiation is in progress */
	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		qib_cancel_sends(ppd);
}
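
/*
 * Enter the head-of-line INIT state and arm the HoL timer, unless the port
 * is already in that state.
 */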
void qib_hol_init(struct qib_pportdata *ppd)
{
	if (ppd->hol_state != QIB_HOL_INIT) {
		ppd->hol_state = QIB_HOL_INIT;
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}
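
/*
 * Mark the head-of-line state as UP; the timer callback then does nothing
 * until the state changes again.
 */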
void qib_hol_up(struct qib_pportdata *ppd)
{
	ppd->hol_state = QIB_HOL_UP;
}
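
/*
 * Timer callback: while the port is not in the HoL UP state, cancel pending
 * sends and re-arm the timer.
 */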
void qib_hol_event(struct timer_list *t)
{
	struct qib_pportdata *ppd = from_timer(ppd, t, hol_timer);

	/* skip if the device never finished initializing */
	if (!(ppd->dd->flags & QIB_INITTED))
		return;

	if (ppd->hol_state != QIB_HOL_UP) {
		/*
		 * Cancel pending sends again and keep the timer running
		 * until the link comes back up.
		 */
		qib_hol_down(ppd);
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}