// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio and qdio patch by Cornelia Huck
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
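
/*
 * The do_siga_* helpers below wrap the SIGA (Signal Adapter) instruction:
 * the function code is loaded into GR0, the subchannel id (or the QEBSM
 * subchannel token) into GR1 and the queue mask(s) into GR2/GR3. The
 * condition code is then extracted from the PSW via ipm/srl.
 */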
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retry if the first buffer to changed is busy.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	/* Ensure that all preceding changes to the SBALs are visible: */
	mb();

	for (i = 0; i < count; i++) {
		WRITE_ONCE(q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}

	/* Make our SLSB changes visible: */
	mb();

	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initialized state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}
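
/*
 * Issue SIGA-w (or the writeq/writem variants) for an Output Queue. If the
 * adapter reports the busy bit, retry until QDIO_BUSY_BIT_PATIENCE worth of
 * TOD clock ticks has elapsed, and track the retries for debugging.
 */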
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q->irq_ptr))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);
	q->u.in.batch_count = 0;
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	q->q_stats.nr_sbal_total += count;
	q->q_stats.nr_sbals[ilog2(count)]++;
}

static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}

static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
				       int count, bool auto_ack)
{
	/* ACK the newest SBAL: */
	if (!auto_ack)
		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);

	if (!q->u.in.batch_count)
		q->u.in.batch_start = start;
	q->u.in.batch_count += count;
}
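
/*
 * Scan the Input Queue for buffers that changed ownership, starting at the
 * current frontier. Returns the number of consecutive buffers sharing the
 * same SLSB state and reports erroneous buffers through *error.
 */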
static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
				       unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, start, &state, count, 1);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
			      count);

		inbound_handle_work(q, start, count, is_qebsm(q));
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
			      count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		inbound_handle_work(q, start, count, false);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	return 1;
}
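
/*
 * For CQ-enabled Output Queues, attach an asynchronous operation block
 * (QAOB) to the buffer so that a delayed completion can be signalled. The
 * QAOB must be 256-byte aligned, since its physical address is handed to
 * SIGA-wq; the WARN below guards that alignment.
 */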
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
						int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		WARN_ON_ONCE(phys_aob & 0xFF);
	}

	return phys_aob;
}

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->u.out.tasklet);
		return 0;
	}
	return -EPERM;
}
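
/*
 * Scan the Output Queue for buffers that the adapter has fetched or
 * completed, syncing the queue state first where the hardware requires it.
 */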
static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
					unsigned int *error)
{
	unsigned char state = 0;
	unsigned int i;
	int count;

	q->timestamp = get_tod_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q->irq_ptr)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	count = get_buf_states(q, start, &state, count, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		/* detach the utilized QAOBs: */
		for (i = 0; i < count; i++)
			q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL;

		*error = QDIO_ERROR_SLSB_PENDING;
		fallthrough;
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			      "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
			      q->nr, count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
				unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, count, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
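
/*
 * The outbound tasklet scans for completed buffers and calls the driver's
 * output handler. For queues without PCI completion interrupts it re-arms
 * a 10 second timer, so that PRIMED -> EMPTY transitions are eventually
 * noticed even without further traffic.
 */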
void qdio_outbound_tasklet(struct tasklet_struct *t)
{
	struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
	struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
	unsigned int start = q->first_to_check;
	unsigned int error = 0;
	int count;

	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	count = get_outbound_buffer_frontier(q, start, &error);
	if (count) {
		q->first_to_check = add_buf(start, count);

		if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
			qperf_inc(q, outbound_handler);
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
				      start, count);

			q->handler(q->irq_ptr->cdev, error, q->nr, start,
				   count, q->irq_ptr->int_parm);
		}
	}

	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
	    !qdio_outbound_q_done(q))
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer_sync(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	qdio_tasklet_schedule(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
	struct qdio_q *q = from_timer(q, t, u.out.timer);

	qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(irq) || !irq->scan_threshold)
		return;

	for_each_output_queue(irq, out, i)
		if (!qdio_outbound_q_done(out))
			qdio_tasklet_schedule(out);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	qdio_deliver_irq(irq_ptr);
	irq_ptr->last_data_irq_time = S390_lowcore.int_clock;

	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		qdio_tasklet_schedule(q);
	}
}

static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * An unexpected activate check can be a sign of an LPAR migration,
	 * so log the Linux Guest Relocation information.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
				      int dstat)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(irq_ptr, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_output_queue(irq_ptr, q, i) {
		del_timer_sync(&q->u.out.timer);
		tasklet_kill(&q->u.out.tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	qdio_shutdown_queues(irq_ptr);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irq(get_ccwdev_lock(cdev));
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
					 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
					 irq_ptr->state == QDIO_IRQ_STATE_ERR,
					 10 * HZ);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);
	qdio_shutdown_irq(irq_ptr);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_free_async_data(irq_ptr);
	qdio_free_queues(irq_ptr);
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	free_page((unsigned long) irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @cdev: associated ccw device
 * @no_input_qs: allocate this number of Input Queues
 * @no_output_qs: allocate this number of Output Queues
 */
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
		  unsigned int no_output_qs)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc = -ENOMEM;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		return -ENOMEM;

	irq_ptr->cdev = cdev;
	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(irq_ptr))
		goto err_dbf;

	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
		      no_output_qs);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto err_chsc;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto err_qdr;

	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
	if (rc)
		goto err_queues;

	cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;

err_queues:
	free_page((unsigned long) irq_ptr->qdr);
err_qdr:
	free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
	free_page((unsigned long) irq_ptr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (multicast_outbound(q))
				continue;
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				continue;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

static void qdio_trace_init_data(struct qdio_irq *irq,
				 struct qdio_initialize *data)
{
	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
		      data->no_output_qs);
	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @cdev: associated ccw device
 * @init_data: initialization data
 */
int qdio_establish(struct ccw_device *cdev,
		   struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
	    init_data->no_output_qs > irq_ptr->max_output_qs)
		return -EINVAL;

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if (!init_data->input_sbal_addr_array ||
	    !init_data->output_sbal_addr_array)
		return -EINVAL;

	if (!init_data->irq_poll)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_trace_init_data(irq_ptr, init_data);
	qdio_setup_irq(irq_ptr, init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		qdio_shutdown_thinint(irq_ptr);
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr);
	qdio_setup_debug_entries(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any processed SBALs are returned to HW, adjust our tracking: */
	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
			q->u.in.batch_count);
	if (overlap > 0) {
		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
		q->u.in.batch_count -= overlap;
	}

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   unsigned int bufnr, unsigned int count)
{
	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		if (q->u.out.use_cq && count == 1)
			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, count, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, count, 0);
	}

	/* Let drivers implement their own completion scanning: */
	if (!scan_threshold)
		return rc;

	/* in case of SIGA errors we must process the error immediately */
	if (used >= scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int i;

	if (!irq_ptr)
		return -ENODEV;

	for_each_input_queue(irq_ptr, q, i)
		qdio_stop_polling(q);

	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;

	for_each_input_queue(irq_ptr, q, i) {
		if (!qdio_inbound_q_done(q, q->first_to_check))
			goto rescan;
	}

	return 0;

rescan:
	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
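
/* Scan one queue and report the first block of completed buffers. */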
static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
				unsigned int *error)
{
	unsigned int start = q->first_to_check;
	int count;

	*error = 0;
	count = q->is_input_q ? get_inbound_buffer_frontier(q, start, error) :
		get_outbound_buffer_frontier(q, start, error);
	if (count == 0)
		return 0;

	*bufnr = start;

	/* for the next time */
	q->first_to_check = add_buf(start, count);

	return count;
}

int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
		       unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	if (!irq_ptr)
		return -ENODEV;
	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL_GPL(qdio_inspect_queue);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	qdio_check_outbound_pci_queues(irq_ptr);

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = qdio_thinint_init();
	if (rc)
		goto out_cache;
	return 0;

out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	qdio_thinint_exit();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);