/*
 * Adjunct Processor (AP) bus driver for the s390 architecture.
 *
 * Copyright 2006 IBM Corporation
 */
#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/reset.h>
#include <asm/airq.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#include "ap_bus.h"

static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(void *unused1, void *unused2);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
		   "Copyright 2006 IBM Corporation");
MODULE_LICENSE("GPL");

/* Module parameters: AP domain index and optional poll thread. */
int ap_domain_index = -1;
module_param_named(domain, ap_domain_index, int, 0000);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

static struct device *ap_root_device = NULL;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);

/* Workqueue and timer for the periodic AP bus rescan. */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/* Tasklet, hrtimer and optional kernel thread for AP request polling. */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static void *ap_interrupt_indicator;
static struct hrtimer ap_poll_timer;

/* Default poll interval in nanoseconds (250000 ns = 4kHz); adjusted to
 * 1500000 ns when running under z/VM (see ap_module_init()).
 */
static unsigned long long poll_timeout = 250000;

/* Suspend flag */
static int ap_suspend_flag;

/* Set if the domain was given via the "domain=" module parameter, so it
 * is not auto-selected again on resume.
 */
static int user_set_domain = 0;
static struct bus_type ap_bus_type;

/**
 * ap_using_interrupts(): Returns non-zero if interrupt support is
 * available.
 */
static inline int ap_using_interrupts(void)
{
	return ap_interrupt_indicator != NULL;
}

/**
 * ap_instructions_available(): Test if AP instructions are available.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	return reg1;
}

/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	return test_facility(2) && test_facility(65);
}

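/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Returns AP queue status structure.
 */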
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}

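/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
 *
 * Returns AP queue status structure.
 */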
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

#ifdef CONFIG_64BIT

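/**
 * ap_queue_interruption_control(): Enable interruption for a specific AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Returns AP queue status.
 */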
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;
	asm volatile(
		".long 0xb2af0000"
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc" );
	return reg1_out;
}
#endif

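/**
 * __ap_4096_commands_available(): Test if an AP queue supports the
 * extended (4096 bit) command set.
 * @qid: The AP queue number
 * @support: Pointer that is set to 1 if the returned facility bits
 *	     indicate support
 *
 * Returns AP queue status.
 */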
static inline struct ap_queue_status __ap_4096_commands_available(ap_qid_t qid,
								  int *support)
{
	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000\n"
		"0: la   %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "=d" (reg1), "=d" (reg2)
		:
		: "cc");

	if (reg2 & 0x6000000000000000ULL)
		*support = 1;
	else
		*support = 0;

	return reg1;
}

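/**
 * ap_4096_commands_available(): Check for availability of 4096 bit support.
 * @qid: The AP queue number
 *
 * Returns 1 if 4096 bit commands are available on this AP queue, 0 if not.
 */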
int ap_4096_commands_available(ap_qid_t qid)
{
	struct ap_queue_status status;
	int i, support = 0;
	status = __ap_4096_commands_available(qid, &support);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			return support;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return 0;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = __ap_4096_commands_available(qid, &support);
		}
	}
	return support;
}
EXPORT_SYMBOL(ap_4096_commands_available);

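/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Enables interruption on an AP queue via ap_queue_interruption_control().
 * Based on the return value it waits a while and tests the AP queue if
 * interrupts have been switched on using ap_test_queue().
 */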
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	return -EINVAL;
#endif
}

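/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure. The NQAP instruction is repeated
 * (brc 2,0b) while condition code 2 indicates that the send is still
 * incomplete.
 */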
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  unsigned int special)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	if (special == 1)
		reg0 |= 0x400000UL;

	asm volatile (
		"0: .long 0xb2ad0042\n"
		"   brc   2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

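/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure. The DQAP instruction is repeated
 * (brc 6,0b) while condition code 1 or 2 indicates that the reply is
 * still incomplete. The program supplied message id of the reply is
 * returned in registers 6 and 7.
 */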
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;

	asm volatile(
		"0: .long 0xb2ae0064\n"
		"   brc   6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc" );
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

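/**
 * ap_query_queue(): Check if an AP queue is available.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * The test is repeated for AP_MAX_RESET times.
 */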
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			break;
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_INVALID_ADDRESS:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		case AP_RESPONSE_BUSY:
			break;
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}

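/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Resets an AP queue and enables interruption support if it is available.
 */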
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);

		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}

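/**
 * ap_increase_queue_count(): Arm request timeout.
 * @ap_dev: Pointer to an AP device.
 *
 * Arms the request timeout if an AP device was idle and a new request is
 * submitted.
 */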
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}

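/**
 * ap_decrease_queue_count(): Decrease queue count.
 * @ap_dev: Pointer to an AP device.
 *
 * If the AP device is still alive, re-schedule the request timeout if there
 * are still pending requests.
 */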
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * The timeout timer should be disabled now; since
		 * del_timer_sync() is expensive, just tell the timeout
		 * handler via the reset flag to ignore the pending timer.
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}

/*
 * AP device related attributes.
 */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}

static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_modalias.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};

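/**
 * ap_bus_match(): Match an AP device against an AP driver.
 * @dev: Pointer to device
 * @drv: Pointer to device_driver
 *
 * Returns 1 if the driver's ID table matches the device type.
 */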
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/* Compare the device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

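/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * Sets up the DEV_TYPE and MODALIAS environment variables for the device.
 */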
static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/* Disable scanning for devices, thus we do not want to scan
		 * for them after removing.
		 */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished. */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}

static int ap_bus_resume(struct device *dev)
{
	int rc = 0;
	struct ap_device *ap_dev = to_ap_dev(dev);

	if (ap_suspend_flag) {
		ap_suspend_flag = 0;
		if (!ap_interrupts_available())
			ap_interrupt_indicator = NULL;
		if (!user_set_domain) {
			ap_domain_index = -1;
			ap_select_domain();
		}
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
	}
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};

static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;
	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	if (!rc) {
		spin_lock_bh(&ap_device_list_lock);
		list_add(&ap_dev->list, &ap_device_list);
		spin_unlock_bh(&ap_device_list_lock);
	}
	return rc;
}

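/**
 * __ap_flush_queue(): Flush requests.
 * @ap_dev: Pointer to the AP device
 *
 * Flush all requests from the request and pending queue of an AP device.
 */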
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);

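/*
 * AP bus attributes.
 */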
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			ap_using_interrupts() ? 1 : 0);
}

static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);

static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	}
	else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer,
			     hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};

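/**
 * ap_select_domain(): Select an AP domain.
 *
 * Pick the AP domain to be used by the bus: either the one given with the
 * "domain=" module parameter or the domain with the maximum number of
 * devices.
 */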
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	int rc, i, j;

	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			ap_qid_t qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}

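/**
 * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
 *
 * Sends a test message and inspects the reply to distinguish the device
 * type when ap_query_queue() reported a device type of 0.
 */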
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg), 0);
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}

static void ap_interrupt_handler(void *unused1, void *unused2)
{
	kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
	tasklet_schedule(&ap_tasklet);
}

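/**
 * __ap_scan_bus(): Scan the AP bus.
 * @dev: Pointer to device
 * @data: Pointer to data
 *
 * Helper for bus_find_device(); matches a device by its AP queue id.
 */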
static int __ap_scan_bus(struct device *dev, void *data)
{
	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
}

static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	kfree(ap_dev);
}

static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	int rc, i;

	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		rc = ap_query_queue(qid, &queue_depth, &device_type);
		if (dev) {
			if (rc == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				if (ap_dev->unregistered)
					i--;
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		if (device_type == 0)
			ap_probe_device_type(ap_dev);
		else
			ap_dev->device_type = device_type;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		if (dev_set_name(&ap_dev->device, "card%02x",
				 AP_QID_DEVICE(ap_dev->qid))) {
			kfree(ap_dev);
			continue;
		}
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			put_device(&ap_dev->device);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		}
		else
			device_unregister(&ap_dev->device);
	}
}

static void
ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}

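/**
 * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet.
 */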
static inline void ap_schedule_poll_timer(void)
{
	ktime_t hr_time;

	spin_lock_bh(&ap_poll_timer_lock);
	if (ap_using_interrupts() || ap_suspend_flag)
		goto out;
	if (hrtimer_is_queued(&ap_poll_timer))
		goto out;
	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
out:
	spin_unlock_bh(&ap_poll_timer_lock);
}

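/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */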
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

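/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */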
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;

	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}

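/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, an error value otherwise.
 */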
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}

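/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Queue a message to a device. Returns 0 if successful.
 */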
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length,
				   ap_msg->special);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_REQ_FAC_NOT_INST:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}

void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);

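/**
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device's pending or request queue. Note that the
 * request stays on the AP queue; when it finishes, the reply is
 * discarded because its psmid can no longer be found.
 */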
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

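/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet from the high resolution poll timer.
 */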
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}

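/**
 * ap_reset(): Reset a not responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */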
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
}

static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	return 0;
}

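/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags is set arm the poll timer.
 */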
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the indicator if interrupts are used. Thus new interrupts
	 * can be received; doing this at the start of the tasklet ensures
	 * that no requests on any AP get lost.
	 */
	if (ap_using_interrupts())
		xchg((u8 *)ap_interrupt_indicator, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}

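/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * Low priority (nice 19) kernel thread that polls all AP devices for
 * finished requests in a loop while there are outstanding requests and
 * the cpu has nothing better to do.
 */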
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, 19);
	while (1) {
		if (ap_suspend_flag)
			return 0;
		if (need_resched()) {
			schedule();
			continue;
		}
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}

static int ap_poll_thread_start(void)
{
	int rc;

	if (ap_using_interrupts() || ap_suspend_flag)
		return 0;
	mutex_lock(&ap_poll_thread_mutex);
	if (!ap_poll_kthread) {
		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
		if (rc)
			ap_poll_kthread = NULL;
	}
	else
		rc = 0;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}

static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}

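/**
 * ap_request_timeout(): Handling of request timeouts
 * @data: Holds the AP device.
 *
 * Handles request timeouts by flagging the device for reset.
 */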
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	if (ap_dev->reset == AP_RESET_ARMED) {
		ap_dev->reset = AP_RESET_DO;

		if (ap_using_interrupts())
			tasklet_schedule(&ap_tasklet);
	}
}

static void ap_reset_domain(void)
{
	int i;

	if (ap_domain_index != -1)
		for (i = 0; i < AP_DEVICES; i++)
			ap_reset_queue(AP_MKQID(i, ap_domain_index));
}

static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++)
		for (j = 0; j < AP_DEVICES; j++)
			ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};

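/**
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module.
 */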
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/* Remember a user-specified domain so it is not auto-selected
	 * again on resume.
	 */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		isc_register(AP_ISC);
		ap_interrupt_indicator = s390_register_adapter_interrupt(
			&ap_interrupt_handler, NULL, AP_ISC);
		if (IS_ERR(ap_interrupt_indicator)) {
			ap_interrupt_indicator = NULL;
			isc_unregister(AP_ISC);
		}
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator,
						  AP_ISC);
		isc_unregister(AP_ISC);
	}
	return rc;
}

static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

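/**
 * ap_module_exit(): The module termination code.
 *
 * Terminates the module.
 */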
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator,
						  AP_ISC);
		isc_unregister(AP_ISC);
	}
}

#ifndef CONFIG_ZCRYPT_MONOLITHIC
module_init(ap_module_init);
module_exit(ap_module_exit);
#endif