/*
 * Adjunct Processor (AP) bus driver.
 *
 * Copyright IBM Corp. 2006, 2012
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/reset.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <asm/facility.h>
#include <linux/crypto.h>

#include "ap_bus.h"

static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(struct airq_struct *airq);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);
static void ap_query_configuration(void);

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
		   "Copyright IBM Corp. 2006, 2012");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("z90crypt");

/*
 * Module parameters: the AP domain index to use and whether to run the
 * AP poll thread.
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
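
/*
 * Usage note (illustrative, not part of the original source): both
 * parameters can be given at load time, e.g.
 *
 *	modprobe ap domain=7 poll_thread=1
 *
 * assuming the driver is built as a module named "ap"; when built in,
 * the same values can be passed on the kernel command line as
 * ap.domain=7 ap.poll_thread=1.
 */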

static struct device *ap_root_device = NULL;
static struct ap_config_info *ap_configuration;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);

/*
 * Workqueue and timer for the periodic AP bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/*
 * Tasklet, wait queue, poll thread and high resolution timer used for
 * AP request polling and interrupt handling.
 */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static struct hrtimer ap_poll_timer;

/*
 * Default poll interval is 250000 nanoseconds; under z/VM a longer
 * interval is used, see ap_module_init().
 */
static unsigned long long poll_timeout = 250000;

/* Suspend flag */
static int ap_suspend_flag;
/*
 * Flag set if the domain was given via the "domain=" module parameter;
 * in that case the domain is not re-selected on resume.
 */
static int user_set_domain = 0;
static struct bus_type ap_bus_type;

/* Non-zero if adapter interrupts are available and registered. */
static int ap_airq_flag;

static struct airq_struct ap_airq = {
	.handler = ap_interrupt_handler,
	.isc = AP_ISC,
};

/**
 * ap_using_interrupts() - Returns non-zero if interrupt support is
 * available.
 */
static inline int ap_using_interrupts(void)
{
	return ap_airq_flag;
}

/**
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	return reg1;
}

/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	return test_facility(65);
}

/**
 * ap_configuration_available(): Test if AP configuration
 * information is available.
 *
 * Returns 1 if AP configuration information is available.
 */
static int ap_configuration_available(void)
{
	return test_facility(12);
}

/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}

/**
 * ap_query_facilities(): Query facility bits of an AP queue.
 * @qid: The AP queue number
 *
 * Returns the content of general register 2 after the test with the
 * facilities-query bit set in register 0.
 */
static inline unsigned long ap_query_facilities(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x00800000UL;
	register unsigned long reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg2;
}

/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

/**
 * ap_queue_interruption_control(): Enable interruption for a specific AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Returns AP queue status.
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc" );
	return reg1_out;
}

static inline struct ap_queue_status
__ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
	register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
	register unsigned long reg2 asm ("2");

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0:\n"
		EX_TABLE(0b, 0b)
		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
		:
		: "cc");

	*functions = (unsigned int)(reg2 >> 32);
	return reg1;
}

static inline int __ap_query_configuration(struct ap_config_info *config)
{
	register unsigned long reg0 asm ("0") = 0x04000000UL;
	register unsigned long reg1 asm ("1") = -EINVAL;
	register unsigned char *reg2 asm ("2") = (unsigned char *)config;

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2)
		:
		: "cc");

	return reg1;
}

/**
 * ap_query_functions(): Query supported functions.
 * @qid: The AP queue number
 * @functions: Pointer to the functions field
 *
 * Returns 0 on success, -ENODEV if the queue is not valid,
 * -EBUSY if the queue stays busy.
 */
static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
	struct ap_queue_status status;
	int i;

	status = __ap_query_functions(qid, functions);

	for (i = 0; i < AP_MAX_RESET; i++) {
		if (ap_queue_status_invalid_test(&status))
			return -ENODEV;

		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			return 0;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = __ap_query_functions(qid, functions);
		}
	}
	return -EBUSY;
}

/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Enables interruption on an AP queue via ap_queue_interruption_control().
 * Based on the return value it waits a while and tests the AP queue if
 * interrupts have been switched on using ap_test_queue().
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			if (i < AP_MAX_RESET - 1) {
				udelay(5);
				status = ap_queue_interruption_control(qid,
								       ind);
				continue;
			}
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special bit
 *
 * Returns AP queue status structure.
 * Condition code 2 on NQAP means the send is incomplete because a
 * segment boundary was reached; the NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  unsigned int special)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = psmid & 0xffffffff;

	if (special == 1)
		reg0 |= 0x400000UL;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		"   brc   2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);
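
/*
 * Illustrative sketch (not part of the driver): a caller using the exported
 * ap_send()/ap_recv() pair directly would send a request under a caller
 * chosen psmid and then poll ap_recv() until the matching reply arrives.
 * The qid, psmid and buffers below are hypothetical.
 *
 *	rc = ap_send(qid, 0x0102030405060708ULL, req, req_len);
 *	if (rc)
 *		return rc;
 *	do {
 *		msleep(10);
 *		rc = ap_recv(qid, &psmid, reply, reply_len);
 *	} while (rc == -ENOENT || rc == -EBUSY);
 */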

/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition codes 1 and 2 on DQAP mean the receive is incomplete and
 * the DQAP is repeated.
 * Note that gpr2 is used by the DQAP instruction to keep track of any
 * 'residual' length, in case the instruction gets interrupted; hence it
 * gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;

	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		"   brc   6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc" );
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/**
 * ap_query_queue(): Check if an AP queue is available.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * The test is repeated for AP_MAX_RESET times.
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			break;
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_INVALID_ADDRESS:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		case AP_RESPONSE_BUSY:
			break;
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}

/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Reset an AP queue and wait for it to become available again.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fall through */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			/* Wait a while before re-testing the queue. */
			schedule_timeout(AP_RESET_TIMEOUT);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
		/*
		 * If interruption mode is supported by the machine but
		 * cannot be enabled for this AP, the device is discarded
		 * by the caller.
		 */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}

/**
 * ap_increase_queue_count(): Arm request timeout.
 * @ap_dev: Pointer to an AP device.
 *
 * Arm the request timeout if an AP device was idle and a new request
 * is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}

/**
 * ap_decrease_queue_count(): Decrease queue count.
 * @ap_dev: Pointer to an AP device.
 *
 * If requests are still pending, re-arm the request timeout;
 * otherwise disarm it.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * Instead of deleting the (possibly running) timeout timer,
		 * just mark the armed reset as ignored; the pending timer
		 * then does nothing when it fires (see ap_request_timeout()).
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}

/*
 * AP device related attributes.
 */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}

static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

static ssize_t ap_raw_hwtype_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
}

static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);

static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

static ssize_t ap_requestq_count_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);

static ssize_t ap_pendingq_count_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);

static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
}

static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);

static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_raw_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_modalias.attr,
	&dev_attr_ap_functions.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};

/**
 * ap_bus_match()
 * @dev: Pointer to device
 * @drv: Pointer to device_driver
 *
 * Returns 1 if the device type of @dev matches one of the ids of @drv.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type.
 */
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/*
		 * Stop the config timer and the bus rescan workqueue so that
		 * no new device scans run while suspending.
		 */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished. */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}

static int ap_bus_resume(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	if (ap_suspend_flag) {
		ap_suspend_flag = 0;
		if (ap_interrupts_available()) {
			if (!ap_using_interrupts()) {
				rc = register_adapter_interrupt(&ap_airq);
				ap_airq_flag = (rc == 0);
			}
		} else {
			if (ap_using_interrupts()) {
				unregister_adapter_interrupt(&ap_airq);
				ap_airq_flag = 0;
			}
		}
		ap_query_configuration();
		if (!user_set_domain) {
			ap_domain_index = -1;
			ap_select_domain();
		}
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
		else
			rc = 0;
	} else
		rc = 0;
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};

static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;

	spin_lock_bh(&ap_device_list_lock);
	list_add(&ap_dev->list, &ap_device_list);
	spin_unlock_bh(&ap_device_list_lock);

	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	if (rc) {
		spin_lock_bh(&ap_device_list_lock);
		list_del_init(&ap_dev->list);
		spin_unlock_bh(&ap_device_list_lock);
	}
	return rc;
}

/**
 * __ap_flush_queue(): Flush requests.
 * @ap_dev: Pointer to the AP device
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
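
/*
 * Illustrative sketch (not part of the driver): a card driver built on this
 * bus fills in an ap_driver with a device-type match table and probe/remove
 * callbacks before registering it.  All names below are hypothetical.
 *
 *	static struct ap_device_id my_ids[] = {
 *		{ .dev_type = 10,
 *		  .match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE },
 *		{ },
 *	};
 *
 *	static struct ap_driver my_ap_driver = {
 *		.ids = my_ids,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.request_timeout = 60 * HZ,
 *	};
 *
 *	rc = ap_driver_register(&my_ap_driver, THIS_MODULE, "my_card");
 *	...
 *	ap_driver_unregister(&my_ap_driver);
 */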

void ap_bus_force_rescan(void)
{
	/* Delay the periodic AP bus rescan. */
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
	/* Run the rescan now and wait until it has completed. */
	queue_work(ap_work_queue, &ap_config_work);
	flush_work(&ap_config_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);

/*
 * ap_test_config(): helper function to extract the nrth bit
 *		     within the unsigned int array field.
 */
static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	if (nr > 0xFFu)
		return 0;
	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
}

/*
 * ap_test_config_card_id(): Test, whether the configuration of a particular
 *			     card id is enabled.
 *
 * Returns 0 if the card is not configured,
 *	   1 if the card is configured or if the configuration information
 *	     is not available.
 */
static inline int ap_test_config_card_id(unsigned int id)
{
	if (!ap_configuration)
		return 1;
	return ap_test_config(ap_configuration->apm, id);
}

/*
 * ap_test_config_domain(): Test, whether the configuration of a particular
 *			    usage domain is enabled.
 *
 * Returns 1 if the domain is configured.  If no configuration information
 * is available, only domains 0...15 are assumed to be configured.
 */
static inline int ap_test_config_domain(unsigned int domain)
{
	if (!ap_configuration) {
		if (domain < 16)
			return 1;
		else
			return 0;
	} else {
		return ap_test_config(ap_configuration->aqm, domain);
	}
}

/*
 * AP bus attributes.
 */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
	if (ap_configuration != NULL) {
		if (test_facility(76)) {
			/* 256 bit domain mask */
			return snprintf(buf, PAGE_SIZE,
				"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
				ap_configuration->adm[0], ap_configuration->adm[1],
				ap_configuration->adm[2], ap_configuration->adm[3],
				ap_configuration->adm[4], ap_configuration->adm[5],
				ap_configuration->adm[6], ap_configuration->adm[7]);
		} else {
			/* 64 bit domain mask */
			return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
				ap_configuration->adm[0], ap_configuration->adm[1]);
		}
	} else {
		return snprintf(buf, PAGE_SIZE, "not supported\n");
	}
}

static BUS_ATTR(ap_control_domain_mask, 0444,
		ap_control_domain_mask_show, NULL);

static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			ap_using_interrupts() ? 1 : 0);
}

static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);

static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	} else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	spin_lock_bh(&ap_poll_timer_lock);
	hrtimer_cancel(&ap_poll_timer);
	hrtimer_set_expires(&ap_poll_timer, hr_time);
	hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	spin_unlock_bh(&ap_poll_timer_lock);

	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
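
/*
 * Usage note (illustrative): the writable bus attributes above appear under
 * /sys/bus/ap/ (assuming sysfs is mounted in the usual place), e.g.
 *
 *	echo 500000 > /sys/bus/ap/poll_timeout	(poll interval in ns)
 *	echo 1 > /sys/bus/ap/poll_thread	(start the poll thread)
 *	echo 30 > /sys/bus/ap/config_time	(rescan every 30 seconds)
 */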

static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
{
	ap_qid_t qid;
	int i, nd, max_domain_id = -1;
	unsigned long fbits;

	if (ap_configuration) {
		if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) {
			for (i = 0; i < AP_DEVICES; i++) {
				if (!ap_test_config_card_id(i))
					continue;
				qid = AP_MKQID(i, ap_domain_index);
				fbits = ap_query_facilities(qid);
				if (fbits & (1UL << 57)) {
					/* max domain id is in the Nd field */
					nd = (int)((fbits & 0x00FF0000UL)>>16);
					if (nd > 0)
						max_domain_id = nd;
					else
						max_domain_id = 15;
				} else {
					/* no Nd field: assume 16 domains */
					max_domain_id = 15;
				}
				break;
			}
		}
	} else {
		/* no QCI support: assume 16 domains (ids 0-15) */
		max_domain_id = 15;
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
}

static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_ap_control_domain_mask,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	&bus_attr_ap_max_domain_id,
	NULL,
};

/**
 * ap_query_configuration(): Query AP configuration information.
 *
 * Query information about installed cards and configured domains from QCI.
 */
static void ap_query_configuration(void)
{
	if (ap_configuration_available()) {
		if (!ap_configuration)
			ap_configuration =
				kzalloc(sizeof(struct ap_config_info),
					GFP_KERNEL);
		if (ap_configuration)
			__ap_query_configuration(ap_configuration);
	} else
		ap_configuration = NULL;
}

/**
 * ap_select_domain(): Select an AP domain.
 *
 * Pick one of the available AP domains as the default domain.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	ap_qid_t qid;
	int rc, i, j;

	/* If APXA is not installed, only domains 0...15 can be addressed. */
	if (ap_configuration && !ap_configuration->ap_extended &&
	    (ap_domain_index > 15))
		return -EINVAL;

	/*
	 * We want to use a single domain: either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		if (!ap_test_config_domain(i))
			continue;
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			if (!ap_test_config_card_id(j))
				continue;
			qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}

/**
 * ap_probe_device_type(): Find out the device type of an AP.
 * @ap_dev: pointer to the AP device
 *
 * Find out the device type of a given AP device by sending a test
 * message and looking at the reply.  Only used for old devices that
 * report a device type of 0.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg), 0);
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}

static void ap_interrupt_handler(struct airq_struct *airq)
{
	inc_irq_stat(IRQIO_APB);
	tasklet_schedule(&ap_tasklet);
}

/*
 * __ap_scan_bus(): Helper for bus_find_device(); matches a device by its
 * AP queue id.
 */
static int __ap_scan_bus(struct device *dev, void *data)
{
	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
}

static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	kfree(ap_dev);
}

/**
 * ap_scan_bus(): Scan the AP bus for new devices.
 *
 * Runs periodically, triggered by the workqueue timer (ap_config_time).
 */
static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	unsigned int device_functions;
	int rc, i;

	ap_query_configuration();
	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		if (ap_test_config_card_id(i))
			rc = ap_query_queue(qid, &queue_depth, &device_type);
		else
			rc = -ENODEV;
		if (dev) {
			if (rc == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				if (ap_dev->unregistered)
					i--;
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		switch (device_type) {
		case 0:
			/* device type probing for old cards */
			if (ap_probe_device_type(ap_dev)) {
				kfree(ap_dev);
				continue;
			}
			break;
		default:
			ap_dev->device_type = device_type;
		}
		ap_dev->raw_hwtype = device_type;

		rc = ap_query_functions(qid, &device_functions);
		if (!rc)
			ap_dev->functions = device_functions;
		else
			ap_dev->functions = 0u;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		if (dev_set_name(&ap_dev->device, "card%02x",
				 AP_QID_DEVICE(ap_dev->qid))) {
			kfree(ap_dev);
			continue;
		}
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			put_device(&ap_dev->device);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		} else
			device_unregister(&ap_dev->device);
	}
}

static void ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}

/**
 * __ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet.
 */
static inline void __ap_schedule_poll_timer(void)
{
	ktime_t hr_time;

	spin_lock_bh(&ap_poll_timer_lock);
	if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
	spin_unlock_bh(&ap_poll_timer_lock);
}

/**
 * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet, unless AP interrupts are in
 * use (no polling is needed then).
 */
static inline void ap_schedule_poll_timer(void)
{
	if (ap_using_interrupts())
		return;
	__ap_schedule_poll_timer();
}

/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		__ap_schedule_poll_timer();
		/* fall through */
	case AP_RESPONSE_Q_FULL:
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 on success, -ENODEV if the device is no longer present.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}

/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Queue a message to a device. Returns 0 if successful.
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length,
				   ap_msg->special);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_REQ_FAC_NOT_INST:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}

void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	/*
	 * For asynchronous message handling a valid receive-callback
	 * is required.
	 */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);
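
/*
 * Illustrative sketch (not part of the driver): a message submitted with
 * ap_queue_message() must carry a receive callback.  The callback runs with
 * the device reply, or with an ERR_PTR() value if the device vanished or
 * the request was rejected.  All names below are hypothetical.
 *
 *	static void my_receive(struct ap_device *ap_dev,
 *			       struct ap_message *msg,
 *			       struct ap_message *reply)
 *	{
 *		if (IS_ERR(reply))
 *			return;			// device gone or bad request
 *		// parse reply->message / reply->length here
 *	}
 *
 *	// Caller side: fill in psmid, message buffer, length, special flag
 *	// and the receive callback, then queue the message:
 *	my_msg.receive = my_receive;
 *	ap_queue_message(ap_dev, &my_msg);
 *	...
 *	ap_cancel_message(ap_dev, &my_msg);	// if it must be aborted
 */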

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found in the
 * pending queue.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet when the high resolution poll timer expires.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}

/**
 * ap_reset(): Reset a not responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
	else
		__ap_schedule_poll_timer();
}

static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	return 0;
}

/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * is set, schedule the next poll via a high resolution timer.
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/*
	 * Reset the indicator if interrupts are used. Thus new interrupts
	 * can be received. Doing this at the beginning of the tasklet is
	 * important so that no requests on any AP get lost.
	 */
	if (ap_using_interrupts())
		xchg(ap_airq.lsi_ptr, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}

/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, MAX_NICE);
	while (1) {
		if (ap_suspend_flag)
			return 0;
		if (need_resched()) {
			schedule();
			continue;
		}
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}

static int ap_poll_thread_start(void)
{
	int rc;

	if (ap_using_interrupts() || ap_suspend_flag)
		return 0;
	mutex_lock(&ap_poll_thread_mutex);
	if (!ap_poll_kthread) {
		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
		rc = PTR_RET(ap_poll_kthread);
		if (rc)
			ap_poll_kthread = NULL;
	} else
		rc = 0;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}

static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}

/**
 * ap_request_timeout(): Handling of request timeouts.
 * @data: Holds the AP device.
 *
 * If a request has been pending longer than the driver's request timeout,
 * schedule a reset of the device.
 */
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	if (ap_dev->reset == AP_RESET_ARMED) {
		ap_dev->reset = AP_RESET_DO;

		if (ap_using_interrupts())
			tasklet_schedule(&ap_tasklet);
	}
}

static void ap_reset_domain(void)
{
	int i;

	if ((ap_domain_index != -1) && (ap_test_config_domain(ap_domain_index)))
		for (i = 0; i < AP_DEVICES; i++)
			ap_reset_queue(AP_MKQID(i, ap_domain_index));
}

static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++) {
		if (!ap_test_config_domain(i))
			continue;
		for (j = 0; j < AP_DEVICES; j++) {
			if (!ap_test_config_card_id(j))
				continue;
			ap_reset_queue(AP_MKQID(j, i));
		}
	}
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};

/**
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/*
	 * In the resume callback we need to know if the user had set the
	 * domain explicitly. If so, it must not be reset.
	 */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		rc = register_adapter_interrupt(&ap_airq);
		ap_airq_flag = (rc == 0);
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = PTR_RET(ap_root_device);
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	ap_query_configuration();
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/*
	 * Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
	return rc;
}

static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * ap_module_exit(): The module termination code.
 *
 * Terminates the module.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
				      __ap_match_all))) {
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	root_device_unregister(ap_root_device);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
}

module_init(ap_module_init);
module_exit(ap_module_exit);