1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#define KMSG_COMPONENT "ap"
27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28
29#include <linux/kernel_stat.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/delay.h>
33#include <linux/err.h>
34#include <linux/interrupt.h>
35#include <linux/workqueue.h>
36#include <linux/slab.h>
37#include <linux/notifier.h>
38#include <linux/kthread.h>
39#include <linux/mutex.h>
40#include <asm/reset.h>
41#include <asm/airq.h>
42#include <linux/atomic.h>
43#include <asm/isc.h>
44#include <linux/hrtimer.h>
45#include <linux/ktime.h>
46#include <asm/facility.h>
47
48#include "ap_bus.h"
49
50
/* Forward declarations of bus-internal helpers. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(struct airq_struct *airq);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);
static void ap_query_configuration(void);
66
67
68
69
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
		   "Copyright IBM Corp. 2006, 2012");
MODULE_LICENSE("GPL");
MODULE_ALIAS("z90crypt");

/*
 * Module parameter 'domain': index of the AP domain the driver works on.
 * -1 means "let ap_select_domain() pick the best one at scan time".
 */
int ap_domain_index = -1;
module_param_named(domain, ap_domain_index, int, 0000);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

/* Module parameter 'poll_thread': start the polling kthread at init. */
static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

/* Root device all AP card devices hang off; list of known AP devices. */
static struct device *ap_root_device = NULL;
static struct ap_config_info *ap_configuration;	/* QCI info, NULL if n/a */
static DEFINE_SPINLOCK(ap_device_list_lock);	/* protects ap_device_list */
static LIST_HEAD(ap_device_list);

/* Workqueue and timer driving the periodic bus rescan. */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;	/* rescan interval in seconds */
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/* Tasklet & poll-thread state used to drive request/reply processing. */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);	/* outstanding requests */
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);	/* serializes kthread start/stop */
static DEFINE_SPINLOCK(ap_poll_timer_lock);	/* protects ap_poll_timer */
static struct hrtimer ap_poll_timer;

/* Default high-resolution poll interval in nanoseconds (250 usecs). */
static unsigned long long poll_timeout = 250000;

/* Set while the bus is suspended; blocks rescans and poll timer rearm. */
static int ap_suspend_flag;

/* Non-zero if the domain was fixed via the 'domain' module parameter. */
static int user_set_domain = 0;
static struct bus_type ap_bus_type;

/* Non-zero once the adapter interrupt source below is registered. */
static int ap_airq_flag;

static struct airq_struct ap_airq = {
	.handler = ap_interrupt_handler,
	.isc = AP_ISC,
};
130
131
132
133
134
135static inline int ap_using_interrupts(void)
136{
137 return ap_airq_flag;
138}
139
140
141
142
143
144
/*
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Issues a TAPQ (PQAP opcode 0xb2af) for queue 0/0.  reg1 is preloaded
 * with -ENODEV; if the instruction executes, the fixup path clears it
 * to 0 via the extable entry.  NOTE(review): the exact trap/fixup flow
 * depends on s390 extable semantics — confirmed only by the asm pattern.
 *
 * Returns 0 if AP instructions are installed, -ENODEV otherwise.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la    %1,0\n"		/* success: reg1 = 0 */
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	return reg1;
}
159
160
161
162
163
164
/*
 * ap_interrupts_available() - Test if AP interrupts are available.
 *
 * Returns 1 if both required facilities (2 and 65) are installed.
 */
static int ap_interrupts_available(void)
{
	if (!test_facility(2))
		return 0;
	return test_facility(65) ? 1 : 0;
}
169
170
171
172
173
174
175
#ifdef CONFIG_64BIT
/*
 * ap_configuration_available() - Test if the QCI facility is available.
 *
 * Returns 1 if both required facilities (2 and 12) are installed.
 */
static int ap_configuration_available(void)
{
	if (!test_facility(2))
		return 0;
	return test_facility(12) ? 1 : 0;
}
#endif
182
183
184
185
186
187
188
189
190
/*
 * ap_test_queue() - Test adjunct processor queue (PQAP/TAPQ).
 * @qid: the AP queue number
 * @queue_depth: output - pending message limit of the queue
 * @device_type: output - AP device type
 *
 * Executes TAPQ for @qid; the queue status comes back in reg1, and
 * reg2 carries the device type (bits 32-39 of the low word) and the
 * queue depth (low byte).
 *
 * Returns the AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}
204
205
206
207
208
209
210
/*
 * ap_reset_queue() - Reset adjunct processor queue (PQAP/RAPQ).
 * @qid: the AP queue number
 *
 * Function code 0x01 in bits 32-39 of reg0 selects the reset function.
 *
 * Returns the AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}
222
#ifdef CONFIG_64BIT
/*
 * ap_queue_interruption_control() - Enable interruption for an AP queue
 * (PQAP/AQIC).
 * @qid: the AP queue number
 * @ind: the notification indicator byte the hardware should update
 *
 * Function code 0x03 selects AQIC; reg1 carries the enable bit and the
 * interruption subclass (AP_ISC), reg2 the indicator address.
 *
 * Returns the AP queue status structure.
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;
	asm volatile(
		".long 0xb2af0000"		/* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc" );
	return reg1_out;
}
#endif
246
#ifdef CONFIG_64BIT
/*
 * __ap_query_functions() - Query the functions of an AP queue (TAPQ
 * with the T bit set).
 * @qid: the AP queue number
 * @functions: output - function bitfield reported by the queue
 *
 * reg1 is preset to AP_QUEUE_STATUS_INVALID so a trapped instruction
 * (caught by the extable entry) leaves a recognizably invalid status.
 * On success the function mask is in the upper 32 bits of reg2.
 *
 * Returns the AP queue status structure.
 */
static inline struct ap_queue_status
__ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
	register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
	register unsigned long reg2 asm ("2");

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0:\n"
		EX_TABLE(0b, 0b)
		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
		:
		: "cc");

	*functions = (unsigned int)(reg2 >> 32);
	return reg1;
}
#endif
267
#ifdef CONFIG_64BIT
/*
 * __ap_query_configuration() - Query AP configuration info (PQAP/QCI).
 * @config: output buffer for the configuration info block
 *
 * Function code 0x04 selects QCI.  If the instruction traps, the
 * extable fixup leaves reg1 at its preset -EINVAL; otherwise the
 * 'la %1,0' clears it to 0.
 *
 * Returns 0 on success, -EINVAL if QCI is not supported.
 */
static inline int __ap_query_configuration(struct ap_config_info *config)
{
	register unsigned long reg0 asm ("0") = 0x04000000UL;
	register unsigned long reg1 asm ("1") = -EINVAL;
	register unsigned char *reg2 asm ("2") = (unsigned char *)config;

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2)
		:
		: "cc");

	return reg1;
}
#endif
287
288
289
290
291
292
293
294
295
296
297
298
/*
 * ap_query_functions() - Query the function bitfield of an AP queue,
 * retrying while the queue is busy or resetting.
 * @qid: the AP queue number
 * @functions: output - function bitfield on success
 *
 * Retries up to AP_MAX_RESET times with 5 usec delays in between.
 *
 * Returns 0 on success, -ENODEV if the queue is gone/invalid,
 * -EBUSY if it stayed busy for all retries, -EINVAL on 31-bit builds.
 */
static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int i;
	status = __ap_query_functions(qid, functions);

	for (i = 0; i < AP_MAX_RESET; i++) {
		/* A trapped TAPQ leaves an invalid status - no device. */
		if (ap_queue_status_invalid_test(&status))
			return -ENODEV;

		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			return 0;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			/* transient - retry below */
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = __ap_query_functions(qid, functions);
		}
	}
	return -EBUSY;
#else
	return -EINVAL;
#endif
}
336
337
338
339
340
341
342
343
344
345
/*
 * ap_queue_enable_interruption() - Enable interruption on an AP queue.
 * @qid: the AP queue number
 * @ind: the notification indicator byte
 *
 * Issues AQIC and then polls (via TAPQ) until the status reports
 * int_enabled, retrying up to AP_MAX_RESET times with 5 usec delays.
 *
 * Returns 0 once interrupts are enabled, -ENODEV if the queue is
 * gone/invalid, -EBUSY if enabling did not complete in time,
 * -EINVAL on 31-bit builds.
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			/* not yet enabled - fall back to TAPQ poll below */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			/* re-issue AQIC itself while busy/resetting */
			if (i < AP_MAX_RESET - 1) {
				udelay(5);
				status = ap_queue_interruption_control(qid,
								       ind);
				continue;
			}
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	return -EINVAL;
#endif
}
392
393
394
395
396
397
398
399
400
401
402
403
404
405
/*
 * __ap_send() - Send a message to an AP queue (NQAP).
 * @qid: the AP queue number
 * @psmid: the program supplied message identifier
 * @msg: the message text
 * @length: the message length in bytes
 * @special: 1 to set the "special" command bit in reg0
 *
 * The 'brc 2,0b' retries NQAP while condition code 2 (partially
 * processed) is set.  The msgblock typedef tells the compiler the
 * whole message buffer is an input.
 *
 * Returns the AP queue status structure.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  unsigned int special)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	if (special == 1)
		reg0 |= 0x400000UL;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		"   brc   2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}
429
430int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
431{
432 struct ap_queue_status status;
433
434 status = __ap_send(qid, psmid, msg, length, 0);
435 switch (status.response_code) {
436 case AP_RESPONSE_NORMAL:
437 return 0;
438 case AP_RESPONSE_Q_FULL:
439 case AP_RESPONSE_RESET_IN_PROGRESS:
440 return -EBUSY;
441 case AP_RESPONSE_REQ_FAC_NOT_INST:
442 return -EINVAL;
443 default:
444 return -ENODEV;
445 }
446}
447EXPORT_SYMBOL(ap_send);
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
/*
 * __ap_recv() - Receive a message from an AP queue (DQAP).
 * @qid: the AP queue number
 * @psmid: output - program supplied message identifier of the reply
 * @msg: buffer for the reply message
 * @length: the buffer length in bytes
 *
 * The 'brc 6,0b' retries DQAP while condition code 1 or 2 (partially
 * completed) is set.  The reply's psmid halves come back in reg6/reg7.
 *
 * Returns the AP queue status structure.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;


	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		"   brc   6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc" );
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}
489
490int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
491{
492 struct ap_queue_status status;
493
494 status = __ap_recv(qid, psmid, msg, length);
495 switch (status.response_code) {
496 case AP_RESPONSE_NORMAL:
497 return 0;
498 case AP_RESPONSE_NO_PENDING_REPLY:
499 if (status.queue_empty)
500 return -ENOENT;
501 return -EBUSY;
502 case AP_RESPONSE_RESET_IN_PROGRESS:
503 return -EBUSY;
504 default:
505 return -ENODEV;
506 }
507}
508EXPORT_SYMBOL(ap_recv);
509
510
511
512
513
514
515
516
517
518static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
519{
520 struct ap_queue_status status;
521 int t_depth, t_device_type, rc, i;
522
523 rc = -EBUSY;
524 for (i = 0; i < AP_MAX_RESET; i++) {
525 status = ap_test_queue(qid, &t_depth, &t_device_type);
526 switch (status.response_code) {
527 case AP_RESPONSE_NORMAL:
528 *queue_depth = t_depth + 1;
529 *device_type = t_device_type;
530 rc = 0;
531 break;
532 case AP_RESPONSE_Q_NOT_AVAIL:
533 rc = -ENODEV;
534 break;
535 case AP_RESPONSE_RESET_IN_PROGRESS:
536 break;
537 case AP_RESPONSE_DECONFIGURED:
538 rc = -ENODEV;
539 break;
540 case AP_RESPONSE_CHECKSTOPPED:
541 rc = -ENODEV;
542 break;
543 case AP_RESPONSE_INVALID_ADDRESS:
544 rc = -ENODEV;
545 break;
546 case AP_RESPONSE_OTHERWISE_CHANGED:
547 break;
548 case AP_RESPONSE_BUSY:
549 break;
550 default:
551 BUG();
552 }
553 if (rc != -EBUSY)
554 break;
555 if (i < AP_MAX_RESET - 1)
556 udelay(5);
557 }
558 return rc;
559}
560
561
562
563
564
565
566
/*
 * ap_init_queue() - Reset an AP queue and, if the bus runs interrupt
 * driven, enable interruption on it.
 * @qid: the AP queue number
 *
 * Issues RAPQ once, then polls the queue state (TAPQ) until the reset
 * completes, retrying up to AP_MAX_RESET times with 5 usec delays.
 *
 * Returns 0 on success, -ENODEV or -EBUSY on failure.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			/* terminate the retry loop, keep rc = -ENODEV */
			i = AP_MAX_RESET;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fallthrough */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
		/* A failure here leaves rc set; the caller will skip the
		 * queue rather than fall back to polling it. */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}
609
610
611
612
613
614
615
616static void ap_increase_queue_count(struct ap_device *ap_dev)
617{
618 int timeout = ap_dev->drv->request_timeout;
619
620 ap_dev->queue_count++;
621 if (ap_dev->queue_count == 1) {
622 mod_timer(&ap_dev->timeout, jiffies + timeout);
623 ap_dev->reset = AP_RESET_ARMED;
624 }
625}
626
627
628
629
630
631
632
633
634static void ap_decrease_queue_count(struct ap_device *ap_dev)
635{
636 int timeout = ap_dev->drv->request_timeout;
637
638 ap_dev->queue_count--;
639 if (ap_dev->queue_count > 0)
640 mod_timer(&ap_dev->timeout, jiffies + timeout);
641 else
642
643
644
645
646
647 ap_dev->reset = AP_RESET_IGNORE;
648}
649
650
651
652
/* sysfs attribute: hardware type of the AP device. */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}

static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

/* sysfs attribute: queue depth of the AP device. */
static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

/* sysfs attribute: total number of requests sent to the device. */
static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	/* counters are updated under ap_dev->lock */
	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

/* sysfs attribute: number of requests still waiting to be sent. */
static ssize_t ap_requestq_count_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);

/* sysfs attribute: number of requests sent and awaiting a reply. */
static ssize_t ap_pendingq_count_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);

/* sysfs attribute: modalias string used for module autoloading. */
static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

/* sysfs attribute: function bitfield reported by TAPQ. */
static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
}

static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);

/* All per-device sysfs attributes, registered as one group. */
static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_modalias.attr,
	&dev_attr_ap_functions.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};
742
743
744
745
746
747
748
749
750static int ap_bus_match(struct device *dev, struct device_driver *drv)
751{
752 struct ap_device *ap_dev = to_ap_dev(dev);
753 struct ap_driver *ap_drv = to_ap_drv(drv);
754 struct ap_device_id *id;
755
756
757
758
759
760 for (id = ap_drv->ids; id->match_flags; id++) {
761 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
762 (id->dev_type != ap_dev->device_type))
763 continue;
764 return 1;
765 }
766 return 0;
767}
768
769
770
771
772
773
774
775
776
777static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
778{
779 struct ap_device *ap_dev = to_ap_dev(dev);
780 int retval = 0;
781
782 if (!ap_dev)
783 return -ENODEV;
784
785
786 retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
787 if (retval)
788 return retval;
789
790
791 retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
792
793 return retval;
794}
795
/*
 * ap_bus_suspend() - Power-management suspend callback for AP devices.
 * @dev: the AP device being suspended
 * @state: the target PM state (unused here)
 *
 * Called once per device; the first call also tears down the global
 * bus machinery (config timer, workqueue, tasklet).  Each device is
 * then polled until it has no work left, and marked unregistered.
 *
 * Returns 0.
 */
static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/* Disable scanning for devices, thus we do not want to scan
		 * for them after removing. */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished;
	 * flags bit 0/1 signal remaining send/receive work. */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}
829
/*
 * ap_bus_resume() - Power-management resume callback for AP devices.
 * @dev: the AP device being resumed
 *
 * The first call after a suspend rebuilds the global bus machinery:
 * re-registers (or unregisters) the adapter interrupt depending on
 * facility availability, re-reads the QCI configuration, re-selects
 * the domain unless the user pinned one, restarts the config timer,
 * workqueue, tasklet/poll timer and optionally the poll thread.
 * Every call fixes up the device's qid if the domain changed and
 * triggers a bus rescan.
 *
 * Returns 0 on success or a negative errno.
 */
static int ap_bus_resume(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	if (ap_suspend_flag) {
		ap_suspend_flag = 0;
		if (ap_interrupts_available()) {
			if (!ap_using_interrupts()) {
				rc = register_adapter_interrupt(&ap_airq);
				ap_airq_flag = (rc == 0);
			}
		} else {
			if (ap_using_interrupts()) {
				unregister_adapter_interrupt(&ap_airq);
				ap_airq_flag = 0;
			}
		}
		ap_query_configuration();
		if (!user_set_domain) {
			/* domain may have changed across the suspend */
			ap_domain_index = -1;
			ap_select_domain();
		}
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
		else
			rc = 0;
	} else
		rc = 0;
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		/* rebuild the qid with the (possibly new) domain index */
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}
882
/* AP bus type: device/driver matching, hotplug events and PM callbacks. */
static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};
890
891static int ap_device_probe(struct device *dev)
892{
893 struct ap_device *ap_dev = to_ap_dev(dev);
894 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
895 int rc;
896
897 ap_dev->drv = ap_drv;
898 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
899 if (!rc) {
900 spin_lock_bh(&ap_device_list_lock);
901 list_add(&ap_dev->list, &ap_device_list);
902 spin_unlock_bh(&ap_device_list_lock);
903 }
904 return rc;
905}
906
907
908
909
910
911
912
913static void __ap_flush_queue(struct ap_device *ap_dev)
914{
915 struct ap_message *ap_msg, *next;
916
917 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
918 list_del_init(&ap_msg->list);
919 ap_dev->pendingq_count--;
920 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
921 }
922 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
923 list_del_init(&ap_msg->list);
924 ap_dev->requestq_count--;
925 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
926 }
927}
928
/*
 * ap_flush_queue() - Locked wrapper around __ap_flush_queue().
 * @ap_dev: pointer to the AP device
 */
void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
936
/*
 * ap_device_remove() - Unbind an AP device from its driver.
 * @dev: the generic device embedded in struct ap_device
 *
 * Flushes outstanding requests, stops the request timeout timer,
 * unlinks the device from the global list, calls the driver's remove
 * callback and removes the device's in-flight requests from the
 * global poll accounting.
 *
 * Returns 0.
 */
static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}
954
/*
 * ap_driver_register() - Register an AP device driver on the AP bus.
 * @ap_drv: pointer to the AP driver structure
 * @owner: the owning module
 * @name: the driver name
 *
 * Returns 0 on success or a negative errno from driver_register().
 */
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);
968
/*
 * ap_driver_unregister() - Unregister an AP device driver.
 * @ap_drv: pointer to the AP driver structure
 */
void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
974
/*
 * ap_bus_force_rescan() - Trigger an immediate bus rescan and wait
 * until it has completed.
 */
void ap_bus_force_rescan(void)
{
	/* re-arm the periodic scan timer */
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
	/* processing a synchronous bus rescan */
	queue_work(ap_work_queue, &ap_config_work);
	flush_work(&ap_config_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);
984
985
986
987
/* sysfs bus attribute: currently selected AP domain index. */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

/* sysfs bus attribute: bus rescan interval in seconds. */
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

/* sysfs bus attribute: 1 if the bus is interrupt driven, 0 if polled. */
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			ap_using_interrupts() ? 1 : 0);
}

static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
1007
/*
 * ap_config_time_store() - Set the bus rescan interval (5..120 s) and
 * re-arm the config timer so the new interval takes effect at once.
 */
static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	/* If mod_timer() found the timer inactive, (re)start it. */
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
1025
/* sysfs bus attribute: 1 if the polling kthread is running. */
static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

/*
 * ap_poll_thread_store() - Start (non-zero) or stop (zero) the polling
 * kthread.
 */
static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	}
	else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
1049
/* sysfs bus attribute: hrtimer poll interval in nanoseconds. */
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

/*
 * poll_timeout_store() - Set the hrtimer poll interval and re-arm the
 * poll timer with the new value.  Valid range is 1 ns to 120 s.
 */
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	/* If the timer was idle or could not be forwarded, restart it. */
	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
1077
/* All AP bus level sysfs attributes. */
static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};
1086
/*
 * ap_test_config() - Test a bit in a QCI config bit field.
 * @field: the 256-bit field (array of 32-bit words)
 * @nr: the bit number (0..255)
 *
 * Returns the bit value, or 0 for out-of-range bit numbers.
 */
static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	if (nr > 0xFFu)
		return 0;
	return ap_test_bit(field + (nr >> 5), nr & 0x1f);
}
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102static inline int ap_test_config_card_id(unsigned int id)
1103{
1104 if (!ap_configuration)
1105 return 1;
1106 return ap_test_config(ap_configuration->apm, id);
1107}
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117static inline int ap_test_config_domain(unsigned int domain)
1118{
1119 if (!ap_configuration)
1120 return 1;
1121 return ap_test_config(ap_configuration->aqm, domain);
1122}
1123
1124
1125
1126
1127
1128
/*
 * ap_query_configuration() - (Re-)read the QCI configuration info into
 * the global ap_configuration buffer.
 *
 * The buffer is allocated lazily on first use and kept across calls;
 * without the QCI facility ap_configuration stays NULL, which the
 * ap_test_config_* helpers interpret as "everything configured".
 */
static void ap_query_configuration(void)
{
#ifdef CONFIG_64BIT
	if (ap_configuration_available()) {
		if (!ap_configuration)
			ap_configuration =
				kzalloc(sizeof(struct ap_config_info),
					GFP_KERNEL);
		if (ap_configuration)
			__ap_query_configuration(ap_configuration);
	} else
		ap_configuration = NULL;
#else
	ap_configuration = NULL;
#endif
}
1145
1146
1147
1148
1149
1150
/*
 * ap_select_domain() - Pick the AP usage domain with the most usable
 * card queues and store it in ap_domain_index.
 *
 * If a valid domain is already set (e.g. via the module parameter),
 * it is kept.  Otherwise all configured domain/card combinations are
 * probed with ap_query_queue() and the domain with the highest count
 * of responding queues wins.
 *
 * Returns 0 on success, -ENODEV if no usable domain was found.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	ap_qid_t qid;
	int rc, i, j;

	/* A valid, previously chosen or user supplied domain is final. */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* domain already set, not changing it */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		if (!ap_test_config_domain(i))
			continue;
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			if (!ap_test_config_card_id(j))
				continue;
			qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0){
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}
1191
1192
1193
1194
1195
1196
1197
/*
 * ap_probe_device_type() - Determine the AP device type of a card that
 * reports type 0, by sending a canned test request and inspecting the
 * reply.
 * @ap_dev: the AP device to probe
 *
 * Sends the fixed message below with a known psmid, then polls for the
 * reply (up to 6 x 300 msec).  A reply starting with 0x00 0x86 marks a
 * PCICC card, anything else a PCICA card.  NOTE(review): the message
 * bytes are an opaque, device-specific test request; their meaning is
 * not derivable from this file.
 *
 * Returns 0 on success (device_type set), -ENOMEM or -ENODEV on error.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg), 0);
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}
1288
/*
 * ap_interrupt_handler() - Adapter interrupt handler; accounts the IRQ
 * and defers all real work to the tasklet.
 * @airq: the adapter interrupt source
 */
static void ap_interrupt_handler(struct airq_struct *airq)
{
	inc_irq_stat(IRQIO_APB);
	tasklet_schedule(&ap_tasklet);
}
1294
1295
1296
1297
1298
1299
1300
1301
1302static int __ap_scan_bus(struct device *dev, void *data)
1303{
1304 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1305}
1306
/*
 * ap_device_release() - Release callback for the device core; frees
 * the ap_device when the last reference is dropped.
 * @dev: the generic device embedded in struct ap_device
 */
static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	kfree(ap_dev);
}
1313
/*
 * ap_scan_bus() - Scan the AP bus for new and vanished devices.
 * @unused: workqueue cookie, not used
 *
 * For every possible card id in the selected domain: if the device is
 * already known, verify it still responds and unregister it otherwise;
 * if it is new and responding, reset/initialize its queue, determine
 * its type and functions, and register it with the driver core.
 */
static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	unsigned int device_functions;
	int rc, i;

	ap_query_configuration();
	if (ap_select_domain() != 0) {
		/* no usable domain - nothing to scan */
		return;
	}
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		/* look up an already registered device for this qid */
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		if (ap_test_config_card_id(i))
			rc = ap_query_queue(qid, &queue_depth, &device_type);
		else
			rc = -ENODEV;
		if (dev) {
			if (rc == -EBUSY) {
				/* give a busy queue one more chance */
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				if (ap_dev->unregistered)
					i--;	/* retry this card id */
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		switch (device_type) {
		case 0:
			/* old/unknown hardware: probe the type actively */
			if (ap_probe_device_type(ap_dev)) {
				kfree(ap_dev);
				continue;
			}
			break;
		default:
			ap_dev->device_type = device_type;
		}

		rc = ap_query_functions(qid, &device_functions);
		if (!rc)
			ap_dev->functions = device_functions;
		else
			ap_dev->functions = 0u;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		if (dev_set_name(&ap_dev->device, "card%02x",
				 AP_QID_DEVICE(ap_dev->qid))) {
			kfree(ap_dev);
			continue;
		}
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			/* release callback frees ap_dev */
			put_device(&ap_dev->device);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		}
		else
			device_unregister(&ap_dev->device);
	}
}
1417
1418static void
1419ap_config_timeout(unsigned long ptr)
1420{
1421 queue_work(ap_work_queue, &ap_config_work);
1422 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1423 add_timer(&ap_config_timer);
1424}
1425
1426
1427
1428
1429
1430
/*
 * __ap_schedule_poll_timer() - Arm the high resolution poll timer.
 *
 * Starts ap_poll_timer to fire poll_timeout nanoseconds from now,
 * unless it is already queued or the bus is suspended.  Serialized
 * against other callers by ap_poll_timer_lock.
 */
static inline void __ap_schedule_poll_timer(void)
{
	ktime_t hr_time;

	spin_lock_bh(&ap_poll_timer_lock);
	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
		goto out;
	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
out:
	spin_unlock_bh(&ap_poll_timer_lock);
}
1446
1447
1448
1449
1450
1451
/*
 * ap_schedule_poll_timer() - Arm the poll timer if polling is needed.
 *
 * When AP adapter interrupts are in use there is nothing to poll for;
 * otherwise delegate to __ap_schedule_poll_timer().
 */
static inline void ap_schedule_poll_timer(void)
{
	if (!ap_using_interrupts())
		__ap_schedule_poll_timer();
}
1458
1459
1460
1461
1462
1463
1464
1465
1466
/*
 * ap_poll_read() - Receive pending replies from an AP queue.
 * @ap_dev: pointer to the AP device
 * @flags: poll flags; bit 2^0 is set if polling should continue
 *         immediately, bit 2^1 if the poll timer should be (re)armed
 *
 * Fetches one reply with __ap_recv() and completes the matching
 * message from the pendingq via its receive callback.  Caller is
 * expected to hold ap_dev->lock.  Returns 0 if the device is still
 * operational, -ENODEV otherwise.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shows no outstanding work although we
			 * counted some: resubmit everything we thought was
			 * in flight. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}
1507
1508
1509
1510
1511
1512
1513
1514
1515
/*
 * ap_poll_write() - Send one queued request to an AP queue.
 * @ap_dev: pointer to the AP device
 * @flags: poll flags; bit 2^0 is set if polling should continue
 *         immediately, bit 2^1 if the poll timer should be (re)armed
 *
 * Moves the head of the software requestq to the hardware queue with
 * __ap_send() when there is room.  Caller is expected to hold
 * ap_dev->lock.  Returns 0 if the device is still operational,
 * -EINVAL for messages the device rejects, -ENODEV otherwise.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the requestq. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		__ap_schedule_poll_timer();
		/* fallthrough - retry later, like a full queue */
	case AP_RESPONSE_Q_FULL:
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
/*
 * ap_poll_queue() - Poll an AP device for finished and new requests.
 * @ap_dev: pointer to the AP device
 * @flags: poll flags, passed through to ap_poll_read()/ap_poll_write()
 *
 * First drains finished replies, then feeds the next waiting request.
 * Returns 0 while the device is operational, otherwise the error from
 * the failing step.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc = ap_poll_read(ap_dev, flags);

	return rc ? rc : ap_poll_write(ap_dev, flags);
}
1573
1574
1575
1576
1577
1578
1579
1580
/*
 * __ap_queue_message() - Queue a message to an AP device.
 * @ap_dev: AP device the message is sent to
 * @ap_msg: message to queue
 *
 * Caller must hold ap_dev->lock.  If the hardware queue has room and
 * no older request is waiting, the message is sent right away with
 * __ap_send(); otherwise it is parked on the software requestq.
 * Returns 0 on immediate send, -EBUSY if the message was queued in
 * software, -EINVAL/-ENODEV for fatal errors (in which case the
 * message's receive callback has already been completed with the
 * matching ERR_PTR).
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length,
				   ap_msg->special);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			/* Transient condition: keep the request queued. */
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_REQ_FAC_NOT_INST:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}
1621
1622void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1623{
1624 unsigned long flags;
1625 int rc;
1626
1627
1628
1629 BUG_ON(!ap_msg->receive);
1630
1631 spin_lock_bh(&ap_dev->lock);
1632 if (!ap_dev->unregistered) {
1633
1634 rc = ap_poll_queue(ap_dev, &flags);
1635 if (!rc)
1636 rc = __ap_queue_message(ap_dev, ap_msg);
1637 if (!rc)
1638 wake_up(&ap_poll_wait);
1639 if (rc == -ENODEV)
1640 ap_dev->unregistered = 1;
1641 } else {
1642 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1643 rc = -ENODEV;
1644 }
1645 spin_unlock_bh(&ap_dev->lock);
1646 if (rc == -ENODEV)
1647 device_unregister(&ap_dev->device);
1648}
1649EXPORT_SYMBOL(ap_queue_message);
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
/*
 * ap_cancel_message() - Cancel a crypto request.
 * @ap_dev: AP device the request was queued on
 * @ap_msg: request to cancel
 *
 * Removes the message from whichever software list it is on.  The
 * counter fixup relies on the invariant that a message with a
 * non-empty list head sits on either the pendingq or the requestq:
 * if it is found on the pendingq that counter is decremented,
 * otherwise the requestq counter is.  A request already handed to
 * the hardware cannot be revoked; only the bookkeeping is dropped.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
	found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
1679
1680
1681
1682
1683
1684
1685
/*
 * ap_poll_timeout() - AP receive polling timer callback.
 * @unused: hrtimer pointer required by the hrtimer API, not used
 *
 * Schedules the AP tasklet to do the actual polling; the timer is
 * one-shot and re-armed from ap_schedule_poll_timer() as needed.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}
1691
1692
1693
1694
1695
1696
1697
1698
/*
 * ap_reset() - Reset a not responding AP device.
 * @ap_dev: pointer to the AP device
 *
 * Caller must hold ap_dev->lock.  All requests believed to be in
 * flight are moved back to the software requestq, then the hardware
 * queue is reinitialized.  If the reset shows the device is gone it
 * is flagged unregistered, otherwise polling is rescheduled so the
 * requeued work gets resent.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
	else
		__ap_schedule_poll_timer();
}
1715
1716static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1717{
1718 if (!ap_dev->unregistered) {
1719 if (ap_poll_queue(ap_dev, flags))
1720 ap_dev->unregistered = 1;
1721 if (ap_dev->reset == AP_RESET_DO)
1722 ap_reset(ap_dev);
1723 }
1724 return 0;
1725}
1726
1727
1728
1729
1730
1731
1732
1733
1734
/*
 * ap_poll_all() - AP bus tasklet body: poll every AP device.
 * @dummy: unused tasklet argument
 *
 * When interrupts are in use the local summary indicator is cleared
 * first (xchg) so a new interrupt can be raised for work that arrives
 * while we poll.  All devices are then polled repeatedly as long as
 * any of them reports more immediate work (flag bit 2^0); if any
 * device asked for a later retry (flag bit 2^1) the poll timer is
 * armed.
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the indicator if interrupts are used. Thus new interrupts
	 * can be received for outstanding replies while this poll pass is
	 * running. */
	if (ap_using_interrupts())
		xchg(ap_airq.lsi_ptr, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
/*
 * ap_poll_thread() - Optional polling kthread ("appoll").
 * @data: unused kthread argument
 *
 * Alternative to timer/interrupt driven polling: loops at lowest
 * priority (nice 19), sleeping on ap_poll_wait while no requests are
 * outstanding, and polls every device otherwise.  The wait-queue
 * enrollment must happen before the request count is sampled so a
 * wake_up() between the check and schedule() is not lost.  Exits when
 * kthread_stop() is called or the bus is suspending.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, 19);
	while (1) {
		if (ap_suspend_flag)
			return 0;
		if (need_resched()) {
			schedule();
			continue;
		}
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}
1808
1809static int ap_poll_thread_start(void)
1810{
1811 int rc;
1812
1813 if (ap_using_interrupts() || ap_suspend_flag)
1814 return 0;
1815 mutex_lock(&ap_poll_thread_mutex);
1816 if (!ap_poll_kthread) {
1817 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1818 rc = PTR_RET(ap_poll_kthread);
1819 if (rc)
1820 ap_poll_kthread = NULL;
1821 }
1822 else
1823 rc = 0;
1824 mutex_unlock(&ap_poll_thread_mutex);
1825 return rc;
1826}
1827
/*
 * ap_poll_thread_stop() - Stop the "appoll" polling kthread.
 *
 * Safe to call when the thread was never started; serialized against
 * ap_poll_thread_start() by ap_poll_thread_mutex.
 */
static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}
1837
1838
1839
1840
1841
1842
1843
1844static void ap_request_timeout(unsigned long data)
1845{
1846 struct ap_device *ap_dev = (struct ap_device *) data;
1847
1848 if (ap_dev->reset == AP_RESET_ARMED) {
1849 ap_dev->reset = AP_RESET_DO;
1850
1851 if (ap_using_interrupts())
1852 tasklet_schedule(&ap_tasklet);
1853 }
1854}
1855
1856static void ap_reset_domain(void)
1857{
1858 int i;
1859
1860 if (ap_domain_index != -1)
1861 for (i = 0; i < AP_DEVICES; i++)
1862 ap_reset_queue(AP_MKQID(i, ap_domain_index));
1863}
1864
1865static void ap_reset_all(void)
1866{
1867 int i, j;
1868
1869 for (i = 0; i < AP_DOMAINS; i++)
1870 for (j = 0; j < AP_DEVICES; j++)
1871 ap_reset_queue(AP_MKQID(j, i));
1872}
1873
/* Registered with register_reset_call() in ap_module_init() so all AP
 * queues are cleaned up on a machine reset. */
static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};
1877
1878
1879
1880
1881
1882
/*
 * ap_module_init() - AP bus module initialization.
 *
 * Validates the configured domain, checks for AP instruction support,
 * optionally registers the adapter interrupt, then brings up the bus:
 * bus type, bus attributes, root device, work queue, initial bus scan,
 * configuration timer and poll timer, and (if requested via the
 * poll_thread module parameter) the polling kthread.  On failure the
 * goto ladder unwinds exactly the steps already completed, in reverse
 * order.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/* In resume callback we need to know if the user had set the domain.
	 * If so, we can not just reset it.
	 */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		/* Failure here is not fatal: fall back to polling. */
		rc = register_adapter_interrupt(&ap_airq);
		ap_airq_flag = (rc == 0);
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = PTR_RET(ap_root_device);
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	ap_query_configuration();
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL); /* initial synchronous scan */

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
	return rc;
}
1977
/* bus_find_device() callback matching every device; used by
 * ap_module_exit() to iterate and unregister all AP devices. */
static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}
1982
1983
1984
1985
1986
1987
/*
 * ap_module_exit() - AP bus module cleanup.
 *
 * Tears down in reverse order of ap_module_init(): quiesce the domain,
 * stop all polling machinery (thread, timers, workqueue, tasklet),
 * remove every registered AP device, then the root device, bus
 * attributes, bus type, reset callback and adapter interrupt.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev); /* drop the bus_find_device() reference */
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
}
2013
/* Hook module load/unload into the bus init/exit routines. */
module_init(ap_module_init);
module_exit(ap_module_exit);
2016