1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#define KMSG_COMPONENT "ap"
28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/delay.h>
33#include <linux/err.h>
34#include <linux/interrupt.h>
35#include <linux/workqueue.h>
36#include <linux/notifier.h>
37#include <linux/kthread.h>
38#include <linux/mutex.h>
39#include <asm/reset.h>
40#include <asm/airq.h>
41#include <asm/atomic.h>
42#include <asm/system.h>
43#include <asm/isc.h>
44#include <linux/hrtimer.h>
45#include <linux/ktime.h>
46
47#include "ap_bus.h"
48
49
/* Forward declarations for callbacks and helpers referenced before
 * their definitions below. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(void *unused1, void *unused2);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);
64
65
66
67
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
 "Copyright 2006 IBM Corporation");
MODULE_LICENSE("GPL");

/*
 * Module parameters: target AP domain and optional dedicated poll thread.
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, 0000);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 0;	/* 1 = run the dedicated poll kthread */
module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

static struct device *ap_root_device = NULL;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);	/* all probed AP devices */

/*
 * Workqueue and timer for the periodic AP bus rescan (ap_scan_bus).
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/*
 * Tasklet, request counter and timer for polling outstanding requests.
 */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);	/* in-flight requests */
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static void *ap_interrupt_indicator;	/* non-NULL => adapter irqs active */
static struct hrtimer ap_poll_timer;

/* Default hrtimer poll interval: 250000 ns (4 kHz), see poll_timeout_store */
static unsigned long long poll_timeout = 250000;

/* Set while the bus is suspended (power management). */
static int ap_suspend_flag;

/* Non-zero when the domain was fixed via the "domain=" module parameter.
 * NOTE(review): never written in this chunk — presumably set by module
 * init code outside this view; confirm before relying on it. */
static int user_set_domain = 0;
static struct bus_type ap_bus_type;
118
119
120
121
122
123static inline int ap_using_interrupts(void)
124{
125 return ap_interrupt_indicator != NULL;
126}
127
128
129
130
131
132
/**
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Issues a TAPQ for queue 0.0. reg1 is preloaded with -ENODEV; if the
 * instruction executes, the following "la %1,0" clears it to 0. If the
 * instruction causes an exception, the fixup in EX_TABLE resumes at
 * label 1, skipping the "la" and leaving -ENODEV in reg1.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		" .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	return reg1;
}
147
148
149
150
151
152
/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Checks the STFLE facility list: both required bits (bit 61 of the
 * first doubleword and bit 62 of the second doubleword in machine
 * bit order) must be set. NOTE(review): exact facility numbers are
 * not derivable from this chunk — confirm against the architecture.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	unsigned long long facility_bits[2];

	/* Need at least two facility doublewords from stfle. */
	if (stfle(facility_bits, 2) <= 1)
		return 0;
	if (!(facility_bits[0] & (1ULL << 61)) ||
	    !(facility_bits[1] & (1ULL << 62)))
		return 0;
	return 1;
}
164
165
166
167
168
169
170
171
172
/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Issues a TAPQ; the instruction returns extra information in GR2:
 * the device type above bit 24 and the queue depth in the low byte.
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}
186
187
188
189
190
191
192
/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
 *
 * The 0x01000000 modifier in GR0 selects the reset function of the
 * PQAP instruction.
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}
204
205#ifdef CONFIG_64BIT
206
207
208
209
210
211
212
/**
 * ap_queue_interruption_control(): Enable interruption for a specific AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * The 0x03000000 modifier in GR0 selects the interruption-control
 * function; GR1 carries the enable flag and the interruption subclass.
 *
 * Returns AP queue status.
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;
	asm volatile(
		".long 0xb2af0000"		/* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc" );
	return reg1_out;
}
227#endif
228
229
230
231
232
233
234
235
236
237
/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: the notification indicator byte
 *
 * Enables interruption on an AP queue via
 * ap_queue_interruption_control(). Depending on the response code it
 * waits a while and re-tests the queue with ap_test_queue() until the
 * int_enabled bit shows up, the retry budget is exhausted (-EBUSY) or
 * the queue turns out to be gone (-ENODEV). Only available on 64 bit.
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	return -EINVAL;
#endif
}
278
279
280
281
282
283
284
285
286
287
288
289
290
/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition code 2 on NQAP means the send is incomplete because a
 * segment boundary was reached; the "brc 2,0b" repeats the NQAP.
 * The msgblock typedef tells the compiler the full message buffer is
 * read through the "m" constraint.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		" brc 2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}
310
311int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
312{
313 struct ap_queue_status status;
314
315 status = __ap_send(qid, psmid, msg, length);
316 switch (status.response_code) {
317 case AP_RESPONSE_NORMAL:
318 return 0;
319 case AP_RESPONSE_Q_FULL:
320 case AP_RESPONSE_RESET_IN_PROGRESS:
321 return -EBUSY;
322 default:
323 return -ENODEV;
324 }
325}
326EXPORT_SYMBOL(ap_send);
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition codes 1 and 2 on DQAP mean the receive is incomplete
 * (partial reply or segment boundary); the "brc 6,0b" repeats the
 * DQAP. GR2 is used by DQAP to keep track of any residual length in
 * case the instruction gets interrupted, hence it is zeroed first.
 * The psmid of the received reply comes back split across GR6/GR7.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;


	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		" brc 6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		"=m" (*(msgblock *) msg) : : "cc" );
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}
368
369int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
370{
371 struct ap_queue_status status;
372
373 status = __ap_recv(qid, psmid, msg, length);
374 switch (status.response_code) {
375 case AP_RESPONSE_NORMAL:
376 return 0;
377 case AP_RESPONSE_NO_PENDING_REPLY:
378 if (status.queue_empty)
379 return -ENOENT;
380 return -EBUSY;
381 case AP_RESPONSE_RESET_IN_PROGRESS:
382 return -EBUSY;
383 default:
384 return -ENODEV;
385 }
386}
387EXPORT_SYMBOL(ap_recv);
388
389
390
391
392
393
394
395
396
397static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
398{
399 struct ap_queue_status status;
400 int t_depth, t_device_type, rc, i;
401
402 rc = -EBUSY;
403 for (i = 0; i < AP_MAX_RESET; i++) {
404 status = ap_test_queue(qid, &t_depth, &t_device_type);
405 switch (status.response_code) {
406 case AP_RESPONSE_NORMAL:
407 *queue_depth = t_depth + 1;
408 *device_type = t_device_type;
409 rc = 0;
410 break;
411 case AP_RESPONSE_Q_NOT_AVAIL:
412 rc = -ENODEV;
413 break;
414 case AP_RESPONSE_RESET_IN_PROGRESS:
415 break;
416 case AP_RESPONSE_DECONFIGURED:
417 rc = -ENODEV;
418 break;
419 case AP_RESPONSE_CHECKSTOPPED:
420 rc = -ENODEV;
421 break;
422 case AP_RESPONSE_INVALID_ADDRESS:
423 rc = -ENODEV;
424 break;
425 case AP_RESPONSE_OTHERWISE_CHANGED:
426 break;
427 case AP_RESPONSE_BUSY:
428 break;
429 default:
430 BUG();
431 }
432 if (rc != -EBUSY)
433 break;
434 if (i < AP_MAX_RESET - 1)
435 udelay(5);
436 }
437 return rc;
438}
439
440
441
442
443
444
445
/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Reset an AP queue and wait for it to become available again, then
 * enable interruption for it if adapter interrupts are in use.
 * Returns 0 on success, -EBUSY if the reset never completed, -ENODEV
 * if the queue is gone or interruption could not be enabled.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* force loop exit, rc stays -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fallthrough - keep retrying like BUSY */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
		/* If interruption mode is supported by the machine,
		 * but an AP can not be enabled for interruption then
		 * the AP will be discarded.
		 */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}
488
489
490
491
492
493
494
495static void ap_increase_queue_count(struct ap_device *ap_dev)
496{
497 int timeout = ap_dev->drv->request_timeout;
498
499 ap_dev->queue_count++;
500 if (ap_dev->queue_count == 1) {
501 mod_timer(&ap_dev->timeout, jiffies + timeout);
502 ap_dev->reset = AP_RESET_ARMED;
503 }
504}
505
506
507
508
509
510
511
512
513static void ap_decrease_queue_count(struct ap_device *ap_dev)
514{
515 int timeout = ap_dev->drv->request_timeout;
516
517 ap_dev->queue_count--;
518 if (ap_dev->queue_count > 0)
519 mod_timer(&ap_dev->timeout, jiffies + timeout);
520 else
521
522
523
524
525
526 ap_dev->reset = AP_RESET_IGNORE;
527}
528
529
530
531
532static ssize_t ap_hwtype_show(struct device *dev,
533 struct device_attribute *attr, char *buf)
534{
535 struct ap_device *ap_dev = to_ap_dev(dev);
536 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
537}
538
539static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
540static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
541 char *buf)
542{
543 struct ap_device *ap_dev = to_ap_dev(dev);
544 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
545}
546
547static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
548static ssize_t ap_request_count_show(struct device *dev,
549 struct device_attribute *attr,
550 char *buf)
551{
552 struct ap_device *ap_dev = to_ap_dev(dev);
553 int rc;
554
555 spin_lock_bh(&ap_dev->lock);
556 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
557 spin_unlock_bh(&ap_dev->lock);
558 return rc;
559}
560
561static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
562
563static ssize_t ap_modalias_show(struct device *dev,
564 struct device_attribute *attr, char *buf)
565{
566 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
567}
568
569static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
570
/* Attributes attached to every AP card device in sysfs. */
static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_modalias.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};
581
582
583
584
585
586
587
588
589static int ap_bus_match(struct device *dev, struct device_driver *drv)
590{
591 struct ap_device *ap_dev = to_ap_dev(dev);
592 struct ap_driver *ap_drv = to_ap_drv(drv);
593 struct ap_device_id *id;
594
595
596
597
598
599 for (id = ap_drv->ids; id->match_flags; id++) {
600 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
601 (id->dev_type != ap_dev->device_type))
602 continue;
603 return 1;
604 }
605 return 0;
606}
607
608
609
610
611
612
613
614
615
616static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
617{
618 struct ap_device *ap_dev = to_ap_dev(dev);
619 int retval = 0;
620
621 if (!ap_dev)
622 return -ENODEV;
623
624
625 retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
626 if (retval)
627 return retval;
628
629
630 retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
631
632 return retval;
633}
634
/*
 * ap_bus_suspend(): Per-device suspend callback.
 *
 * On the first call, stops the bus rescan machinery and the poll
 * tasklet (ap_suspend_flag guards against doing this more than once).
 * Then drains this device's outstanding requests and marks it
 * unregistered.
 */
static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/* Disable scanning for devices, thus we do not want to scan
		 * for them after removing. */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished
	 * (flags bit 0 = poll again, bit 1 = replies outstanding). */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}
668
/*
 * ap_bus_resume(): Per-device resume callback.
 *
 * On the first call after suspend, re-detects interrupt support,
 * reselects the AP domain (unless fixed by the "domain=" parameter)
 * and restarts the rescan timer, workqueue, tasklet and poll thread.
 * Then rewrites this device's qid for the (possibly new) domain and
 * triggers a bus rescan.
 */
static int ap_bus_resume(struct device *dev)
{
	int rc = 0;
	struct ap_device *ap_dev = to_ap_dev(dev);

	if (ap_suspend_flag) {
		ap_suspend_flag = 0;
		/* Interrupt support may have changed across suspend. */
		if (!ap_interrupts_available())
			ap_interrupt_indicator = NULL;
		if (!user_set_domain) {
			ap_domain_index = -1;
			ap_select_domain();
		}
		/* Re-enable the periodic bus rescan. */
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
	}
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		/* Domain changed while suspended: rebuild the qid. */
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}
708
/* The AP bus type: matching, uevent and power-management callbacks. */
static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};
716
717static int ap_device_probe(struct device *dev)
718{
719 struct ap_device *ap_dev = to_ap_dev(dev);
720 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
721 int rc;
722
723 ap_dev->drv = ap_drv;
724 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
725 if (!rc) {
726 spin_lock_bh(&ap_device_list_lock);
727 list_add(&ap_dev->list, &ap_device_list);
728 spin_unlock_bh(&ap_device_list_lock);
729 }
730 return rc;
731}
732
733
734
735
736
737
738
739static void __ap_flush_queue(struct ap_device *ap_dev)
740{
741 struct ap_message *ap_msg, *next;
742
743 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
744 list_del_init(&ap_msg->list);
745 ap_dev->pendingq_count--;
746 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
747 }
748 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
749 list_del_init(&ap_msg->list);
750 ap_dev->requestq_count--;
751 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
752 }
753}
754
/* Lock-protected wrapper around __ap_flush_queue(). */
void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
762
/*
 * ap_device_remove(): Bus remove callback. Fails all outstanding
 * requests, stops the request timeout timer, takes the device off
 * the global list, lets the driver detach and removes this device's
 * in-flight requests from the global poll counter.
 */
static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}
780
/**
 * ap_driver_register(): Register an AP device driver with the AP bus.
 * @ap_drv: Pointer to the AP device driver struct.
 * @owner: Owning module (usually THIS_MODULE).
 * @name: Driver name shown in sysfs.
 *
 * Fills in the embedded struct device_driver and registers it with
 * the driver core. Returns the result of driver_register().
 */
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);
794
/**
 * ap_driver_unregister(): Deregister an AP device driver.
 * @ap_drv: Pointer to the AP device driver struct.
 */
void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
800
801
802
803
804static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
805{
806 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
807}
808
809static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
810
811static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
812{
813 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
814}
815
816static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
817{
818 return snprintf(buf, PAGE_SIZE, "%d\n",
819 ap_using_interrupts() ? 1 : 0);
820}
821
822static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
823
824static ssize_t ap_config_time_store(struct bus_type *bus,
825 const char *buf, size_t count)
826{
827 int time;
828
829 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
830 return -EINVAL;
831 ap_config_time = time;
832 if (!timer_pending(&ap_config_timer) ||
833 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
834 ap_config_timer.expires = jiffies + ap_config_time * HZ;
835 add_timer(&ap_config_timer);
836 }
837 return count;
838}
839
840static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
841
842static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
843{
844 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
845}
846
847static ssize_t ap_poll_thread_store(struct bus_type *bus,
848 const char *buf, size_t count)
849{
850 int flag, rc;
851
852 if (sscanf(buf, "%d\n", &flag) != 1)
853 return -EINVAL;
854 if (flag) {
855 rc = ap_poll_thread_start();
856 if (rc)
857 return rc;
858 }
859 else
860 ap_poll_thread_stop();
861 return count;
862}
863
864static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
865
866static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
867{
868 return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
869}
870
/*
 * Bus attribute "poll_timeout" (write): set the high-resolution poll
 * timer interval in nanoseconds (1ns .. 120s) and re-arm the timer.
 */
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	/* Forward the running timer to the new interval; if it is not
	 * queued or could not be forwarded, program it explicitly. */
	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
893
/* Attributes published on the AP bus itself (/sys/bus/ap). */
static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};
902
903
904
905
906
907
908static int ap_select_domain(void)
909{
910 int queue_depth, device_type, count, max_count, best_domain;
911 int rc, i, j;
912
913
914
915
916
917
918 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
919
920 return 0;
921 best_domain = -1;
922 max_count = 0;
923 for (i = 0; i < AP_DOMAINS; i++) {
924 count = 0;
925 for (j = 0; j < AP_DEVICES; j++) {
926 ap_qid_t qid = AP_MKQID(j, i);
927 rc = ap_query_queue(qid, &queue_depth, &device_type);
928 if (rc)
929 continue;
930 count++;
931 }
932 if (count > max_count) {
933 max_count = count;
934 best_domain = i;
935 }
936 }
937 if (best_domain >= 0){
938 ap_domain_index = best_domain;
939 return 0;
940 }
941 return -ENODEV;
942}
943
944
945
946
947
948
949
/**
 * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
 *
 * Used when the queue query returned a device type of 0: sends a
 * fixed test request and classifies the card from the reply. A reply
 * starting with 0x00 0x86 means PCICC, anything else PCICA.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	/* Canned test request sent verbatim to the card. */
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg));
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the reply: up to 6 x 300ms, matching on the psmid. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}
1040
/* Adapter interrupt handler: defer all work to the poll tasklet. */
static void ap_interrupt_handler(void *unused1, void *unused2)
{
	tasklet_schedule(&ap_tasklet);
}
1045
1046
1047
1048
1049
1050
1051
1052
1053static int __ap_scan_bus(struct device *dev, void *data)
1054{
1055 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1056}
1057
/* Device release callback: free the AP device structure. */
static void ap_device_release(struct device *dev)
{
	kfree(to_ap_dev(dev));
}
1064
/**
 * ap_scan_bus(): Scan the AP bus for new devices.
 * @unused: Unused pointer (work item).
 *
 * For each card index in the selected domain: re-check already known
 * devices and unregister those that stopped responding; register new
 * responding devices after resetting their queue.
 */
static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	int rc, i;

	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		rc = ap_query_queue(qid, &queue_depth, &device_type);
		if (dev) {
			/* Device already registered: re-validate it. */
			if (rc == -EBUSY) {
				/* Queue busy: give the reset time to finish
				 * and query once more. */
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				if (ap_dev->unregistered)
					i--;	/* revisit this card slot */
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		/* New responding device: allocate and register it. */
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		if (device_type == 0)
			/* TAPQ gave no type: probe with a test request. */
			ap_probe_device_type(ap_dev);
		else
			ap_dev->device_type = device_type;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		if (dev_set_name(&ap_dev->device, "card%02x",
				 AP_QID_DEVICE(ap_dev->qid))) {
			kfree(ap_dev);
			continue;
		}
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			/* release callback frees ap_dev */
			put_device(&ap_dev->device);
			continue;
		}
		/* Add device attributes; only then accept requests. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		}
		else
			device_unregister(&ap_dev->device);
	}
}
1149
/*
 * ap_config_timeout(): Config timer callback. Queues a bus rescan
 * and re-arms itself for the next interval.
 */
static void
ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}
1157
1158
1159
1160
1161
1162
/**
 * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the high resolution timer to run the poll tasklet.
 * Not needed when adapter interrupts are in use or while suspended,
 * and a no-op when the timer is already queued.
 */
static inline void ap_schedule_poll_timer(void)
{
	ktime_t hr_time;
	if (ap_using_interrupts() || ap_suspend_flag)
		return;
	if (hrtimer_is_queued(&ap_poll_timer))
		return;
	/* Only re-arm when the previous expiry has passed. */
	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
	return;
}
1177
1178
1179
1180
1181
1182
1183
1184
1185
/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		/* Hand the reply to the pending message with this psmid. */
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shut the queue down: requeue all
			 * pending messages for a later retry. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}
1226
1227
1228
1229
1230
1231
1232
1233
1234
/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
/**
 * ap_poll_queue(): Poll an AP device for pending replies and send
 * new messages.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 on success, otherwise an error code.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc = ap_poll_read(ap_dev, flags);

	if (rc == 0)
		rc = ap_poll_write(ap_dev, flags);
	return rc;
}
1290
1291
1292
1293
1294
1295
1296
1297
/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Sends the message immediately when the request queue is empty and
 * the hardware queue has room, otherwise (or when the card reports
 * full/resetting) parks it on the request queue for the poll logic.
 * Returns 0 if successful, otherwise an error code.
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			/* Transient: retry later via the request queue. */
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}
1336
/*
 * ap_queue_message(): Public entry point for queueing a request.
 * Polls the device first to make room, queues the message, and
 * unregisters the device if it turns out to be gone.
 */
void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	/* NOTE(review): flags is passed to ap_poll_queue() without being
	 * initialized; the poll helpers only OR bits into it and the value
	 * is never read here. */
	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
/*
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: the AP device that has the message queued
 * @ap_msg: the message that is to be removed
 *
 * Removes the request from either pendingq or requestq and adjusts the
 * matching counter. An empty ap_msg->list means the message was never
 * queued (or already completed), in which case nothing is done.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		/* Search pendingq by psmid; if found there, decrement its
		 * counter. If the loop falls through, the message must be
		 * on requestq, so that counter is decremented instead. */
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
	found:
		/* list_del_init() leaves the node empty, making a second
		 * cancel of the same message a harmless no-op. */
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
1389EXPORT_SYMBOL(ap_cancel_message);
1390
1391
1392
1393
1394
1395
1396
/*
 * ap_poll_timeout(): AP poll hrtimer callback.
 * @unused: hrtimer pointer, not used here
 *
 * One-shot: schedule the poll tasklet and do not restart the timer;
 * it is re-armed on demand via ap_schedule_poll_timer().
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}
1402
1403
1404
1405
1406
1407
1408
1409
/*
 * ap_reset(): Reset a not responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue back to the request queue so they get re-sent after
 * the queue has been re-initialized. Called with ap_dev->lock held
 * (via __ap_poll_device()).
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	/* Forget all in-flight work: global and per-device counters. */
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
}
1424
1425static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1426{
1427 if (!ap_dev->unregistered) {
1428 if (ap_poll_queue(ap_dev, flags))
1429 ap_dev->unregistered = 1;
1430 if (ap_dev->reset == AP_RESET_DO)
1431 ap_reset(ap_dev);
1432 }
1433 return 0;
1434}
1435
1436
1437
1438
1439
1440
1441
1442
1443
/*
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Tasklet body: poll every device on the bus. Repeats the full pass
 * while any device asks for an immediate re-poll (flag bit 2^0) and
 * re-arms the poll timer if any device requested it (flag bit 2^1).
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the interrupt indicator FIRST, before polling. A new
	 * interrupt arriving while we poll then re-raises the indicator,
	 * so no completion can be lost between poll passes. */
	if (ap_using_interrupts())
		xchg((u8 *)ap_interrupt_indicator, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
/*
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * Runs at lowest priority (nice 19) and yields whenever anything else
 * wants the CPU. Sleeps on ap_poll_wait while no requests are
 * outstanding; otherwise repeatedly polls every device on the bus.
 * Exits when kthread_stop() is called or on suspend.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, 19);
	while (1) {
		if (ap_suspend_flag)
			return 0;
		/* Be a good citizen: give the CPU away before polling. */
		if (need_resched()) {
			schedule();
			continue;
		}
		/* Enqueue on the wait queue and set the task state BEFORE
		 * checking the stop/idle conditions, so a concurrent
		 * wake_up()/kthread_stop() cannot be missed. */
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();	/* nothing outstanding, sleep */
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	/* Broke out of the loop with state TASK_INTERRUPTIBLE and the
	 * wait queue entry still enqueued: undo both before exiting. */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}
1517
1518static int ap_poll_thread_start(void)
1519{
1520 int rc;
1521
1522 if (ap_using_interrupts() || ap_suspend_flag)
1523 return 0;
1524 mutex_lock(&ap_poll_thread_mutex);
1525 if (!ap_poll_kthread) {
1526 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1527 rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1528 if (rc)
1529 ap_poll_kthread = NULL;
1530 }
1531 else
1532 rc = 0;
1533 mutex_unlock(&ap_poll_thread_mutex);
1534 return rc;
1535}
1536
/*
 * ap_poll_thread_stop(): Stop the AP bus poll thread, if it is running.
 *
 * The mutex serializes against a concurrent ap_poll_thread_start().
 */
static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}
1546
1547
1548
1549
1550
1551
1552
/*
 * ap_request_timeout(): Handling of request timeouts
 * @data: Holds the AP device.
 *
 * Timer callback: promote an armed reset request to AP_RESET_DO so the
 * next poll pass resets the device. In interrupt mode the tasklet is
 * scheduled explicitly, since no timer-driven poll would pick it up.
 */
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	if (ap_dev->reset == AP_RESET_ARMED) {
		ap_dev->reset = AP_RESET_DO;

		if (ap_using_interrupts())
			tasklet_schedule(&ap_tasklet);
	}
}
1564
1565static void ap_reset_domain(void)
1566{
1567 int i;
1568
1569 if (ap_domain_index != -1)
1570 for (i = 0; i < AP_DEVICES; i++)
1571 ap_reset_queue(AP_MKQID(i, ap_domain_index));
1572}
1573
1574static void ap_reset_all(void)
1575{
1576 int i, j;
1577
1578 for (i = 0; i < AP_DOMAINS; i++)
1579 for (j = 0; j < AP_DEVICES; j++)
1580 ap_reset_queue(AP_MKQID(j, i));
1581}
1582
/* Reset all AP queues on a machine reset (registered in ap_module_init). */
static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};
1586
1587
1588
1589
1590
1591
/*
 * ap_module_init(): The module initialization code.
 *
 * Validates the domain parameter, probes for AP instructions and
 * adapter interrupts, then sets up the bus, the root device, the work
 * queue, the configuration timer, the poll hrtimer and (optionally)
 * the poll thread. Errors unwind in reverse order of setup.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/* Remember whether the user explicitly chose a domain, so later
	 * code (e.g. resume handling) will not override that choice. */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		isc_register(AP_ISC);
		ap_interrupt_indicator = s390_register_adapter_interrupt(
			&ap_interrupt_handler, NULL, AP_ISC);
		/* On failure fall back silently to polling mode. */
		if (IS_ERR(ap_interrupt_indicator)) {
			ap_interrupt_indicator = NULL;
			isc_unregister(AP_ISC);
		}
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap with its bus attributes. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap as parent for all AP devices. */
	ap_root_device = root_device_register("ap");
	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	/* Initial bus scan, only if a usable domain could be selected. */
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Set up the periodic configuration (re-scan) timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Under z/VM use a longer poll interval (1.5 ms). */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the poll thread only when requested via module param. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	/* i is the number of bus files created so far; remove them all. */
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
		isc_unregister(AP_ISC);
	}
	return rc;
}
1691
/* bus_find_device() match callback that accepts every device. */
static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}
1696
1697
1698
1699
1700
1701
/*
 * ap_module_exit(): The module termination code
 *
 * Terminates the module: reset the configured domain's queues, stop
 * all polling machinery, unregister every remaining device and tear
 * down the bus in reverse order of ap_module_init().
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	/* Unregister all remaining bus devices. bus_find_device() takes
	 * a reference on the returned device; put_device() drops it. */
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
		isc_unregister(AP_ISC);
	}
}
1729
1730#ifndef CONFIG_ZCRYPT_MONOLITHIC
1731module_init(ap_module_init);
1732module_exit(ap_module_exit);
1733#endif
1734