// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
#include <asm/debug.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

struct sclp_trace_entry {
	char id[4] __nonstring;
	u32 a;
	u64 b;
};

#define SCLP_TRACE_ENTRY_SIZE		sizeof(struct sclp_trace_entry)
#define SCLP_TRACE_MAX_SIZE		128
#define SCLP_TRACE_EVENT_MAX_SIZE	64
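
/*
 * Note: each trace entry is 16 bytes: a four-character event ID (not
 * NUL-terminated, hence __nonstring), a u32 and a u64 payload whose
 * meaning depends on the event type. SCCB and event-buffer dumps are
 * capped at 128 and 64 bytes respectively unless the debug level is
 * raised to maximum (see abbrev_len() below).
 */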

/* Debug trace area intended for all entries in abbreviated form. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
			 &debug_hex_ascii_view);

/* Error trace area intended for full entries relating to failed requests. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
			 SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
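
/*
 * Both areas are registered with the s390 debug feature; assuming debugfs
 * is mounted at the usual location, they can be inspected at runtime via
 * /sys/kernel/debug/s390dbf/sclp/hex_ascii and
 * /sys/kernel/debug/s390dbf/sclp_err/hex_ascii.
 */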

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;

static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
	struct sclp_trace_entry e;

	memset(&e, 0, sizeof(e));
	strncpy(e.id, id, sizeof(e.id));
	e.a = a;
	e.b = b;
	debug_event(&sclp_debug, prio, &e, sizeof(e));
	if (err)
		debug_event(&sclp_debug_err, 0, &e, sizeof(e));
}

static inline int no_zeroes_len(void *data, int len)
{
	char *d = data;

	/* Minimize trace area content by not tracing trailing zeroes. */
	while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
		len--;

	return len;
}

static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
{
	debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
	if (errlen)
		debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
}

static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
{
	struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
	int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;

	/* Full SCCB tracing if debug level is set to max. */
	if (sclp_debug.level == DEBUG_MAX_LEVEL)
		return len;

	/* Minimal tracing for console writes. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
	    (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
		limit = SCLP_TRACE_ENTRY_SIZE;

	return min(len, limit);
}

static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
				   sclp_cmdw_t cmd, struct sccb_header *sccb,
				   bool err)
{
	sclp_trace(prio, id, a, b, err);
	if (sccb) {
		sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
			       err ? sccb->length : 0);
	}
}

static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
				    struct evbuf_header *evbuf, bool err)
{
	sclp_trace(prio, id, a, b, err);
	sclp_trace_bin(prio + 1, evbuf,
		       min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
		       err ? evbuf->length : 0);
}

static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
				  bool err)
{
	struct sccb_header *sccb = req->sccb;
	union {
		struct {
			u16 status;
			u16 response;
			u16 timeout;
			u16 start_count;
		};
		u64 b;
	} summary;

	summary.status = req->status;
	summary.response = sccb ? sccb->response_code : 0;
	summary.timeout = (u16)req->queue_timeout;
	summary.start_count = (u16)req->start_count;

	sclp_trace(prio, id, (u32)(addr_t)sccb, summary.b, err);
}

static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
				       struct sclp_register *reg)
{
	struct {
		u64 receive;
		u64 send;
	} d;

	d.receive = reg->receive_mask;
	d.send = reg->send_mask;

	sclp_trace(prio, id, a, b, false);
	sclp_trace_bin(prio, &d, sizeof(d), 0);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is a request set up on the controller? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	/* TMO: A timeout occurred (a=force restart) */
	sclp_trace(2, "TMO", force_restart, 0, true);

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();

		if (req) {
			/* RQTM: Request timed out (a=sccb, b=summary) */
			sclp_trace_req(2, "RQTM", req, true);
		}

		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}
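
/*
 * Note: the per-request deadline is armed by sclp_add_request() from
 * req->queue_timeout (in seconds); a request that expires while still
 * queued is completed with status SCLP_REQ_QUEUED_TIMEOUT through its
 * regular callback.
 */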

static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
{
	static u64 srvc_count;
	int rc;

	/* SRV1: Service call about to be issued (a=command, b=sccb address) */
	sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);

	rc = sclp_service_call(command, sccb);

	/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
	sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);

	if (rc == 0)
		active_cmd = command;

	return rc;
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call_trace(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. Called while sclp_lock is locked. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);

		/* RQAB: Request aborted (a=sccb, b=summary) */
		sclp_trace_req(2, "RQAB", req, true);

		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_init_req)
		return 1;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}

	/* RQAD: Request was added (a=sccb, b=caller) */
	sclp_trace(2, "RQAD", (u32)(addr_t)req->sccb, _RET_IP_, false);

	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
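
/*
 * Illustrative caller sketch (not part of this file; "my_req", "my_sccb"
 * and "my_done" are hypothetical). A caller fills a struct sclp_req with
 * a command word, a page-sized SCCB and an optional completion callback,
 * then queues it:
 *
 *	static void my_done(struct sclp_req *req, void *data)
 *	{
 *		complete(data);	// data points to a struct completion
 *	}
 *
 *	my_req->command = SCLP_CMDW_READ_EVENT_DATA;
 *	my_req->sccb = my_sccb;
 *	my_req->status = SCLP_REQ_FILLED;
 *	my_req->queue_timeout = 0;	// no queue deadline
 *	my_req->callback = my_done;
 *	my_req->callback_data = &done;
 *	if (sclp_add_request(my_req))
 *		;	// rejected, e.g. interface not initialized
 */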

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}

		/* EVNT: Event callback (b=receiver) */
		sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
				 evbuf, !reg);

		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
	struct sccb_header *sccb = (struct sccb_header *)(addr_t)sccb_int;
	struct evbuf_header *evbuf;
	u16 response;

	if (!sccb)
		return true;

	/* Check SCCB response. */
	response = sccb->response_code & 0xff;
	if (response != 0x10 && response != 0x20)
		return false;

	/* Check event-processed flag on outgoing events. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
		evbuf = (struct evbuf_header *)(sccb + 1);
		if (!(evbuf->flags & 0x80))
			return false;
	}

	return true;
}

/* Handler for service-signal external interruptions. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;

	/* INT: Interrupt received (a=intparm, b=cmd) */
	sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
			(struct sccb_header *)(addr_t)finished_sccb,
			!ok_response(finished_sccb, active_cmd));

	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;

			/* RQOK: Request success (a=sccb, b=summary) */
			sclp_trace_req(2, "RQOK", req, false);

			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		} else {
			/* UNEX: Unexpected SCCB completion (a=sccb address) */
			sclp_trace(0, "UNEX", finished_sccb, 0, true);
		}
		sclp_running_state = sclp_running_state_idle;
		active_cmd = 0;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
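
/*
 * Rationale: bit 51 of the s390 TOD clock increments once per
 * microsecond, so 2^32 TOD units correspond to roughly 1.05 seconds.
 * Shifting whole seconds left by 32 therefore yields a slightly
 * generous approximation, which is good enough for the timeout check
 * in sclp_sync_wait() below.
 */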

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	static u64 sync_count;
	u64 timeout;
	int irq_context;

	/* SYN1: Synchronous wait start (a=runstate, b=sync count) */
	sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);

	/* We'll be disabling timer interrupts, so take care of the
	 * currently running timer value to prevent accidental timer
	 * expiration after the request finished. */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);

	/* SYN2: Synchronous wait end (a=runstate, b=sync count) */
	sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);
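
/*
 * Design note: sclp_sync_wait() busy-polls with only the service-signal
 * interrupt subclass (CR0 bit 54, hence 1UL << (63 - 54)) enabled, so the
 * completion interrupt can still be delivered while all other interrupt
 * sources and the CPU timer tick stay off. This lets callers wait for
 * SCLP completion even in contexts where sleeping is not an option.
 */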

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn) {
			/* STCG: State-change callback (b=callback) */
			sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
				   false);

			reg->state_change_fn(reg);
		}
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8		sclp_receive_mask[mask_length];
	 * u8		sclp_send_mask[mask_length];
	 * u32		read_data_function_mask;
	 */
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	/* REG: Event listener registered (b=caller) */
	sclp_trace_register(2, "REG", 0, _RET_IP_, reg);

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
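
/*
 * Illustrative listener sketch (not part of this file; "my_listener" and
 * its callback are hypothetical). A driver that wants to receive
 * operator-command events would register roughly like this:
 *
 *	static void my_receiver(struct evbuf_header *evbuf) { ... }
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_OPCMD_MASK,
 *		.receiver_fn = my_receiver,
 *	};
 *	...
 *	rc = sclp_register(&my_listener);
 *
 * sclp_register() fails with -EBUSY if another listener already claims
 * one of the requested event types.
 */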

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	/* UREG: Event listener unregistered (b=caller) */
	sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
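
/*
 * Usage note: deactivation works by writing an all-zero event mask, so
 * the SCLP stops sending events and __sclp_can_add_request() rejects new
 * requests; the reboot notifier below uses this to quiesce the interface.
 * A matching sclp_reactivate() restores the calculated masks.
 */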

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call_trace(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * requests from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.groups = sclp_drv_attr_groups,
	},
};
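
/*
 * With the platform driver registered (see sclp_initcall() below), the
 * three read-only attributes above should appear in sysfs, typically as
 * /sys/bus/platform/drivers/sclp/con_pages, .../con_drop and .../con_full.
 */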

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	return sclp_init();
}

arch_initcall(sclp_initcall);