linux/drivers/s390/char/sclp.c
   1/*
   2 * core function to access sclp interface
   3 *
   4 * Copyright IBM Corp. 1999, 2009
   5 *
   6 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
   7 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
   8 */
   9
  10#include <linux/kernel_stat.h>
  11#include <linux/module.h>
  12#include <linux/err.h>
  13#include <linux/spinlock.h>
  14#include <linux/interrupt.h>
  15#include <linux/timer.h>
  16#include <linux/reboot.h>
  17#include <linux/jiffies.h>
  18#include <linux/init.h>
  19#include <linux/suspend.h>
  20#include <linux/completion.h>
  21#include <linux/platform_device.h>
  22#include <asm/types.h>
  23#include <asm/irq.h>
  24
  25#include "sclp.h"
  26
  27#define SCLP_HEADER             "sclp: "
  28
  29/* Lock to protect internal data consistency. */
  30static DEFINE_SPINLOCK(sclp_lock);
  31
  32/* Mask of events that we can send to the sclp interface. */
  33static sccb_mask_t sclp_receive_mask;
  34
  35/* Mask of events that we can receive from the sclp interface. */
  36static sccb_mask_t sclp_send_mask;
  37
  38/* List of registered event listeners and senders. */
  39static struct list_head sclp_reg_list;
  40
  41/* List of queued requests. */
  42static struct list_head sclp_req_queue;
  43
   44/* Data for read and init requests. */
  45static struct sclp_req sclp_read_req;
  46static struct sclp_req sclp_init_req;
  47static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
  48static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
  49
  50/* Suspend request */
  51static DECLARE_COMPLETION(sclp_request_queue_flushed);
  52
  53/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
  54int sclp_console_pages = SCLP_CONSOLE_PAGES;
  55/* Flag to indicate if buffer pages are dropped on buffer full condition */
  56int sclp_console_drop = 1;
  57/* Number of times the console dropped buffer pages */
  58unsigned long sclp_console_full;
  59
  60static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
  61{
  62        complete(&sclp_request_queue_flushed);
  63}
  64
  65static int __init sclp_setup_console_pages(char *str)
  66{
  67        int pages, rc;
  68
  69        rc = kstrtoint(str, 0, &pages);
  70        if (!rc && pages >= SCLP_CONSOLE_PAGES)
  71                sclp_console_pages = pages;
  72        return 1;
  73}
  74
  75__setup("sclp_con_pages=", sclp_setup_console_pages);
  76
  77static int __init sclp_setup_console_drop(char *str)
  78{
  79        int drop, rc;
  80
  81        rc = kstrtoint(str, 0, &drop);
  82        if (!rc)
  83                sclp_console_drop = drop;
  84        return 1;
  85}
  86
  87__setup("sclp_con_drop=", sclp_setup_console_drop);
  88
  89static struct sclp_req sclp_suspend_req;
  90
  91/* Timer for request retries. */
  92static struct timer_list sclp_request_timer;
  93
  94/* Timer for queued requests. */
  95static struct timer_list sclp_queue_timer;
  96
  97/* Internal state: is the driver initialized? */
  98static volatile enum sclp_init_state_t {
  99        sclp_init_state_uninitialized,
 100        sclp_init_state_initializing,
 101        sclp_init_state_initialized
 102} sclp_init_state = sclp_init_state_uninitialized;
 103
 104/* Internal state: is a request active at the sclp? */
 105static volatile enum sclp_running_state_t {
 106        sclp_running_state_idle,
 107        sclp_running_state_running,
 108        sclp_running_state_reset_pending
 109} sclp_running_state = sclp_running_state_idle;
 110
 111/* Internal state: is a read request pending? */
 112static volatile enum sclp_reading_state_t {
 113        sclp_reading_state_idle,
 114        sclp_reading_state_reading
 115} sclp_reading_state = sclp_reading_state_idle;
 116
 117/* Internal state: is the driver currently serving requests? */
 118static volatile enum sclp_activation_state_t {
 119        sclp_activation_state_active,
 120        sclp_activation_state_deactivating,
 121        sclp_activation_state_inactive,
 122        sclp_activation_state_activating
 123} sclp_activation_state = sclp_activation_state_active;
 124
 125/* Internal state: is an init mask request pending? */
 126static volatile enum sclp_mask_state_t {
 127        sclp_mask_state_idle,
 128        sclp_mask_state_initializing
 129} sclp_mask_state = sclp_mask_state_idle;
 130
 131/* Internal state: is the driver suspended? */
 132static enum sclp_suspend_state_t {
 133        sclp_suspend_state_running,
 134        sclp_suspend_state_suspended,
 135} sclp_suspend_state = sclp_suspend_state_running;
 136
 137/* Maximum retry counts */
 138#define SCLP_INIT_RETRY         3
 139#define SCLP_MASK_RETRY         3
 140
  141/* Timeout intervals in seconds. */
 142#define SCLP_BUSY_INTERVAL      10
 143#define SCLP_RETRY_INTERVAL     30
 144
 145static void sclp_process_queue(void);
 146static void __sclp_make_read_req(void);
 147static int sclp_init_mask(int calculate);
 148static int sclp_init(void);
 149
 150/* Perform service call. Return 0 on success, non-zero otherwise. */
 151int
 152sclp_service_call(sclp_cmdw_t command, void *sccb)
 153{
 154        int cc = 4; /* Initialize for program check handling */
 155
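             /*
              * A program check on the servc or ipm instruction is fixed up
              * via the EX_TABLE entries to label 2, leaving cc at its initial
              * value of 4, which is mapped to -EINVAL below.
              */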
 156        asm volatile(
 157                "0:     .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
 158                "1:     ipm     %0\n"
 159                "       srl     %0,28\n"
 160                "2:\n"
 161                EX_TABLE(0b, 2b)
 162                EX_TABLE(1b, 2b)
 163                : "+&d" (cc) : "d" (command), "a" (__pa(sccb))
 164                : "cc", "memory");
 165        if (cc == 4)
 166                return -EINVAL;
 167        if (cc == 3)
 168                return -EIO;
 169        if (cc == 2)
 170                return -EBUSY;
 171        return 0;
 172}
 173
 174
 175static void
 176__sclp_queue_read_req(void)
 177{
 178        if (sclp_reading_state == sclp_reading_state_idle) {
 179                sclp_reading_state = sclp_reading_state_reading;
 180                __sclp_make_read_req();
 181                /* Add request to head of queue */
 182                list_add(&sclp_read_req.list, &sclp_req_queue);
 183        }
 184}
 185
 186/* Set up request retry timer. Called while sclp_lock is locked. */
 187static inline void
 188__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
 189                         unsigned long data)
 190{
 191        del_timer(&sclp_request_timer);
 192        sclp_request_timer.function = function;
 193        sclp_request_timer.data = data;
 194        sclp_request_timer.expires = jiffies + time;
 195        add_timer(&sclp_request_timer);
 196}
 197
 198/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 199 * force restart of running request. */
 200static void
 201sclp_request_timeout(unsigned long data)
 202{
 203        unsigned long flags;
 204
 205        spin_lock_irqsave(&sclp_lock, flags);
 206        if (data) {
 207                if (sclp_running_state == sclp_running_state_running) {
 208                        /* Break running state and queue NOP read event request
 209                         * to get a defined interface state. */
 210                        __sclp_queue_read_req();
 211                        sclp_running_state = sclp_running_state_idle;
 212                }
 213        } else {
 214                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
 215                                         sclp_request_timeout, 0);
 216        }
 217        spin_unlock_irqrestore(&sclp_lock, flags);
 218        sclp_process_queue();
 219}
 220
 221/*
 222 * Returns the expire value in jiffies of the next pending request timeout,
  223 * if any. Must be called with sclp_lock held.
 224 */
 225static unsigned long __sclp_req_queue_find_next_timeout(void)
 226{
 227        unsigned long expires_next = 0;
 228        struct sclp_req *req;
 229
 230        list_for_each_entry(req, &sclp_req_queue, list) {
 231                if (!req->queue_expires)
 232                        continue;
 233                if (!expires_next ||
 234                   (time_before(req->queue_expires, expires_next)))
 235                                expires_next = req->queue_expires;
 236        }
 237        return expires_next;
 238}
 239
 240/*
 241 * Returns expired request, if any, and removes it from the list.
 242 */
 243static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
 244{
 245        unsigned long flags, now;
 246        struct sclp_req *req;
 247
 248        spin_lock_irqsave(&sclp_lock, flags);
 249        now = jiffies;
 250        /* Don't need list_for_each_safe because we break out after list_del */
 251        list_for_each_entry(req, &sclp_req_queue, list) {
 252                if (!req->queue_expires)
 253                        continue;
 254                if (time_before_eq(req->queue_expires, now)) {
 255                        if (req->status == SCLP_REQ_QUEUED) {
 256                                req->status = SCLP_REQ_QUEUED_TIMEOUT;
 257                                list_del(&req->list);
 258                                goto out;
 259                        }
 260                }
 261        }
 262        req = NULL;
 263out:
 264        spin_unlock_irqrestore(&sclp_lock, flags);
 265        return req;
 266}
 267
 268/*
 269 * Timeout handler for queued requests. Removes request from list and
 270 * invokes callback. This timer can be set per request in situations where
 271 * waiting too long would be harmful to the system, e.g. during SE reboot.
 272 */
 273static void sclp_req_queue_timeout(unsigned long data)
 274{
 275        unsigned long flags, expires_next;
 276        struct sclp_req *req;
 277
 278        do {
 279                req = __sclp_req_queue_remove_expired_req();
 280                if (req && req->callback)
 281                        req->callback(req, req->callback_data);
 282        } while (req);
 283
 284        spin_lock_irqsave(&sclp_lock, flags);
 285        expires_next = __sclp_req_queue_find_next_timeout();
 286        if (expires_next)
 287                mod_timer(&sclp_queue_timer, expires_next);
 288        spin_unlock_irqrestore(&sclp_lock, flags);
 289}
 290
 291/* Try to start a request. Return zero if the request was successfully
 292 * started or if it will be started at a later time. Return non-zero otherwise.
 293 * Called while sclp_lock is locked. */
 294static int
 295__sclp_start_request(struct sclp_req *req)
 296{
 297        int rc;
 298
 299        if (sclp_running_state != sclp_running_state_idle)
 300                return 0;
 301        del_timer(&sclp_request_timer);
 302        rc = sclp_service_call(req->command, req->sccb);
 303        req->start_count++;
 304
 305        if (rc == 0) {
 306                /* Successfully started request */
 307                req->status = SCLP_REQ_RUNNING;
 308                sclp_running_state = sclp_running_state_running;
 309                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
 310                                         sclp_request_timeout, 1);
 311                return 0;
 312        } else if (rc == -EBUSY) {
 313                /* Try again later */
 314                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
 315                                         sclp_request_timeout, 0);
 316                return 0;
 317        }
 318        /* Request failed */
 319        req->status = SCLP_REQ_FAILED;
 320        return rc;
 321}
 322
 323/* Try to start queued requests. */
 324static void
 325sclp_process_queue(void)
 326{
 327        struct sclp_req *req;
 328        int rc;
 329        unsigned long flags;
 330
 331        spin_lock_irqsave(&sclp_lock, flags);
 332        if (sclp_running_state != sclp_running_state_idle) {
 333                spin_unlock_irqrestore(&sclp_lock, flags);
 334                return;
 335        }
 336        del_timer(&sclp_request_timer);
 337        while (!list_empty(&sclp_req_queue)) {
 338                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
 339                if (!req->sccb)
 340                        goto do_post;
 341                rc = __sclp_start_request(req);
 342                if (rc == 0)
 343                        break;
 344                /* Request failed */
 345                if (req->start_count > 1) {
 346                        /* Cannot abort already submitted request - could still
 347                         * be active at the SCLP */
 348                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
 349                                                 sclp_request_timeout, 0);
 350                        break;
 351                }
 352do_post:
 353                /* Post-processing for aborted request */
 354                list_del(&req->list);
 355                if (req->callback) {
 356                        spin_unlock_irqrestore(&sclp_lock, flags);
 357                        req->callback(req, req->callback_data);
 358                        spin_lock_irqsave(&sclp_lock, flags);
 359                }
 360        }
 361        spin_unlock_irqrestore(&sclp_lock, flags);
 362}
 363
 364static int __sclp_can_add_request(struct sclp_req *req)
 365{
 366        if (req == &sclp_suspend_req || req == &sclp_init_req)
 367                return 1;
 368        if (sclp_suspend_state != sclp_suspend_state_running)
 369                return 0;
 370        if (sclp_init_state != sclp_init_state_initialized)
 371                return 0;
 372        if (sclp_activation_state != sclp_activation_state_active)
 373                return 0;
 374        return 1;
 375}
 376
 377/* Queue a new request. Return zero on success, non-zero otherwise. */
 378int
 379sclp_add_request(struct sclp_req *req)
 380{
 381        unsigned long flags;
 382        int rc;
 383
 384        spin_lock_irqsave(&sclp_lock, flags);
 385        if (!__sclp_can_add_request(req)) {
 386                spin_unlock_irqrestore(&sclp_lock, flags);
 387                return -EIO;
 388        }
 389        req->status = SCLP_REQ_QUEUED;
 390        req->start_count = 0;
 391        list_add_tail(&req->list, &sclp_req_queue);
 392        rc = 0;
 393        if (req->queue_timeout) {
 394                req->queue_expires = jiffies + req->queue_timeout * HZ;
 395                if (!timer_pending(&sclp_queue_timer) ||
 396                    time_after(sclp_queue_timer.expires, req->queue_expires))
 397                        mod_timer(&sclp_queue_timer, req->queue_expires);
 398        } else
 399                req->queue_expires = 0;
 400        /* Start if request is first in list */
 401        if (sclp_running_state == sclp_running_state_idle &&
 402            req->list.prev == &sclp_req_queue) {
 403                if (!req->sccb) {
 404                        list_del(&req->list);
 405                        rc = -ENODATA;
 406                        goto out;
 407                }
 408                rc = __sclp_start_request(req);
 409                if (rc)
 410                        list_del(&req->list);
 411        }
 412out:
 413        spin_unlock_irqrestore(&sclp_lock, flags);
 414        return rc;
 415}
 416
 417EXPORT_SYMBOL(sclp_add_request);
 418
 419/* Dispatch events found in request buffer to registered listeners. Return 0
 420 * if all events were dispatched, non-zero otherwise. */
 421static int
 422sclp_dispatch_evbufs(struct sccb_header *sccb)
 423{
 424        unsigned long flags;
 425        struct evbuf_header *evbuf;
 426        struct list_head *l;
 427        struct sclp_register *reg;
 428        int offset;
 429        int rc;
 430
 431        spin_lock_irqsave(&sclp_lock, flags);
 432        rc = 0;
 433        for (offset = sizeof(struct sccb_header); offset < sccb->length;
 434             offset += evbuf->length) {
 435                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
 436                /* Check for malformed hardware response */
 437                if (evbuf->length == 0)
 438                        break;
 439                /* Search for event handler */
 440                reg = NULL;
 441                list_for_each(l, &sclp_reg_list) {
 442                        reg = list_entry(l, struct sclp_register, list);
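                             /* Event type n corresponds to mask bit 1 << (32 - n),
                              * i.e. type 1 maps to the most significant mask bit. */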
 443                        if (reg->receive_mask & (1 << (32 - evbuf->type)))
 444                                break;
 445                        else
 446                                reg = NULL;
 447                }
 448                if (reg && reg->receiver_fn) {
 449                        spin_unlock_irqrestore(&sclp_lock, flags);
 450                        reg->receiver_fn(evbuf);
 451                        spin_lock_irqsave(&sclp_lock, flags);
 452                } else if (reg == NULL)
 453                        rc = -EOPNOTSUPP;
 454        }
 455        spin_unlock_irqrestore(&sclp_lock, flags);
 456        return rc;
 457}
 458
 459/* Read event data request callback. */
 460static void
 461sclp_read_cb(struct sclp_req *req, void *data)
 462{
 463        unsigned long flags;
 464        struct sccb_header *sccb;
 465
 466        sccb = (struct sccb_header *) req->sccb;
 467        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
 468            sccb->response_code == 0x220))
 469                sclp_dispatch_evbufs(sccb);
 470        spin_lock_irqsave(&sclp_lock, flags);
 471        sclp_reading_state = sclp_reading_state_idle;
 472        spin_unlock_irqrestore(&sclp_lock, flags);
 473}
 474
 475/* Prepare read event data request. Called while sclp_lock is locked. */
 476static void __sclp_make_read_req(void)
 477{
 478        struct sccb_header *sccb;
 479
 480        sccb = (struct sccb_header *) sclp_read_sccb;
 481        clear_page(sccb);
 482        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
 483        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
 484        sclp_read_req.status = SCLP_REQ_QUEUED;
 485        sclp_read_req.start_count = 0;
 486        sclp_read_req.callback = sclp_read_cb;
 487        sclp_read_req.sccb = sccb;
 488        sccb->length = PAGE_SIZE;
 489        sccb->function_code = 0;
 490        sccb->control_mask[2] = 0x80;
 491}
 492
 493/* Search request list for request with matching sccb. Return request if found,
 494 * NULL otherwise. Called while sclp_lock is locked. */
 495static inline struct sclp_req *
 496__sclp_find_req(u32 sccb)
 497{
 498        struct list_head *l;
 499        struct sclp_req *req;
 500
 501        list_for_each(l, &sclp_req_queue) {
 502                req = list_entry(l, struct sclp_req, list);
 503                if (sccb == (u32) (addr_t) req->sccb)
 504                                return req;
 505        }
 506        return NULL;
 507}
 508
 509/* Handler for external interruption. Perform request post-processing.
 510 * Prepare read event data request if necessary. Start processing of next
 511 * request on queue. */
 512static void sclp_interrupt_handler(struct ext_code ext_code,
 513                                   unsigned int param32, unsigned long param64)
 514{
 515        struct sclp_req *req;
 516        u32 finished_sccb;
 517        u32 evbuf_pending;
 518
 519        inc_irq_stat(IRQEXT_SCP);
 520        spin_lock(&sclp_lock);
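             /* The interruption parameter carries the address of the finished
              * SCCB in its upper bits; the two low-order bits indicate that
              * event buffers are pending. */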
 521        finished_sccb = param32 & 0xfffffff8;
 522        evbuf_pending = param32 & 0x3;
 523        if (finished_sccb) {
 524                del_timer(&sclp_request_timer);
 525                sclp_running_state = sclp_running_state_reset_pending;
 526                req = __sclp_find_req(finished_sccb);
 527                if (req) {
 528                        /* Request post-processing */
 529                        list_del(&req->list);
 530                        req->status = SCLP_REQ_DONE;
 531                        if (req->callback) {
 532                                spin_unlock(&sclp_lock);
 533                                req->callback(req, req->callback_data);
 534                                spin_lock(&sclp_lock);
 535                        }
 536                }
 537                sclp_running_state = sclp_running_state_idle;
 538        }
 539        if (evbuf_pending &&
 540            sclp_activation_state == sclp_activation_state_active)
 541                __sclp_queue_read_req();
 542        spin_unlock(&sclp_lock);
 543        sclp_process_queue();
 544}
 545
 546/* Convert interval in jiffies to TOD ticks. */
 547static inline u64
 548sclp_tod_from_jiffies(unsigned long jiffies)
 549{
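             /* Bit 51 of the TOD clock represents one microsecond, so 2^32
              * TOD units are roughly one second; shifting whole seconds left
              * by 32 gives a slightly generous approximation of the interval. */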
 550        return (u64) (jiffies / HZ) << 32;
 551}
 552
  553/* Wait until a currently running request has finished. Note: while this function
 554 * is running, no timers are served on the calling CPU. */
 555void
 556sclp_sync_wait(void)
 557{
 558        unsigned long long old_tick;
 559        unsigned long flags;
 560        unsigned long cr0, cr0_sync;
 561        u64 timeout;
 562        int irq_context;
 563
 564        /* We'll be disabling timer interrupts, so we need a custom timeout
 565         * mechanism */
 566        timeout = 0;
 567        if (timer_pending(&sclp_request_timer)) {
 568                /* Get timeout TOD value */
 569                timeout = get_tod_clock_fast() +
 570                          sclp_tod_from_jiffies(sclp_request_timer.expires -
 571                                                jiffies);
 572        }
 573        local_irq_save(flags);
 574        /* Prevent bottom half from executing once we force interrupts open */
 575        irq_context = in_interrupt();
 576        if (!irq_context)
 577                local_bh_disable();
 578        /* Enable service-signal interruption, disable timer interrupts */
 579        old_tick = local_tick_disable();
 580        trace_hardirqs_on();
 581        __ctl_store(cr0, 0, 0);
 582        cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
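             /* CR0 bit 54 is the service-signal external-interrupt subclass mask */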
 583        cr0_sync |= 1UL << (63 - 54);
 584        __ctl_load(cr0_sync, 0, 0);
 585        __arch_local_irq_stosm(0x01);
 586        /* Loop until driver state indicates finished request */
 587        while (sclp_running_state != sclp_running_state_idle) {
 588                /* Check for expired request timer */
 589                if (timer_pending(&sclp_request_timer) &&
 590                    get_tod_clock_fast() > timeout &&
 591                    del_timer(&sclp_request_timer))
 592                        sclp_request_timer.function(sclp_request_timer.data);
 593                cpu_relax();
 594        }
 595        local_irq_disable();
 596        __ctl_load(cr0, 0, 0);
 597        if (!irq_context)
 598                _local_bh_enable();
 599        local_tick_enable(old_tick);
 600        local_irq_restore(flags);
 601}
 602EXPORT_SYMBOL(sclp_sync_wait);
 603
 604/* Dispatch changes in send and receive mask to registered listeners. */
 605static void
 606sclp_dispatch_state_change(void)
 607{
 608        struct list_head *l;
 609        struct sclp_register *reg;
 610        unsigned long flags;
 611        sccb_mask_t receive_mask;
 612        sccb_mask_t send_mask;
 613
 614        do {
 615                spin_lock_irqsave(&sclp_lock, flags);
 616                reg = NULL;
 617                list_for_each(l, &sclp_reg_list) {
 618                        reg = list_entry(l, struct sclp_register, list);
 619                        receive_mask = reg->send_mask & sclp_receive_mask;
 620                        send_mask = reg->receive_mask & sclp_send_mask;
 621                        if (reg->sclp_receive_mask != receive_mask ||
 622                            reg->sclp_send_mask != send_mask) {
 623                                reg->sclp_receive_mask = receive_mask;
 624                                reg->sclp_send_mask = send_mask;
 625                                break;
 626                        } else
 627                                reg = NULL;
 628                }
 629                spin_unlock_irqrestore(&sclp_lock, flags);
 630                if (reg && reg->state_change_fn)
 631                        reg->state_change_fn(reg);
 632        } while (reg);
 633}
 634
 635struct sclp_statechangebuf {
 636        struct evbuf_header     header;
 637        u8              validity_sclp_active_facility_mask : 1;
 638        u8              validity_sclp_receive_mask : 1;
 639        u8              validity_sclp_send_mask : 1;
 640        u8              validity_read_data_function_mask : 1;
 641        u16             _zeros : 12;
 642        u16             mask_length;
 643        u64             sclp_active_facility_mask;
 644        sccb_mask_t     sclp_receive_mask;
 645        sccb_mask_t     sclp_send_mask;
 646        u32             read_data_function_mask;
 647} __attribute__((packed));
 648
 649
 650/* State change event callback. Inform listeners of changes. */
 651static void
 652sclp_state_change_cb(struct evbuf_header *evbuf)
 653{
 654        unsigned long flags;
 655        struct sclp_statechangebuf *scbuf;
 656
 657        scbuf = (struct sclp_statechangebuf *) evbuf;
 658        if (scbuf->mask_length != sizeof(sccb_mask_t))
 659                return;
 660        spin_lock_irqsave(&sclp_lock, flags);
 661        if (scbuf->validity_sclp_receive_mask)
 662                sclp_receive_mask = scbuf->sclp_receive_mask;
 663        if (scbuf->validity_sclp_send_mask)
 664                sclp_send_mask = scbuf->sclp_send_mask;
 665        spin_unlock_irqrestore(&sclp_lock, flags);
 666        if (scbuf->validity_sclp_active_facility_mask)
 667                sclp.facilities = scbuf->sclp_active_facility_mask;
 668        sclp_dispatch_state_change();
 669}
 670
 671static struct sclp_register sclp_state_change_event = {
 672        .receive_mask = EVTYP_STATECHANGE_MASK,
 673        .receiver_fn = sclp_state_change_cb
 674};
 675
 676/* Calculate receive and send mask of currently registered listeners.
 677 * Called while sclp_lock is locked. */
 678static inline void
 679__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
 680{
 681        struct list_head *l;
 682        struct sclp_register *t;
 683
 684        *receive_mask = 0;
 685        *send_mask = 0;
 686        list_for_each(l, &sclp_reg_list) {
 687                t = list_entry(l, struct sclp_register, list);
 688                *receive_mask |= t->receive_mask;
 689                *send_mask |= t->send_mask;
 690        }
 691}
 692
 693/* Register event listener. Return 0 on success, non-zero otherwise. */
 694int
 695sclp_register(struct sclp_register *reg)
 696{
 697        unsigned long flags;
 698        sccb_mask_t receive_mask;
 699        sccb_mask_t send_mask;
 700        int rc;
 701
 702        rc = sclp_init();
 703        if (rc)
 704                return rc;
 705        spin_lock_irqsave(&sclp_lock, flags);
 706        /* Check event mask for collisions */
 707        __sclp_get_mask(&receive_mask, &send_mask);
 708        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
 709                spin_unlock_irqrestore(&sclp_lock, flags);
 710                return -EBUSY;
 711        }
 712        /* Trigger initial state change callback */
 713        reg->sclp_receive_mask = 0;
 714        reg->sclp_send_mask = 0;
 715        reg->pm_event_posted = 0;
 716        list_add(&reg->list, &sclp_reg_list);
 717        spin_unlock_irqrestore(&sclp_lock, flags);
 718        rc = sclp_init_mask(1);
 719        if (rc) {
 720                spin_lock_irqsave(&sclp_lock, flags);
 721                list_del(&reg->list);
 722                spin_unlock_irqrestore(&sclp_lock, flags);
 723        }
 724        return rc;
 725}
 726
 727EXPORT_SYMBOL(sclp_register);
 728
 729/* Unregister event listener. */
 730void
 731sclp_unregister(struct sclp_register *reg)
 732{
 733        unsigned long flags;
 734
 735        spin_lock_irqsave(&sclp_lock, flags);
 736        list_del(&reg->list);
 737        spin_unlock_irqrestore(&sclp_lock, flags);
 738        sclp_init_mask(1);
 739}
 740
 741EXPORT_SYMBOL(sclp_unregister);
 742
 743/* Remove event buffers which are marked processed. Return the number of
 744 * remaining event buffers. */
 745int
 746sclp_remove_processed(struct sccb_header *sccb)
 747{
 748        struct evbuf_header *evbuf;
 749        int unprocessed;
 750        u16 remaining;
 751
 752        evbuf = (struct evbuf_header *) (sccb + 1);
 753        unprocessed = 0;
 754        remaining = sccb->length - sizeof(struct sccb_header);
 755        while (remaining > 0) {
 756                remaining -= evbuf->length;
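                     /* Flag bit 0x80 marks the event buffer as processed */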
 757                if (evbuf->flags & 0x80) {
 758                        sccb->length -= evbuf->length;
 759                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
 760                               remaining);
 761                } else {
 762                        unprocessed++;
 763                        evbuf = (struct evbuf_header *)
 764                                        ((addr_t) evbuf + evbuf->length);
 765                }
 766        }
 767        return unprocessed;
 768}
 769
 770EXPORT_SYMBOL(sclp_remove_processed);
 771
 772/* Prepare init mask request. Called while sclp_lock is locked. */
 773static inline void
 774__sclp_make_init_req(u32 receive_mask, u32 send_mask)
 775{
 776        struct init_sccb *sccb;
 777
 778        sccb = (struct init_sccb *) sclp_init_sccb;
 779        clear_page(sccb);
 780        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
 781        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
 782        sclp_init_req.status = SCLP_REQ_FILLED;
 783        sclp_init_req.start_count = 0;
 784        sclp_init_req.callback = NULL;
 785        sclp_init_req.callback_data = NULL;
 786        sclp_init_req.sccb = sccb;
 787        sccb->header.length = sizeof(struct init_sccb);
 788        sccb->mask_length = sizeof(sccb_mask_t);
 789        sccb->receive_mask = receive_mask;
 790        sccb->send_mask = send_mask;
 791        sccb->sclp_receive_mask = 0;
 792        sccb->sclp_send_mask = 0;
 793}
 794
 795/* Start init mask request. If calculate is non-zero, calculate the mask as
 796 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 797 * success, non-zero otherwise. */
 798static int
 799sclp_init_mask(int calculate)
 800{
 801        unsigned long flags;
 802        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
 803        sccb_mask_t receive_mask;
 804        sccb_mask_t send_mask;
 805        int retry;
 806        int rc;
 807        unsigned long wait;
 808
 809        spin_lock_irqsave(&sclp_lock, flags);
 810        /* Check if interface is in appropriate state */
 811        if (sclp_mask_state != sclp_mask_state_idle) {
 812                spin_unlock_irqrestore(&sclp_lock, flags);
 813                return -EBUSY;
 814        }
 815        if (sclp_activation_state == sclp_activation_state_inactive) {
 816                spin_unlock_irqrestore(&sclp_lock, flags);
 817                return -EINVAL;
 818        }
 819        sclp_mask_state = sclp_mask_state_initializing;
 820        /* Determine mask */
 821        if (calculate)
 822                __sclp_get_mask(&receive_mask, &send_mask);
 823        else {
 824                receive_mask = 0;
 825                send_mask = 0;
 826        }
 827        rc = -EIO;
 828        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
 829                /* Prepare request */
 830                __sclp_make_init_req(receive_mask, send_mask);
 831                spin_unlock_irqrestore(&sclp_lock, flags);
 832                if (sclp_add_request(&sclp_init_req)) {
 833                        /* Try again later */
 834                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
 835                        while (time_before(jiffies, wait))
 836                                sclp_sync_wait();
 837                        spin_lock_irqsave(&sclp_lock, flags);
 838                        continue;
 839                }
 840                while (sclp_init_req.status != SCLP_REQ_DONE &&
 841                       sclp_init_req.status != SCLP_REQ_FAILED)
 842                        sclp_sync_wait();
 843                spin_lock_irqsave(&sclp_lock, flags);
 844                if (sclp_init_req.status == SCLP_REQ_DONE &&
 845                    sccb->header.response_code == 0x20) {
 846                        /* Successful request */
 847                        if (calculate) {
 848                                sclp_receive_mask = sccb->sclp_receive_mask;
 849                                sclp_send_mask = sccb->sclp_send_mask;
 850                        } else {
 851                                sclp_receive_mask = 0;
 852                                sclp_send_mask = 0;
 853                        }
 854                        spin_unlock_irqrestore(&sclp_lock, flags);
 855                        sclp_dispatch_state_change();
 856                        spin_lock_irqsave(&sclp_lock, flags);
 857                        rc = 0;
 858                        break;
 859                }
 860        }
 861        sclp_mask_state = sclp_mask_state_idle;
 862        spin_unlock_irqrestore(&sclp_lock, flags);
 863        return rc;
 864}
 865
  866/* Deactivate SCLP interface. On success, new requests will be rejected and
  867 * events will no longer be dispatched. Return 0 on success, non-zero
 868 * otherwise. */
 869int
 870sclp_deactivate(void)
 871{
 872        unsigned long flags;
 873        int rc;
 874
 875        spin_lock_irqsave(&sclp_lock, flags);
 876        /* Deactivate can only be called when active */
 877        if (sclp_activation_state != sclp_activation_state_active) {
 878                spin_unlock_irqrestore(&sclp_lock, flags);
 879                return -EINVAL;
 880        }
 881        sclp_activation_state = sclp_activation_state_deactivating;
 882        spin_unlock_irqrestore(&sclp_lock, flags);
 883        rc = sclp_init_mask(0);
 884        spin_lock_irqsave(&sclp_lock, flags);
 885        if (rc == 0)
 886                sclp_activation_state = sclp_activation_state_inactive;
 887        else
 888                sclp_activation_state = sclp_activation_state_active;
 889        spin_unlock_irqrestore(&sclp_lock, flags);
 890        return rc;
 891}
 892
 893EXPORT_SYMBOL(sclp_deactivate);
 894
 895/* Reactivate SCLP interface after sclp_deactivate. On success, new
  896 * requests will be accepted and events will be dispatched again. Return 0 on
 897 * success, non-zero otherwise. */
 898int
 899sclp_reactivate(void)
 900{
 901        unsigned long flags;
 902        int rc;
 903
 904        spin_lock_irqsave(&sclp_lock, flags);
 905        /* Reactivate can only be called when inactive */
 906        if (sclp_activation_state != sclp_activation_state_inactive) {
 907                spin_unlock_irqrestore(&sclp_lock, flags);
 908                return -EINVAL;
 909        }
 910        sclp_activation_state = sclp_activation_state_activating;
 911        spin_unlock_irqrestore(&sclp_lock, flags);
 912        rc = sclp_init_mask(1);
 913        spin_lock_irqsave(&sclp_lock, flags);
 914        if (rc == 0)
 915                sclp_activation_state = sclp_activation_state_active;
 916        else
 917                sclp_activation_state = sclp_activation_state_inactive;
 918        spin_unlock_irqrestore(&sclp_lock, flags);
 919        return rc;
 920}
 921
 922EXPORT_SYMBOL(sclp_reactivate);
 923
 924/* Handler for external interruption used during initialization. Modify
 925 * request state to done. */
 926static void sclp_check_handler(struct ext_code ext_code,
 927                               unsigned int param32, unsigned long param64)
 928{
 929        u32 finished_sccb;
 930
 931        inc_irq_stat(IRQEXT_SCP);
 932        finished_sccb = param32 & 0xfffffff8;
 933        /* Is this the interrupt we are waiting for? */
 934        if (finished_sccb == 0)
 935                return;
 936        if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
 937                panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
 938                      finished_sccb);
 939        spin_lock(&sclp_lock);
 940        if (sclp_running_state == sclp_running_state_running) {
 941                sclp_init_req.status = SCLP_REQ_DONE;
 942                sclp_running_state = sclp_running_state_idle;
 943        }
 944        spin_unlock(&sclp_lock);
 945}
 946
 947/* Initial init mask request timed out. Modify request state to failed. */
 948static void
 949sclp_check_timeout(unsigned long data)
 950{
 951        unsigned long flags;
 952
 953        spin_lock_irqsave(&sclp_lock, flags);
 954        if (sclp_running_state == sclp_running_state_running) {
 955                sclp_init_req.status = SCLP_REQ_FAILED;
 956                sclp_running_state = sclp_running_state_idle;
 957        }
 958        spin_unlock_irqrestore(&sclp_lock, flags);
 959}
 960
 961/* Perform a check of the SCLP interface. Return zero if the interface is
 962 * available and there are no pending requests from a previous instance.
 963 * Return non-zero otherwise. */
 964static int
 965sclp_check_interface(void)
 966{
 967        struct init_sccb *sccb;
 968        unsigned long flags;
 969        int retry;
 970        int rc;
 971
 972        spin_lock_irqsave(&sclp_lock, flags);
 973        /* Prepare init mask command */
 974        rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
 975        if (rc) {
 976                spin_unlock_irqrestore(&sclp_lock, flags);
 977                return rc;
 978        }
 979        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
 980                __sclp_make_init_req(0, 0);
 981                sccb = (struct init_sccb *) sclp_init_req.sccb;
 982                rc = sclp_service_call(sclp_init_req.command, sccb);
 983                if (rc == -EIO)
 984                        break;
 985                sclp_init_req.status = SCLP_REQ_RUNNING;
 986                sclp_running_state = sclp_running_state_running;
 987                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
 988                                         sclp_check_timeout, 0);
 989                spin_unlock_irqrestore(&sclp_lock, flags);
 990                /* Enable service-signal interruption - needs to happen
 991                 * with IRQs enabled. */
 992                irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
 993                /* Wait for signal from interrupt or timeout */
 994                sclp_sync_wait();
 995                /* Disable service-signal interruption - needs to happen
 996                 * with IRQs enabled. */
 997                irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
 998                spin_lock_irqsave(&sclp_lock, flags);
 999                del_timer(&sclp_request_timer);
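                     /* Response code 0x0020 indicates successful completion */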
1000                if (sclp_init_req.status == SCLP_REQ_DONE &&
1001                    sccb->header.response_code == 0x20) {
1002                        rc = 0;
1003                        break;
1004                } else
1005                        rc = -EBUSY;
1006        }
1007        unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
1008        spin_unlock_irqrestore(&sclp_lock, flags);
1009        return rc;
1010}
1011
1012/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
1013 * events from interfering with rebooted system. */
1014static int
1015sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
1016{
1017        sclp_deactivate();
1018        return NOTIFY_DONE;
1019}
1020
1021static struct notifier_block sclp_reboot_notifier = {
1022        .notifier_call = sclp_reboot_event
1023};
1024
1025/*
1026 * Suspend/resume SCLP notifier implementation
1027 */
1028
1029static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
1030{
1031        struct sclp_register *reg;
1032        unsigned long flags;
1033
1034        if (!rollback) {
1035                spin_lock_irqsave(&sclp_lock, flags);
1036                list_for_each_entry(reg, &sclp_reg_list, list)
1037                        reg->pm_event_posted = 0;
1038                spin_unlock_irqrestore(&sclp_lock, flags);
1039        }
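            /* The lock is dropped around each callback, so the list is
             * re-scanned from the start on every iteration; pm_event_posted
             * records which listeners have already been notified. */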
1040        do {
1041                spin_lock_irqsave(&sclp_lock, flags);
1042                list_for_each_entry(reg, &sclp_reg_list, list) {
1043                        if (rollback && reg->pm_event_posted)
1044                                goto found;
1045                        if (!rollback && !reg->pm_event_posted)
1046                                goto found;
1047                }
1048                spin_unlock_irqrestore(&sclp_lock, flags);
1049                return;
1050found:
1051                spin_unlock_irqrestore(&sclp_lock, flags);
1052                if (reg->pm_event_fn)
1053                        reg->pm_event_fn(reg, sclp_pm_event);
1054                reg->pm_event_posted = rollback ? 0 : 1;
1055        } while (1);
1056}
1057
1058/*
1059 * Suspend/resume callbacks for platform device
1060 */
1061
1062static int sclp_freeze(struct device *dev)
1063{
1064        unsigned long flags;
1065        int rc;
1066
1067        sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
1068
1069        spin_lock_irqsave(&sclp_lock, flags);
1070        sclp_suspend_state = sclp_suspend_state_suspended;
1071        spin_unlock_irqrestore(&sclp_lock, flags);
1072
1073        /* Init suspend data */
1074        memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
1075        sclp_suspend_req.callback = sclp_suspend_req_cb;
1076        sclp_suspend_req.status = SCLP_REQ_FILLED;
1077        init_completion(&sclp_request_queue_flushed);
1078
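            /* The suspend request carries no SCCB; once it reaches the head of
             * the queue it is completed immediately and its callback signals
             * that all previously queued requests have been flushed. */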
1079        rc = sclp_add_request(&sclp_suspend_req);
1080        if (rc == 0)
1081                wait_for_completion(&sclp_request_queue_flushed);
1082        else if (rc != -ENODATA)
1083                goto fail_thaw;
1084
1085        rc = sclp_deactivate();
1086        if (rc)
1087                goto fail_thaw;
1088        return 0;
1089
1090fail_thaw:
1091        spin_lock_irqsave(&sclp_lock, flags);
1092        sclp_suspend_state = sclp_suspend_state_running;
1093        spin_unlock_irqrestore(&sclp_lock, flags);
1094        sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
1095        return rc;
1096}
1097
1098static int sclp_undo_suspend(enum sclp_pm_event event)
1099{
1100        unsigned long flags;
1101        int rc;
1102
1103        rc = sclp_reactivate();
1104        if (rc)
1105                return rc;
1106
1107        spin_lock_irqsave(&sclp_lock, flags);
1108        sclp_suspend_state = sclp_suspend_state_running;
1109        spin_unlock_irqrestore(&sclp_lock, flags);
1110
1111        sclp_pm_event(event, 0);
1112        return 0;
1113}
1114
1115static int sclp_thaw(struct device *dev)
1116{
1117        return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
1118}
1119
1120static int sclp_restore(struct device *dev)
1121{
1122        return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
1123}
1124
1125static const struct dev_pm_ops sclp_pm_ops = {
1126        .freeze         = sclp_freeze,
1127        .thaw           = sclp_thaw,
1128        .restore        = sclp_restore,
1129};
1130
1131static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
1132{
1133        return sprintf(buf, "%i\n", sclp_console_pages);
1134}
1135
1136static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);
1137
1138static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
1139{
1140        return sprintf(buf, "%i\n", sclp_console_drop);
1141}
1142
1143static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);
1144
1145static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
1146{
1147        return sprintf(buf, "%lu\n", sclp_console_full);
1148}
1149
1150static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);
1151
1152static struct attribute *sclp_drv_attrs[] = {
1153        &driver_attr_con_pages.attr,
1154        &driver_attr_con_drop.attr,
1155        &driver_attr_con_full.attr,
1156        NULL,
1157};
1158static struct attribute_group sclp_drv_attr_group = {
1159        .attrs = sclp_drv_attrs,
1160};
1161static const struct attribute_group *sclp_drv_attr_groups[] = {
1162        &sclp_drv_attr_group,
1163        NULL,
1164};
1165
1166static struct platform_driver sclp_pdrv = {
1167        .driver = {
1168                .name   = "sclp",
1169                .pm     = &sclp_pm_ops,
1170                .groups = sclp_drv_attr_groups,
1171        },
1172};
1173
1174static struct platform_device *sclp_pdev;
1175
1176/* Initialize SCLP driver. Return zero if driver is operational, non-zero
1177 * otherwise. */
1178static int
1179sclp_init(void)
1180{
1181        unsigned long flags;
1182        int rc = 0;
1183
1184        spin_lock_irqsave(&sclp_lock, flags);
1185        /* Check for previous or running initialization */
1186        if (sclp_init_state != sclp_init_state_uninitialized)
1187                goto fail_unlock;
1188        sclp_init_state = sclp_init_state_initializing;
1189        /* Set up variables */
1190        INIT_LIST_HEAD(&sclp_req_queue);
1191        INIT_LIST_HEAD(&sclp_reg_list);
1192        list_add(&sclp_state_change_event.list, &sclp_reg_list);
1193        init_timer(&sclp_request_timer);
1194        init_timer(&sclp_queue_timer);
1195        sclp_queue_timer.function = sclp_req_queue_timeout;
1196        /* Check interface */
1197        spin_unlock_irqrestore(&sclp_lock, flags);
1198        rc = sclp_check_interface();
1199        spin_lock_irqsave(&sclp_lock, flags);
1200        if (rc)
1201                goto fail_init_state_uninitialized;
1202        /* Register reboot handler */
1203        rc = register_reboot_notifier(&sclp_reboot_notifier);
1204        if (rc)
1205                goto fail_init_state_uninitialized;
1206        /* Register interrupt handler */
1207        rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
1208        if (rc)
1209                goto fail_unregister_reboot_notifier;
1210        sclp_init_state = sclp_init_state_initialized;
1211        spin_unlock_irqrestore(&sclp_lock, flags);
1212        /* Enable service-signal external interruption - needs to happen with
1213         * IRQs enabled. */
1214        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
1215        sclp_init_mask(1);
1216        return 0;
1217
1218fail_unregister_reboot_notifier:
1219        unregister_reboot_notifier(&sclp_reboot_notifier);
1220fail_init_state_uninitialized:
1221        sclp_init_state = sclp_init_state_uninitialized;
1222fail_unlock:
1223        spin_unlock_irqrestore(&sclp_lock, flags);
1224        return rc;
1225}
1226
1227/*
1228 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
1229 * to print the panic message.
1230 */
1231static int sclp_panic_notify(struct notifier_block *self,
1232                             unsigned long event, void *data)
1233{
1234        if (sclp_suspend_state == sclp_suspend_state_suspended)
1235                sclp_undo_suspend(SCLP_PM_EVENT_THAW);
1236        return NOTIFY_OK;
1237}
1238
1239static struct notifier_block sclp_on_panic_nb = {
1240        .notifier_call = sclp_panic_notify,
1241        .priority = SCLP_PANIC_PRIO,
1242};
1243
1244static __init int sclp_initcall(void)
1245{
1246        int rc;
1247
1248        rc = platform_driver_register(&sclp_pdrv);
1249        if (rc)
1250                return rc;
1251
1252        sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
1253        rc = PTR_ERR_OR_ZERO(sclp_pdev);
1254        if (rc)
1255                goto fail_platform_driver_unregister;
1256
1257        rc = atomic_notifier_chain_register(&panic_notifier_list,
1258                                            &sclp_on_panic_nb);
1259        if (rc)
1260                goto fail_platform_device_unregister;
1261
1262        return sclp_init();
1263
1264fail_platform_device_unregister:
1265        platform_device_unregister(sclp_pdev);
1266fail_platform_driver_unregister:
1267        platform_driver_unregister(&sclp_pdrv);
1268        return rc;
1269}
1270
1271arch_initcall(sclp_initcall);
1272