/* linux/net/iucv/iucv.c */
   1/*
   2 * IUCV base infrastructure.
   3 *
   4 * Copyright IBM Corp. 2001, 2009
   5 *
   6 * Author(s):
   7 *    Original source:
   8 *      Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
   9 *      Xenia Tkatschow (xenia@us.ibm.com)
  10 *    2Gb awareness and general cleanup:
  11 *      Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
  12 *    Rewritten for af_iucv:
  13 *      Martin Schwidefsky <schwidefsky@de.ibm.com>
  14 *    PM functions:
  15 *      Ursula Braun (ursula.braun@de.ibm.com)
  16 *
  17 * Documentation used:
  18 *    The original source
  19 *    CP Programming Service, IBM document # SC24-5760
  20 *
  21 * This program is free software; you can redistribute it and/or modify
  22 * it under the terms of the GNU General Public License as published by
  23 * the Free Software Foundation; either version 2, or (at your option)
  24 * any later version.
  25 *
  26 * This program is distributed in the hope that it will be useful,
  27 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  29 * GNU General Public License for more details.
  30 *
  31 * You should have received a copy of the GNU General Public License
  32 * along with this program; if not, write to the Free Software
  33 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  34 */
  35
/* Prefix every printk/pr_* message from this module with "iucv: ". */
#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  38
  39#include <linux/module.h>
  40#include <linux/moduleparam.h>
  41#include <linux/spinlock.h>
  42#include <linux/kernel.h>
  43#include <linux/slab.h>
  44#include <linux/init.h>
  45#include <linux/interrupt.h>
  46#include <linux/list.h>
  47#include <linux/errno.h>
  48#include <linux/err.h>
  49#include <linux/device.h>
  50#include <linux/cpu.h>
  51#include <linux/reboot.h>
  52#include <net/iucv/iucv.h>
  53#include <asm/atomic.h>
  54#include <asm/ebcdic.h>
  55#include <asm/io.h>
  56#include <asm/s390_ext.h>
  57#include <asm/smp.h>
  58
/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80
  76
/*
 * Bus match callback for the iucv bus. Always reports "no match";
 * iucv devices are not bound to drivers via generic bus matching.
 */
static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}
  81
/* Power-management state of the iucv base layer. */
enum iucv_pm_states {
	IUCV_PM_INITIAL = 0,
	IUCV_PM_FREEZING = 1,
	IUCV_PM_THAWING = 2,
	IUCV_PM_RESTORING = 3,
};
/* Current PM state; updated by the dev_pm_ops callbacks below. */
static enum iucv_pm_states iucv_pm_state;
  89
  90static int iucv_pm_prepare(struct device *);
  91static void iucv_pm_complete(struct device *);
  92static int iucv_pm_freeze(struct device *);
  93static int iucv_pm_thaw(struct device *);
  94static int iucv_pm_restore(struct device *);
  95
  96static struct dev_pm_ops iucv_pm_ops = {
  97        .prepare = iucv_pm_prepare,
  98        .complete = iucv_pm_complete,
  99        .freeze = iucv_pm_freeze,
 100        .thaw = iucv_pm_thaw,
 101        .restore = iucv_pm_restore,
 102};
 103
/* The iucv bus type; device/driver matching is a no-op (see iucv_bus_match). */
struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
	.pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);

/* Parent device for all devices registered on the iucv bus. */
struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);

/* Non-zero when IUCV can be used; checked by iucv_register. */
static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;	/* path id the interrupt belongs to */
	u8  ipflags1;	/* interrupt flags */
	u8  iptype;	/* interrupt type code */
	u32 res2[8];	/* remainder of the external interrupt parameter list */
};

/* List element carrying a copy of one interrupt buffer for deferred delivery. */
struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

/* Per-cpu external interrupt buffer declared to CP (see iucv_declare_cpu). */
static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
/* Cpus that currently have an interrupt buffer declared. */
static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
/* Cpus that currently have iucv interrupts enabled. */
static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
 132
/*
 * Queue of interrupt buffers for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);
 161
/*
 * IUCV function codes passed to CP in general register 0 by
 * iucv_call_b2f0 / iucv_query_maxconn.
 */
enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};
 179
/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: array of pointers to iucv_path structures,
 * indexed by pathid. Allocated by iucv_enable.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and
 * iucv_path_table.
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;
 219
/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;	/* return code filled in by CP */
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u8  iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u8  ipaudit[3];
	u8  res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_enable_cpu.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;
	u8  res1[2];
	u8  iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

/* One parameter block layout per IUCV command family. */
union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block. The _irq variant is
 * reserved for use in interrupt/atomic context.
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];
 312
/**
 * iucv_call_b2f0
 * @command: identifier of IUCV call to CP.
 * @parm: pointer to a union iucv_param block
 *
 * Calls CP to execute IUCV commands.
 *
 * Returns the condition code of the IUCV instruction if it is not 1,
 * otherwise the IUCV return code (iprcode) from the parameter block.
 */
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	/* B2F0 (IUCV) expects the function code in gpr0 and the real
	 * address of the parameter list in gpr1. */
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = virt_to_phys(parm);
	asm volatile(
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		:  "m" (*parm) : "cc");
	return (ccode == 1) ? parm->ctrl.iprcode : ccode;
}
 338
/**
 * iucv_query_maxconn
 *
 * Determines the maximum number of connections that may be established
 * and stores it in iucv_max_pathid.
 *
 * Returns 0 on success, -ENOMEM if the parameter block could not be
 * allocated or -EPERM if IUCV is not available.
 */
static int iucv_query_maxconn(void)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	void *param;
	int ccode;

	/* GFP_DMA: CP needs a 31-bit addressable parameter block. */
	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
	if (!param)
		return -ENOMEM;
	reg0 = IUCV_QUERY;
	reg1 = (unsigned long) param;
	asm volatile (
		"	.long	0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	/* On success IUCV QUERY returns the maximum pathid in gpr1. */
	if (ccode == 0)
		iucv_max_pathid = reg1;
	kfree(param);
	return ccode ? -EPERM : 0;
}
 369
/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu. Must run on the cpu it enables
 * (invoked via smp_call_function_single / on_each_cpu).
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow nonpriority message pending interrupts
	 *	0x40 - Flag to allow priority message pending interrupts
	 *	0x20 - Flag to allow nonpriority message completion interrupts
	 *	0x10 - Flag to allow priority message completion interrupts
	 *	0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow pending connections interrupts
	 *	0x40 - Flag to allow connection complete interrupts
	 *	0x20 - Flag to allow connection severed interrupts
	 *	0x10 - Flag to allow connection quiesced interrupts
	 *	0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpu_set(cpu, iucv_irq_cpumask);
}
 410
 411/**
 412 * iucv_block_cpu
 413 * @data: unused
 414 *
 415 * Block iucv interrupts on this cpu.
 416 */
 417static void iucv_block_cpu(void *data)
 418{
 419        int cpu = smp_processor_id();
 420        union iucv_param *parm;
 421
 422        /* Disable all iucv interrupts. */
 423        parm = iucv_param_irq[cpu];
 424        memset(parm, 0, sizeof(union iucv_param));
 425        iucv_call_b2f0(IUCV_SETMASK, parm);
 426
 427        /* Clear indication that iucv interrupts are allowed for this cpu. */
 428        cpu_clear(cpu, iucv_irq_cpumask);
 429}
 430
/**
 * iucv_block_cpu_almost
 * @data: unused
 *
 * Allow connection-severed interrupts only on this cpu; all other
 * iucv interrupt classes are disabled. Must run on the cpu it affects.
 */
static void iucv_block_cpu_almost(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Allow iucv control interrupts only */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x08;
	iucv_call_b2f0(IUCV_SETMASK, parm);
	/* Allow iucv-severed interrupt only (0x20 in the control mask) */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x20;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpu_clear(cpu, iucv_irq_cpumask);
}
 455
/**
 * iucv_declare_cpu
 * @data: unused
 *
 * Declare an interrupt buffer on this cpu. Must run on the cpu the
 * buffer is declared for. On success the cpu is added to
 * iucv_buffer_cpumask and its interrupt mask is set according to the
 * registered handlers (all cpus for smp-capable handlers, one cpu
 * otherwise).
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpu_isset(cpu, iucv_buffer_cpumask))
		return;		/* buffer already declared here */

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		/* Translate the CP return code to a readable reason. */
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warning("Defining an interrupt buffer on CPU %i"
			   " failed with 0x%02x (%s)\n", cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpu_set(cpu, iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}
 510
 511/**
 512 * iucv_retrieve_cpu
 513 * @data: unused
 514 *
 515 * Retrieve interrupt buffer on this cpu.
 516 */
 517static void iucv_retrieve_cpu(void *data)
 518{
 519        int cpu = smp_processor_id();
 520        union iucv_param *parm;
 521
 522        if (!cpu_isset(cpu, iucv_buffer_cpumask))
 523                return;
 524
 525        /* Block iucv interrupts. */
 526        iucv_block_cpu(NULL);
 527
 528        /* Retrieve interrupt buffer. */
 529        parm = iucv_param_irq[cpu];
 530        iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
 531
 532        /* Clear indication that an iucv buffer exists for this cpu. */
 533        cpu_clear(cpu, iucv_buffer_cpumask);
 534}
 535
/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus that have a declared buffer.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
		    !cpu_isset(cpu, iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	put_online_cpus();
}
 554
 555/**
 556 * iucv_setmask_up
 557 *
 558 * Allow iucv interrupts on a single cpu.
 559 */
 560static void iucv_setmask_up(void)
 561{
 562        cpumask_t cpumask;
 563        int cpu;
 564
 565        /* Disable all cpu but the first in cpu_irq_cpumask. */
 566        cpumask = iucv_irq_cpumask;
 567        cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
 568        for_each_cpu_mask_nr(cpu, cpumask)
 569                smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 570}
 571
 572/**
 573 * iucv_enable
 574 *
 575 * This function makes iucv ready for use. It allocates the pathid
 576 * table, declares an iucv interrupt buffer and enables the iucv
 577 * interrupts. Called when the first user has registered an iucv
 578 * handler.
 579 */
 580static int iucv_enable(void)
 581{
 582        size_t alloc_size;
 583        int cpu, rc;
 584
 585        get_online_cpus();
 586        rc = -ENOMEM;
 587        alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
 588        iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
 589        if (!iucv_path_table)
 590                goto out;
 591        /* Declare per cpu buffers. */
 592        rc = -EIO;
 593        for_each_online_cpu(cpu)
 594                smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 595        if (cpus_empty(iucv_buffer_cpumask))
 596                /* No cpu could declare an iucv buffer. */
 597                goto out;
 598        put_online_cpus();
 599        return 0;
 600out:
 601        kfree(iucv_path_table);
 602        iucv_path_table = NULL;
 603        put_online_cpus();
 604        return rc;
 605}
 606
/**
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer and frees the pathid table. Called after the
 * last user has unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	get_online_cpus();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
}
 622
/*
 * CPU hotplug callback: allocate/free the per-cpu IUCV data and keep
 * the interrupt buffer declarations consistent across online/offline
 * transitions. Returns NOTIFY_BAD to veto a transition (allocation
 * failure, or offlining the last IUCV-enabled cpu).
 */
static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
				     unsigned long action, void *hcpu)
{
	cpumask_t cpumask;
	long cpu = (long) hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Allocate irq data and both parameter blocks on the
		 * cpu's own node; GFP_DMA for 31-bit addressability. */
		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_irq_data[cpu])
			return NOTIFY_BAD;
		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param[cpu]) {
			kfree(iucv_irq_data[cpu]);
			iucv_irq_data[cpu] = NULL;
			return NOTIFY_BAD;
		}
		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param_irq[cpu]) {
			kfree(iucv_param[cpu]);
			iucv_param[cpu] = NULL;
			kfree(iucv_irq_data[cpu]);
			iucv_irq_data[cpu] = NULL;
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Release everything allocated in CPU_UP_PREPARE. */
		kfree(iucv_param_irq[cpu]);
		iucv_param_irq[cpu] = NULL;
		kfree(iucv_param[cpu]);
		iucv_param[cpu] = NULL;
		kfree(iucv_irq_data[cpu]);
		iucv_irq_data[cpu] = NULL;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!iucv_path_table)
			break;	/* iucv not enabled yet */
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!iucv_path_table)
			break;	/* iucv not enabled yet */
		cpumask = iucv_buffer_cpumask;
		cpu_clear(cpu, cpumask);
		if (cpus_empty(cpumask))
			/* Can't offline last IUCV enabled cpu. */
			return NOTIFY_BAD;
		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
		/* Keep at least one cpu with interrupts enabled. */
		if (cpus_empty(iucv_irq_cpumask))
			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
						 iucv_allow_cpu, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata iucv_cpu_notifier = {
	.notifier_call = iucv_cpu_notify,
};
 693
 694/**
 695 * iucv_sever_pathid
 696 * @pathid: path identification number.
 697 * @userdata: 16-bytes of user data.
 698 *
 699 * Sever an iucv path to free up the pathid. Used internally.
 700 */
 701static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
 702{
 703        union iucv_param *parm;
 704
 705        parm = iucv_param_irq[smp_processor_id()];
 706        memset(parm, 0, sizeof(union iucv_param));
 707        if (userdata)
 708                memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
 709        parm->ctrl.ippathid = pathid;
 710        return iucv_call_b2f0(IUCV_SEVER, parm);
 711}
 712
/**
 * __iucv_cleanup_queue
 * @dummy: unused dummy argument
 *
 * Nop function called via smp_call_function to force work items from
 * pending external iucv interrupts to the work queue.
 */
static void __iucv_cleanup_queue(void *dummy)
{
}
 723
/**
 * iucv_cleanup_queue
 *
 * Function called after a path has been severed to find all remaining
 * work items for the now stale pathid. The caller needs to hold the
 * iucv_table_lock.
 */
static void iucv_cleanup_queue(void)
{
	struct iucv_irq_list *p, *n;

	/*
	 * When a path is severed, the pathid can be reused immediately
	 * on a iucv connect or a connection pending interrupt. Remove
	 * all entries from the task queue that refer to a stale pathid
	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
	 * or deliver the connection pending interrupt. To get all the
	 * pending interrupts force them to the work queue by calling
	 * an empty function on all cpus.
	 */
	smp_call_function(__iucv_cleanup_queue, NULL, 1);
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
		/* Remove stale work items from the task queue. */
		if (iucv_path_table[p->data.ippathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_queue_lock);
}
 755
 756/**
 757 * iucv_register:
 758 * @handler: address of iucv handler structure
 759 * @smp: != 0 indicates that the handler can deal with out of order messages
 760 *
 761 * Registers a driver with IUCV.
 762 *
 763 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
 764 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
 765 */
 766int iucv_register(struct iucv_handler *handler, int smp)
 767{
 768        int rc;
 769
 770        if (!iucv_available)
 771                return -ENOSYS;
 772        mutex_lock(&iucv_register_mutex);
 773        if (!smp)
 774                iucv_nonsmp_handler++;
 775        if (list_empty(&iucv_handler_list)) {
 776                rc = iucv_enable();
 777                if (rc)
 778                        goto out_mutex;
 779        } else if (!smp && iucv_nonsmp_handler == 1)
 780                iucv_setmask_up();
 781        INIT_LIST_HEAD(&handler->paths);
 782
 783        spin_lock_bh(&iucv_table_lock);
 784        list_add_tail(&handler->list, &iucv_handler_list);
 785        spin_unlock_bh(&iucv_table_lock);
 786        rc = 0;
 787out_mutex:
 788        mutex_unlock(&iucv_register_mutex);
 789        return rc;
 790}
 791EXPORT_SYMBOL(iucv_register);
 792
/**
 * iucv_unregister
 * @handler:  address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Unregister driver from IUCV.
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still referring to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		/* Last handler gone: shut iucv down completely. */
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		/* Last non-smp handler gone: re-enable all cpus. */
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}
EXPORT_SYMBOL(iucv_unregister);
 825
 826static int iucv_reboot_event(struct notifier_block *this,
 827                             unsigned long event, void *ptr)
 828{
 829        int i, rc;
 830
 831        get_online_cpus();
 832        on_each_cpu(iucv_block_cpu, NULL, 1);
 833        preempt_disable();
 834        for (i = 0; i < iucv_max_pathid; i++) {
 835                if (iucv_path_table[i])
 836                        rc = iucv_sever_pathid(i, NULL);
 837        }
 838        preempt_enable();
 839        put_online_cpus();
 840        iucv_disable();
 841        return NOTIFY_DONE;
 842}
 843
/* Registered on the reboot notifier chain (see iucv_reboot_event). */
static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};
 847
/**
 * iucv_path_accept
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure (unused in the body;
 *           presumably kept for API symmetry with iucv_path_connect)
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function is issued after the user received a connection pending
 * external interrupt and now wishes to complete the IUCV communication path.
 *
 * Returns the result of the CP IUCV call, or -EIO if no cpu has a
 * declared interrupt buffer.
 */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
		     u8 userdata[16], void *private)
{
	union iucv_param *parm;
	int rc;

	/* bh off: iucv_param of this cpu must not be re-entered. */
	local_bh_disable();
	if (cpus_empty(iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Prepare parameter block. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = path->pathid;
	parm->ctrl.ipmsglim = path->msglim;
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ipflags1 = path->flags;

	rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
	if (!rc) {
		/* CP may have adjusted the message limit and flags. */
		path->private = private;
		path->msglim = parm->ctrl.ipmsglim;
		path->flags = parm->ctrl.ipflags1;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_accept);
 891
/**
 * iucv_path_connect
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userid: 8-byte user identification
 * @system: 8-byte target system identification
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 *
 * Returns the result of the CP IUCV call, or -EIO if no cpu has a
 * declared interrupt buffer or CP returned an out-of-range pathid.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 userid[8], u8 system[8], u8 userdata[16],
		      void *private)
{
	union iucv_param *parm;
	int rc;

	/* Hold the table lock so the new pathid slot cannot race with
	 * handlers; flush stale entries for reused pathids first. */
	spin_lock_bh(&iucv_table_lock);
	iucv_cleanup_queue();
	if (cpus_empty(iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		/* CP expects EBCDIC, upper case, blank padded ids. */
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));

	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			/* Record the new path under the pathid CP assigned. */
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			/* Pathid out of range: sever it again and fail. */
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
out:
	spin_unlock_bh(&iucv_table_lock);
	return rc;
}
EXPORT_SYMBOL(iucv_path_connect);
 959
 960/**
 961 * iucv_path_quiesce:
 962 * @path: address of iucv path structure
 963 * @userdata: 16 bytes of data reflected to the communication partner
 964 *
 965 * This function temporarily suspends incoming messages on an IUCV path.
 966 * You can later reactivate the path by invoking the iucv_resume function.
 967 *
 968 * Returns the result from the CP IUCV call.
 969 */
 970int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
 971{
 972        union iucv_param *parm;
 973        int rc;
 974
 975        local_bh_disable();
 976        if (cpus_empty(iucv_buffer_cpumask)) {
 977                rc = -EIO;
 978                goto out;
 979        }
 980        parm = iucv_param[smp_processor_id()];
 981        memset(parm, 0, sizeof(union iucv_param));
 982        if (userdata)
 983                memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
 984        parm->ctrl.ippathid = path->pathid;
 985        rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
 986out:
 987        local_bh_enable();
 988        return rc;
 989}
 990EXPORT_SYMBOL(iucv_path_quiesce);
 991
 992/**
 993 * iucv_path_resume:
 994 * @path: address of iucv path structure
 995 * @userdata: 16 bytes of data reflected to the communication partner
 996 *
 997 * This function resumes incoming messages on an IUCV path that has
 998 * been stopped with iucv_path_quiesce.
 999 *
1000 * Returns the result from the CP IUCV call.
1001 */
1002int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
1003{
1004        union iucv_param *parm;
1005        int rc;
1006
1007        local_bh_disable();
1008        if (cpus_empty(iucv_buffer_cpumask)) {
1009                rc = -EIO;
1010                goto out;
1011        }
1012        parm = iucv_param[smp_processor_id()];
1013        memset(parm, 0, sizeof(union iucv_param));
1014        if (userdata)
1015                memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
1016        parm->ctrl.ippathid = path->pathid;
1017        rc = iucv_call_b2f0(IUCV_RESUME, parm);
1018out:
1019        local_bh_enable();
1020        return rc;
1021}
1022
1023/**
1024 * iucv_path_sever
1025 * @path: address of iucv path structure
1026 * @userdata: 16 bytes of data reflected to the communication partner
1027 *
1028 * This function terminates an IUCV path.
1029 *
1030 * Returns the result from the CP IUCV call.
1031 */
1032int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
1033{
1034        int rc;
1035
1036        preempt_disable();
1037        if (cpus_empty(iucv_buffer_cpumask)) {
1038                rc = -EIO;
1039                goto out;
1040        }
1041        if (iucv_active_cpu != smp_processor_id())
1042                spin_lock_bh(&iucv_table_lock);
1043        rc = iucv_sever_pathid(path->pathid, userdata);
1044        iucv_path_table[path->pathid] = NULL;
1045        list_del_init(&path->list);
1046        if (iucv_active_cpu != smp_processor_id())
1047                spin_unlock_bh(&iucv_table_lock);
1048out:
1049        preempt_enable();
1050        return rc;
1051}
1052EXPORT_SYMBOL(iucv_path_sever);
1053
1054/**
1055 * iucv_message_purge
1056 * @path: address of iucv path structure
1057 * @msg: address of iucv msg structure
1058 * @srccls: source class of message
1059 *
1060 * Cancels a message you have sent.
1061 *
1062 * Returns the result from the CP IUCV call.
1063 */
1064int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
1065                       u32 srccls)
1066{
1067        union iucv_param *parm;
1068        int rc;
1069
1070        local_bh_disable();
1071        if (cpus_empty(iucv_buffer_cpumask)) {
1072                rc = -EIO;
1073                goto out;
1074        }
1075        parm = iucv_param[smp_processor_id()];
1076        memset(parm, 0, sizeof(union iucv_param));
1077        parm->purge.ippathid = path->pathid;
1078        parm->purge.ipmsgid = msg->id;
1079        parm->purge.ipsrccls = srccls;
1080        parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
1081        rc = iucv_call_b2f0(IUCV_PURGE, parm);
1082        if (!rc) {
1083                msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
1084                msg->tag = parm->purge.ipmsgtag;
1085        }
1086out:
1087        local_bh_enable();
1088        return rc;
1089}
1090EXPORT_SYMBOL(iucv_message_purge);
1091
1092/**
1093 * iucv_message_receive_iprmdata
1094 * @path: address of iucv path structure
1095 * @msg: address of iucv msg structure
1096 * @flags: how the message is received (IUCV_IPBUFLST)
1097 * @buffer: address of data buffer or address of struct iucv_array
1098 * @size: length of data buffer
1099 * @residual:
1100 *
1101 * Internal function used by iucv_message_receive and __iucv_message_receive
1102 * to receive RMDATA data stored in struct iucv_message.
1103 */
1104static int iucv_message_receive_iprmdata(struct iucv_path *path,
1105                                         struct iucv_message *msg,
1106                                         u8 flags, void *buffer,
1107                                         size_t size, size_t *residual)
1108{
1109        struct iucv_array *array;
1110        u8 *rmmsg;
1111        size_t copy;
1112
1113        /*
1114         * Message is 8 bytes long and has been stored to the
1115         * message descriptor itself.
1116         */
1117        if (residual)
1118                *residual = abs(size - 8);
1119        rmmsg = msg->rmmsg;
1120        if (flags & IUCV_IPBUFLST) {
1121                /* Copy to struct iucv_array. */
1122                size = (size < 8) ? size : 8;
1123                for (array = buffer; size > 0; array++) {
1124                        copy = min_t(size_t, size, array->length);
1125                        memcpy((u8 *)(addr_t) array->address,
1126                                rmmsg, copy);
1127                        rmmsg += copy;
1128                        size -= copy;
1129                }
1130        } else {
1131                /* Copy to direct buffer. */
1132                memcpy(buffer, rmmsg, min_t(size_t, size, 8));
1133        }
1134        return 0;
1135}
1136
1137/**
1138 * __iucv_message_receive
1139 * @path: address of iucv path structure
1140 * @msg: address of iucv msg structure
1141 * @flags: how the message is received (IUCV_IPBUFLST)
1142 * @buffer: address of data buffer or address of struct iucv_array
1143 * @size: length of data buffer
1144 * @residual:
1145 *
1146 * This function receives messages that are being sent to you over
1147 * established paths. This function will deal with RMDATA messages
1148 * embedded in struct iucv_message as well.
1149 *
1150 * Locking:     no locking
1151 *
1152 * Returns the result from the CP IUCV call.
1153 */
1154int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
1155                           u8 flags, void *buffer, size_t size, size_t *residual)
1156{
1157        union iucv_param *parm;
1158        int rc;
1159
1160        if (msg->flags & IUCV_IPRMDATA)
1161                return iucv_message_receive_iprmdata(path, msg, flags,
1162                                                     buffer, size, residual);
1163        if (cpus_empty(iucv_buffer_cpumask)) {
1164                rc = -EIO;
1165                goto out;
1166        }
1167        parm = iucv_param[smp_processor_id()];
1168        memset(parm, 0, sizeof(union iucv_param));
1169        parm->db.ipbfadr1 = (u32)(addr_t) buffer;
1170        parm->db.ipbfln1f = (u32) size;
1171        parm->db.ipmsgid = msg->id;
1172        parm->db.ippathid = path->pathid;
1173        parm->db.iptrgcls = msg->class;
1174        parm->db.ipflags1 = (flags | IUCV_IPFGPID |
1175                             IUCV_IPFGMID | IUCV_IPTRGCLS);
1176        rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
1177        if (!rc || rc == 5) {
1178                msg->flags = parm->db.ipflags1;
1179                if (residual)
1180                        *residual = parm->db.ipbfln1f;
1181        }
1182out:
1183        return rc;
1184}
1185EXPORT_SYMBOL(__iucv_message_receive);
1186
1187/**
1188 * iucv_message_receive
1189 * @path: address of iucv path structure
1190 * @msg: address of iucv msg structure
1191 * @flags: how the message is received (IUCV_IPBUFLST)
1192 * @buffer: address of data buffer or address of struct iucv_array
1193 * @size: length of data buffer
1194 * @residual:
1195 *
1196 * This function receives messages that are being sent to you over
1197 * established paths. This function will deal with RMDATA messages
1198 * embedded in struct iucv_message as well.
1199 *
1200 * Locking:     local_bh_enable/local_bh_disable
1201 *
1202 * Returns the result from the CP IUCV call.
1203 */
1204int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
1205                         u8 flags, void *buffer, size_t size, size_t *residual)
1206{
1207        int rc;
1208
1209        if (msg->flags & IUCV_IPRMDATA)
1210                return iucv_message_receive_iprmdata(path, msg, flags,
1211                                                     buffer, size, residual);
1212        local_bh_disable();
1213        rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
1214        local_bh_enable();
1215        return rc;
1216}
1217EXPORT_SYMBOL(iucv_message_receive);
1218
1219/**
1220 * iucv_message_reject
1221 * @path: address of iucv path structure
1222 * @msg: address of iucv msg structure
1223 *
1224 * The reject function refuses a specified message. Between the time you
1225 * are notified of a message and the time that you complete the message,
1226 * the message may be rejected.
1227 *
1228 * Returns the result from the CP IUCV call.
1229 */
1230int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
1231{
1232        union iucv_param *parm;
1233        int rc;
1234
1235        local_bh_disable();
1236        if (cpus_empty(iucv_buffer_cpumask)) {
1237                rc = -EIO;
1238                goto out;
1239        }
1240        parm = iucv_param[smp_processor_id()];
1241        memset(parm, 0, sizeof(union iucv_param));
1242        parm->db.ippathid = path->pathid;
1243        parm->db.ipmsgid = msg->id;
1244        parm->db.iptrgcls = msg->class;
1245        parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
1246        rc = iucv_call_b2f0(IUCV_REJECT, parm);
1247out:
1248        local_bh_enable();
1249        return rc;
1250}
1251EXPORT_SYMBOL(iucv_message_reject);
1252
1253/**
1254 * iucv_message_reply
1255 * @path: address of iucv path structure
1256 * @msg: address of iucv msg structure
1257 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
1258 * @reply: address of reply data buffer or address of struct iucv_array
1259 * @size: length of reply data buffer
1260 *
1261 * This function responds to the two-way messages that you receive. You
1262 * must identify completely the message to which you wish to reply. ie,
1263 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
1264 * the parameter list.
1265 *
1266 * Returns the result from the CP IUCV call.
1267 */
1268int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
1269                       u8 flags, void *reply, size_t size)
1270{
1271        union iucv_param *parm;
1272        int rc;
1273
1274        local_bh_disable();
1275        if (cpus_empty(iucv_buffer_cpumask)) {
1276                rc = -EIO;
1277                goto out;
1278        }
1279        parm = iucv_param[smp_processor_id()];
1280        memset(parm, 0, sizeof(union iucv_param));
1281        if (flags & IUCV_IPRMDATA) {
1282                parm->dpl.ippathid = path->pathid;
1283                parm->dpl.ipflags1 = flags;
1284                parm->dpl.ipmsgid = msg->id;
1285                parm->dpl.iptrgcls = msg->class;
1286                memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
1287        } else {
1288                parm->db.ipbfadr1 = (u32)(addr_t) reply;
1289                parm->db.ipbfln1f = (u32) size;
1290                parm->db.ippathid = path->pathid;
1291                parm->db.ipflags1 = flags;
1292                parm->db.ipmsgid = msg->id;
1293                parm->db.iptrgcls = msg->class;
1294        }
1295        rc = iucv_call_b2f0(IUCV_REPLY, parm);
1296out:
1297        local_bh_enable();
1298        return rc;
1299}
1300EXPORT_SYMBOL(iucv_message_reply);
1301
1302/**
1303 * __iucv_message_send
1304 * @path: address of iucv path structure
1305 * @msg: address of iucv msg structure
1306 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
1307 * @srccls: source class of message
1308 * @buffer: address of send buffer or address of struct iucv_array
1309 * @size: length of send buffer
1310 *
1311 * This function transmits data to another application. Data to be
1312 * transmitted is in a buffer and this is a one-way message and the
1313 * receiver will not reply to the message.
1314 *
1315 * Locking:     no locking
1316 *
1317 * Returns the result from the CP IUCV call.
1318 */
1319int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
1320                      u8 flags, u32 srccls, void *buffer, size_t size)
1321{
1322        union iucv_param *parm;
1323        int rc;
1324
1325        if (cpus_empty(iucv_buffer_cpumask)) {
1326                rc = -EIO;
1327                goto out;
1328        }
1329        parm = iucv_param[smp_processor_id()];
1330        memset(parm, 0, sizeof(union iucv_param));
1331        if (flags & IUCV_IPRMDATA) {
1332                /* Message of 8 bytes can be placed into the parameter list. */
1333                parm->dpl.ippathid = path->pathid;
1334                parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
1335                parm->dpl.iptrgcls = msg->class;
1336                parm->dpl.ipsrccls = srccls;
1337                parm->dpl.ipmsgtag = msg->tag;
1338                memcpy(parm->dpl.iprmmsg, buffer, 8);
1339        } else {
1340                parm->db.ipbfadr1 = (u32)(addr_t) buffer;
1341                parm->db.ipbfln1f = (u32) size;
1342                parm->db.ippathid = path->pathid;
1343                parm->db.ipflags1 = flags | IUCV_IPNORPY;
1344                parm->db.iptrgcls = msg->class;
1345                parm->db.ipsrccls = srccls;
1346                parm->db.ipmsgtag = msg->tag;
1347        }
1348        rc = iucv_call_b2f0(IUCV_SEND, parm);
1349        if (!rc)
1350                msg->id = parm->db.ipmsgid;
1351out:
1352        return rc;
1353}
1354EXPORT_SYMBOL(__iucv_message_send);
1355
1356/**
1357 * iucv_message_send
1358 * @path: address of iucv path structure
1359 * @msg: address of iucv msg structure
1360 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
1361 * @srccls: source class of message
1362 * @buffer: address of send buffer or address of struct iucv_array
1363 * @size: length of send buffer
1364 *
1365 * This function transmits data to another application. Data to be
1366 * transmitted is in a buffer and this is a one-way message and the
1367 * receiver will not reply to the message.
1368 *
1369 * Locking:     local_bh_enable/local_bh_disable
1370 *
1371 * Returns the result from the CP IUCV call.
1372 */
1373int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
1374                      u8 flags, u32 srccls, void *buffer, size_t size)
1375{
1376        int rc;
1377
1378        local_bh_disable();
1379        rc = __iucv_message_send(path, msg, flags, srccls, buffer, size);
1380        local_bh_enable();
1381        return rc;
1382}
1383EXPORT_SYMBOL(iucv_message_send);
1384
1385/**
1386 * iucv_message_send2way
1387 * @path: address of iucv path structure
1388 * @msg: address of iucv msg structure
1389 * @flags: how the message is sent and the reply is received
1390 *         (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
1391 * @srccls: source class of message
1392 * @buffer: address of send buffer or address of struct iucv_array
1393 * @size: length of send buffer
1394 * @ansbuf: address of answer buffer or address of struct iucv_array
1395 * @asize: size of reply buffer
1396 *
1397 * This function transmits data to another application. Data to be
1398 * transmitted is in a buffer. The receiver of the send is expected to
1399 * reply to the message and a buffer is provided into which IUCV moves
1400 * the reply to this message.
1401 *
1402 * Returns the result from the CP IUCV call.
1403 */
1404int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
1405                          u8 flags, u32 srccls, void *buffer, size_t size,
1406                          void *answer, size_t asize, size_t *residual)
1407{
1408        union iucv_param *parm;
1409        int rc;
1410
1411        local_bh_disable();
1412        if (cpus_empty(iucv_buffer_cpumask)) {
1413                rc = -EIO;
1414                goto out;
1415        }
1416        parm = iucv_param[smp_processor_id()];
1417        memset(parm, 0, sizeof(union iucv_param));
1418        if (flags & IUCV_IPRMDATA) {
1419                parm->dpl.ippathid = path->pathid;
1420                parm->dpl.ipflags1 = path->flags;       /* priority message */
1421                parm->dpl.iptrgcls = msg->class;
1422                parm->dpl.ipsrccls = srccls;
1423                parm->dpl.ipmsgtag = msg->tag;
1424                parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
1425                parm->dpl.ipbfln2f = (u32) asize;
1426                memcpy(parm->dpl.iprmmsg, buffer, 8);
1427        } else {
1428                parm->db.ippathid = path->pathid;
1429                parm->db.ipflags1 = path->flags;        /* priority message */
1430                parm->db.iptrgcls = msg->class;
1431                parm->db.ipsrccls = srccls;
1432                parm->db.ipmsgtag = msg->tag;
1433                parm->db.ipbfadr1 = (u32)(addr_t) buffer;
1434                parm->db.ipbfln1f = (u32) size;
1435                parm->db.ipbfadr2 = (u32)(addr_t) answer;
1436                parm->db.ipbfln2f = (u32) asize;
1437        }
1438        rc = iucv_call_b2f0(IUCV_SEND, parm);
1439        if (!rc)
1440                msg->id = parm->db.ipmsgid;
1441out:
1442        local_bh_enable();
1443        return rc;
1444}
1445EXPORT_SYMBOL(iucv_message_send2way);
1446
1447/**
1448 * iucv_path_pending
1449 * @data: Pointer to external interrupt buffer
1450 *
1451 * Process connection pending work item. Called from tasklet while holding
1452 * iucv_table_lock.
1453 */
1454struct iucv_path_pending {
1455        u16 ippathid;
1456        u8  ipflags1;
1457        u8  iptype;
1458        u16 ipmsglim;
1459        u16 res1;
1460        u8  ipvmid[8];
1461        u8  ipuser[16];
1462        u32 res3;
1463        u8  ippollfg;
1464        u8  res4[3];
1465} __attribute__ ((packed));
1466
1467static void iucv_path_pending(struct iucv_irq_data *data)
1468{
1469        struct iucv_path_pending *ipp = (void *) data;
1470        struct iucv_handler *handler;
1471        struct iucv_path *path;
1472        char *error;
1473
1474        BUG_ON(iucv_path_table[ipp->ippathid]);
1475        /* New pathid, handler found. Create a new path struct. */
1476        error = iucv_error_no_memory;
1477        path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
1478        if (!path)
1479                goto out_sever;
1480        path->pathid = ipp->ippathid;
1481        iucv_path_table[path->pathid] = path;
1482        EBCASC(ipp->ipvmid, 8);
1483
1484        /* Call registered handler until one is found that wants the path. */
1485        list_for_each_entry(handler, &iucv_handler_list, list) {
1486                if (!handler->path_pending)
1487                        continue;
1488                /*
1489                 * Add path to handler to allow a call to iucv_path_sever
1490                 * inside the path_pending function. If the handler returns
1491                 * an error remove the path from the handler again.
1492                 */
1493                list_add(&path->list, &handler->paths);
1494                path->handler = handler;
1495                if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
1496                        return;
1497                list_del(&path->list);
1498                path->handler = NULL;
1499        }
1500        /* No handler wanted the path. */
1501        iucv_path_table[path->pathid] = NULL;
1502        iucv_path_free(path);
1503        error = iucv_error_no_listener;
1504out_sever:
1505        iucv_sever_pathid(ipp->ippathid, error);
1506}
1507
1508/**
1509 * iucv_path_complete
1510 * @data: Pointer to external interrupt buffer
1511 *
1512 * Process connection complete work item. Called from tasklet while holding
1513 * iucv_table_lock.
1514 */
1515struct iucv_path_complete {
1516        u16 ippathid;
1517        u8  ipflags1;
1518        u8  iptype;
1519        u16 ipmsglim;
1520        u16 res1;
1521        u8  res2[8];
1522        u8  ipuser[16];
1523        u32 res3;
1524        u8  ippollfg;
1525        u8  res4[3];
1526} __attribute__ ((packed));
1527
1528static void iucv_path_complete(struct iucv_irq_data *data)
1529{
1530        struct iucv_path_complete *ipc = (void *) data;
1531        struct iucv_path *path = iucv_path_table[ipc->ippathid];
1532
1533        if (path)
1534                path->flags = ipc->ipflags1;
1535        if (path && path->handler && path->handler->path_complete)
1536                path->handler->path_complete(path, ipc->ipuser);
1537}
1538
1539/**
1540 * iucv_path_severed
1541 * @data: Pointer to external interrupt buffer
1542 *
1543 * Process connection severed work item. Called from tasklet while holding
1544 * iucv_table_lock.
1545 */
1546struct iucv_path_severed {
1547        u16 ippathid;
1548        u8  res1;
1549        u8  iptype;
1550        u32 res2;
1551        u8  res3[8];
1552        u8  ipuser[16];
1553        u32 res4;
1554        u8  ippollfg;
1555        u8  res5[3];
1556} __attribute__ ((packed));
1557
1558static void iucv_path_severed(struct iucv_irq_data *data)
1559{
1560        struct iucv_path_severed *ips = (void *) data;
1561        struct iucv_path *path = iucv_path_table[ips->ippathid];
1562
1563        if (!path || !path->handler)    /* Already severed */
1564                return;
1565        if (path->handler->path_severed)
1566                path->handler->path_severed(path, ips->ipuser);
1567        else {
1568                iucv_sever_pathid(path->pathid, NULL);
1569                iucv_path_table[path->pathid] = NULL;
1570                list_del(&path->list);
1571                iucv_path_free(path);
1572        }
1573}
1574
1575/**
1576 * iucv_path_quiesced
1577 * @data: Pointer to external interrupt buffer
1578 *
1579 * Process connection quiesced work item. Called from tasklet while holding
1580 * iucv_table_lock.
1581 */
1582struct iucv_path_quiesced {
1583        u16 ippathid;
1584        u8  res1;
1585        u8  iptype;
1586        u32 res2;
1587        u8  res3[8];
1588        u8  ipuser[16];
1589        u32 res4;
1590        u8  ippollfg;
1591        u8  res5[3];
1592} __attribute__ ((packed));
1593
1594static void iucv_path_quiesced(struct iucv_irq_data *data)
1595{
1596        struct iucv_path_quiesced *ipq = (void *) data;
1597        struct iucv_path *path = iucv_path_table[ipq->ippathid];
1598
1599        if (path && path->handler && path->handler->path_quiesced)
1600                path->handler->path_quiesced(path, ipq->ipuser);
1601}
1602
1603/**
1604 * iucv_path_resumed
1605 * @data: Pointer to external interrupt buffer
1606 *
1607 * Process connection resumed work item. Called from tasklet while holding
1608 * iucv_table_lock.
1609 */
1610struct iucv_path_resumed {
1611        u16 ippathid;
1612        u8  res1;
1613        u8  iptype;
1614        u32 res2;
1615        u8  res3[8];
1616        u8  ipuser[16];
1617        u32 res4;
1618        u8  ippollfg;
1619        u8  res5[3];
1620} __attribute__ ((packed));
1621
1622static void iucv_path_resumed(struct iucv_irq_data *data)
1623{
1624        struct iucv_path_resumed *ipr = (void *) data;
1625        struct iucv_path *path = iucv_path_table[ipr->ippathid];
1626
1627        if (path && path->handler && path->handler->path_resumed)
1628                path->handler->path_resumed(path, ipr->ipuser);
1629}
1630
1631/**
1632 * iucv_message_complete
1633 * @data: Pointer to external interrupt buffer
1634 *
1635 * Process message complete work item. Called from tasklet while holding
1636 * iucv_table_lock.
1637 */
1638struct iucv_message_complete {
1639        u16 ippathid;
1640        u8  ipflags1;
1641        u8  iptype;
1642        u32 ipmsgid;
1643        u32 ipaudit;
1644        u8  iprmmsg[8];
1645        u32 ipsrccls;
1646        u32 ipmsgtag;
1647        u32 res;
1648        u32 ipbfln2f;
1649        u8  ippollfg;
1650        u8  res2[3];
1651} __attribute__ ((packed));
1652
1653static void iucv_message_complete(struct iucv_irq_data *data)
1654{
1655        struct iucv_message_complete *imc = (void *) data;
1656        struct iucv_path *path = iucv_path_table[imc->ippathid];
1657        struct iucv_message msg;
1658
1659        if (path && path->handler && path->handler->message_complete) {
1660                msg.flags = imc->ipflags1;
1661                msg.id = imc->ipmsgid;
1662                msg.audit = imc->ipaudit;
1663                memcpy(msg.rmmsg, imc->iprmmsg, 8);
1664                msg.class = imc->ipsrccls;
1665                msg.tag = imc->ipmsgtag;
1666                msg.length = imc->ipbfln2f;
1667                path->handler->message_complete(path, &msg);
1668        }
1669}
1670
1671/**
1672 * iucv_message_pending
1673 * @data: Pointer to external interrupt buffer
1674 *
1675 * Process message pending work item. Called from tasklet while holding
1676 * iucv_table_lock.
1677 */
1678struct iucv_message_pending {
1679        u16 ippathid;
1680        u8  ipflags1;
1681        u8  iptype;
1682        u32 ipmsgid;
1683        u32 iptrgcls;
1684        union {
1685                u32 iprmmsg1_u32;
1686                u8  iprmmsg1[4];
1687        } ln1msg1;
1688        union {
1689                u32 ipbfln1f;
1690                u8  iprmmsg2[4];
1691        } ln1msg2;
1692        u32 res1[3];
1693        u32 ipbfln2f;
1694        u8  ippollfg;
1695        u8  res2[3];
1696} __attribute__ ((packed));
1697
1698static void iucv_message_pending(struct iucv_irq_data *data)
1699{
1700        struct iucv_message_pending *imp = (void *) data;
1701        struct iucv_path *path = iucv_path_table[imp->ippathid];
1702        struct iucv_message msg;
1703
1704        if (path && path->handler && path->handler->message_pending) {
1705                msg.flags = imp->ipflags1;
1706                msg.id = imp->ipmsgid;
1707                msg.class = imp->iptrgcls;
1708                if (imp->ipflags1 & IUCV_IPRMDATA) {
1709                        memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
1710                        msg.length = 8;
1711                } else
1712                        msg.length = imp->ln1msg2.ipbfln1f;
1713                msg.reply_size = imp->ipbfln2f;
1714                path->handler->message_pending(path, &msg);
1715        }
1716}
1717
1718/**
1719 * iucv_tasklet_fn:
1720 *
1721 * This tasklet loops over the queue of irq buffers created by
1722 * iucv_external_interrupt, calls the appropriate action handler
1723 * and then frees the buffer.
1724 */
1725static void iucv_tasklet_fn(unsigned long ignored)
1726{
1727        typedef void iucv_irq_fn(struct iucv_irq_data *);
1728        static iucv_irq_fn *irq_fn[] = {
1729                [0x02] = iucv_path_complete,
1730                [0x03] = iucv_path_severed,
1731                [0x04] = iucv_path_quiesced,
1732                [0x05] = iucv_path_resumed,
1733                [0x06] = iucv_message_complete,
1734                [0x07] = iucv_message_complete,
1735                [0x08] = iucv_message_pending,
1736                [0x09] = iucv_message_pending,
1737        };
1738        LIST_HEAD(task_queue);
1739        struct iucv_irq_list *p, *n;
1740
1741        /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
1742        if (!spin_trylock(&iucv_table_lock)) {
1743                tasklet_schedule(&iucv_tasklet);
1744                return;
1745        }
1746        iucv_active_cpu = smp_processor_id();
1747
1748        spin_lock_irq(&iucv_queue_lock);
1749        list_splice_init(&iucv_task_queue, &task_queue);
1750        spin_unlock_irq(&iucv_queue_lock);
1751
1752        list_for_each_entry_safe(p, n, &task_queue, list) {
1753                list_del_init(&p->list);
1754                irq_fn[p->data.iptype](&p->data);
1755                kfree(p);
1756        }
1757
1758        iucv_active_cpu = -1;
1759        spin_unlock(&iucv_table_lock);
1760}
1761
1762/**
1763 * iucv_work_fn:
1764 *
1765 * This work function loops over the queue of path pending irq blocks
1766 * created by iucv_external_interrupt, calls the appropriate action
1767 * handler and then frees the buffer.
1768 */
1769static void iucv_work_fn(struct work_struct *work)
1770{
1771        typedef void iucv_irq_fn(struct iucv_irq_data *);
1772        LIST_HEAD(work_queue);
1773        struct iucv_irq_list *p, *n;
1774
1775        /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
1776        spin_lock_bh(&iucv_table_lock);
1777        iucv_active_cpu = smp_processor_id();
1778
1779        spin_lock_irq(&iucv_queue_lock);
1780        list_splice_init(&iucv_work_queue, &work_queue);
1781        spin_unlock_irq(&iucv_queue_lock);
1782
1783        iucv_cleanup_queue();
1784        list_for_each_entry_safe(p, n, &work_queue, list) {
1785                list_del_init(&p->list);
1786                iucv_path_pending(&p->data);
1787                kfree(p);
1788        }
1789
1790        iucv_active_cpu = -1;
1791        spin_unlock_bh(&iucv_table_lock);
1792}
1793
1794/**
1795 * iucv_external_interrupt
1796 * @code: irq code
1797 *
1798 * Handles external interrupts coming in from CP.
1799 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
1800 */
1801static void iucv_external_interrupt(u16 code)
1802{
1803        struct iucv_irq_data *p;
1804        struct iucv_irq_list *work;
1805
1806        p = iucv_irq_data[smp_processor_id()];
1807        if (p->ippathid >= iucv_max_pathid) {
1808                WARN_ON(p->ippathid >= iucv_max_pathid);
1809                iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
1810                return;
1811        }
1812        BUG_ON(p->iptype  < 0x01 || p->iptype > 0x09);
1813        work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
1814        if (!work) {
1815                pr_warning("iucv_external_interrupt: out of memory\n");
1816                return;
1817        }
1818        memcpy(&work->data, p, sizeof(work->data));
1819        spin_lock(&iucv_queue_lock);
1820        if (p->iptype == 0x01) {
1821                /* Path pending interrupt. */
1822                list_add_tail(&work->list, &iucv_work_queue);
1823                schedule_work(&iucv_work);
1824        } else {
1825                /* The other interrupts. */
1826                list_add_tail(&work->list, &iucv_task_queue);
1827                tasklet_schedule(&iucv_tasklet);
1828        }
1829        spin_unlock(&iucv_queue_lock);
1830}
1831
1832static int iucv_pm_prepare(struct device *dev)
1833{
1834        int rc = 0;
1835
1836#ifdef CONFIG_PM_DEBUG
1837        printk(KERN_INFO "iucv_pm_prepare\n");
1838#endif
1839        if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
1840                rc = dev->driver->pm->prepare(dev);
1841        return rc;
1842}
1843
1844static void iucv_pm_complete(struct device *dev)
1845{
1846#ifdef CONFIG_PM_DEBUG
1847        printk(KERN_INFO "iucv_pm_complete\n");
1848#endif
1849        if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
1850                dev->driver->pm->complete(dev);
1851}
1852
1853/**
1854 * iucv_path_table_empty() - determine if iucv path table is empty
1855 *
1856 * Returns 0 if there are still iucv pathes defined
1857 *         1 if there are no iucv pathes defined
1858 */
1859int iucv_path_table_empty(void)
1860{
1861        int i;
1862
1863        for (i = 0; i < iucv_max_pathid; i++) {
1864                if (iucv_path_table[i])
1865                        return 0;
1866        }
1867        return 1;
1868}
1869
/**
 * iucv_pm_freeze() - Freeze PM callback
 * @dev:        iucv-based device
 *
 * disable iucv interrupts
 * invoke callback function of the iucv-based driver
 * shut down iucv, if no iucv-pathes are established anymore
 *
 * Returns the driver freeze callback's result, or 0 if none exists.
 */
static int iucv_pm_freeze(struct device *dev)
{
	int cpu;
	int rc = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
	iucv_pm_state = IUCV_PM_FREEZING;
	/* Block iucv interrupts on every cpu that currently has them on. */
	for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
		smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
		rc = dev->driver->pm->freeze(dev);
	/* Shut iucv down completely once the last path is gone. */
	if (iucv_path_table_empty())
		iucv_disable();
	return rc;
}
1895
1896/**
1897 * iucv_pm_thaw() - Thaw PM callback
1898 * @dev:        iucv-based device
1899 *
1900 * make iucv ready for use again: allocate path table, declare interrupt buffers
1901 *                                and enable iucv interrupts
1902 * invoke callback function of the iucv-based driver
1903 */
1904static int iucv_pm_thaw(struct device *dev)
1905{
1906        int rc = 0;
1907
1908#ifdef CONFIG_PM_DEBUG
1909        printk(KERN_WARNING "iucv_pm_thaw\n");
1910#endif
1911        iucv_pm_state = IUCV_PM_THAWING;
1912        if (!iucv_path_table) {
1913                rc = iucv_enable();
1914                if (rc)
1915                        goto out;
1916        }
1917        if (cpus_empty(iucv_irq_cpumask)) {
1918                if (iucv_nonsmp_handler)
1919                        /* enable interrupts on one cpu */
1920                        iucv_allow_cpu(NULL);
1921                else
1922                        /* enable interrupts on all cpus */
1923                        iucv_setmask_mp();
1924        }
1925        if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
1926                rc = dev->driver->pm->thaw(dev);
1927out:
1928        return rc;
1929}
1930
1931/**
1932 * iucv_pm_restore() - Restore PM callback
1933 * @dev:        iucv-based device
1934 *
1935 * make iucv ready for use again: allocate path table, declare interrupt buffers
1936 *                                and enable iucv interrupts
1937 * invoke callback function of the iucv-based driver
1938 */
1939static int iucv_pm_restore(struct device *dev)
1940{
1941        int rc = 0;
1942
1943#ifdef CONFIG_PM_DEBUG
1944        printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
1945#endif
1946        if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
1947                pr_warning("Suspending Linux did not completely close all IUCV "
1948                        "connections\n");
1949        iucv_pm_state = IUCV_PM_RESTORING;
1950        if (cpus_empty(iucv_irq_cpumask)) {
1951                rc = iucv_query_maxconn();
1952                rc = iucv_enable();
1953                if (rc)
1954                        goto out;
1955        }
1956        if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
1957                rc = dev->driver->pm->restore(dev);
1958out:
1959        return rc;
1960}
1961
/**
 * iucv_init
 *
 * Allocates and initializes various data structures.
 *
 * Returns 0 on success, a negative errno otherwise. On any failure the
 * goto chain below unwinds exactly the resources acquired so far.
 */
static int __init iucv_init(void)
{
	int rc;
	int cpu;

	/* IUCV is only available when running as a z/VM guest. */
	if (!MACHINE_IS_VM) {
		rc = -EPROTONOSUPPORT;
		goto out;
	}
	rc = iucv_query_maxconn();
	if (rc)
		goto out;
	/* 0x4000 is the iucv external interrupt subcode. */
	rc = register_external_interrupt(0x4000, iucv_external_interrupt);
	if (rc)
		goto out;
	iucv_root = root_device_register("iucv");
	if (IS_ERR(iucv_root)) {
		rc = PTR_ERR(iucv_root);
		goto out_int;
	}

	/* Per-cpu interrupt and parameter buffers, node-local. */
	for_each_online_cpu(cpu) {
		/* Note: GFP_DMA used to get memory below 2G */
		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_irq_data[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}

		/* Allocate parameter blocks. */
		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}
		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
				  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
		if (!iucv_param_irq[cpu]) {
			rc = -ENOMEM;
			goto out_free;
		}

	}
	rc = register_hotcpu_notifier(&iucv_cpu_notifier);
	if (rc)
		goto out_free;
	rc = register_reboot_notifier(&iucv_reboot_notifier);
	if (rc)
		goto out_cpu;
	/* CP expects the well-known user id strings in EBCDIC. */
	ASCEBC(iucv_error_no_listener, 16);
	ASCEBC(iucv_error_no_memory, 16);
	ASCEBC(iucv_error_pathid, 16);
	iucv_available = 1;
	rc = bus_register(&iucv_bus);
	if (rc)
		goto out_reboot;
	return 0;

out_reboot:
	unregister_reboot_notifier(&iucv_reboot_notifier);
out_cpu:
	unregister_hotcpu_notifier(&iucv_cpu_notifier);
out_free:
	/* Free all cpus' buffers; kfree(NULL) is a no-op for unset slots. */
	for_each_possible_cpu(cpu) {
		kfree(iucv_param_irq[cpu]);
		iucv_param_irq[cpu] = NULL;
		kfree(iucv_param[cpu]);
		iucv_param[cpu] = NULL;
		kfree(iucv_irq_data[cpu]);
		iucv_irq_data[cpu] = NULL;
	}
	root_device_unregister(iucv_root);
out_int:
	unregister_external_interrupt(0x4000, iucv_external_interrupt);
out:
	return rc;
}
2046
/**
 * iucv_exit
 *
 * Frees everything allocated from iucv_init.
 */
static void __exit iucv_exit(void)
{
	struct iucv_irq_list *p, *n;
	int cpu;

	/* Discard any still-queued interrupt buffers under the queue lock. */
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
		kfree(p);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
		kfree(p);
	spin_unlock_irq(&iucv_queue_lock);
	unregister_reboot_notifier(&iucv_reboot_notifier);
	unregister_hotcpu_notifier(&iucv_cpu_notifier);
	/* Release per-cpu buffers; order mirrors the init cleanup path. */
	for_each_possible_cpu(cpu) {
		kfree(iucv_param_irq[cpu]);
		iucv_param_irq[cpu] = NULL;
		kfree(iucv_param[cpu]);
		iucv_param[cpu] = NULL;
		kfree(iucv_irq_data[cpu]);
		iucv_irq_data[cpu] = NULL;
	}
	root_device_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	unregister_external_interrupt(0x4000, iucv_external_interrupt);
}
2077
/* Run at subsystem init time so built-in iucv users find it ready. */
subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");
2084