linux/drivers/s390/cio/chsc.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *   S/390 common I/O routines -- channel subsystem call
   4 *
   5 *    Copyright IBM Corp. 1999,2012
   6 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
   7 *               Cornelia Huck (cornelia.huck@de.ibm.com)
   8 *               Arnd Bergmann (arndb@de.ibm.com)
   9 */
  10
  11#define KMSG_COMPONENT "cio"
  12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  13
  14#include <linux/module.h>
  15#include <linux/slab.h>
  16#include <linux/init.h>
  17#include <linux/device.h>
  18#include <linux/mutex.h>
  19#include <linux/pci.h>
  20
  21#include <asm/cio.h>
  22#include <asm/chpid.h>
  23#include <asm/chsc.h>
  24#include <asm/crw.h>
  25#include <asm/isc.h>
  26#include <asm/ebcdic.h>
  27
  28#include "css.h"
  29#include "cio.h"
  30#include "cio_debug.h"
  31#include "ioasm.h"
  32#include "chp.h"
  33#include "chsc.h"
  34
  35static void *sei_page;
  36static void *chsc_page;
  37static DEFINE_SPINLOCK(chsc_page_lock);
  38
  39/**
  40 * chsc_error_from_response() - convert a chsc response to an error
  41 * @response: chsc response code
  42 *
  43 * Returns an appropriate Linux error code for @response.
  44 */
  45int chsc_error_from_response(int response)
  46{
  47        switch (response) {
  48        case 0x0001:
  49                return 0;
  50        case 0x0002:
  51        case 0x0003:
  52        case 0x0006:
  53        case 0x0007:
  54        case 0x0008:
  55        case 0x000a:
  56        case 0x0104:
  57                return -EINVAL;
  58        case 0x0004:
  59                return -EOPNOTSUPP;
  60        case 0x000b:
  61        case 0x0107:            /* "Channel busy" for the op 0x003d */
  62                return -EBUSY;
  63        case 0x0100:
  64        case 0x0102:
  65                return -ENOMEM;
  66        default:
  67                return -EIO;
  68        }
  69}
  70EXPORT_SYMBOL_GPL(chsc_error_from_response);
  71
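     /*
      * Callers in this file follow a common pattern: issue the chsc
      * instruction on a request block and, if the instruction itself
      * succeeded, translate the response code, e.g.
      *
      *    if (chsc(area))
      *            return -EIO;
      *    return chsc_error_from_response(area->response.code);
      *
      * (see chsc_ssqd() and chsc_sadc() below).
      */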
  72struct chsc_ssd_area {
  73        struct chsc_header request;
  74        u16 :10;
  75        u16 ssid:2;
  76        u16 :4;
  77        u16 f_sch;        /* first subchannel */
  78        u16 :16;
  79        u16 l_sch;        /* last subchannel */
  80        u32 :32;
  81        struct chsc_header response;
  82        u32 :32;
  83        u8 sch_valid : 1;
  84        u8 dev_valid : 1;
  85        u8 st        : 3; /* subchannel type */
  86        u8 zeroes    : 3;
  87        u8  unit_addr;    /* unit address */
  88        u16 devno;        /* device number */
  89        u8 path_mask;
  90        u8 fla_valid_mask;
  91        u16 sch;          /* subchannel */
  92        u8 chpid[8];      /* chpids 0-7 */
  93        u16 fla[8];       /* full link addresses 0-7 */
  94} __packed __aligned(PAGE_SIZE);
  95
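     /*
      * chsc_get_ssd_info() - obtain the subchannel description ("ssd" in the
      * message text, request code 0x0004) for @schid and copy path mask,
      * full-link-address mask, chpids and full link addresses into @ssd.
      * Uses the shared chsc_page under chsc_page_lock.  Returns 0 on success,
      * -ENODEV if the subchannel does not exist or is not valid, or another
      * negative errno.
      */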
  96int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
  97{
  98        struct chsc_ssd_area *ssd_area;
  99        unsigned long flags;
 100        int ccode;
 101        int ret;
 102        int i;
 103        int mask;
 104
 105        spin_lock_irqsave(&chsc_page_lock, flags);
 106        memset(chsc_page, 0, PAGE_SIZE);
 107        ssd_area = chsc_page;
 108        ssd_area->request.length = 0x0010;
 109        ssd_area->request.code = 0x0004;
 110        ssd_area->ssid = schid.ssid;
 111        ssd_area->f_sch = schid.sch_no;
 112        ssd_area->l_sch = schid.sch_no;
 113
 114        ccode = chsc(ssd_area);
 115        /* Check response. */
 116        if (ccode > 0) {
 117                ret = (ccode == 3) ? -ENODEV : -EBUSY;
 118                goto out;
 119        }
 120        ret = chsc_error_from_response(ssd_area->response.code);
 121        if (ret != 0) {
 122                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
 123                              schid.ssid, schid.sch_no,
 124                              ssd_area->response.code);
 125                goto out;
 126        }
 127        if (!ssd_area->sch_valid) {
 128                ret = -ENODEV;
 129                goto out;
 130        }
 131        /* Copy data */
 132        ret = 0;
 133        memset(ssd, 0, sizeof(struct chsc_ssd_info));
 134        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
 135            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
 136                goto out;
 137        ssd->path_mask = ssd_area->path_mask;
 138        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
 139        for (i = 0; i < 8; i++) {
 140                mask = 0x80 >> i;
 141                if (ssd_area->path_mask & mask) {
 142                        chp_id_init(&ssd->chpid[i]);
 143                        ssd->chpid[i].id = ssd_area->chpid[i];
 144                }
 145                if (ssd_area->fla_valid_mask & mask)
 146                        ssd->fla[i] = ssd_area->fla[i];
 147        }
 148out:
 149        spin_unlock_irqrestore(&chsc_page_lock, flags);
 150        return ret;
 151}
 152
 153/**
 154 * chsc_ssqd() - store subchannel QDIO data (SSQD)
 155 * @schid: id of the subchannel on which SSQD is performed
 156 * @ssqd: request and response block for SSQD
 157 *
 158 * Returns 0 on success.
 159 */
 160int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
 161{
 162        memset(ssqd, 0, sizeof(*ssqd));
 163        ssqd->request.length = 0x0010;
 164        ssqd->request.code = 0x0024;
 165        ssqd->first_sch = schid.sch_no;
 166        ssqd->last_sch = schid.sch_no;
 167        ssqd->ssid = schid.ssid;
 168
 169        if (chsc(ssqd))
 170                return -EIO;
 171
 172        return chsc_error_from_response(ssqd->response.code);
 173}
 174EXPORT_SYMBOL_GPL(chsc_ssqd);
 175
 176/**
 177 * chsc_sadc() - set adapter device controls (SADC)
 178 * @schid: id of the subchannel on which SADC is performed
 179 * @scssc: request and response block for SADC
 180 * @summary_indicator_addr: summary indicator address
 181 * @subchannel_indicator_addr: subchannel indicator address
 182 *
 183 * Returns 0 on success.
 184 */
 185int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
 186              u64 summary_indicator_addr, u64 subchannel_indicator_addr)
 187{
 188        memset(scssc, 0, sizeof(*scssc));
 189        scssc->request.length = 0x0fe0;
 190        scssc->request.code = 0x0021;
 191        scssc->operation_code = 0;
 192
 193        scssc->summary_indicator_addr = summary_indicator_addr;
 194        scssc->subchannel_indicator_addr = subchannel_indicator_addr;
 195
 196        scssc->ks = PAGE_DEFAULT_KEY >> 4;
 197        scssc->kc = PAGE_DEFAULT_KEY >> 4;
 198        scssc->isc = QDIO_AIRQ_ISC;
 199        scssc->schid = schid;
 200
 201        /* enable the time delay disablement facility */
 202        if (css_general_characteristics.aif_tdd)
 203                scssc->word_with_d_bit = 0x10000000;
 204
 205        if (chsc(scssc))
 206                return -EIO;
 207
 208        return chsc_error_from_response(scssc->response.code);
 209}
 210EXPORT_SYMBOL_GPL(chsc_sadc);
 211
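     /*
      * Per-subchannel callback used by chsc_chp_offline(): notify the
      * subchannel driver that a channel path went away (CHP_OFFLINE).  If the
      * driver cannot cope, clear the logical path mask and schedule the
      * subchannel for re-evaluation.
      */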
 212static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
 213{
 214        spin_lock_irq(sch->lock);
 215        if (sch->driver && sch->driver->chp_event)
 216                if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
 217                        goto out_unreg;
 218        spin_unlock_irq(sch->lock);
 219        return 0;
 220
 221out_unreg:
 222        sch->lpm = 0;
 223        spin_unlock_irq(sch->lock);
 224        css_schedule_eval(sch->schid);
 225        return 0;
 226}
 227
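     /*
      * A channel path has gone offline: refresh its description and let every
      * subchannel that might use it react (see s390_subchannel_remove_chpid()
      * above).  Nothing to do if the path was not in use.
      */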
 228void chsc_chp_offline(struct chp_id chpid)
 229{
 230        struct channel_path *chp = chpid_to_chp(chpid);
 231        struct chp_link link;
 232        char dbf_txt[15];
 233
 234        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
 235        CIO_TRACE_EVENT(2, dbf_txt);
 236
 237        if (chp_get_status(chpid) <= 0)
 238                return;
 239        memset(&link, 0, sizeof(struct chp_link));
 240        link.chpid = chpid;
 241        /* Wait until previous actions have settled. */
 242        css_wait_for_slow_path();
 243
 244        mutex_lock(&chp->lock);
 245        chp_update_desc(chp);
 246        mutex_unlock(&chp->lock);
 247
 248        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
 249}
 250
 251static int __s390_process_res_acc(struct subchannel *sch, void *data)
 252{
 253        spin_lock_irq(sch->lock);
 254        if (sch->driver && sch->driver->chp_event)
 255                sch->driver->chp_event(sch, data, CHP_ONLINE);
 256        spin_unlock_irq(sch->lock);
 257
 258        return 0;
 259}
 260
 261static void s390_process_res_acc(struct chp_link *link)
 262{
 263        char dbf_txt[15];
 264
 265        sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
 266                link->chpid.id);
  267        CIO_TRACE_EVENT(2, dbf_txt);
 268        if (link->fla != 0) {
 269                sprintf(dbf_txt, "fla%x", link->fla);
  270                CIO_TRACE_EVENT(2, dbf_txt);
 271        }
 272        /* Wait until previous actions have settled. */
 273        css_wait_for_slow_path();
 274        /*
 275         * I/O resources may have become accessible.
 276         * Scan through all subchannels that may be concerned and
 277         * do a validation on those.
  278         * The more information we have, the less scanning we will
  279         * have to do.
 280         */
 281        for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
 282        css_schedule_reprobe();
 283}
 284
 285struct chsc_sei_nt0_area {
 286        u8  flags;
 287        u8  vf;                         /* validity flags */
 288        u8  rs;                         /* reporting source */
 289        u8  cc;                         /* content code */
 290        u16 fla;                        /* full link address */
 291        u16 rsid;                       /* reporting source id */
 292        u32 reserved1;
 293        u32 reserved2;
 294        /* ccdf has to be big enough for a link-incident record */
 295        u8  ccdf[PAGE_SIZE - 24 - 16];  /* content-code dependent field */
 296} __packed;
 297
 298struct chsc_sei_nt2_area {
 299        u8  flags;                      /* p and v bit */
 300        u8  reserved1;
 301        u8  reserved2;
 302        u8  cc;                         /* content code */
 303        u32 reserved3[13];
 304        u8  ccdf[PAGE_SIZE - 24 - 56];  /* content-code dependent field */
 305} __packed;
 306
 307#define CHSC_SEI_NT0    (1ULL << 63)
 308#define CHSC_SEI_NT2    (1ULL << 61)
 309
 310struct chsc_sei {
 311        struct chsc_header request;
 312        u32 reserved1;
 313        u64 ntsm;                       /* notification type mask */
 314        struct chsc_header response;
 315        u32 :24;
 316        u8 nt;
 317        union {
 318                struct chsc_sei_nt0_area nt0_area;
 319                struct chsc_sei_nt2_area nt2_area;
 320                u8 nt_area[PAGE_SIZE - 24];
 321        } u;
 322} __packed __aligned(PAGE_SIZE);
 323
 324/*
 325 * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
 326 */
 327
 328#define ND_VALIDITY_VALID       0
 329#define ND_VALIDITY_OUTDATED    1
 330#define ND_VALIDITY_INVALID     2
 331
 332struct node_descriptor {
 333        /* Flags. */
 334        union {
 335                struct {
 336                        u32 validity:3;
 337                        u32 reserved:5;
 338                } __packed;
 339                u8 byte0;
 340        } __packed;
 341
 342        /* Node parameters. */
 343        u32 params:24;
 344
 345        /* Node ID. */
 346        char type[6];
 347        char model[3];
 348        char manufacturer[3];
 349        char plant[2];
 350        char seq[12];
 351        u16 tag;
 352} __packed;
 353
 354/*
 355 * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
 356 */
 357
 358#define LIR_IQ_CLASS_INFO               0
 359#define LIR_IQ_CLASS_DEGRADED           1
 360#define LIR_IQ_CLASS_NOT_OPERATIONAL    2
 361
 362struct lir {
 363        struct {
 364                u32 null:1;
 365                u32 reserved:3;
 366                u32 class:2;
 367                u32 reserved2:2;
 368        } __packed iq;
 369        u32 ic:8;
 370        u32 reserved:16;
 371        struct node_descriptor incident_node;
 372        struct node_descriptor attached_node;
 373        u8 reserved2[32];
 374} __packed;
 375
 376#define PARAMS_LEN      10      /* PARAMS=xx,xxxxxx */
 377#define NODEID_LEN      35      /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
 378
  379/* Copy EBCDIC text, convert to ASCII and optionally add a delimiter. */
 380static char *store_ebcdic(char *dest, const char *src, unsigned long len,
 381                          char delim)
 382{
 383        memcpy(dest, src, len);
 384        EBCASC(dest, len);
 385
 386        if (delim)
 387                dest[len++] = delim;
 388
 389        return dest + len;
 390}
 391
 392/* Format node ID and parameters for output in LIR log message. */
 393static void format_node_data(char *params, char *id, struct node_descriptor *nd)
 394{
 395        memset(params, 0, PARAMS_LEN);
 396        memset(id, 0, NODEID_LEN);
 397
 398        if (nd->validity != ND_VALIDITY_VALID) {
 399                strncpy(params, "n/a", PARAMS_LEN - 1);
 400                strncpy(id, "n/a", NODEID_LEN - 1);
 401                return;
 402        }
 403
 404        /* PARAMS=xx,xxxxxx */
 405        snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
 406        /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
 407        id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
 408        id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
 409        id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
 410        id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
 411        id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
 412        sprintf(id, "%04X", nd->tag);
 413}
 414
 415static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
 416{
 417        struct lir *lir = (struct lir *) &sei_area->ccdf;
 418        char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
 419             aunodeid[NODEID_LEN];
 420
 421        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
 422                      sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
 423
 424        /* Ignore NULL Link Incident Records. */
 425        if (lir->iq.null)
 426                return;
 427
 428        /* Inform user that a link requires maintenance actions because it has
 429         * become degraded or not operational. Note that this log message is
 430         * the primary intention behind a Link Incident Record. */
 431
 432        format_node_data(iuparams, iunodeid, &lir->incident_node);
 433        format_node_data(auparams, aunodeid, &lir->attached_node);
 434
 435        switch (lir->iq.class) {
 436        case LIR_IQ_CLASS_DEGRADED:
 437                pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
 438                        "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
 439                        sei_area->rs, sei_area->rsid, lir->ic, iuparams,
 440                        iunodeid, auparams, aunodeid);
 441                break;
 442        case LIR_IQ_CLASS_NOT_OPERATIONAL:
 443                pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
 444                       "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
 445                       sei_area->rs, sei_area->rsid, lir->ic, iuparams,
 446                       iunodeid, auparams, aunodeid);
 447                break;
 448        default:
 449                break;
 450        }
 451}
 452
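     /*
      * Content code 2: I/O-resource-accessibility event.  The reporting
      * source id names a channel path; make sure a channel_path object exists
      * (or refresh its description) and re-validate the subchannels reachable
      * through it, narrowed by the (partial) link address when one is
      * reported in the vf/fla fields.
      */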
 453static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
 454{
 455        struct channel_path *chp;
 456        struct chp_link link;
 457        struct chp_id chpid;
 458        int status;
 459
 460        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
 461                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
 462        if (sei_area->rs != 4)
 463                return;
 464        chp_id_init(&chpid);
 465        chpid.id = sei_area->rsid;
 466        /* allocate a new channel path structure, if needed */
 467        status = chp_get_status(chpid);
 468        if (!status)
 469                return;
 470
 471        if (status < 0) {
 472                chp_new(chpid);
 473        } else {
 474                chp = chpid_to_chp(chpid);
 475                mutex_lock(&chp->lock);
 476                chp_update_desc(chp);
 477                mutex_unlock(&chp->lock);
 478        }
 479        memset(&link, 0, sizeof(struct chp_link));
 480        link.chpid = chpid;
 481        if ((sei_area->vf & 0xc0) != 0) {
 482                link.fla = sei_area->fla;
 483                if ((sei_area->vf & 0xc0) == 0xc0)
 484                        /* full link address */
 485                        link.fla_mask = 0xffff;
 486                else
 487                        /* link address */
 488                        link.fla_mask = 0xff00;
 489        }
 490        s390_process_res_acc(&link);
 491}
 492
 493static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
 494{
 495        struct channel_path *chp;
 496        struct chp_id chpid;
 497        u8 *data;
 498        int num;
 499
 500        CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
 501        if (sei_area->rs != 0)
 502                return;
 503        data = sei_area->ccdf;
 504        chp_id_init(&chpid);
 505        for (num = 0; num <= __MAX_CHPID; num++) {
 506                if (!chp_test_bit(data, num))
 507                        continue;
 508                chpid.id = num;
 509
 510                CIO_CRW_EVENT(4, "Update information for channel path "
 511                              "%x.%02x\n", chpid.cssid, chpid.id);
 512                chp = chpid_to_chp(chpid);
 513                if (!chp) {
 514                        chp_new(chpid);
 515                        continue;
 516                }
 517                mutex_lock(&chp->lock);
 518                chp_update_desc(chp);
 519                mutex_unlock(&chp->lock);
 520        }
 521}
 522
 523struct chp_config_data {
 524        u8 map[32];
 525        u8 op;
 526        u8 pc;
 527};
 528
 529static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
 530{
 531        struct chp_config_data *data;
 532        struct chp_id chpid;
 533        int num;
 534        char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
 535
 536        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
 537        if (sei_area->rs != 0)
 538                return;
 539        data = (struct chp_config_data *) &(sei_area->ccdf);
 540        chp_id_init(&chpid);
 541        for (num = 0; num <= __MAX_CHPID; num++) {
 542                if (!chp_test_bit(data->map, num))
 543                        continue;
 544                chpid.id = num;
 545                pr_notice("Processing %s for channel path %x.%02x\n",
 546                          events[data->op], chpid.cssid, chpid.id);
 547                switch (data->op) {
 548                case 0:
 549                        chp_cfg_schedule(chpid, 1);
 550                        break;
 551                case 1:
 552                        chp_cfg_schedule(chpid, 0);
 553                        break;
 554                case 2:
 555                        chp_cfg_cancel_deconfigure(chpid);
 556                        break;
 557                }
 558        }
 559}
 560
 561static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
 562{
 563        int ret;
 564
 565        CIO_CRW_EVENT(4, "chsc: scm change notification\n");
 566        if (sei_area->rs != 7)
 567                return;
 568
 569        ret = scm_update_information();
 570        if (ret)
 571                CIO_CRW_EVENT(0, "chsc: updating change notification"
 572                              " failed (rc=%d).\n", ret);
 573}
 574
 575static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
 576{
 577        int ret;
 578
 579        CIO_CRW_EVENT(4, "chsc: scm available information\n");
 580        if (sei_area->rs != 7)
 581                return;
 582
 583        ret = scm_process_availability_information();
 584        if (ret)
 585                CIO_CRW_EVENT(0, "chsc: process availability information"
 586                              " failed (rc=%d).\n", ret);
 587}
 588
 589static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 590{
 591        switch (sei_area->cc) {
 592        case 1:
 593                zpci_event_error(sei_area->ccdf);
 594                break;
 595        case 2:
 596                zpci_event_availability(sei_area->ccdf);
 597                break;
 598        default:
 599                CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
 600                              sei_area->cc);
 601                break;
 602        }
 603}
 604
 605static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
 606{
 607        /* which kind of information was stored? */
 608        switch (sei_area->cc) {
  609        case 1: /* link incident */
 610                chsc_process_sei_link_incident(sei_area);
 611                break;
 612        case 2: /* i/o resource accessibility */
 613                chsc_process_sei_res_acc(sei_area);
 614                break;
 615        case 7: /* channel-path-availability information */
 616                chsc_process_sei_chp_avail(sei_area);
 617                break;
 618        case 8: /* channel-path-configuration notification */
 619                chsc_process_sei_chp_config(sei_area);
 620                break;
 621        case 12: /* scm change notification */
 622                chsc_process_sei_scm_change(sei_area);
 623                break;
 624        case 14: /* scm available notification */
 625                chsc_process_sei_scm_avail(sei_area);
 626                break;
 627        default: /* other stuff */
 628                CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
 629                              sei_area->cc);
 630                break;
 631        }
 632
 633        /* Check if we might have lost some information. */
 634        if (sei_area->flags & 0x40) {
 635                CIO_CRW_EVENT(2, "chsc: event overflow\n");
 636                css_schedule_eval_all();
 637        }
 638}
 639
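     /*
      * Issue "store event information" (request code 0x000e) in a loop until
      * the pending-information flag (0x80) is no longer set, dispatching each
      * stored event to the NT0 or NT2 handler.  If the machine rejects the
      * notification-type mask (response code 0x0003), retry without it and
      * remember that via ntsm_unsupported.
      */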
 640static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
 641{
 642        static int ntsm_unsupported;
 643
 644        while (true) {
 645                memset(sei, 0, sizeof(*sei));
 646                sei->request.length = 0x0010;
 647                sei->request.code = 0x000e;
 648                if (!ntsm_unsupported)
 649                        sei->ntsm = ntsm;
 650
 651                if (chsc(sei))
 652                        break;
 653
 654                if (sei->response.code != 0x0001) {
 655                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
 656                                      sei->response.code, sei->ntsm);
 657
 658                        if (sei->response.code == 3 && sei->ntsm) {
 659                                /* Fallback for old firmware. */
 660                                ntsm_unsupported = 1;
 661                                continue;
 662                        }
 663                        break;
 664                }
 665
 666                CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
 667                switch (sei->nt) {
 668                case 0:
 669                        chsc_process_sei_nt0(&sei->u.nt0_area);
 670                        break;
 671                case 2:
 672                        chsc_process_sei_nt2(&sei->u.nt2_area);
 673                        break;
 674                default:
 675                        CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
 676                        break;
 677                }
 678
 679                if (!(sei->u.nt0_area.flags & 0x80))
 680                        break;
 681        }
 682}
 683
 684/*
 685 * Handle channel subsystem related CRWs.
 686 * Use store event information to find out what's going on.
 687 *
 688 * Note: Access to sei_page is serialized through machine check handler
 689 * thread, so no need for locking.
 690 */
 691static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 692{
 693        struct chsc_sei *sei = sei_page;
 694
 695        if (overflow) {
 696                css_schedule_eval_all();
 697                return;
 698        }
 699        CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
 700                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
 701                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
 702                      crw0->erc, crw0->rsid);
 703
 704        CIO_TRACE_EVENT(2, "prcss");
 705        chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
 706}
 707
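     /*
      * A channel path has become available: refresh its description and
      * re-validate the subchannels that may now have an additional path,
      * mirroring the resource-accessibility handling above.
      */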
 708void chsc_chp_online(struct chp_id chpid)
 709{
 710        struct channel_path *chp = chpid_to_chp(chpid);
 711        struct chp_link link;
 712        char dbf_txt[15];
 713
 714        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 715        CIO_TRACE_EVENT(2, dbf_txt);
 716
 717        if (chp_get_status(chpid) != 0) {
 718                memset(&link, 0, sizeof(struct chp_link));
 719                link.chpid = chpid;
 720                /* Wait until previous actions have settled. */
 721                css_wait_for_slow_path();
 722
 723                mutex_lock(&chp->lock);
 724                chp_update_desc(chp);
 725                mutex_unlock(&chp->lock);
 726
 727                for_each_subchannel_staged(__s390_process_res_acc, NULL,
 728                                           &link);
 729                css_schedule_reprobe();
 730        }
 731}
 732
 733static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 734                                         struct chp_id chpid, int on)
 735{
 736        unsigned long flags;
 737        struct chp_link link;
 738
 739        memset(&link, 0, sizeof(struct chp_link));
 740        link.chpid = chpid;
 741        spin_lock_irqsave(sch->lock, flags);
 742        if (sch->driver && sch->driver->chp_event)
 743                sch->driver->chp_event(sch, &link,
 744                                       on ? CHP_VARY_ON : CHP_VARY_OFF);
 745        spin_unlock_irqrestore(sch->lock, flags);
 746}
 747
 748static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
 749{
 750        struct chp_id *chpid = data;
 751
 752        __s390_subchannel_vary_chpid(sch, *chpid, 0);
 753        return 0;
 754}
 755
 756static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
 757{
 758        struct chp_id *chpid = data;
 759
 760        __s390_subchannel_vary_chpid(sch, *chpid, 1);
 761        return 0;
 762}
 763
 764/**
 765 * chsc_chp_vary - propagate channel-path vary operation to subchannels
  766 * @chpid: channel-path ID
 767 * @on: non-zero for vary online, zero for vary offline
 768 */
 769int chsc_chp_vary(struct chp_id chpid, int on)
 770{
 771        struct channel_path *chp = chpid_to_chp(chpid);
 772
 773        /* Wait until previous actions have settled. */
 774        css_wait_for_slow_path();
 775        /*
 776         * Redo PathVerification on the devices the chpid connects to
 777         */
 778        if (on) {
 779                /* Try to update the channel path description. */
 780                chp_update_desc(chp);
 781                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
 782                                           NULL, &chpid);
 783                css_schedule_reprobe();
 784        } else
 785                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
 786                                           NULL, &chpid);
 787
 788        return 0;
 789}
 790
 791static void
 792chsc_remove_cmg_attr(struct channel_subsystem *css)
 793{
 794        int i;
 795
 796        for (i = 0; i <= __MAX_CHPID; i++) {
 797                if (!css->chps[i])
 798                        continue;
 799                chp_remove_cmg_attr(css->chps[i]);
 800        }
 801}
 802
 803static int
 804chsc_add_cmg_attr(struct channel_subsystem *css)
 805{
 806        int i, ret;
 807
 808        ret = 0;
 809        for (i = 0; i <= __MAX_CHPID; i++) {
 810                if (!css->chps[i])
 811                        continue;
 812                ret = chp_add_cmg_attr(css->chps[i]);
 813                if (ret)
 814                        goto cleanup;
 815        }
 816        return ret;
 817cleanup:
 818        for (--i; i >= 0; i--) {
 819                if (!css->chps[i])
 820                        continue;
 821                chp_remove_cmg_attr(css->chps[i]);
 822        }
 823        return ret;
 824}
 825
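     /*
      * Build and issue the chsc request (code 0x0016) that switches channel
      * measurement for this channel subsystem on (operation code 0) or off
      * (operation code 1), passing the two measurement block pages allocated
      * by chsc_secm().
      */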
 826int __chsc_do_secm(struct channel_subsystem *css, int enable)
 827{
 828        struct {
 829                struct chsc_header request;
 830                u32 operation_code : 2;
 831                u32 : 30;
 832                u32 key : 4;
 833                u32 : 28;
 834                u32 zeroes1;
 835                u32 cub_addr1;
 836                u32 zeroes2;
 837                u32 cub_addr2;
 838                u32 reserved[13];
 839                struct chsc_header response;
 840                u32 status : 8;
 841                u32 : 4;
 842                u32 fmt : 4;
 843                u32 : 16;
 844        } *secm_area;
 845        unsigned long flags;
 846        int ret, ccode;
 847
 848        spin_lock_irqsave(&chsc_page_lock, flags);
 849        memset(chsc_page, 0, PAGE_SIZE);
 850        secm_area = chsc_page;
 851        secm_area->request.length = 0x0050;
 852        secm_area->request.code = 0x0016;
 853
 854        secm_area->key = PAGE_DEFAULT_KEY >> 4;
 855        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
 856        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
 857
 858        secm_area->operation_code = enable ? 0 : 1;
 859
 860        ccode = chsc(secm_area);
 861        if (ccode > 0) {
 862                ret = (ccode == 3) ? -ENODEV : -EBUSY;
 863                goto out;
 864        }
 865
 866        switch (secm_area->response.code) {
 867        case 0x0102:
 868        case 0x0103:
 869                ret = -EINVAL;
 870                break;
 871        default:
 872                ret = chsc_error_from_response(secm_area->response.code);
 873        }
 874        if (ret != 0)
 875                CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
 876                              secm_area->response.code);
 877out:
 878        spin_unlock_irqrestore(&chsc_page_lock, flags);
 879        return ret;
 880}
 881
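     /*
      * Switch channel measurement on or off: allocate the two measurement
      * block pages when enabling, perform the secm operation and add or
      * remove the per-channel-path measurement attributes accordingly.  The
      * pages are freed again whenever measurement ends up disabled.
      */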
 882int
 883chsc_secm(struct channel_subsystem *css, int enable)
 884{
 885        int ret;
 886
 887        if (enable && !css->cm_enabled) {
 888                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 889                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 890                if (!css->cub_addr1 || !css->cub_addr2) {
 891                        free_page((unsigned long)css->cub_addr1);
 892                        free_page((unsigned long)css->cub_addr2);
 893                        return -ENOMEM;
 894                }
 895        }
 896        ret = __chsc_do_secm(css, enable);
 897        if (!ret) {
 898                css->cm_enabled = enable;
 899                if (css->cm_enabled) {
 900                        ret = chsc_add_cmg_attr(css);
 901                        if (ret) {
 902                                __chsc_do_secm(css, 0);
 903                                css->cm_enabled = 0;
 904                        }
 905                } else
 906                        chsc_remove_cmg_attr(css);
 907        }
 908        if (!css->cm_enabled) {
 909                free_page((unsigned long)css->cub_addr1);
 910                free_page((unsigned long)css->cub_addr2);
 911        }
 912        return ret;
 913}
 914
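     /*
      * Issue "store channel-path description" (request code 0x0002, "scpd" in
      * the message text) for @chpid into the caller-provided @page, using the
      * requested command and response formats.  The format/characteristics
      * checks up front reject combinations the machine does not support.
      */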
 915int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
 916                                     int c, int m, void *page)
 917{
 918        struct chsc_scpd *scpd_area;
 919        int ccode, ret;
 920
 921        if ((rfmt == 1 || rfmt == 0) && c == 1 &&
 922            !css_general_characteristics.fcs)
 923                return -EINVAL;
 924        if ((rfmt == 2) && !css_general_characteristics.cib)
 925                return -EINVAL;
 926        if ((rfmt == 3) && !css_general_characteristics.util_str)
 927                return -EINVAL;
 928
 929        memset(page, 0, PAGE_SIZE);
 930        scpd_area = page;
 931        scpd_area->request.length = 0x0010;
 932        scpd_area->request.code = 0x0002;
 933        scpd_area->cssid = chpid.cssid;
 934        scpd_area->first_chpid = chpid.id;
 935        scpd_area->last_chpid = chpid.id;
 936        scpd_area->m = m;
 937        scpd_area->c = c;
 938        scpd_area->fmt = fmt;
 939        scpd_area->rfmt = rfmt;
 940
 941        ccode = chsc(scpd_area);
 942        if (ccode > 0)
 943                return (ccode == 3) ? -ENODEV : -EBUSY;
 944
 945        ret = chsc_error_from_response(scpd_area->response.code);
 946        if (ret)
 947                CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
 948                              scpd_area->response.code);
 949        return ret;
 950}
 951EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
 952
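     /*
      * Generate the chsc_determine_fmt{0,1,3}_channel_path_desc() wrappers:
      * each one serializes on chsc_page_lock, runs the scpd request with the
      * matching response format and copies the descriptor into *desc.
      */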
 953#define chsc_det_chp_desc(FMT, c)                                       \
 954int chsc_determine_fmt##FMT##_channel_path_desc(                        \
 955        struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc)   \
 956{                                                                       \
 957        struct chsc_scpd *scpd_area;                                    \
 958        unsigned long flags;                                            \
 959        int ret;                                                        \
 960                                                                        \
 961        spin_lock_irqsave(&chsc_page_lock, flags);                      \
 962        scpd_area = chsc_page;                                          \
 963        ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0,     \
 964                                               scpd_area);              \
 965        if (ret)                                                        \
 966                goto out;                                               \
 967                                                                        \
 968        memcpy(desc, scpd_area->data, sizeof(*desc));                   \
 969out:                                                                    \
 970        spin_unlock_irqrestore(&chsc_page_lock, flags);                 \
 971        return ret;                                                     \
 972}
 973
 974chsc_det_chp_desc(0, 0)
 975chsc_det_chp_desc(1, 1)
 976chsc_det_chp_desc(3, 0)
 977
 978static void
 979chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
 980                          struct cmg_chars *chars)
 981{
 982        int i, mask;
 983
 984        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
 985                mask = 0x80 >> (i + 3);
 986                if (cmcv & mask)
 987                        chp->cmg_chars.values[i] = chars->values[i];
 988                else
 989                        chp->cmg_chars.values[i] = 0;
 990        }
 991}
 992
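     /*
      * Retrieve the channel-measurement characteristics for a single channel
      * path (request code 0x0022, "scmc" in the message text).  chp->cmg and
      * chp->shared are preset to -1 ("unknown") and only updated when the
      * response is valid; for cmg 2 and 3 the cmg-dependent characteristics
      * are copied as far as the cmcv mask allows.
      */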
 993int chsc_get_channel_measurement_chars(struct channel_path *chp)
 994{
 995        unsigned long flags;
 996        int ccode, ret;
 997
 998        struct {
 999                struct chsc_header request;
1000                u32 : 24;
1001                u32 first_chpid : 8;
1002                u32 : 24;
1003                u32 last_chpid : 8;
1004                u32 zeroes1;
1005                struct chsc_header response;
1006                u32 zeroes2;
1007                u32 not_valid : 1;
1008                u32 shared : 1;
1009                u32 : 22;
1010                u32 chpid : 8;
1011                u32 cmcv : 5;
1012                u32 : 11;
1013                u32 cmgq : 8;
1014                u32 cmg : 8;
1015                u32 zeroes3;
1016                u32 data[NR_MEASUREMENT_CHARS];
1017        } *scmc_area;
1018
1019        chp->shared = -1;
1020        chp->cmg = -1;
1021
1022        if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
1023                return -EINVAL;
1024
1025        spin_lock_irqsave(&chsc_page_lock, flags);
1026        memset(chsc_page, 0, PAGE_SIZE);
1027        scmc_area = chsc_page;
1028        scmc_area->request.length = 0x0010;
1029        scmc_area->request.code = 0x0022;
1030        scmc_area->first_chpid = chp->chpid.id;
1031        scmc_area->last_chpid = chp->chpid.id;
1032
1033        ccode = chsc(scmc_area);
1034        if (ccode > 0) {
1035                ret = (ccode == 3) ? -ENODEV : -EBUSY;
1036                goto out;
1037        }
1038
1039        ret = chsc_error_from_response(scmc_area->response.code);
1040        if (ret) {
1041                CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
1042                              scmc_area->response.code);
1043                goto out;
1044        }
1045        if (scmc_area->not_valid)
1046                goto out;
1047
1048        chp->cmg = scmc_area->cmg;
1049        chp->shared = scmc_area->shared;
1050        if (chp->cmg != 2 && chp->cmg != 3) {
1051                /* No cmg-dependent data. */
1052                goto out;
1053        }
1054        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
1055                                  (struct cmg_chars *) &scmc_area->data);
1056out:
1057        spin_unlock_irqrestore(&chsc_page_lock, flags);
1058        return ret;
1059}
1060
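     /*
      * Allocate the page used for store-event-information (sei_page) and the
      * general scratch page (chsc_page) and register chsc_process_crw() for
      * channel-subsystem CRWs.
      */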
1061int __init chsc_init(void)
1062{
1063        int ret;
1064
1065        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1066        chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1067        if (!sei_page || !chsc_page) {
1068                ret = -ENOMEM;
1069                goto out_err;
1070        }
1071        ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
1072        if (ret)
1073                goto out_err;
1074        return ret;
1075out_err:
1076        free_page((unsigned long)chsc_page);
1077        free_page((unsigned long)sei_page);
1078        return ret;
1079}
1080
1081void __init chsc_init_cleanup(void)
1082{
1083        crw_unregister_handler(CRW_RSC_CSS);
1084        free_page((unsigned long)chsc_page);
1085        free_page((unsigned long)sei_page);
1086}
1087
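     /*
      * Fill in and issue the chsc request (code 0x0031) that enables the
      * facility selected by @operation_code.  A response code of 0x0101 is
      * reported as -EOPNOTSUPP (facility not available on this machine).
      */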
1088int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
1089{
1090        int ret;
1091
1092        sda_area->request.length = 0x0400;
1093        sda_area->request.code = 0x0031;
1094        sda_area->operation_code = operation_code;
1095
1096        ret = chsc(sda_area);
1097        if (ret > 0) {
1098                ret = (ret == 3) ? -ENODEV : -EBUSY;
1099                goto out;
1100        }
1101
1102        switch (sda_area->response.code) {
1103        case 0x0101:
1104                ret = -EOPNOTSUPP;
1105                break;
1106        default:
1107                ret = chsc_error_from_response(sda_area->response.code);
1108        }
1109out:
1110        return ret;
1111}
1112
1113int chsc_enable_facility(int operation_code)
1114{
1115        struct chsc_sda_area *sda_area;
1116        unsigned long flags;
1117        int ret;
1118
1119        spin_lock_irqsave(&chsc_page_lock, flags);
1120        memset(chsc_page, 0, PAGE_SIZE);
1121        sda_area = chsc_page;
1122
1123        ret = __chsc_enable_facility(sda_area, operation_code);
1124        if (ret != 0)
1125                CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
1126                              operation_code, sda_area->response.code);
1127
1128        spin_unlock_irqrestore(&chsc_page_lock, flags);
1129        return ret;
1130}
1131
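     /*
      * Retrieve a channel-subsystem id: issue request code 0x0034 ("sdcal" in
      * the message text) with address type 4 and return the cssid stored at
      * position @idx of the response list, or -ENODEV if the response does
      * not contain that many entries.
      */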
1132int __init chsc_get_cssid(int idx)
1133{
1134        struct {
1135                struct chsc_header request;
1136                u8 atype;
1137                u32 : 24;
1138                u32 reserved1[6];
1139                struct chsc_header response;
1140                u32 reserved2[3];
1141                struct {
1142                        u8 cssid;
1143                        u32 : 24;
1144                } list[0];
1145        } *sdcal_area;
1146        int ret;
1147
1148        spin_lock_irq(&chsc_page_lock);
1149        memset(chsc_page, 0, PAGE_SIZE);
1150        sdcal_area = chsc_page;
1151        sdcal_area->request.length = 0x0020;
1152        sdcal_area->request.code = 0x0034;
1153        sdcal_area->atype = 4;
1154
1155        ret = chsc(sdcal_area);
1156        if (ret) {
1157                ret = (ret == 3) ? -ENODEV : -EBUSY;
1158                goto exit;
1159        }
1160
1161        ret = chsc_error_from_response(sdcal_area->response.code);
1162        if (ret) {
1163                CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
1164                              sdcal_area->response.code);
1165                goto exit;
1166        }
1167
1168        if ((addr_t) &sdcal_area->list[idx] <
1169            (addr_t) &sdcal_area->response + sdcal_area->response.length)
1170                ret = sdcal_area->list[idx].cssid;
1171        else
1172                ret = -ENODEV;
1173exit:
1174        spin_unlock_irq(&chsc_page_lock);
1175        return ret;
1176}
1177
1178struct css_general_char css_general_characteristics;
1179struct css_chsc_char css_chsc_characteristics;
1180
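     /*
      * Read the general and chsc characteristic bits of this channel
      * subsystem (request code 0x0010) into the exported
      * css_general_characteristics and css_chsc_characteristics structures.
      */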
1181int __init
1182chsc_determine_css_characteristics(void)
1183{
1184        unsigned long flags;
1185        int result;
1186        struct {
1187                struct chsc_header request;
1188                u32 reserved1;
1189                u32 reserved2;
1190                u32 reserved3;
1191                struct chsc_header response;
1192                u32 reserved4;
1193                u32 general_char[510];
1194                u32 chsc_char[508];
1195        } *scsc_area;
1196
1197        spin_lock_irqsave(&chsc_page_lock, flags);
1198        memset(chsc_page, 0, PAGE_SIZE);
1199        scsc_area = chsc_page;
1200        scsc_area->request.length = 0x0010;
1201        scsc_area->request.code = 0x0010;
1202
1203        result = chsc(scsc_area);
1204        if (result) {
1205                result = (result == 3) ? -ENODEV : -EBUSY;
1206                goto exit;
1207        }
1208
1209        result = chsc_error_from_response(scsc_area->response.code);
1210        if (result == 0) {
1211                memcpy(&css_general_characteristics, scsc_area->general_char,
1212                       sizeof(css_general_characteristics));
1213                memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
1214                       sizeof(css_chsc_characteristics));
1215        } else
1216                CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
1217                              scsc_area->response.code);
1218exit:
1219        spin_unlock_irqrestore(&chsc_page_lock, flags);
1220        return result;
1221}
1222
1223EXPORT_SYMBOL_GPL(css_general_characteristics);
1224EXPORT_SYMBOL_GPL(css_chsc_characteristics);
1225
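     /*
      * chsc_sstpc() (request code 0x0033) sets clock-related controls and
      * optionally returns a clock delta; chsc_sstpi() (request code 0x0038)
      * stores timing information into @result.  Both work on a caller-
      * supplied page; judging by the names they serve the Server Time
      * Protocol (STP) support outside this file.
      */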
1226int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
1227{
1228        struct {
1229                struct chsc_header request;
1230                unsigned int rsvd0;
1231                unsigned int op : 8;
1232                unsigned int rsvd1 : 8;
1233                unsigned int ctrl : 16;
1234                unsigned int rsvd2[5];
1235                struct chsc_header response;
1236                unsigned int rsvd3[3];
1237                u64 clock_delta;
1238                unsigned int rsvd4[2];
1239        } *rr;
1240        int rc;
1241
1242        memset(page, 0, PAGE_SIZE);
1243        rr = page;
1244        rr->request.length = 0x0020;
1245        rr->request.code = 0x0033;
1246        rr->op = op;
1247        rr->ctrl = ctrl;
1248        rc = chsc(rr);
1249        if (rc)
1250                return -EIO;
1251        rc = (rr->response.code == 0x0001) ? 0 : -EIO;
1252        if (clock_delta)
1253                *clock_delta = rr->clock_delta;
1254        return rc;
1255}
1256
1257int chsc_sstpi(void *page, void *result, size_t size)
1258{
1259        struct {
1260                struct chsc_header request;
1261                unsigned int rsvd0[3];
1262                struct chsc_header response;
1263                char data[];
1264        } *rr;
1265        int rc;
1266
1267        memset(page, 0, PAGE_SIZE);
1268        rr = page;
1269        rr->request.length = 0x0010;
1270        rr->request.code = 0x0038;
1271        rc = chsc(rr);
1272        if (rc)
1273                return -EIO;
1274        memcpy(result, &rr->data, size);
1275        return (rr->response.code == 0x0001) ? 0 : -EIO;
1276}
1277
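     /*
      * Issue request code 0x0046 ("siosl" in the message text) for @schid and
      * map the response to an errno.  The request block only carries the
      * subchannel id; what the machine does with it is not visible in this
      * file.
      */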
1278int chsc_siosl(struct subchannel_id schid)
1279{
1280        struct {
1281                struct chsc_header request;
1282                u32 word1;
1283                struct subchannel_id sid;
1284                u32 word3;
1285                struct chsc_header response;
1286                u32 word[11];
1287        } *siosl_area;
1288        unsigned long flags;
1289        int ccode;
1290        int rc;
1291
1292        spin_lock_irqsave(&chsc_page_lock, flags);
1293        memset(chsc_page, 0, PAGE_SIZE);
1294        siosl_area = chsc_page;
1295        siosl_area->request.length = 0x0010;
1296        siosl_area->request.code = 0x0046;
1297        siosl_area->word1 = 0x80000000;
1298        siosl_area->sid = schid;
1299
1300        ccode = chsc(siosl_area);
1301        if (ccode > 0) {
1302                if (ccode == 3)
1303                        rc = -ENODEV;
1304                else
1305                        rc = -EBUSY;
1306                CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
1307                              schid.ssid, schid.sch_no, ccode);
1308                goto out;
1309        }
1310        rc = chsc_error_from_response(siosl_area->response.code);
1311        if (rc)
1312                CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
1313                              schid.ssid, schid.sch_no,
1314                              siosl_area->response.code);
1315        else
1316                CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
1317                              schid.ssid, schid.sch_no);
1318out:
1319        spin_unlock_irqrestore(&chsc_page_lock, flags);
1320        return rc;
1321}
1322EXPORT_SYMBOL_GPL(chsc_siosl);
1323
1324/**
1325 * chsc_scm_info() - store SCM information (SSI)
1326 * @scm_area: request and response block for SSI
1327 * @token: continuation token
1328 *
1329 * Returns 0 on success.
1330 */
1331int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
1332{
1333        int ccode, ret;
1334
1335        memset(scm_area, 0, sizeof(*scm_area));
1336        scm_area->request.length = 0x0020;
1337        scm_area->request.code = 0x004C;
1338        scm_area->reqtok = token;
1339
1340        ccode = chsc(scm_area);
1341        if (ccode > 0) {
1342                ret = (ccode == 3) ? -ENODEV : -EBUSY;
1343                goto out;
1344        }
1345        ret = chsc_error_from_response(scm_area->response.code);
1346        if (ret != 0)
1347                CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
1348                              scm_area->response.code);
1349out:
1350        return ret;
1351}
1352EXPORT_SYMBOL_GPL(chsc_scm_info);
1353
1354/**
1355 * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
1356 * @schid:              id of the subchannel on which PNSO is performed
1357 * @brinfo_area:        request and response block for the operation
1358 * @resume_token:       resume token for multiblock response
1359 * @cnc:                Boolean change-notification control
1360 *
1361 * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
1362 *
1363 * Returns 0 on success.
1364 */
1365int chsc_pnso_brinfo(struct subchannel_id schid,
1366                struct chsc_pnso_area *brinfo_area,
1367                struct chsc_brinfo_resume_token resume_token,
1368                int cnc)
1369{
1370        memset(brinfo_area, 0, sizeof(*brinfo_area));
1371        brinfo_area->request.length = 0x0030;
1372        brinfo_area->request.code = 0x003d; /* network-subchannel operation */
1373        brinfo_area->m     = schid.m;
1374        brinfo_area->ssid  = schid.ssid;
1375        brinfo_area->sch   = schid.sch_no;
1376        brinfo_area->cssid = schid.cssid;
1377        brinfo_area->oc    = 0; /* Store-network-bridging-information list */
1378        brinfo_area->resume_token = resume_token;
1379        brinfo_area->n     = (cnc != 0);
1380        if (chsc(brinfo_area))
1381                return -EIO;
1382        return chsc_error_from_response(brinfo_area->response.code);
1383}
1384EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
1385