linux/drivers/s390/char/sclp_cmd.c
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *            Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO         0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED  0x00120001

struct read_info_sccb {
        struct  sccb_header header;     /* 0-7 */
        u16     rnmax;                  /* 8-9 */
        u8      rnsize;                 /* 10 */
        u8      _reserved0[24 - 11];    /* 11-23 */
        u8      loadparm[8];            /* 24-31 */
        u8      _reserved1[48 - 32];    /* 32-47 */
        u64     facilities;             /* 48-55 */
        u8      _reserved2[84 - 56];    /* 56-83 */
        u8      fac84;                  /* 84 */
        u8      fac85;                  /* 85 */
        u8      _reserved3[91 - 86];    /* 86-90 */
        u8      flags;                  /* 91 */
        u8      _reserved4[100 - 92];   /* 92-99 */
        u32     rnsize2;                /* 100-103 */
        u64     rnmax2;                 /* 104-111 */
        u8      _reserved5[4096 - 112]; /* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));
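
/*
 * Illustrative sketch (not part of the original driver): the byte-offset
 * comments above can be cross-checked at compile time, e.g. from an init
 * function, with BUILD_BUG_ON() and offsetof():
 *
 *        BUILD_BUG_ON(offsetof(struct read_info_sccb, loadparm) != 24);
 *        BUILD_BUG_ON(offsetof(struct read_info_sccb, facilities) != 48);
 *        BUILD_BUG_ON(offsetof(struct read_info_sccb, rnsize2) != 100);
 *        BUILD_BUG_ON(sizeof(struct read_info_sccb) != PAGE_SIZE);
 */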

static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE);
static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;

static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
        int rc;

        __ctl_set_bit(0, 9);
        rc = sclp_service_call(cmd, sccb);
        if (rc)
                goto out;
        __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
                        PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
        local_irq_disable();
out:
        /* Contents of the sccb might have changed. */
        barrier();
        __ctl_clear_bit(0, 9);
        return rc;
}

static void __init sclp_read_info_early(void)
{
        int rc;
        int i;
        struct read_info_sccb *sccb;
        sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
                                  SCLP_CMDW_READ_SCP_INFO};

        sccb = &early_read_info_sccb;
        for (i = 0; i < ARRAY_SIZE(commands); i++) {
                do {
                        memset(sccb, 0, sizeof(*sccb));
                        sccb->header.length = sizeof(*sccb);
                        sccb->header.function_code = 0x80;
                        sccb->header.control_mask[2] = 0x80;
                        rc = sclp_cmd_sync_early(commands[i], sccb);
                } while (rc == -EBUSY);

                if (rc)
                        break;
                if (sccb->header.response_code == 0x10) {
                        early_read_info_sccb_valid = 1;
                        break;
                }
                if (sccb->header.response_code != 0x1f0)
                        break;
        }
}

static void __init sclp_event_mask_early(void)
{
        struct init_sccb *sccb = &early_event_mask_sccb;
        int rc;

        do {
                memset(sccb, 0, sizeof(*sccb));
                sccb->header.length = sizeof(*sccb);
                sccb->mask_length = sizeof(sccb_mask_t);
                rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
        } while (rc == -EBUSY);
}

void __init sclp_facilities_detect(void)
{
        struct read_info_sccb *sccb;

        sclp_read_info_early();
        if (!early_read_info_sccb_valid)
                return;

        sccb = &early_read_info_sccb;
        sclp_facilities = sccb->facilities;
        sclp_fac84 = sccb->fac84;
        if (sccb->fac85 & 0x02)
                S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
        rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        rzm <<= 20;

        sclp_event_mask_early();
}

bool __init sclp_has_linemode(void)
{
        struct init_sccb *sccb = &early_event_mask_sccb;

        if (sccb->header.response_code != 0x20)
                return false;
        if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))
                return true;
        return false;
}

bool __init sclp_has_vt220(void)
{
        struct init_sccb *sccb = &early_event_mask_sccb;

        if (sccb->header.response_code != 0x20)
                return false;
        if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
                return true;
        return false;
}

unsigned long long sclp_get_rnmax(void)
{
        return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
        return rzm;
}
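
/*
 * Usage sketch (an assumption, not taken from this file): the arch setup
 * code can derive the maximum amount of addressable storage from the two
 * helpers above, since rnmax is the number of storage increments and rzm
 * is the increment size in bytes:
 *
 *        unsigned long long max_mem = sclp_get_rnmax() * sclp_get_rzm();
 *
 * Both helpers return 0 if the early read-SCP-info request did not succeed.
 */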

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
        struct read_info_sccb *sccb;

        if (!early_read_info_sccb_valid)
                return;
        sccb = &early_read_info_sccb;
        info->is_valid = 1;
        if (sccb->flags & 0x2)
                info->has_dump = 1;
        memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
        struct completion *completion = data;

        complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
        struct completion completion;
        struct sclp_req *request;
        int rc;

        request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (!request)
                return -ENOMEM;
        request->command = cmd;
        request->sccb = sccb;
        request->status = SCLP_REQ_FILLED;
        request->callback = sclp_sync_callback;
        request->callback_data = &completion;
        init_completion(&completion);

        /* Perform sclp request. */
        rc = sclp_add_request(request);
        if (rc)
                goto out;
        wait_for_completion(&completion);

        /* Check response. */
        if (request->status != SCLP_REQ_DONE) {
                pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
                        cmd, request->status);
                rc = -EIO;
        }
out:
        kfree(request);
        return rc;
}
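
/*
 * Usage sketch (illustrative, with a hypothetical command word "cmd"):
 * callers of sclp_sync_request() allocate the SCCB from GFP_DMA memory so
 * that it lies below 2 GB, fill in header.length, and check the response
 * code that the SCLP stores in the header, e.g.:
 *
 *        struct sccb_header *sccb;
 *        int rc;
 *
 *        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *        if (!sccb)
 *                return -ENOMEM;
 *        sccb->length = PAGE_SIZE;
 *        rc = sclp_sync_request(cmd, sccb);
 *        if (!rc && sccb->response_code != 0x0020)
 *                rc = -EIO;
 *        free_page((unsigned long) sccb);
 *
 * The expected response code depends on the command (0x0020 for most of the
 * configure commands in this file, 0x0010 for the read commands).
 */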

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO         0x00010001
#define SCLP_CMDW_CONFIGURE_CPU         0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU       0x00100001

struct read_cpu_info_sccb {
        struct  sccb_header header;
        u16     nr_configured;
        u16     offset_configured;
        u16     nr_standby;
        u16     offset_standby;
        u8      reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
                               struct read_cpu_info_sccb *sccb)
{
        char *page = (char *) sccb;

        memset(info, 0, sizeof(*info));
        info->configured = sccb->nr_configured;
        info->standby = sccb->nr_standby;
        info->combined = sccb->nr_configured + sccb->nr_standby;
        info->has_cpu_type = sclp_fac84 & 0x1;
        memcpy(&info->cpu, page + sccb->offset_configured,
               info->combined * sizeof(struct sclp_cpu_entry));
}

int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
        int rc;
        struct read_cpu_info_sccb *sccb;

        if (!SCLP_HAS_CPU_INFO)
                return -EOPNOTSUPP;
        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = sizeof(*sccb);
        rc = sclp_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
        if (rc)
                goto out;
        if (sccb->header.response_code != 0x0010) {
                pr_warn("readcpuinfo failed (response=0x%04x)\n",
                        sccb->header.response_code);
                rc = -EIO;
                goto out;
        }
        sclp_fill_cpu_info(info, sccb);
out:
        free_page((unsigned long) sccb);
        return rc;
}
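
/*
 * Usage sketch (illustrative; struct sclp_cpu_info and struct sclp_cpu_entry
 * are defined in asm/sclp.h): info->combined holds the number of valid
 * elements in info->cpu[], so a consumer can iterate like this:
 *
 *        struct sclp_cpu_info info;
 *        int i;
 *
 *        if (!sclp_get_cpu_info(&info))
 *                for (i = 0; i < info.combined; i++)
 *                        handle_cpu_entry(&info.cpu[i]);
 *
 * handle_cpu_entry() is a placeholder for whatever the caller does with
 * each entry.
 */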

struct cpu_configure_sccb {
        struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_cpu_configure(sclp_cmdw_t cmd)
{
        struct cpu_configure_sccb *sccb;
        int rc;

        if (!SCLP_HAS_CPU_RECONFIG)
                return -EOPNOTSUPP;
        /*
         * This is not going to cross a page boundary since we force
         * kmalloc to have a minimum alignment of 8 bytes on s390.
         */
        sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = sizeof(*sccb);
        rc = sclp_sync_request(cmd, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
        case 0x0120:
                break;
        default:
                pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
                        cmd, sccb->header.response_code);
                rc = -EIO;
                break;
        }
out:
        kfree(sccb);
        return rc;
}

int sclp_cpu_configure(u8 cpu)
{
        return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

int sclp_cpu_deconfigure(u8 cpu)
{
        return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
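
/*
 * Illustrative note: the CPU address is placed in the second-lowest byte of
 * the command word, so e.g. sclp_cpu_configure(5) issues
 * 0x00110001 | (5 << 8) = 0x00110501.
 */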

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;

struct memory_increment {
        struct list_head list;
        u16 rn;
        int standby;
};

struct assign_storage_sccb {
        struct sccb_header header;
        u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
        if (!rzm)
                return 0;
        return PFN_PHYS(start_pfn) >> ilog2(rzm);
}

static unsigned long long rn2addr(u16 rn)
{
        return (unsigned long long) (rn - 1) * rzm;
}
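
/*
 * Worked example (illustrative): increment numbers reported by the SCLP are
 * 1-based, so with an increment size of rzm = 256 MB, rn 1 maps to address
 * 0x0, rn 2 to 0x10000000, and in general addr = (rn - 1) * rzm.
 */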

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
        struct assign_storage_sccb *sccb;
        int rc;

        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = PAGE_SIZE;
        sccb->rn = rn;
        rc = sclp_sync_request(cmd, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
        case 0x0120:
                break;
        default:
                pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
                        cmd, sccb->header.response_code, rn);
                rc = -EIO;
                break;
        }
out:
        free_page((unsigned long) sccb);
        return rc;
}

static int sclp_assign_storage(u16 rn)
{
        unsigned long long start;
        int rc;

        rc = do_assign_storage(0x000d0001, rn);
        if (rc)
                return rc;
        start = rn2addr(rn);
        storage_key_init_range(start, start + rzm);
        return 0;
}

static int sclp_unassign_storage(u16 rn)
{
        return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
        struct sccb_header header;
        u16 :16;
        u16 assigned;
        u32 :32;
        u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
        struct attach_storage_sccb *sccb;
        int rc;
        int i;

        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = PAGE_SIZE;
        rc = sclp_sync_request(0x00080001 | id << 8, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
                set_bit(id, sclp_storage_ids);
                for (i = 0; i < sccb->assigned; i++) {
                        if (sccb->entries[i])
                                sclp_unassign_storage(sccb->entries[i] >> 16);
                }
                break;
        default:
                rc = -EIO;
                break;
        }
out:
        free_page((unsigned long) sccb);
        return rc;
}

static int sclp_mem_change_state(unsigned long start, unsigned long size,
                                 int online)
{
        struct memory_increment *incr;
        unsigned long long istart;
        int rc = 0;

        list_for_each_entry(incr, &sclp_mem_list, list) {
                istart = rn2addr(incr->rn);
                if (start + size - 1 < istart)
                        break;
                if (start > istart + rzm - 1)
                        continue;
                if (online)
                        rc |= sclp_assign_storage(incr->rn);
                else
                        sclp_unassign_storage(incr->rn);
        }
        return rc ? -EIO : 0;
}
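
/*
 * Illustrative note: the "break"/"continue" checks above form a standard
 * interval overlap test; an increment starting at istart (of size rzm) is
 * touched by the request iff
 *
 *        start <= istart + rzm - 1 && istart <= start + size - 1
 *
 * The early "break" relies on sclp_mem_list being sorted by increment
 * number, which insert_increment() below maintains.
 */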

static int sclp_mem_notifier(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        unsigned long start, size;
        struct memory_notify *arg;
        unsigned char id;
        int rc = 0;

        arg = data;
        start = arg->start_pfn << PAGE_SHIFT;
        size = arg->nr_pages << PAGE_SHIFT;
        mutex_lock(&sclp_mem_mutex);
        for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
                sclp_attach_storage(id);
        switch (action) {
        case MEM_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        case MEM_GOING_ONLINE:
                rc = sclp_mem_change_state(start, size, 1);
                break;
        case MEM_CANCEL_ONLINE:
                sclp_mem_change_state(start, size, 0);
                break;
        case MEM_OFFLINE:
                sclp_mem_change_state(start, size, 0);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        if (!rc)
                sclp_mem_state_changed = 1;
        mutex_unlock(&sclp_mem_mutex);
        return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
        .notifier_call = sclp_mem_notifier,
};

static void __init add_memory_merged(u16 rn)
{
        static u16 first_rn, num;
        unsigned long long start, size;

        if (rn && first_rn && (first_rn + num == rn)) {
                num++;
                return;
        }
        if (!first_rn)
                goto skip_add;
        start = rn2addr(first_rn);
        size = (unsigned long long) num * rzm;
        if (start >= VMEM_MAX_PHYS)
                goto skip_add;
        if (start + size > VMEM_MAX_PHYS)
                size = VMEM_MAX_PHYS - start;
        if (memory_end_set && (start >= memory_end))
                goto skip_add;
        if (memory_end_set && (start + size > memory_end))
                size = memory_end - start;
        add_memory(0, start, size);
skip_add:
        first_rn = rn;
        num = 1;
}
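
/*
 * Worked example (illustrative): add_memory_merged() coalesces runs of
 * consecutive increment numbers into a single add_memory() call. With
 * rzm = 256 MB, the call sequence
 *
 *        add_memory_merged(10); add_memory_merged(11); add_memory_merged(12);
 *        add_memory_merged(17); add_memory_merged(0);
 *
 * adds two regions, [rn2addr(10), 3 * rzm) and [rn2addr(17), rzm); the
 * final call with rn == 0 flushes the last pending block.
 */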

static void __init sclp_add_standby_memory(void)
{
        struct memory_increment *incr;

        list_for_each_entry(incr, &sclp_mem_list, list)
                if (incr->standby)
                        add_memory_merged(incr->rn);
        add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
        struct memory_increment *incr, *new_incr;
        struct list_head *prev;
        u16 last_rn;

        new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
        if (!new_incr)
                return;
        new_incr->rn = rn;
        new_incr->standby = standby;
        last_rn = 0;
        prev = &sclp_mem_list;
        list_for_each_entry(incr, &sclp_mem_list, list) {
                if (assigned && incr->rn > rn)
                        break;
                if (!assigned && incr->rn - last_rn > 1)
                        break;
                last_rn = incr->rn;
                prev = &incr->list;
        }
        if (!assigned)
                new_incr->rn = last_rn + 1;
        if (new_incr->rn > rnmax) {
                kfree(new_incr);
                return;
        }
        list_add(&new_incr->list, prev);
}

static int sclp_mem_freeze(struct device *dev)
{
        if (!sclp_mem_state_changed)
                return 0;
        pr_err("Memory hotplug state changed, suspend refused.\n");
        return -EPERM;
}

struct read_storage_sccb {
        struct sccb_header header;
        u16 max_id;
        u16 assigned;
        u16 standby;
        u16 :16;
        u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
        .freeze         = sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
        .driver = {
                .name   = "sclp_mem",
                .pm     = &sclp_mem_pm_ops,
        },
};

static int __init sclp_detect_standby_memory(void)
{
        struct platform_device *sclp_pdev;
        struct read_storage_sccb *sccb;
        int i, id, assigned, rc;

        if (OLDMEM_BASE) /* No standby memory in kdump mode */
                return 0;
        if (!early_read_info_sccb_valid)
                return 0;
        if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
                return 0;
        rc = -ENOMEM;
        sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                goto out;
        assigned = 0;
        for (id = 0; id <= sclp_max_storage_id; id++) {
                memset(sccb, 0, PAGE_SIZE);
                sccb->header.length = PAGE_SIZE;
                rc = sclp_sync_request(0x00040001 | id << 8, sccb);
                if (rc)
                        goto out;
                switch (sccb->header.response_code) {
                case 0x0010:
                        set_bit(id, sclp_storage_ids);
                        for (i = 0; i < sccb->assigned; i++) {
                                if (!sccb->entries[i])
                                        continue;
                                assigned++;
                                insert_increment(sccb->entries[i] >> 16, 0, 1);
                        }
                        break;
                case 0x0310:
                        break;
                case 0x0410:
                        for (i = 0; i < sccb->assigned; i++) {
                                if (!sccb->entries[i])
                                        continue;
                                assigned++;
                                insert_increment(sccb->entries[i] >> 16, 1, 1);
                        }
                        break;
                default:
                        rc = -EIO;
                        break;
                }
                if (!rc)
                        sclp_max_storage_id = sccb->max_id;
        }
        if (rc || list_empty(&sclp_mem_list))
                goto out;
        for (i = 1; i <= rnmax - assigned; i++)
                insert_increment(0, 1, 0);
        rc = register_memory_notifier(&sclp_mem_nb);
        if (rc)
                goto out;
        rc = platform_driver_register(&sclp_mem_pdrv);
        if (rc)
                goto out;
        sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
        rc = PTR_RET(sclp_pdev);
        if (rc)
                goto out_driver;
        sclp_add_standby_memory();
        goto out;
out_driver:
        platform_driver_unregister(&sclp_mem_pdrv);
out:
        free_page((unsigned long) sccb);
        return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * PCI I/O adapter configuration related functions.
 */
#define SCLP_CMDW_CONFIGURE_PCI                 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI               0x001b0001

#define SCLP_RECONFIG_PCI_ATYPE                 2

struct pci_cfg_sccb {
        struct sccb_header header;
        u8 atype;               /* adapter type */
        u8 reserved1;
        u16 reserved2;
        u32 aid;                /* adapter identifier */
} __packed;

static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
        struct pci_cfg_sccb *sccb;
        int rc;

        if (!SCLP_HAS_PCI_RECONFIG)
                return -EOPNOTSUPP;

        sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;

        sccb->header.length = PAGE_SIZE;
        sccb->atype = SCLP_RECONFIG_PCI_ATYPE;
        sccb->aid = fid;
        rc = sclp_sync_request(cmd, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
        case 0x0120:
                break;
        default:
                pr_warn("configure PCI I/O adapter failed: cmd=0x%08x  response=0x%04x\n",
                        cmd, sccb->header.response_code);
                rc = -EIO;
                break;
        }
out:
        free_page((unsigned long) sccb);
        return rc;
}

int sclp_pci_configure(u32 fid)
{
        return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);

int sclp_pci_deconfigure(u32 fid)
{
        return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
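
/*
 * Usage sketch (illustrative; the real callers live in the s390 PCI code):
 *
 *        rc = sclp_pci_configure(fid);
 *        if (rc)
 *                pr_warn("configuring PCI function 0x%x failed\n", fid);
 *
 * where fid is the SCLP adapter identifier of the PCI function.
 */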

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH              0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH            0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION       0x00030001

struct chp_cfg_sccb {
        struct sccb_header header;
        u8 ccm;
        u8 reserved[6];
        u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
        struct chp_cfg_sccb *sccb;
        int rc;

        if (!SCLP_HAS_CHP_RECONFIG)
                return -EOPNOTSUPP;
        /* Prepare sccb. */
        sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = sizeof(*sccb);
        rc = sclp_sync_request(cmd, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
        case 0x0120:
        case 0x0440:
        case 0x0450:
                break;
        default:
                pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
                        cmd, sccb->header.response_code);
                rc = -EIO;
                break;
        }
out:
        free_page((unsigned long) sccb);
        return rc;
}
/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
        return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
        return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
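
/*
 * Usage sketch (illustrative): callers build a struct chp_id (see
 * asm/chpid.h) and pass it by value:
 *
 *        struct chp_id chpid;
 *
 *        chp_id_init(&chpid);
 *        chpid.id = 0x50;
 *        if (sclp_chp_configure(chpid))
 *                pr_warn("configure of chpid 0x%02x failed\n", chpid.id);
 */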

struct chp_info_sccb {
        struct sccb_header header;
        u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
        u8 standby[SCLP_CHP_INFO_MASK_SIZE];
        u8 configured[SCLP_CHP_INFO_MASK_SIZE];
        u8 ccm;
        u8 reserved[6];
        u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
        struct chp_info_sccb *sccb;
        int rc;

        if (!SCLP_HAS_CHP_INFO)
                return -EOPNOTSUPP;
        /* Prepare sccb. */
        sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = sizeof(*sccb);
        rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
        if (rc)
                goto out;
        if (sccb->header.response_code != 0x0010) {
                pr_warn("read channel-path info failed (response=0x%04x)\n",
                        sccb->header.response_code);
                rc = -EIO;
                goto out;
        }
        memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
        memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
        memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
        free_page((unsigned long) sccb);
        return rc;
}
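
/*
 * Usage sketch (illustrative): a caller reads the information once and then
 * inspects the per-chpid bit masks, for example:
 *
 *        struct sclp_chp_info info;
 *
 *        if (!sclp_chp_read_info(&info))
 *                pr_info("first chpid configured: %d\n",
 *                        !!(info.configured[0] & 0x80));
 *
 * The bit layout assumed here (one bit per chpid, most significant bit
 * first) is an assumption; the channel-subsystem code in drivers/s390/cio
 * is the authoritative consumer of this interface.
 */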