linux/drivers/s390/cio/eadm_sch.c
/*
 * Driver for s390 eadm subchannels
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include <asm/scsw.h>
#include <asm/eadm.h>

#include "eadm_sch.h"
#include "ioasm.h"
#include "cio.h"
#include "css.h"
#include "orb.h"

MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");

#define EADM_TIMEOUT (5 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);

static debug_info_t *eadm_debug;

#define EADM_LOG(imp, txt) do {					\
		debug_text_event(eadm_debug, imp, txt);		\
	} while (0)

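/*
 * Write a hex dump to the eadm debug feature, split into chunks of at
 * most the debug buffer size. Entries above the current debug level are
 * dropped.
 */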
static void EADM_LOG_HEX(int level, void *data, int length)
{
	if (level > eadm_debug->level)
		return;
	while (length > 0) {
		debug_event(eadm_debug, level, data, length);
		length -= eadm_debug->buf_size;
		data += eadm_debug->buf_size;
	}
}

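/*
 * Initialize the operation request block with the format and control
 * bits required for an EADM request.
 */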
static void orb_init(union orb *orb)
{
	memset(orb, 0, sizeof(union orb));
	orb->eadm.compat1 = 1;
	orb->eadm.compat2 = 1;
	orb->eadm.fmt = 1;
	orb->eadm.x = 1;
}

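/*
 * Set up the ORB with the AOB address and the subchannel as interruption
 * parameter, then issue the start subchannel instruction. The condition
 * code is translated into an errno value.
 */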
static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
	union orb *orb = &get_eadm_private(sch)->orb;
	int cc;

	orb_init(orb);
	orb->eadm.aob = (u32)__pa(aob);
	orb->eadm.intparm = (u32)(addr_t)sch;
	orb->eadm.key = PAGE_DEFAULT_KEY >> 4;

	EADM_LOG(6, "start");
	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));

	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
		break;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* not operational */
		return -ENODEV;
	}
	return 0;
}

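/* Issue a clear subchannel instruction to terminate the current operation. */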
static int eadm_subchannel_clear(struct subchannel *sch)
{
	int cc;

	cc = csch(sch->schid);
	if (cc)
		return -ENODEV;

	sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
	return 0;
}

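/*
 * Timer callback: the request did not complete within EADM_TIMEOUT,
 * try to clear the subchannel.
 */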
static void eadm_subchannel_timeout(unsigned long data)
{
	struct subchannel *sch = (struct subchannel *) data;

	spin_lock_irq(sch->lock);
	EADM_LOG(1, "timeout");
	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
	if (eadm_subchannel_clear(sch))
		EADM_LOG(0, "clear failed");
	spin_unlock_irq(sch->lock);
}

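/* Arm, modify or (with expires == 0) delete the per-subchannel timer. */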
static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
	struct eadm_private *private = get_eadm_private(sch);

	if (expires == 0) {
		del_timer(&private->timer);
		return;
	}
	if (timer_pending(&private->timer)) {
		if (mod_timer(&private->timer, jiffies + expires))
			return;
	}
	private->timer.function = eadm_subchannel_timeout;
	private->timer.data = (unsigned long) sch;
	private->timer.expires = jiffies + expires;
	add_timer(&private->timer);
}

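/*
 * Interrupt handler: evaluate the subchannel status, stop the timeout
 * timer and pass the completed AOB to the SCM layer. An unsolicited
 * interrupt puts the subchannel into the not-operational state.
 */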
static void eadm_subchannel_irq(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
	struct irb *irb = (struct irb *)&S390_lowcore.irb;
	int error = 0;

	EADM_LOG(6, "irq");
	EADM_LOG_HEX(6, irb, sizeof(*irb));

	inc_irq_stat(IRQIO_ADM);

	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
		error = -EIO;

	if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
		error = -ETIMEDOUT;

	eadm_subchannel_set_timeout(sch, 0);

	if (private->state != EADM_BUSY) {
		EADM_LOG(1, "irq unsol");
		EADM_LOG_HEX(1, irb, sizeof(*irb));
		private->state = EADM_NOT_OPER;
		css_sched_sch_todo(sch, SCH_TODO_EVAL);
		return;
	}
	scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
	private->state = EADM_IDLE;
}

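/*
 * Find an idle eadm subchannel, mark it busy and move it to the end of
 * the list. Returns NULL if no idle subchannel is available.
 */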
static struct subchannel *eadm_get_idle_sch(void)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry(private, &eadm_list, head) {
		sch = private->sch;
		spin_lock(sch->lock);
		if (private->state == EADM_IDLE) {
			private->state = EADM_BUSY;
			list_move_tail(&private->head, &eadm_list);
			spin_unlock(sch->lock);
			spin_unlock_irqrestore(&list_lock, flags);

			return sch;
		}
		spin_unlock(sch->lock);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	return NULL;
}

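/*
 * Start processing of an AOB on an idle eadm subchannel. This is the
 * entry point made available to the SCM layer via eadm_ops below.
 */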
static int eadm_start_aob(struct aob *aob)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;
	int ret;

	sch = eadm_get_idle_sch();
	if (!sch)
		return -EBUSY;

	spin_lock_irqsave(sch->lock, flags);
	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
	ret = eadm_subchannel_start(sch, aob);
	if (!ret)
		goto out_unlock;

	/* Handle start subchannel failure. */
	eadm_subchannel_set_timeout(sch, 0);
	private = get_eadm_private(sch);
	private->state = EADM_NOT_OPER;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return ret;
}

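/*
 * Allocate and initialize the private data, enable the subchannel and
 * add it to the list of eadm subchannels.
 */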
static int eadm_subchannel_probe(struct subchannel *sch)
{
	struct eadm_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	INIT_LIST_HEAD(&private->head);
	init_timer(&private->timer);

	spin_lock_irq(sch->lock);
	set_eadm_private(sch, private);
	private->state = EADM_IDLE;
	private->sch = sch;
	sch->isc = EADM_SCH_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		set_eadm_private(sch, NULL);
		spin_unlock_irq(sch->lock);
		kfree(private);
		goto out;
	}
	spin_unlock_irq(sch->lock);

	spin_lock_irq(&list_lock);
	list_add(&private->head, &eadm_list);
	spin_unlock_irq(&list_lock);

	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
out:
	return ret;
}

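/* Disable the subchannel, retrying as long as it reports busy. */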
static void eadm_quiesce(struct subchannel *sch)
{
	int ret;

	do {
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
		spin_unlock_irq(sch->lock);
	} while (ret == -EBUSY);
}

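/*
 * Remove the subchannel from the list of eadm subchannels, quiesce it
 * and free the private data.
 */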
static int eadm_subchannel_remove(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);

	spin_lock_irq(&list_lock);
	list_del(&private->head);
	spin_unlock_irq(&list_lock);

	eadm_quiesce(sch);

	spin_lock_irq(sch->lock);
	set_eadm_private(sch, NULL);
	spin_unlock_irq(sch->lock);

	kfree(private);

	return 0;
}

static void eadm_subchannel_shutdown(struct subchannel *sch)
{
	eadm_quiesce(sch);
}

static int eadm_subchannel_freeze(struct subchannel *sch)
{
	return cio_disable_subchannel(sch);
}

static int eadm_subchannel_restore(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}

/**
 * eadm_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
	struct eadm_private *private;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		goto out_unlock;
	}
	private = get_eadm_private(sch);
	if (private->state == EADM_NOT_OPER)
		private->state = EADM_IDLE;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return ret;
}

static struct css_device_id eadm_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);

static struct css_driver eadm_subchannel_driver = {
	.drv = {
		.name = "eadm_subchannel",
		.owner = THIS_MODULE,
	},
	.subchannel_type = eadm_subchannel_ids,
	.irq = eadm_subchannel_irq,
	.probe = eadm_subchannel_probe,
	.remove = eadm_subchannel_remove,
	.shutdown = eadm_subchannel_shutdown,
	.sch_event = eadm_subchannel_sch_event,
	.freeze = eadm_subchannel_freeze,
	.thaw = eadm_subchannel_restore,
	.restore = eadm_subchannel_restore,
};

static struct eadm_ops eadm_ops = {
	.eadm_start = eadm_start_aob,
	.owner = THIS_MODULE,
};

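/*
 * Module init: check for the EADM facility, set up the debug feature,
 * register the ISC and the subchannel driver, and make eadm_ops
 * available to the SCM layer.
 */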
static int __init eadm_sch_init(void)
{
	int ret;

	if (!css_general_characteristics.eadm)
		return -ENXIO;

	eadm_debug = debug_register("eadm_log", 16, 1, 16);
	if (!eadm_debug)
		return -ENOMEM;

	debug_register_view(eadm_debug, &debug_hex_ascii_view);
	debug_set_level(eadm_debug, 2);

	isc_register(EADM_SCH_ISC);
	ret = css_driver_register(&eadm_subchannel_driver);
	if (ret)
		goto cleanup;

	register_eadm_ops(&eadm_ops);
	return ret;

cleanup:
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
	return ret;
}

static void __exit eadm_sch_exit(void)
{
	unregister_eadm_ops(&eadm_ops);
	css_driver_unregister(&eadm_subchannel_driver);
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);