linux/drivers/s390/cio/qdio_thinint.c
/*
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *            Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * beyond that, subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND         63
#define TIQDIO_NR_INDICATORS            (TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND               63

/* device state change indicators */
struct indicator_t {
        u32 ind;        /* u32 because of compare-and-swap performance */
        atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);

/* Adapter interrupt definitions */
static void tiqdio_thinint_handler(struct airq_struct *airq);

static struct airq_struct tiqdio_airq = {
        .handler = tiqdio_thinint_handler,
        .isc = QDIO_AIRQ_ISC,
};

static struct indicator_t *q_indicators;

u64 last_ai_time;

/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
        int i;

        for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
                if (!atomic_read(&q_indicators[i].count)) {
                        atomic_set(&q_indicators[i].count, 1);
                        return &q_indicators[i].ind;
                }

        /* use the shared indicator */
        atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
        return &q_indicators[TIQDIO_SHARED_IND].ind;
}

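/*
 * Hand an indicator back: the slot index is derived from the address's
 * offset within the q_indicators array, so this works for non-shared
 * and shared indicators alike.
 */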
static void put_indicator(u32 *addr)
{
        int i;

        if (!addr)
                return;
        i = ((unsigned long)addr - (unsigned long)q_indicators) /
                sizeof(struct indicator_t);
        atomic_dec(&q_indicators[i].count);
}

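/*
 * Add the device's first input queue to tiq_list so the thin interrupt
 * handler scans it; the handler then fans out to all input queues of
 * the device. Setting the DSCI afterwards flags the device as having
 * potential work from the start.
 */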
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
        mutex_lock(&tiq_list_lock);
        list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
        mutex_unlock(&tiq_list_lock);
        xchg(irq_ptr->dsci, 1 << 7);
}

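/*
 * Unlink the device's input queue from tiq_list. The pointer checks
 * guard against an establish that failed before the queue was added;
 * synchronize_rcu() ensures the thin interrupt handler no longer holds
 * a reference before the caller tears the queue down.
 */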
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;

        q = irq_ptr->input_qs[0];
        /* if establish triggered an error */
        if (!q || !q->entry.prev || !q->entry.next)
                return;

        mutex_lock(&tiq_list_lock);
        list_del_rcu(&q->entry);
        mutex_unlock(&tiq_list_lock);
        synchronize_rcu();
}

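/*
 * DSCI classification helpers: a DSCI is treated as shared when it is
 * the global shared indicator or when more than one input queue of the
 * device reports through it.
 */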
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
        return irq_ptr->nr_input_qs > 1;
}

static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
        return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
        return references_shared_dsci(irq_ptr) ||
                has_multiple_inq_on_dsci(irq_ptr);
}

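/* clear the DSCI, but only if the device owns it exclusively */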
void clear_nonshared_ind(struct qdio_irq *irq_ptr)
{
        if (!is_thinint_irq(irq_ptr))
                return;
        if (shared_ind(irq_ptr))
                return;
        xchg(irq_ptr->dsci, 0);
}

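/* returns 1 if a device-exclusive DSCI signals outstanding work */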
int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
        if (!is_thinint_irq(irq_ptr))
                return 0;
        if (shared_ind(irq_ptr))
                return 0;
        if (*irq_ptr->dsci)
                return 1;
        else
                return 0;
}

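/*
 * Fetch and clear the shared indicator, returning its previous value.
 * The use-count check avoids the xchg when no device is on the shared
 * indicator at all.
 */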
static inline u32 clear_shared_ind(void)
{
        if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
                return 0;
        return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

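/*
 * Deliver inbound work for every input queue of the device: either via
 * the upper-layer driver's queue_start_poll callback or by scheduling
 * the queue tasklet. When multiple queues report through one private
 * DSCI it is cleared up front, before the queues are walked.
 */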
static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
        struct qdio_q *q;
        int i;

        if (!references_shared_dsci(irq) &&
            has_multiple_inq_on_dsci(irq))
                xchg(irq->dsci, 0);

        for_each_input_queue(irq, q, i) {
                if (q->u.in.queue_start_poll) {
                        /* skip if polling is enabled or already in work */
                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
                                             &q->u.in.queue_irq_state)) {
                                qperf_inc(q, int_discarded);
                                continue;
                        }

                        /* avoid dsci clear here, done after processing */
                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
                                                 q->irq_ptr->int_parm);
                } else {
                        if (!shared_ind(q->irq_ptr))
                                xchg(q->irq_ptr->dsci, 0);

                        /*
                         * Call inbound processing but not directly
                         * since that could starve other thinint queues.
                         */
                        tasklet_schedule(&q->tasklet);
                }
        }
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @airq: pointer to adapter interrupt descriptor
 */
static void tiqdio_thinint_handler(struct airq_struct *airq)
{
        u32 si_used = clear_shared_ind();
        struct qdio_q *q;

        last_ai_time = S390_lowcore.int_clock;
        inc_irq_stat(IRQIO_QAI);

        /* protect tiq_list entries, only changed in activate or shutdown */
        rcu_read_lock();

        /* check for work on all inbound thinint queues */
        list_for_each_entry_rcu(q, &tiq_list, entry) {
                struct qdio_irq *irq;

                /* only process queues from changed sets */
                irq = q->irq_ptr;
                if (unlikely(references_shared_dsci(irq))) {
                        if (!si_used)
                                continue;
                } else if (!*irq->dsci)
                        continue;

                tiqdio_call_inq_handlers(irq);

                qperf_inc(q, adapter_int);
        }
        rcu_read_unlock();
}

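/*
 * Use CHSC to tell the channel subsystem which summary indicator and
 * which device state change indicator this subchannel should use; with
 * reset set, both addresses are cleared again.
 */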
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
        struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
        u64 summary_indicator_addr, subchannel_indicator_addr;
        int rc;

        if (reset) {
                summary_indicator_addr = 0;
                subchannel_indicator_addr = 0;
        } else {
                summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
                subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
        }

        rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
                       subchannel_indicator_addr);
        if (rc) {
                DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
                          scssc->response.code);
                goto out;
        }

        DBF_EVENT("setscind");
        DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
        DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
out:
        return rc;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
        q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
                               GFP_KERNEL);
        if (!q_indicators)
                return -ENOMEM;
        return 0;
}

void tiqdio_free_memory(void)
{
        kfree(q_indicators);
}

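/* register the handler for the adapter interrupts used by thinint qdio */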
int __init tiqdio_register_thinints(void)
{
        int rc;

        rc = register_adapter_interrupt(&tiqdio_airq);
        if (rc) {
                DBF_EVENT("RTI:%x", rc);
                return rc;
        }
        return 0;
}

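/* called while establishing the device: point the subchannel at its indicators */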
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
        if (!is_thinint_irq(irq_ptr))
                return 0;
        return set_subchannel_ind(irq_ptr, 0);
}

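/* reserve a DSCI for the device while the qdio_irq is set up */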
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
        if (!is_thinint_irq(irq_ptr))
                return;
        irq_ptr->dsci = get_indicator();
        DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

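/*
 * Undo the setup/establish steps: detach the subchannel from its
 * indicators and return the DSCI to the pool.
 */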
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
        if (!is_thinint_irq(irq_ptr))
                return;

        /* reset adapter interrupt indicators */
        set_subchannel_ind(irq_ptr, 1);
        put_indicator(irq_ptr->dsci);
}

void __exit tiqdio_unregister_thinints(void)
{
        WARN_ON(!list_empty(&tiq_list));
        unregister_adapter_interrupt(&tiqdio_airq);
}