linux/drivers/s390/scsi/zfcp_qdio.c
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

#define QBUFF_PER_PAGE          (PAGE_SIZE / sizeof(struct qdio_buffer))

static bool enable_multibuffer;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support");

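/**
 * zfcp_qdio_buffers_enqueue - allocate and link pages for a QDIO buffer array
 * @sbal: array of QDIO_MAX_BUFFERS_PER_Q qdio_buffer pointers to populate
 *
 * Allocates one zeroed page per group of QBUFF_PER_PAGE buffers, then points
 * the remaining array entries at the consecutive buffers within each page.
 * Returns: 0 on success, -ENOMEM if a page allocation fails
 */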
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
        int pos;

        for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
                sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
                if (!sbal[pos])
                        return -ENOMEM;
        }
        for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
                if (pos % QBUFF_PER_PAGE)
                        sbal[pos] = sbal[pos - 1] + 1;
        return 0;
}

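/**
 * zfcp_qdio_handler_error - trigger recovery after a QDIO error
 * @qdio: pointer to struct zfcp_qdio
 * @id: trace tag identifying the caller
 * @qdio_err: error flags reported by the QDIO layer
 *
 * A broken SLSB state cannot be recovered by a reopen, so request hardware
 * logging and shut the adapter down; for all other errors reopen the adapter.
 */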
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
                                    unsigned int qdio_err)
{
        struct zfcp_adapter *adapter = qdio->adapter;

        dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

        if (qdio_err & QDIO_ERROR_SLSB_STATE) {
                zfcp_qdio_siosl(adapter);
                zfcp_erp_adapter_shutdown(adapter, 0, id);
                return;
        }
        zfcp_erp_adapter_reopen(adapter,
                                ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
                                ZFCP_STATUS_COMMON_ERP_FAILED, id);
}

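/**
 * zfcp_qdio_zero_sbals - clear a range of SBALs in a queue
 * @sbal: the qdio_buffer array of the queue
 * @first: index of the first SBAL to clear
 * @cnt: number of SBALs to clear, wrapping around the circular queue
 */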
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
        int i, sbal_idx;

        for (i = first; i < first + cnt; i++) {
                sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
                memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
        }
}

/*
 * This needs to be called prior to updating the queue fill level: it
 * accumulates (buffers in use) * (elapsed time) into req_q_util, using
 * the monotonic TOD clock shifted by 12 bits, i.e. microseconds.
 */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
        unsigned long long now, span;
        int used;

        now = get_tod_clock_monotonic();
        span = (now - qdio->req_q_time) >> 12;
        used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
        qdio->req_q_util += used * span;
        qdio->req_q_time = now;
}

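/**
 * zfcp_qdio_int_req - interrupt handler for the request (output) queue
 * @cdev: the ccw device owning the queue
 * @qdio_err: error flags reported by the QDIO layer, 0 on success
 * @queue_no: queue number, unused (zfcp configures a single request queue)
 * @idx: index of the first completed SBAL
 * @count: number of completed SBALs
 * @parm: the struct zfcp_qdio passed as int_parm during qdio_establish
 *
 * Returns completed SBALs to the free pool and wakes up waiters in
 * zfcp_qdio_sbal_get.
 */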
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
                              int queue_no, int idx, int count,
                              unsigned long parm)
{
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

        if (unlikely(qdio_err)) {
                zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
                return;
        }

        /* clean up all SBALs that are program-owned now */
        zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

        spin_lock_irq(&qdio->stat_lock);
        zfcp_qdio_account(qdio);
        spin_unlock_irq(&qdio->stat_lock);
        atomic_add(count, &qdio->req_q_free);
        wake_up(&qdio->req_q_wq);
}

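/**
 * zfcp_qdio_int_resp - interrupt handler for the response (input) queue
 * @cdev: the ccw device owning the queue
 * @qdio_err: error flags reported by the QDIO layer, 0 on success
 * @queue_no: queue number, unused (zfcp configures a single response queue)
 * @idx: index of the first SBAL returned by the adapter
 * @count: number of returned SBALs
 * @parm: the struct zfcp_qdio passed as int_parm during qdio_establish
 *
 * On error with the hardware data router active, the affected SBALs are
 * captured in the debug trace before recovery is triggered.
 */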
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
                               int queue_no, int idx, int count,
                               unsigned long parm)
{
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
        struct zfcp_adapter *adapter = qdio->adapter;
        int sbal_no, sbal_idx;

        if (unlikely(qdio_err)) {
                if (zfcp_adapter_multi_buffer_active(adapter)) {
                        void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
                        struct qdio_buffer_element *sbale;
                        u64 req_id;
                        u8 scount;

                        /* clear all entries, incl. the signaling SBAL slot */
                        memset(pl, 0, sizeof(pl));
                        sbale = qdio->res_q[idx]->element;
                        req_id = (u64) sbale->addr;
                        /* incl. signaling SBAL */
                        scount = min(sbale->scount + 1,
                                     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);

                        for (sbal_no = 0; sbal_no < scount; sbal_no++) {
                                sbal_idx = (idx + sbal_no) %
                                        QDIO_MAX_BUFFERS_PER_Q;
                                pl[sbal_no] = qdio->res_q[sbal_idx];
                        }
                        zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
                }
                zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
                return;
        }

        /*
         * go through all SBALs from input queue currently
         * returned by QDIO layer
         */
        for (sbal_no = 0; sbal_no < count; sbal_no++) {
                sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
                /* go through all SBALEs of SBAL */
                zfcp_fsf_reqid_check(qdio, sbal_idx);
        }

        /*
         * put SBALs back to response queue
         */
        if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
                zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

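/**
 * zfcp_qdio_sbal_chain - move a request on to its next SBAL
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 *
 * Closes the current SBAL, flags the request as spanning multiple SBALs
 * and advances to the first SBALE of the next SBAL.
 * Returns: the first SBALE of the new SBAL, or NULL if the request would
 * exceed its last allowed SBAL
 */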
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        struct qdio_buffer_element *sbale;

        /* set last entry flag in current SBALE of current SBAL */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
        sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

        /* don't exceed last allowed SBAL */
        if (q_req->sbal_last == q_req->sbal_limit)
                return NULL;

        /* set chaining flag in first SBALE of current SBAL */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
        sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

        /* calculate index of next SBAL */
        q_req->sbal_last++;
        q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

        /* keep this request's number of SBALs up-to-date */
        q_req->sbal_number++;
        BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

        /* start at first SBALE of new SBAL */
        q_req->sbale_curr = 0;

        /* set storage-block type for new SBAL */
        sbale = zfcp_qdio_sbale_curr(qdio, q_req);
        sbale->sflags |= q_req->sbtype;

        return sbale;
}

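/**
 * zfcp_qdio_sbale_next - get the next free SBALE of a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 *
 * Returns: the next SBALE in the current SBAL, chaining on to a new SBAL
 * when the current one is full, or NULL if no further SBAL is allowed
 */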
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
                return zfcp_qdio_sbal_chain(qdio, q_req);
        q_req->sbale_curr++;
        return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero on success or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
                            struct scatterlist *sg)
{
        struct qdio_buffer_element *sbale;

        /* set storage-block type for this request */
        sbale = zfcp_qdio_sbale_req(qdio, q_req);
        sbale->sflags |= q_req->sbtype;

        for (; sg; sg = sg_next(sg)) {
                sbale = zfcp_qdio_sbale_next(qdio, q_req);
                if (!sbale) {
                        atomic_inc(&qdio->req_q_full);
                        zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
                                             q_req->sbal_number);
                        return -EINVAL;
                }
                sbale->addr = sg_virt(sg);
                sbale->length = sg->length;
        }
        return 0;
}

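/**
 * zfcp_qdio_sbal_check - check whether waiting for a free SBAL may stop
 * @qdio: pointer to struct zfcp_qdio
 *
 * Used as the wait condition in zfcp_qdio_sbal_get and therefore called
 * with the req_q_lock held.
 * Returns: 1 if a free SBAL is available or the queues are down, 0 otherwise
 */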
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
        if (atomic_read(&qdio->req_q_free) ||
            !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return 1;
        return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
        long ret;

        ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
                       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

        if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return -EIO;

        if (ret > 0)
                return 0;

        if (!ret) {
                atomic_inc(&qdio->req_q_full);
                /* assume hanging outbound queue, try queue recovery */
                zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
        }

        return -EIO;
}

/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
        int retval;
        u8 sbal_number = q_req->sbal_number;

        spin_lock(&qdio->stat_lock);
        zfcp_qdio_account(qdio);
        spin_unlock(&qdio->stat_lock);

        retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
                         q_req->sbal_first, sbal_number);

        if (unlikely(retval)) {
                zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
                                     sbal_number);
                return retval;
        }

        /* account for transferred buffers */
        atomic_sub(sbal_number, &qdio->req_q_free);
        qdio->req_q_idx += sbal_number;
        qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

        return 0;
}

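/**
 * zfcp_qdio_setup_init_data - fill a qdio_initialize block for this adapter
 * @id: the qdio_initialize block to fill
 * @qdio: pointer to struct zfcp_qdio
 *
 * zfcp uses one input and one output queue; multi-buffer (data router)
 * support is only requested when the datarouter module parameter is set.
 */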
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
                                      struct zfcp_qdio *qdio)
{
        memset(id, 0, sizeof(*id));
        id->cdev = qdio->adapter->ccw_device;
        id->q_format = QDIO_ZFCP_QFMT;
        memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
        ASCEBC(id->adapter_name, 8);
        id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
        if (enable_multibuffer)
                id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
        id->no_input_qs = 1;
        id->no_output_qs = 1;
        id->input_handler = zfcp_qdio_int_resp;
        id->output_handler = zfcp_qdio_int_req;
        id->int_parm = (unsigned long) qdio;
        id->input_sbal_addr_array = (void **) (qdio->res_q);
        id->output_sbal_addr_array = (void **) (qdio->req_q);
        id->scan_threshold =
                QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
        struct qdio_initialize init_data;

        if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
            zfcp_qdio_buffers_enqueue(qdio->res_q))
                return -ENOMEM;

        zfcp_qdio_setup_init_data(&init_data, qdio);
        init_waitqueue_head(&qdio->req_q_wq);

        return qdio_allocate(&init_data);
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to struct zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
        struct zfcp_adapter *adapter = qdio->adapter;
        int idx, count;

        if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return;

        /* clear QDIOUP flag, so do_QDIO is not called during qdio_shutdown */
        spin_lock_irq(&qdio->req_q_lock);
        atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
        spin_unlock_irq(&qdio->req_q_lock);

        wake_up(&qdio->req_q_wq);

        qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

        /* clean up used outbound sbals */
        count = atomic_read(&qdio->req_q_free);
        if (count < QDIO_MAX_BUFFERS_PER_Q) {
                idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
                count = QDIO_MAX_BUFFERS_PER_Q - count;
                zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
        }
        qdio->req_q_idx = 0;
        atomic_set(&qdio->req_q_free, 0);
}

/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
        struct qdio_buffer_element *sbale;
        struct qdio_initialize init_data;
        struct zfcp_adapter *adapter = qdio->adapter;
        struct ccw_device *cdev = adapter->ccw_device;
        struct qdio_ssqd_desc ssqd;
        int cc;

        if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
                return -EIO;

        atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
                          &qdio->adapter->status);

        zfcp_qdio_setup_init_data(&init_data, qdio);

        if (qdio_establish(&init_data))
                goto failed_establish;

        if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
                goto failed_qdio;

        if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
                atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
                                &qdio->adapter->status);

        if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
                atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
                qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
        } else {
                atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
                qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
        }

        qdio->max_sbale_per_req =
                ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
                - 2;
        if (qdio_activate(cdev))
                goto failed_qdio;

        for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
                sbale = &(qdio->res_q[cc]->element[0]);
                sbale->length = 0;
                sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
                sbale->sflags = 0;
                sbale->addr = NULL;
        }

        if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
                goto failed_qdio;

        /* set index of first available SBAL / number of available SBALs */
        qdio->req_q_idx = 0;
        atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
        atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

        if (adapter->scsi_host) {
                adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
                adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
        }

        return 0;

failed_qdio:
        qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
        dev_err(&cdev->dev,
                "Setting up the QDIO connection to the FCP adapter failed\n");
        return -EIO;
}

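/**
 * zfcp_qdio_destroy - free QDIO resources of an adapter
 * @qdio: pointer to struct zfcp_qdio, may be NULL
 *
 * Releases the qdio data structures of the ccw device, frees the queue
 * pages and finally the zfcp_qdio structure itself.
 */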
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
        int p;

        if (!qdio)
                return;

        if (qdio->adapter->ccw_device)
                qdio_free(qdio->adapter->ccw_device);

        for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
                free_page((unsigned long) qdio->req_q[p]);
                free_page((unsigned long) qdio->res_q[p]);
        }

        kfree(qdio);
}

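/**
 * zfcp_qdio_setup - allocate and initialize the zfcp_qdio of an adapter
 * @adapter: pointer to struct zfcp_adapter
 * Returns: 0 on success, -ENOMEM on allocation failure
 */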
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
        struct zfcp_qdio *qdio;

        qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
        if (!qdio)
                return -ENOMEM;

        qdio->adapter = adapter;

        if (zfcp_qdio_allocate(qdio)) {
                zfcp_qdio_destroy(qdio);
                return -ENOMEM;
        }

        spin_lock_init(&qdio->req_q_lock);
        spin_lock_init(&qdio->stat_lock);

        adapter->qdio = qdio;
        return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging.  This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
        int rc;

        if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
                return;

        rc = ccw_device_siosl(adapter->ccw_device);
        if (!rc)
                atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
                                &adapter->status);
}