linux/drivers/block/rsxx/dma.c
/*
* Filename: dma.c
*
*
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
*       Philip Kelleher <pjk1939@linux.vnet.ibm.com>
*
* (C) Copyright 2013 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#include <linux/slab.h>
#include "rsxx_priv.h"

struct rsxx_dma {
        struct list_head         list;
        u8                       cmd;
        unsigned int             laddr;     /* Logical address */
        struct {
                u32              off;
                u32              cnt;
        } sub_page;
        dma_addr_t               dma_addr;
        struct page              *page;
        unsigned int             pg_off;    /* Page Offset */
        rsxx_dma_cb              cb;
        void                     *cb_data;
};

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT    msecs_to_jiffies(10000)

struct hw_status {
        u8      status;
        u8      tag;
        __le16  count;
        __le32  _rsvd2;
        __le64  _rsvd3;
} __packed;
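
/*
 * Each hw_status entry above packs to 16 bytes (1 + 1 + 2 + 4 + 8), so the
 * status ring is an array of fixed 16-byte slots indexed by ctrl->status.idx.
 * The sketch below is illustrative only (kept out of the build) and simply
 * restates that layout as compile-time checks; it assumes BUILD_BUG_ON() and
 * offsetof() are visible through rsxx_priv.h.
 */
#if 0
static inline void hw_status_layout_sketch(void)
{
        BUILD_BUG_ON(sizeof(struct hw_status) != 16);
        BUILD_BUG_ON(offsetof(struct hw_status, tag)    != 1);
        BUILD_BUG_ON(offsetof(struct hw_status, count)  != 2);
        BUILD_BUG_ON(offsetof(struct hw_status, _rsvd3) != 8);
}
#endif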

enum rsxx_dma_status {
        DMA_SW_ERR    = 0x1,
        DMA_HW_FAULT  = 0x2,
        DMA_CANCELLED = 0x4,
};

struct hw_cmd {
        u8      command;
        u8      tag;
        u8      _rsvd;
        u8      sub_page; /* Bit[0:2]: 512byte offset */
                          /* Bit[4:6]: 512byte count */
        __le32  device_addr;
        __le64  host_addr;
} __packed;
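
/*
 * The sub_page byte carries the 512-byte offset in bits [0:2] and the
 * 512-byte count in bits [4:6], matching the packing done later in
 * rsxx_issue_dmas(). A minimal sketch of the encoding (illustrative only,
 * kept out of the build): a 1KB transfer starting 512 bytes into the 4KB
 * hardware block has off = 1, cnt = 2, giving sub_page = (2 << 4) | 1 = 0x21.
 */
#if 0
static inline u8 pack_sub_page_sketch(u32 off_512, u32 cnt_512)
{
        return ((cnt_512 & 0x7) << 4) | (off_512 & 0x7);
}
#endif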

enum rsxx_hw_cmd {
        HW_CMD_BLK_DISCARD      = 0x70,
        HW_CMD_BLK_WRITE        = 0x80,
        HW_CMD_BLK_READ         = 0xC0,
        HW_CMD_BLK_RECON_READ   = 0xE0,
};

enum rsxx_hw_status {
        HW_STATUS_CRC           = 0x01,
        HW_STATUS_HARD_ERR      = 0x02,
        HW_STATUS_SOFT_ERR      = 0x04,
        HW_STATUS_FAULT         = 0x08,
};

static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
        int                     next_tag;
        struct rsxx_dma *dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
                (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
        spinlock_t              lock;
        int                     head;
        struct dma_tracker      list[0];
};


/*----------------- Misc Utility Functions -------------------*/
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
        unsigned long long tgt_addr8;

        tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
                      card->_stripe.upper_mask) |
                    ((addr8) & card->_stripe.lower_mask);
        do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
        return tgt_addr8;
}

static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
        unsigned int tgt;

        tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

        return tgt;
}

void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
        /* Reset all DMA Command/Status Queues */
        iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}

static unsigned int get_dma_size(struct rsxx_dma *dma)
{
        if (dma->sub_page.cnt)
                return dma->sub_page.cnt << 9;
        else
                return RSXX_HW_BLK_SIZE;
}


/*----------------- DMA Tracker -------------------*/
static void set_tracker_dma(struct dma_tracker_list *trackers,
                            int tag,
                            struct rsxx_dma *dma)
{
        trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
                                            int tag)
{
        return trackers->list[tag].dma;
}

static int pop_tracker(struct dma_tracker_list *trackers)
{
        int tag;

        spin_lock(&trackers->lock);
        tag = trackers->head;
        if (tag != -1) {
                trackers->head = trackers->list[tag].next_tag;
                trackers->list[tag].next_tag = -1;
        }
        spin_unlock(&trackers->lock);

        return tag;
}

static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
        spin_lock(&trackers->lock);
        trackers->list[tag].next_tag = trackers->head;
        trackers->head = tag;
        trackers->list[tag].dma = NULL;
        spin_unlock(&trackers->lock);
}
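
/*
 * Tag lifecycle, as used by the issue and completion paths below: a tag is
 * popped off the free list when a command is issued, bound to its DMA with
 * set_tracker_dma(), echoed back by the hardware in the status entry, then
 * looked up and pushed back on completion. A minimal sketch of that flow
 * (illustrative only, kept out of the build; the locking and error handling
 * of the real paths are elided):
 */
#if 0
static void tracker_lifecycle_sketch(struct rsxx_dma_ctrl *ctrl,
                                     struct rsxx_dma *dma)
{
        int tag = pop_tracker(ctrl->trackers);  /* -1 means no free tag */

        if (tag == -1)
                return;

        set_tracker_dma(ctrl->trackers, tag, dma);
        /* ... the tag travels to the hardware in struct hw_cmd ... */

        /* Later, the completion path recovers the DMA and frees the tag: */
        dma = get_tracker_dma(ctrl->trackers, tag);
        push_tracker(ctrl->trackers, tag);      /* also clears the dma slot */
}
#endif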


/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 * Interrupt Timer (64ns units) [15:0]
 * Interrupt Count [24:16]
 * Reserved [31:25]
*/
#define INTR_COAL_LATENCY_MASK       (0x0000ffff)

#define INTR_COAL_COUNT_SHIFT        16
#define INTR_COAL_COUNT_BITS         9
#define INTR_COAL_COUNT_MASK         (((1 << INTR_COAL_COUNT_BITS) - 1) << \
                                        INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS   64


static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
        u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

        if (mode == RSXX_INTR_COAL_DISABLED)
                return 0;

        return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
                        (latency_units & INTR_COAL_LATENCY_MASK);

}
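
/*
 * Worked example of the encoding above, using illustrative values: a count
 * of 8 completions and a latency of 1024ns (16 units of 64ns) encode as
 * (8 << 16) | 16 = 0x00080010. The sketch is not part of the build; real
 * values come from the card config or the auto-tune logic below.
 */
#if 0
static u32 intr_coal_example(u32 mode)
{
        /* For any mode other than RSXX_INTR_COAL_DISABLED: 0x00080010 */
        return dma_intr_coal_val(mode, 8, 1024);
}
#endif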

static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
        int i;
        u32 q_depth = 0;
        u32 intr_coal;

        if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
            unlikely(card->eeh_state))
                return;

        for (i = 0; i < card->n_targets; i++)
                q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      q_depth / 2,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);
}

/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
        if (dma->cmd != HW_CMD_BLK_DISCARD) {
                if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
                        pci_unmap_page(ctrl->card->dev, dma->dma_addr,
                                       get_dma_size(dma),
                                       dma->cmd == HW_CMD_BLK_WRITE ?
                                                   PCI_DMA_TODEVICE :
                                                   PCI_DMA_FROMDEVICE);
                }
        }

        kmem_cache_free(rsxx_dma_pool, dma);
}

static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
                                  unsigned int status)
{
        if (status & DMA_SW_ERR)
                ctrl->stats.dma_sw_err++;
        if (status & DMA_HW_FAULT)
                ctrl->stats.dma_hw_fault++;
        if (status & DMA_CANCELLED)
                ctrl->stats.dma_cancelled++;

        if (dma->cb)
                dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

        rsxx_free_dma(ctrl, dma);
}

int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
                           struct list_head *q, unsigned int done)
{
        struct rsxx_dma *dma;
        struct rsxx_dma *tmp;
        int cnt = 0;

        list_for_each_entry_safe(dma, tmp, q, list) {
                list_del(&dma->list);
                if (done & COMPLETE_DMA)
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                else
                        rsxx_free_dma(ctrl, dma);
                cnt++;
        }

        return cnt;
}

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
                                 struct rsxx_dma *dma)
{
        /*
         * Requeued DMAs go to the front of the queue so they are issued
         * first.
         */
        spin_lock_bh(&ctrl->queue_lock);
        ctrl->stats.sw_q_depth++;
        list_add(&dma->list, &ctrl->queue);
        spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
                                      struct rsxx_dma *dma,
                                      u8 hw_st)
{
        unsigned int status = 0;
        int requeue_cmd = 0;

        dev_dbg(CARD_TO_DEV(ctrl->card),
                "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
                dma->cmd, dma->laddr, hw_st);

        if (hw_st & HW_STATUS_CRC)
                ctrl->stats.crc_errors++;
        if (hw_st & HW_STATUS_HARD_ERR)
                ctrl->stats.hard_errors++;
        if (hw_st & HW_STATUS_SOFT_ERR)
                ctrl->stats.soft_errors++;

        switch (dma->cmd) {
        case HW_CMD_BLK_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        if (ctrl->card->scrub_hard) {
                                dma->cmd = HW_CMD_BLK_RECON_READ;
                                requeue_cmd = 1;
                                ctrl->stats.reads_retried++;
                        } else {
                                status |= DMA_HW_FAULT;
                                ctrl->stats.reads_failed++;
                        }
                } else if (hw_st & HW_STATUS_FAULT) {
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;
                }

                break;
        case HW_CMD_BLK_RECON_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        /* Data could not be reconstructed. */
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;
                }

                break;
        case HW_CMD_BLK_WRITE:
                status |= DMA_HW_FAULT;
                ctrl->stats.writes_failed++;

                break;
        case HW_CMD_BLK_DISCARD:
                status |= DMA_HW_FAULT;
                ctrl->stats.discards_failed++;

                break;
        default:
                dev_err(CARD_TO_DEV(ctrl->card),
                        "Unknown command in DMA! (cmd: x%02x laddr x%08x st: x%02x)\n",
                        dma->cmd, dma->laddr, hw_st);
                status |= DMA_SW_ERR;

                break;
        }

        if (requeue_cmd)
                rsxx_requeue_dma(ctrl, dma);
        else
                rsxx_complete_dma(ctrl, dma, status);
}

static void dma_engine_stalled(unsigned long data)
{
        struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
        int cnt;

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
            unlikely(ctrl->card->eeh_state))
                return;

        if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
                /*
                 * The dma engine was stalled because the SW_CMD_IDX write
                 * was lost. Issue it again to recover.
                 */
                dev_warn(CARD_TO_DEV(ctrl->card),
                        "SW_CMD_IDX write was lost, re-writing...\n");
                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);
        } else {
                dev_warn(CARD_TO_DEV(ctrl->card),
                        "DMA channel %d has stalled, faulting interface.\n",
                        ctrl->id);
                ctrl->card->dma_fault = 1;

                /* Clean up the DMA queue */
                spin_lock(&ctrl->queue_lock);
                cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
                spin_unlock(&ctrl->queue_lock);

                cnt += rsxx_dma_cancel(ctrl);

                if (cnt)
                        dev_info(CARD_TO_DEV(ctrl->card),
                                "Freed %d queued DMAs on channel %d\n",
                                cnt, ctrl->id);
        }
}

static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        int tag;
        int cmds_pending = 0;
        struct hw_cmd *hw_cmd_buf;
        int dir;

        hw_cmd_buf = ctrl->cmd.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->eeh_state))
                return;

        while (1) {
                spin_lock_bh(&ctrl->queue_lock);
                if (list_empty(&ctrl->queue)) {
                        spin_unlock_bh(&ctrl->queue_lock);
                        break;
                }
                spin_unlock_bh(&ctrl->queue_lock);

                tag = pop_tracker(ctrl->trackers);
                if (tag == -1)
                        break;

                spin_lock_bh(&ctrl->queue_lock);
                dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
                list_del(&dma->list);
                ctrl->stats.sw_q_depth--;
                spin_unlock_bh(&ctrl->queue_lock);

                /*
                 * This will catch any DMAs that slipped in right before the
                 * fault, but were queued after all the other DMAs were
                 * cancelled.
                 */
                if (unlikely(ctrl->card->dma_fault)) {
                        push_tracker(ctrl->trackers, tag);
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        continue;
                }

                if (dma->cmd != HW_CMD_BLK_DISCARD) {
                        if (dma->cmd == HW_CMD_BLK_WRITE)
                                dir = PCI_DMA_TODEVICE;
                        else
                                dir = PCI_DMA_FROMDEVICE;

                        /*
                         * The function pci_map_page is placed here because we
                         * can only, by design, issue up to 255 commands to the
                         * hardware at one time per DMA channel. So the maximum
                         * amount of mapped memory would be 255 * 4 channels *
                         * 4096 Bytes which is less than 2GB, the limit of a x8
                         * Non-HWWD PCIe slot. This way the pci_map_page
                         * function should never fail because of a lack of
                         * mappable memory.
                         */
                        dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
                                        dma->pg_off, dma->sub_page.cnt << 9, dir);
                        if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
                                push_tracker(ctrl->trackers, tag);
                                rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                                continue;
                        }
                }

                set_tracker_dma(ctrl->trackers, tag, dma);
                hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
                hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
                hw_cmd_buf[ctrl->cmd.idx]._rsvd    = 0;
                hw_cmd_buf[ctrl->cmd.idx].sub_page =
                                        ((dma->sub_page.cnt & 0x7) << 4) |
                                         (dma->sub_page.off & 0x7);

                hw_cmd_buf[ctrl->cmd.idx].device_addr =
                                        cpu_to_le32(dma->laddr);

                hw_cmd_buf[ctrl->cmd.idx].host_addr =
                                        cpu_to_le64(dma->dma_addr);

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "Issue DMA%d(laddr %d tag %d) to idx %d\n",
                        ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

                ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
                cmds_pending++;

                if (dma->cmd == HW_CMD_BLK_WRITE)
                        ctrl->stats.writes_issued++;
                else if (dma->cmd == HW_CMD_BLK_DISCARD)
                        ctrl->stats.discards_issued++;
                else
                        ctrl->stats.reads_issued++;
        }

        /* Let HW know we've queued commands. */
        if (cmds_pending) {
                atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                if (unlikely(ctrl->card->eeh_state)) {
                        del_timer_sync(&ctrl->activity_timer);
                        return;
                }

                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
        }
}
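
/*
 * The pci_map_page() comment in rsxx_issue_dmas() bounds the memory mapped
 * per card: with at most 255 outstanding commands per channel, 4 channels
 * and a 4KB hardware block, that is 255 * 4 * 4096 = 4,177,920 bytes (just
 * under 4MB), comfortably below the 2GB limit the comment cites. Sketch of
 * the arithmetic (illustrative only, using those assumed figures):
 */
#if 0
enum {
        MAX_MAPPED_BYTES_PER_CARD_SKETCH = 255 * 4 * 4096,     /* ~4MB */
};
#endif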

static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        unsigned long flags;
        u16 count;
        u8 status;
        u8 tag;
        struct hw_status *hw_st_buf;

        hw_st_buf = ctrl->status.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->dma_fault) ||
            unlikely(ctrl->card->eeh_state))
                return;

        count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

        while (count == ctrl->e_cnt) {
                /*
                 * The read memory-barrier is necessary to keep aggressive
                 * processors/optimizers (such as the PPC Apple G5) from
                 * reordering the following status-buffer tag & status read
                 * *before* the count read on subsequent iterations of the
                 * loop!
                 */
                rmb();

                status = hw_st_buf[ctrl->status.idx].status;
                tag    = hw_st_buf[ctrl->status.idx].tag;

                dma = get_tracker_dma(ctrl->trackers, tag);
                if (dma == NULL) {
                        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
                        rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
                        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

                        dev_err(CARD_TO_DEV(ctrl->card),
                                "No tracker for tag %d "
                                "(idx %d id %d)\n",
                                tag, ctrl->status.idx, ctrl->id);
                        return;
                }

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "Completing DMA%d"
                        "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
                        ctrl->id, dma->laddr, tag, status, count,
                        ctrl->status.idx);

                atomic_dec(&ctrl->stats.hw_q_depth);

                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                if (status)
                        rsxx_handle_dma_error(ctrl, dma, status);
                else
                        rsxx_complete_dma(ctrl, dma, 0);

                push_tracker(ctrl->trackers, tag);

                ctrl->status.idx = (ctrl->status.idx + 1) &
                                   RSXX_CS_IDX_MASK;
                ctrl->e_cnt++;

                count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
        }

        dma_intr_coal_auto_tune(ctrl->card);

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
                del_timer_sync(&ctrl->activity_timer);

        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
        rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

        spin_lock_bh(&ctrl->queue_lock);
        if (ctrl->stats.sw_q_depth)
                queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
        spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_schedule_issue(struct work_struct *work)
{
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_issue_dmas(ctrl);
        mutex_unlock(&ctrl->work_lock);
}

static void rsxx_schedule_done(struct work_struct *work)
{
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_dma_done(ctrl);
        mutex_unlock(&ctrl->work_lock);
}

static int rsxx_queue_discard(struct rsxx_cardinfo *card,
                                  struct list_head *q,
                                  unsigned int laddr,
                                  rsxx_dma_cb cb,
                                  void *cb_data)
{
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->cmd          = HW_CMD_BLK_DISCARD;
        dma->laddr        = laddr;
        dma->dma_addr     = 0;
        dma->sub_page.off = 0;
        dma->sub_page.cnt = 0;
        dma->page         = NULL;
        dma->pg_off       = 0;
        dma->cb           = cb;
        dma->cb_data      = cb_data;

        dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

        list_add_tail(&dma->list, q);

        return 0;
}

static int rsxx_queue_dma(struct rsxx_cardinfo *card,
                              struct list_head *q,
                              int dir,
                              unsigned int dma_off,
                              unsigned int dma_len,
                              unsigned int laddr,
                              struct page *page,
                              unsigned int pg_off,
                              rsxx_dma_cb cb,
                              void *cb_data)
{
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
        dma->laddr        = laddr;
        dma->sub_page.off = (dma_off >> 9);
        dma->sub_page.cnt = (dma_len >> 9);
        dma->page         = page;
        dma->pg_off       = pg_off;
        dma->cb           = cb;
        dma->cb_data      = cb_data;

        dev_dbg(CARD_TO_DEV(card),
                "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
                dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
                dma->sub_page.cnt, dma->page, dma->pg_off);

        /* Queue the DMA */
        list_add_tail(&dma->list, q);

        return 0;
}

int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           struct bio *bio,
                           atomic_t *n_dmas,
                           rsxx_dma_cb cb,
                           void *cb_data)
{
        struct list_head dma_list[RSXX_MAX_TARGETS];
        struct bio_vec bvec;
        struct bvec_iter iter;
        unsigned long long addr8;
        unsigned int laddr;
        unsigned int bv_len;
        unsigned int bv_off;
        unsigned int dma_off;
        unsigned int dma_len;
        int dma_cnt[RSXX_MAX_TARGETS];
        int tgt;
        int st;
        int i;

        addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&dma_list[i]);
                dma_cnt[i] = 0;
        }

        if (bio->bi_rw & REQ_DISCARD) {
                bv_len = bio->bi_iter.bi_size;

                while (bv_len > 0) {
                        tgt   = rsxx_get_dma_tgt(card, addr8);
                        laddr = rsxx_addr8_to_laddr(addr8, card);

                        st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
                                                    cb, cb_data);
                        if (st)
                                goto bvec_err;

                        dma_cnt[tgt]++;
                        atomic_inc(n_dmas);
                        addr8  += RSXX_HW_BLK_SIZE;
                        bv_len -= RSXX_HW_BLK_SIZE;
                }
        } else {
                bio_for_each_segment(bvec, bio, iter) {
                        bv_len = bvec.bv_len;
                        bv_off = bvec.bv_offset;

                        while (bv_len > 0) {
                                tgt   = rsxx_get_dma_tgt(card, addr8);
                                laddr = rsxx_addr8_to_laddr(addr8, card);
                                dma_off = addr8 & RSXX_HW_BLK_MASK;
                                dma_len = min(bv_len,
                                              RSXX_HW_BLK_SIZE - dma_off);

                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                        bio_data_dir(bio),
                                                        dma_off, dma_len,
                                                        laddr, bvec.bv_page,
                                                        bv_off, cb, cb_data);
                                if (st)
                                        goto bvec_err;

                                dma_cnt[tgt]++;
                                atomic_inc(n_dmas);
                                addr8  += dma_len;
                                bv_off += dma_len;
                                bv_len -= dma_len;
                        }
                }
        }

        for (i = 0; i < card->n_targets; i++) {
                if (!list_empty(&dma_list[i])) {
                        spin_lock_bh(&card->ctrl[i].queue_lock);
                        card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
                        list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
                        spin_unlock_bh(&card->ctrl[i].queue_lock);

                        queue_work(card->ctrl[i].issue_wq,
                                   &card->ctrl[i].issue_dma_work);
                }
        }

        return 0;

bvec_err:
        for (i = 0; i < card->n_targets; i++)
                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
                                        FREE_DMA);

        return st;
}
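
/*
 * Worked example of the split above, assuming a 4KB hardware block
 * (RSXX_HW_BLK_SIZE == 4096): a 4KB bio segment starting at sector 7 gives
 * addr8 = 3584, so the first DMA covers only the tail of one block
 * (dma_off = 3584, dma_len = 512, i.e. sub_page off = 7, cnt = 1) and the
 * remaining 3584 bytes become a second DMA at offset 0 of the next block
 * (sub_page off = 0, cnt = 7), possibly on a different target depending on
 * the striping configured in rsxx_dma_stripe_setup().
 */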


/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
        ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
                                &ctrl->status.dma_addr);
        ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
                                &ctrl->cmd.dma_addr);
        if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
                return -ENOMEM;

        memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->status.dma_addr),
                ctrl->regmap + SB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->status.dma_addr),
                ctrl->regmap + SB_ADD_HI);

        memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

        ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
        if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
                        ctrl->status.idx);
                return -EINVAL;
        }
        iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
        iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

        ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
        if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
                        ctrl->cmd.idx);
                return -EINVAL;
        }
        iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
        iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

        return 0;
}

static int rsxx_dma_ctrl_init(struct pci_dev *dev,
                                  struct rsxx_dma_ctrl *ctrl)
{
        int i;
        int st;

        memset(&ctrl->stats, 0, sizeof(ctrl->stats));

        ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
        if (!ctrl->trackers)
                return -ENOMEM;

        ctrl->trackers->head = 0;
        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
                ctrl->trackers->list[i].next_tag = i + 1;
                ctrl->trackers->list[i].dma = NULL;
        }
        ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
        spin_lock_init(&ctrl->trackers->lock);

        spin_lock_init(&ctrl->queue_lock);
        mutex_init(&ctrl->work_lock);
        INIT_LIST_HEAD(&ctrl->queue);

        setup_timer(&ctrl->activity_timer, dma_engine_stalled,
                                        (unsigned long)ctrl);

        ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
        if (!ctrl->issue_wq)
                return -ENOMEM;

        ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
        if (!ctrl->done_wq)
                return -ENOMEM;

        INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
        INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);

        st = rsxx_hw_buffers_init(dev, ctrl);
        if (st)
                return st;

        return 0;
}

static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
                              unsigned int stripe_size8)
{
        if (!is_power_of_2(stripe_size8)) {
                dev_err(CARD_TO_DEV(card),
                        "stripe_size is NOT a power of 2!\n");
                return -EINVAL;
        }

        card->_stripe.lower_mask = stripe_size8 - 1;

        card->_stripe.upper_mask  = ~(card->_stripe.lower_mask);
        card->_stripe.upper_shift = ffs(card->n_targets) - 1;

        card->_stripe.target_mask = card->n_targets - 1;
        card->_stripe.target_shift = ffs(stripe_size8) - 1;

        dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask   = x%016llx\n",
                card->_stripe.lower_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift  = x%016llx\n",
                card->_stripe.upper_shift);
        dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask   = x%016llx\n",
                card->_stripe.upper_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask  = x%016llx\n",
                card->_stripe.target_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
                card->_stripe.target_shift);

        return 0;
}
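
/*
 * Worked example of the stripe fields, using assumed values (nothing here
 * is read from real hardware): with n_targets = 4 and stripe_size8 = 4096,
 * lower_mask = 0xfff, upper_mask = ~0xfff, upper_shift = 2,
 * target_mask = 0x3 and target_shift = 12. A byte address (addr8) of
 * 0x4000 then maps to target (0x4000 >> 12) & 3 = 0, and
 * rsxx_addr8_to_laddr() folds it to ((0x4000 >> 2) & ~0xfff) | 0 = 0x1000,
 * i.e. logical block 1 once divided by RSXX_HW_BLK_SIZE (4096 assumed).
 * Consecutive 4KB blocks therefore rotate round-robin across the targets.
 */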

int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
        u32 intr_coal;

        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      card->config.data.intr_coal.count,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);

        return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
}

int rsxx_dma_setup(struct rsxx_cardinfo *card)
{
        unsigned long flags;
        int st;
        int i;

        dev_info(CARD_TO_DEV(card),
                "Initializing %d DMA targets\n",
                card->n_targets);

        /* Regmap is divided up into 4K chunks. One for each DMA channel */
        for (i = 0; i < card->n_targets; i++)
                card->ctrl[i].regmap = card->regmap + (i * 4096);

        card->dma_fault = 0;

        /* Reset the DMA queues */
        rsxx_dma_queue_reset(card);

        /************* Setup DMA Control *************/
        for (i = 0; i < card->n_targets; i++) {
                st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
                if (st)
                        goto failed_dma_setup;

                card->ctrl[i].card = card;
                card->ctrl[i].id = i;
        }

        card->scrub_hard = 1;

        if (card->config_valid)
                rsxx_dma_configure(card);

        /* Enable the interrupts after all setup has completed. */
        for (i = 0; i < card->n_targets; i++) {
                spin_lock_irqsave(&card->irq_lock, flags);
                rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
                spin_unlock_irqrestore(&card->irq_lock, flags);
        }

        return 0;

failed_dma_setup:
        for (i = 0; i < card->n_targets; i++) {
                struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

                if (ctrl->issue_wq) {
                        destroy_workqueue(ctrl->issue_wq);
                        ctrl->issue_wq = NULL;
                }

                if (ctrl->done_wq) {
                        destroy_workqueue(ctrl->done_wq);
                        ctrl->done_wq = NULL;
                }

                if (ctrl->trackers)
                        vfree(ctrl->trackers);

                if (ctrl->status.buf)
                        pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
                                            ctrl->status.buf,
                                            ctrl->status.dma_addr);
                if (ctrl->cmd.buf)
                        pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
                                            ctrl->cmd.buf, ctrl->cmd.dma_addr);
        }

        return st;
}

int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        int i;
        int cnt = 0;

        /* Clean up issued DMAs */
        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
                dma = get_tracker_dma(ctrl->trackers, i);
                if (dma) {
                        atomic_dec(&ctrl->stats.hw_q_depth);
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        push_tracker(ctrl->trackers, i);
                        cnt++;
                }
        }

        return cnt;
}

void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
        struct rsxx_dma_ctrl *ctrl;
        int i;

        for (i = 0; i < card->n_targets; i++) {
                ctrl = &card->ctrl[i];

                if (ctrl->issue_wq) {
                        destroy_workqueue(ctrl->issue_wq);
                        ctrl->issue_wq = NULL;
                }

                if (ctrl->done_wq) {
                        destroy_workqueue(ctrl->done_wq);
                        ctrl->done_wq = NULL;
                }

                if (timer_pending(&ctrl->activity_timer))
                        del_timer_sync(&ctrl->activity_timer);

                /* Clean up the DMA queue */
                spin_lock_bh(&ctrl->queue_lock);
                rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
                spin_unlock_bh(&ctrl->queue_lock);

                rsxx_dma_cancel(ctrl);

                vfree(ctrl->trackers);

                pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
                                    ctrl->status.buf, ctrl->status.dma_addr);
                pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
                                    ctrl->cmd.buf, ctrl->cmd.dma_addr);
        }
}

int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
        int i;
        int j;
        int cnt;
        struct rsxx_dma *dma;
        struct list_head *issued_dmas;

        issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
                              GFP_KERNEL);
        if (!issued_dmas)
                return -ENOMEM;

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&issued_dmas[i]);
                cnt = 0;
                for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
                        dma = get_tracker_dma(card->ctrl[i].trackers, j);
                        if (dma == NULL)
                                continue;

                        if (dma->cmd == HW_CMD_BLK_WRITE)
                                card->ctrl[i].stats.writes_issued--;
                        else if (dma->cmd == HW_CMD_BLK_DISCARD)
                                card->ctrl[i].stats.discards_issued--;
                        else
                                card->ctrl[i].stats.reads_issued--;

                        if (dma->cmd != HW_CMD_BLK_DISCARD) {
                                pci_unmap_page(card->dev, dma->dma_addr,
                                               get_dma_size(dma),
                                               dma->cmd == HW_CMD_BLK_WRITE ?
                                               PCI_DMA_TODEVICE :
                                               PCI_DMA_FROMDEVICE);
                        }

                        list_add_tail(&dma->list, &issued_dmas[i]);
                        push_tracker(card->ctrl[i].trackers, j);
                        cnt++;
                }

                spin_lock_bh(&card->ctrl[i].queue_lock);
                list_splice(&issued_dmas[i], &card->ctrl[i].queue);

                atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
                card->ctrl[i].stats.sw_q_depth += cnt;
                card->ctrl[i].e_cnt = 0;
                spin_unlock_bh(&card->ctrl[i].queue_lock);
        }

        kfree(issued_dmas);

        return 0;
}

int rsxx_dma_init(void)
{
        rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
        if (!rsxx_dma_pool)
                return -ENOMEM;

        return 0;
}


void rsxx_dma_cleanup(void)
{
        kmem_cache_destroy(rsxx_dma_pool);
}