linux/drivers/dma/mic_x100_dma.c
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC X100 DMA Driver.
 *
 * Adapted from IOAT dma driver.
 */
#include <linux/module.h>
#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>

#include "mic_x100_dma.h"

#define MIC_DMA_MAX_XFER_SIZE_CARD  (1 * 1024 * 1024 -\
                                       MIC_DMA_ALIGN_BYTES)
#define MIC_DMA_MAX_XFER_SIZE_HOST  (1 * 1024 * 1024 >> 1)
#define MIC_DMA_DESC_TYPE_SHIFT 60
#define MIC_DMA_MEMCPY_LEN_SHIFT 46
#define MIC_DMA_STAT_INTR_SHIFT 59

/* high-water mark for pushing dma descriptors */
static int mic_dma_pending_level = 4;

/* Status descriptor is used to write a 64 bit value to a memory location */
enum mic_dma_desc_format_type {
        MIC_DMA_MEMCPY = 1,
        MIC_DMA_STATUS,
};

static inline u32 mic_dma_hw_ring_inc(u32 val)
{
        return (val + 1) % MIC_DMA_DESC_RX_SIZE;
}

static inline u32 mic_dma_hw_ring_dec(u32 val)
{
        return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
}

static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
{
        ch->head = mic_dma_hw_ring_inc(ch->head);
}

/* Prepare a memcpy desc */
static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
        dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
{
        u64 qw0, qw1;

        qw0 = src_phys;
        qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
        qw1 = MIC_DMA_MEMCPY;
        qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
        qw1 |= dst_phys;
        desc->qw0 = qw0;
        desc->qw1 = qw1;
}

/* Prepare a status desc. with @data to be written at @dst_phys */
static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
        dma_addr_t dst_phys, bool generate_intr)
{
        u64 qw0, qw1;

        qw0 = data;
        qw1 = (u64) MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
        if (generate_intr)
                qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
        desc->qw0 = qw0;
        desc->qw1 = qw1;
}

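/*
 * Complete finished transactions: read the h/w completion count and, for
 * every descriptor between the s/w tail and that count, complete the cookie
 * and invoke the client callback, then publish the new tail.
 */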
static void mic_dma_cleanup(struct mic_dma_chan *ch)
{
        struct dma_async_tx_descriptor *tx;
        u32 tail;
        u32 last_tail;

        spin_lock(&ch->cleanup_lock);
        tail = mic_dma_read_cmp_cnt(ch);
        /*
         * This is the barrier pair for the smp_wmb() in
         * mic_dma_tx_submit_unlock(). It is required so that we read the
         * updated cookie value from tx->cookie.
         */
        smp_rmb();
        for (last_tail = ch->last_tail; tail != last_tail;) {
                tx = &ch->tx_array[last_tail];
                if (tx->cookie) {
                        dma_cookie_complete(tx);
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
                                tx->callback = NULL;
                        }
                }
                last_tail = mic_dma_hw_ring_inc(last_tail);
        }
        /* finish all completion callbacks before incrementing tail */
        smp_mb();
        ch->last_tail = last_tail;
        spin_unlock(&ch->cleanup_lock);
}

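/*
 * Number of free slots between head and tail; one slot is always kept
 * unused so that a full ring can be told apart from an empty one.
 */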
static u32 mic_dma_ring_count(u32 head, u32 tail)
{
        u32 count;

        if (head >= tail)
                count = tail + (MIC_DMA_DESC_RX_SIZE - head);
        else
                count = tail - head;
        return count - 1;
}

/* Returns the num. of free descriptors on success, -ENOMEM on failure */
static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
{
        struct device *dev = mic_dma_ch_to_device(ch);
        u32 count;

        count = mic_dma_ring_count(ch->head, ch->last_tail);
        if (count < required) {
                mic_dma_cleanup(ch);
                count = mic_dma_ring_count(ch->head, ch->last_tail);
        }

        if (count < required) {
                dev_dbg(dev, "Not enough desc space\n");
                dev_dbg(dev, "%s %d required=%u, avail=%u\n",
                        __func__, __LINE__, required, count);
                return -ENOMEM;
        } else {
                return count;
        }
}

/* Program memcpy descriptors into the descriptor ring and update s/w head ptr */
static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
                                    dma_addr_t dst, size_t len)
{
        size_t current_transfer_len;
        size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
        /* 3 is added to make sure we have enough space for status desc */
        int num_desc = len / max_xfer_size + 3;
        int ret;

        if (len % max_xfer_size)
                num_desc++;

        ret = mic_dma_avail_desc_ring_space(ch, num_desc);
        if (ret < 0)
                return ret;
        do {
                current_transfer_len = min(len, max_xfer_size);
                mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
                                    src, dst, current_transfer_len);
                mic_dma_hw_ring_inc_head(ch);
                len -= current_transfer_len;
                dst = dst + current_transfer_len;
                src = src + current_transfer_len;
        } while (len > 0);
        return 0;
}

/* H/w quirk: the DMA engine needs two status descriptors for every interrupt */
static void mic_dma_prog_intr(struct mic_dma_chan *ch)
{
        mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
                                 ch->status_dest_micpa, false);
        mic_dma_hw_ring_inc_head(ch);
        mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
                                 ch->status_dest_micpa, true);
        mic_dma_hw_ring_inc_head(ch);
}

/* Wrapper function to program memcpy descriptors/status descriptors */
static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
                          dma_addr_t dst, size_t len)
{
        if (-ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len))
                return -ENOMEM;
        /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
        if (flags & DMA_PREP_FENCE) {
                mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
                                         ch->status_dest_micpa, false);
                mic_dma_hw_ring_inc_head(ch);
        }

        if (flags & DMA_PREP_INTERRUPT)
                mic_dma_prog_intr(ch);

        return 0;
}

static inline void mic_dma_issue_pending(struct dma_chan *ch)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

        spin_lock(&mic_ch->issue_lock);
        /*
         * Write to head triggers h/w to act on the descriptors.
         * On MIC, writing the same head value twice causes
         * a h/w error. On second write, h/w assumes we filled
         * the entire ring & overwrote some of the descriptors.
         */
        if (mic_ch->issued == mic_ch->submitted)
                goto out;
        mic_ch->issued = mic_ch->submitted;
        /*
         * make descriptor updates visible before advancing head,
         * this is purposefully not smp_wmb() since we are also
         * publishing the descriptor updates to a dma device
         */
        wmb();
        mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
out:
        spin_unlock(&mic_ch->issue_lock);
}

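/*
 * Kick the h/w once the number of descriptors submitted but not yet issued
 * crosses the mic_dma_pending_level high-water mark.
 */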
static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
{
        if (mic_dma_ring_count(ch->issued, ch->submitted)
                        > mic_dma_pending_level)
                mic_dma_issue_pending(&ch->api_ch);
}

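/*
 * tx_submit callback: assign a cookie, publish the new submitted index and
 * drop the prep_lock taken in the prep path, then kick the h/w if enough
 * descriptors are pending.
 */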
static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
        dma_cookie_t cookie;

        dma_cookie_assign(tx);
        cookie = tx->cookie;
        /*
         * We need an smp write barrier here because another CPU might see
         * an update to submitted and update h/w head even before we
         * assigned a cookie to this tx.
         */
        smp_wmb();
        mic_ch->submitted = mic_ch->head;
        spin_unlock(&mic_ch->prep_lock);
        mic_dma_update_pending(mic_ch);
        return cookie;
}

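/*
 * Reuse the tx descriptor slot that corresponds to the last h/w descriptor
 * programmed into the ring (i.e. head - 1).
 */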
static inline struct dma_async_tx_descriptor *
allocate_tx(struct mic_dma_chan *ch)
{
        u32 idx = mic_dma_hw_ring_dec(ch->head);
        struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];

        dma_async_tx_descriptor_init(tx, &ch->api_ch);
        tx->tx_submit = mic_dma_tx_submit_unlock;
        return tx;
}

/*
 * Prepare a memcpy descriptor to be added to the ring.
 * Note that going through a temporary descriptor would add the overhead of
 * copying it into the ring, so we program the descriptor ring directly.
 */
static struct dma_async_tx_descriptor *
mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
                         dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
        struct device *dev = mic_dma_ch_to_device(mic_ch);
        int result;

        if (!len && !flags)
                return NULL;

        spin_lock(&mic_ch->prep_lock);
        result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
        if (result >= 0)
                return allocate_tx(mic_ch);
        dev_err(dev, "Error enqueueing dma, error=%d\n", result);
        spin_unlock(&mic_ch->prep_lock);
        return NULL;
}

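/*
 * Prepare a transaction that copies no data; fence/interrupt status
 * descriptors are programmed according to @flags.
 */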
static struct dma_async_tx_descriptor *
mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
        int ret;

        spin_lock(&mic_ch->prep_lock);
        ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
        if (!ret)
                return allocate_tx(mic_ch);
        spin_unlock(&mic_ch->prep_lock);
        return NULL;
}

/* Return the status of the transaction */
static enum dma_status
mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
                  struct dma_tx_state *txstate)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

        if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate))
                mic_dma_cleanup(mic_ch);

        return dma_cookie_status(ch, cookie, txstate);
}

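/* Threaded half of the channel interrupt: run the completion path */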
static irqreturn_t mic_dma_thread_fn(int irq, void *data)
{
        mic_dma_cleanup((struct mic_dma_chan *)data);
        return IRQ_HANDLED;
}

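/* Hard interrupt handler: ack the interrupt and wake the threaded handler */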
static irqreturn_t mic_dma_intr_handler(int irq, void *data)
{
        struct mic_dma_chan *ch = (struct mic_dma_chan *)data;

        mic_dma_ack_interrupt(ch);
        return IRQ_WAKE_THREAD;
}

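/*
 * Allocate the h/w descriptor ring, map it for the device and allocate the
 * shadow array of dmaengine tx descriptors that mirrors it.
 */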
static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
{
        u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
        struct device *dev = &to_mbus_device(ch)->dev;

        desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
        ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);

        if (!ch->desc_ring)
                return -ENOMEM;

        ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
                                             desc_ring_size, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, ch->desc_ring_micpa))
                goto map_error;

        ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array));
        if (!ch->tx_array)
                goto tx_error;
        return 0;
tx_error:
        dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
                         DMA_BIDIRECTIONAL);
map_error:
        kfree(ch->desc_ring);
        return -ENOMEM;
}

static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
{
        u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);

        vfree(ch->tx_array);
        desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
        dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
                         desc_ring_size, DMA_BIDIRECTIONAL);
        kfree(ch->desc_ring);
        ch->desc_ring = NULL;
}

static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
{
        dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
                         L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
        kfree(ch->status_dest);
}

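/*
 * Allocate and map a cache-line sized buffer that status descriptors
 * (including the interrupt status writes) use as their destination.
 */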
static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
{
        struct device *dev = &to_mbus_device(ch)->dev;

        ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
        if (!ch->status_dest)
                return -ENOMEM;
        ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
                                        L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, ch->status_dest_micpa)) {
                kfree(ch->status_dest);
                ch->status_dest = NULL;
                return -ENOMEM;
        }
        return 0;
}

static int mic_dma_check_chan(struct mic_dma_chan *ch)
{
        if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
            mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
                mic_dma_disable_chan(ch);
                mic_dma_chan_mask_intr(ch);
                dev_err(mic_dma_ch_to_device(ch),
                        "%s %d error setting up mic dma chan %d\n",
                        __func__, __LINE__, ch->ch_num);
                return -EBUSY;
        }
        return 0;
}

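/*
 * Program channel ownership and the descriptor ring base, sync the s/w
 * head/tail with the h/w tail pointer, then enable the channel.
 */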
static int mic_dma_chan_setup(struct mic_dma_chan *ch)
{
        if (MIC_DMA_CHAN_MIC == ch->owner)
                mic_dma_chan_set_owner(ch);
        mic_dma_disable_chan(ch);
        mic_dma_chan_mask_intr(ch);
        mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
        mic_dma_chan_set_desc_ring(ch);
        ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
        ch->head = ch->last_tail;
        ch->issued = 0;
        mic_dma_chan_unmask_intr(ch);
        mic_dma_enable_chan(ch);
        return mic_dma_check_chan(ch);
}

static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
{
        mic_dma_disable_chan(ch);
        mic_dma_chan_mask_intr(ch);
}

static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
{
        dma_async_device_unregister(&mic_dma_dev->dma_dev);
}

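/* Request a threaded interrupt for the channel through the mbus h/w ops */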
static int mic_dma_setup_irq(struct mic_dma_chan *ch)
{
        ch->cookie =
                to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
                        mic_dma_intr_handler, mic_dma_thread_fn,
                        "mic dma_channel", ch, ch->ch_num);
        if (IS_ERR(ch->cookie))
                return PTR_ERR(ch->cookie);
        return 0;
}

static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
{
        to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
}

static int mic_dma_chan_init(struct mic_dma_chan *ch)
{
        int ret = mic_dma_alloc_desc_ring(ch);

        if (ret)
                goto ring_error;
        ret = mic_dma_alloc_status_dest(ch);
        if (ret)
                goto status_error;
        ret = mic_dma_chan_setup(ch);
        if (ret)
                goto chan_error;
        return ret;
chan_error:
        mic_dma_free_status_dest(ch);
status_error:
        mic_dma_free_desc_ring(ch);
ring_error:
        return ret;
}

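/*
 * Drain the channel by submitting a zero-length fenced transaction and
 * waiting synchronously for it to complete.
 */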
static int mic_dma_drain_chan(struct mic_dma_chan *ch)
{
        struct dma_async_tx_descriptor *tx;
        int err = 0;
        dma_cookie_t cookie;

        tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
        if (!tx) {
                err = -ENOMEM;
                goto error;
        }

        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie))
                err = -ENOMEM;
        else
                err = dma_sync_wait(&ch->api_ch, cookie);
        if (err) {
                dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
                        __func__, __LINE__, ch->ch_num);
                err = -EIO;
        }
error:
        mic_dma_cleanup(ch);
        return err;
}

static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
{
        mic_dma_chan_destroy(ch);
        mic_dma_cleanup(ch);
        mic_dma_free_status_dest(ch);
        mic_dma_free_desc_ring(ch);
}

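/*
 * Initialize the per-channel locks and request the interrupt for every
 * channel owned by this device; on failure, free the IRQs already set up.
 */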
static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
                        enum mic_dma_chan_owner owner)
{
        int i, first_chan = mic_dma_dev->start_ch;
        struct mic_dma_chan *ch;
        int ret;

        for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
                ch = &mic_dma_dev->mic_ch[i];
                ch->ch_num = i;
                ch->owner = owner;
                spin_lock_init(&ch->cleanup_lock);
                spin_lock_init(&ch->prep_lock);
                spin_lock_init(&ch->issue_lock);
                ret = mic_dma_setup_irq(ch);
                if (ret)
                        goto error;
        }
        return 0;
error:
        for (i = i - 1; i >= first_chan; i--)
                mic_dma_free_irq(&mic_dma_dev->mic_ch[i]);
        return ret;
}

static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
{
        int i, first_chan = mic_dma_dev->start_ch;
        struct mic_dma_chan *ch;

        for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
                ch = &mic_dma_dev->mic_ch[i];
                mic_dma_free_irq(ch);
        }
}

static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
{
        int ret = mic_dma_chan_init(to_mic_dma_chan(ch));

        if (ret)
                return ret;
        return MIC_DMA_DESC_RX_SIZE;
}

static void mic_dma_free_chan_resources(struct dma_chan *ch)
{
        struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

        mic_dma_drain_chan(mic_ch);
        mic_dma_chan_uninit(mic_ch);
}

/* Set the fn. handlers and register the dma device with dma api */
static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
                                       enum mic_dma_chan_owner owner)
{
        int i, first_chan = mic_dma_dev->start_ch;

        dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
        /*
         * This dma engine is not capable of host memory to host memory
         * transfers
         */
        dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);

        if (MIC_DMA_CHAN_HOST == owner)
                dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
        mic_dma_dev->dma_dev.device_alloc_chan_resources =
                mic_dma_alloc_chan_resources;
        mic_dma_dev->dma_dev.device_free_chan_resources =
                mic_dma_free_chan_resources;
        mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
        mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
        mic_dma_dev->dma_dev.device_prep_dma_interrupt =
                mic_dma_prep_interrupt_lock;
        mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
        mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
        INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
        for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
                mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
                dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
                list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
                              &mic_dma_dev->dma_dev.channels);
        }
        return dma_async_device_register(&mic_dma_dev->dma_dev);
}

/*
 * Initializes dma channels and registers the dma device with the
 * dma engine api.
 */
static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
                                              enum mic_dma_chan_owner owner)
{
        struct mic_dma_device *mic_dma_dev;
        int ret;
        struct device *dev = &mbdev->dev;

        mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
        if (!mic_dma_dev) {
                ret = -ENOMEM;
                goto alloc_error;
        }
        mic_dma_dev->mbdev = mbdev;
        mic_dma_dev->dma_dev.dev = dev;
        mic_dma_dev->mmio = mbdev->mmio_va;
        if (MIC_DMA_CHAN_HOST == owner) {
                mic_dma_dev->start_ch = 0;
                mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
        } else {
                mic_dma_dev->start_ch = 4;
                mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
        }
        ret = mic_dma_init(mic_dma_dev, owner);
        if (ret)
                goto init_error;
        ret = mic_dma_register_dma_device(mic_dma_dev, owner);
        if (ret)
                goto reg_error;
        return mic_dma_dev;
reg_error:
        mic_dma_uninit(mic_dma_dev);
init_error:
        kfree(mic_dma_dev);
        mic_dma_dev = NULL;
alloc_error:
        dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
        return mic_dma_dev;
}

static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
        mic_dma_unregister_dma_device(mic_dma_dev);
        mic_dma_uninit(mic_dma_dev);
        kfree(mic_dma_dev);
}

/* DEBUGFS CODE */
static int mic_dma_reg_seq_show(struct seq_file *s, void *pos)
{
        struct mic_dma_device *mic_dma_dev = s->private;
        int i, chan_num, first_chan = mic_dma_dev->start_ch;
        struct mic_dma_chan *ch;

        seq_printf(s, "SBOX_DCR: %#x\n",
                   mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
                                     MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
        seq_puts(s, "DMA Channel Registers\n");
        seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
                   "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
        seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
        for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
                ch = &mic_dma_dev->mic_ch[i];
                chan_num = ch->ch_num;
                seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
                           chan_num,
                           mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
                seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
                           mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
                           mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
        }
        return 0;
}

static int mic_dma_reg_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, mic_dma_reg_seq_show, inode->i_private);
}

static int mic_dma_reg_debug_release(struct inode *inode, struct file *file)
{
        return single_release(inode, file);
}

static const struct file_operations mic_dma_reg_ops = {
        .owner   = THIS_MODULE,
        .open    = mic_dma_reg_debug_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = mic_dma_reg_debug_release
};

/* Debugfs parent dir */
static struct dentry *mic_dma_dbg;

static int mic_dma_driver_probe(struct mbus_device *mbdev)
{
        struct mic_dma_device *mic_dma_dev;
        enum mic_dma_chan_owner owner;

        if (MBUS_DEV_DMA_MIC == mbdev->id.device)
                owner = MIC_DMA_CHAN_MIC;
        else
                owner = MIC_DMA_CHAN_HOST;

        mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
        if (!mic_dma_dev)
                return -ENODEV;
        dev_set_drvdata(&mbdev->dev, mic_dma_dev);

        if (mic_dma_dbg) {
                mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
                                                          mic_dma_dbg);
                if (mic_dma_dev->dbg_dir)
                        debugfs_create_file("mic_dma_reg", 0444,
                                            mic_dma_dev->dbg_dir, mic_dma_dev,
                                            &mic_dma_reg_ops);
        }
        return 0;
}

static void mic_dma_driver_remove(struct mbus_device *mbdev)
{
        struct mic_dma_device *mic_dma_dev;

        mic_dma_dev = dev_get_drvdata(&mbdev->dev);
        debugfs_remove_recursive(mic_dma_dev->dbg_dir);
        mic_dma_dev_unreg(mic_dma_dev);
}

static struct mbus_device_id id_table[] = {
        {MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
        {MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
        {0},
};

static struct mbus_driver mic_dma_driver = {
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = mic_dma_driver_probe,
        .remove = mic_dma_driver_remove,
};

static int __init mic_x100_dma_init(void)
{
        int rc = mbus_register_driver(&mic_dma_driver);

        if (rc)
                return rc;
        mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
        return 0;
}

static void __exit mic_x100_dma_exit(void)
{
        debugfs_remove_recursive(mic_dma_dbg);
        mbus_unregister_driver(&mic_dma_driver);
}

module_init(mic_x100_dma_init);
module_exit(mic_x100_dma_exit);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
MODULE_LICENSE("GPL v2");