linux/drivers/net/ethernet/cisco/enic/vnic_wq.c
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include "vnic_dev.h"
#include "vnic_wq.h"
#include "enic.h"

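/* Allocate the per-descriptor buffer bookkeeping for a work queue.  One
 * vnic_wq_buf is allocated per ring descriptor, in zeroed blocks, and each
 * entry is pointed at its descriptor in the ring and linked into a circular
 * next/prev list that wraps from the last descriptor back to the first.
 * Returns -ENOMEM on allocation failure; the caller releases any partial
 * allocation via vnic_wq_free().
 */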
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
        struct vnic_wq_buf *buf;
        unsigned int i, j, count = wq->ring.desc_count;
        unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);

        for (i = 0; i < blks; i++) {
                wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
                if (!wq->bufs[i])
                        return -ENOMEM;
        }

        for (i = 0; i < blks; i++) {
                buf = wq->bufs[i];
                for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
                        buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
                        buf->desc = (u8 *)wq->ring.descs +
                                wq->ring.desc_size * buf->index;
                        if (buf->index + 1 == count) {
                                buf->next = wq->bufs[0];
                                buf->next->prev = buf;
                                break;
                        } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
                                buf->next = wq->bufs[i + 1];
                                buf->next->prev = buf;
                        } else {
                                buf->next = buf + 1;
                                buf->next->prev = buf;
                                buf++;
                        }
                }
        }

        wq->to_use = wq->to_clean = wq->bufs[0];

        return 0;
}

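/* Release a work queue: free the DMA descriptor ring and every buffer
 * block, and drop the pointer to the mapped control registers.
 */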
void vnic_wq_free(struct vnic_wq *wq)
{
        struct vnic_dev *vdev;
        unsigned int i;

        vdev = wq->vdev;

        vnic_dev_free_desc_ring(vdev, &wq->ring);

        for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
                if (wq->bufs[i]) {
                        kfree(wq->bufs[i]);
                        wq->bufs[i] = NULL;
                }
        }

        wq->ctrl = NULL;
}

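/* Set up work queue 'index': look up its control-register resource
 * (RES_TYPE_WQ), make sure the queue is disabled, then allocate the
 * descriptor ring and the matching buffer bookkeeping.  Everything is
 * freed again if the buffer allocation fails.
 */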
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size)
{
        int err;

        wq->index = index;
        wq->vdev = vdev;

        wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
        if (!wq->ctrl) {
                vdev_err(vdev, "Failed to hook WQ[%d] resource\n", index);
                return -EINVAL;
        }

        vnic_wq_disable(wq);

        err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
        if (err)
                return err;

        err = vnic_wq_alloc_bufs(wq);
        if (err) {
                vnic_wq_free(wq);
                return err;
        }

        return 0;
}

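/* Set up the work queue that backs the devcmd2 firmware command channel.
 * It uses the RES_TYPE_DEVCMD2 control registers as index 0 and only needs
 * a descriptor ring; no vnic_wq_buf bookkeeping is allocated.
 */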
int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
                          unsigned int desc_count, unsigned int desc_size)
{
        int err;

        wq->index = 0;
        wq->vdev = vdev;

        wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
        if (!wq->ctrl)
                return -EINVAL;
        vnic_wq_disable(wq);
        err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);

        return err;
}

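/* Program the work queue control registers: ring base and size, initial
 * fetch/posted indexes, completion queue binding, and error-interrupt
 * settings; the error status register is cleared.  The software
 * to_use/to_clean cursors are positioned at the buffer that corresponds
 * to fetch_index.
 */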
void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
                        unsigned int fetch_index, unsigned int posted_index,
                        unsigned int error_interrupt_enable,
                        unsigned int error_interrupt_offset)
{
        u64 paddr;
        unsigned int count = wq->ring.desc_count;

        paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
        writeq(paddr, &wq->ctrl->ring_base);
        iowrite32(count, &wq->ctrl->ring_size);
        iowrite32(fetch_index, &wq->ctrl->fetch_index);
        iowrite32(posted_index, &wq->ctrl->posted_index);
        iowrite32(cq_index, &wq->ctrl->cq_index);
        iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
        iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
        iowrite32(0, &wq->ctrl->error_status);

        wq->to_use = wq->to_clean =
                &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
                        [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
}

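/* Initialize a work queue with both the fetch and posted indexes at 0. */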
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset)
{
        enic_wq_init_start(wq, cq_index, 0, 0,
                error_interrupt_enable,
                error_interrupt_offset);
}

unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
        return ioread32(&wq->ctrl->error_status);
}

void vnic_wq_enable(struct vnic_wq *wq)
{
        iowrite32(1, &wq->ctrl->enable);
}

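/* Disable a work queue and wait for the hardware to stop running it,
 * polling up to 1000 times at 10 us intervals (roughly 10 ms total).
 */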
int vnic_wq_disable(struct vnic_wq *wq)
{
        unsigned int wait;
        struct vnic_dev *vdev = wq->vdev;

        iowrite32(0, &wq->ctrl->enable);

        /* Wait for HW to ACK disable request */
        for (wait = 0; wait < 1000; wait++) {
                if (!(ioread32(&wq->ctrl->running)))
                        return 0;
                udelay(10);
        }

        vdev_neterr(vdev, "Failed to disable WQ[%d]\n", wq->index);

        return -ETIMEDOUT;
}

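/* Drain a work queue: invoke buf_clean for every buffer still in use,
 * restoring desc_avail as each one is reclaimed, then reset the software
 * cursors to the first buffer, zero the fetch/posted/error registers, and
 * clear the descriptor ring contents.
 */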
void vnic_wq_clean(struct vnic_wq *wq,
        void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
        struct vnic_wq_buf *buf;

        buf = wq->to_clean;

        while (vnic_wq_desc_used(wq) > 0) {

                (*buf_clean)(wq, buf);

                buf = wq->to_clean = buf->next;
                wq->ring.desc_avail++;
        }

        wq->to_use = wq->to_clean = wq->bufs[0];

        iowrite32(0, &wq->ctrl->fetch_index);
        iowrite32(0, &wq->ctrl->posted_index);
        iowrite32(0, &wq->ctrl->error_status);

        vnic_dev_clear_desc_ring(&wq->ring);
}