linux/drivers/scsi/fnic/vnic_wq.h
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_

#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"

/*
 * These defines avoid a symbol clash between fnic and enic (the Cisco 10G
 * Ethernet driver) when both drivers are built into the kernel (CONFIG
 * options set to y).
 */
#define vnic_wq_desc_avail fnic_wq_desc_avail
#define vnic_wq_desc_used fnic_wq_desc_used
#define vnic_wq_next_desc fnic_wq_next_desc
#define vnic_wq_post fnic_wq_post
#define vnic_wq_service fnic_wq_service
#define vnic_wq_free fnic_wq_free
#define vnic_wq_alloc fnic_wq_alloc
#define vnic_wq_devcmd2_alloc fnic_wq_devcmd2_alloc
#define vnic_wq_init_start fnic_wq_init_start
#define vnic_wq_init fnic_wq_init
#define vnic_wq_error_status fnic_wq_error_status
#define vnic_wq_enable fnic_wq_enable
#define vnic_wq_disable fnic_wq_disable
#define vnic_wq_clean fnic_wq_clean
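
/*
 * Illustrative note (an editorial sketch, not from the original source):
 * with both drivers built in, a reference to, say, vnic_wq_alloc() in fnic
 * code compiles to fnic_wq_alloc(), so fnic's copy of vnic_wq.c and enic's
 * identically named helpers can be linked into the same kernel without
 * duplicate-symbol errors.
 */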

/* Work queue control */
struct vnic_wq_ctrl {
        u64 ring_base;                  /* 0x00 */
        u32 ring_size;                  /* 0x08 */
        u32 pad0;
        u32 posted_index;               /* 0x10 */
        u32 pad1;
        u32 cq_index;                   /* 0x18 */
        u32 pad2;
        u32 enable;                     /* 0x20 */
        u32 pad3;
        u32 running;                    /* 0x28 */
        u32 pad4;
        u32 fetch_index;                /* 0x30 */
        u32 pad5;
        u32 dca_value;                  /* 0x38 */
        u32 pad6;
        u32 error_interrupt_enable;     /* 0x40 */
        u32 pad7;
        u32 error_interrupt_offset;     /* 0x48 */
        u32 pad8;
        u32 error_status;               /* 0x50 */
        u32 pad9;
};
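
/*
 * Illustrative sketch (an editorial example, not from the original source):
 * vnic_wq_ctrl is a memory-mapped register block, so its fields are accessed
 * through MMIO accessors on wq->ctrl rather than plain loads and stores,
 * roughly along the lines of what vnic_wq_enable()/vnic_wq_disable() in
 * vnic_wq.c are expected to do:
 *
 *      iowrite32(1, &wq->ctrl->enable);        // enable the queue
 *
 *      iowrite32(0, &wq->ctrl->enable);        // disable, then wait
 *      while (ioread32(&wq->ctrl->running))    // for HW to go idle
 *              udelay(10);
 */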

struct vnic_wq_buf {
        struct vnic_wq_buf *next;
        dma_addr_t dma_addr;
        void *os_buf;
        unsigned int len;
        unsigned int index;
        int sop;
        void *desc;
};

/* Break the vnic_wq_buf allocations into blocks of 64 entries */
#define VNIC_WQ_BUF_BLK_ENTRIES 64
#define VNIC_WQ_BUF_BLK_SZ \
        (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
        DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)

struct vnic_wq {
        unsigned int index;
        struct vnic_dev *vdev;
        struct vnic_wq_ctrl __iomem *ctrl;      /* memory-mapped */
        struct vnic_dev_ring ring;
        struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
        struct vnic_wq_buf *to_use;
        struct vnic_wq_buf *to_clean;
        unsigned int pkts_outstanding;
};
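
/*
 * Illustrative sketch (an assumption about the allocation scheme in the
 * companion vnic_wq.c, not taken from it): buffer metadata is allocated in
 * VNIC_WQ_BUF_BLK_SZ-sized blocks of 64 entries, so entry i of a ring lives
 * in block i / VNIC_WQ_BUF_BLK_ENTRIES at offset i % VNIC_WQ_BUF_BLK_ENTRIES:
 *
 *      struct vnic_wq_buf *buf = &wq->bufs[i / VNIC_WQ_BUF_BLK_ENTRIES]
 *                                         [i % VNIC_WQ_BUF_BLK_ENTRIES];
 *
 * A maximum-sized ring of 4096 descriptors therefore needs
 * VNIC_WQ_BUF_BLKS_MAX = 4096 / 64 = 64 blocks.
 */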

static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
        /* how many does SW own? */
        return wq->ring.desc_avail;
}

static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
        /* how many does HW own? */
        return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
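
/*
 * Worked example (assuming, as is typical for these rings, that init leaves
 * desc_avail = desc_count - 1 so that a full ring can be told apart from an
 * empty one):
 *
 *      desc_count = 512, nothing posted:   desc_avail = 511, desc_used = 0
 *      after posting 10 descriptors:       desc_avail = 501, desc_used = 10
 */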

static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
        return wq->to_use->desc;
}

static inline void vnic_wq_post(struct vnic_wq *wq,
        void *os_buf, dma_addr_t dma_addr,
        unsigned int len, int sop, int eop)
{
        struct vnic_wq_buf *buf = wq->to_use;

        buf->sop = sop;
        buf->os_buf = eop ? os_buf : NULL;
        buf->dma_addr = dma_addr;
        buf->len = len;

        buf = buf->next;
        if (eop) {
                /* A write memory barrier prevents the compiler and/or CPU
                 * from reordering the posted_index update ahead of the
                 * descriptor writes above; without it, hardware could read
                 * stale descriptor fields.
                 */
                wmb();
                iowrite32(buf->index, &wq->ctrl->posted_index);
        }
        wq->to_use = buf;

        wq->ring.desc_avail--;
}
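
/*
 * Illustrative usage sketch (hypothetical caller, not from the original
 * source): reserve the next descriptor, let the caller fill it for hardware
 * and DMA-map the buffer, then post it as a single-fragment frame
 * (sop = 1, eop = 1). 'skb', 'dma_addr' and 'len' are placeholders.
 *
 *      if (vnic_wq_desc_avail(wq)) {
 *              void *desc = vnic_wq_next_desc(wq);
 *
 *              ... caller fills *desc and maps the buffer ...
 *
 *              vnic_wq_post(wq, skb, dma_addr, len, 1, 1);
 *      }
 */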

static inline void vnic_wq_service(struct vnic_wq *wq,
        struct cq_desc *cq_desc, u16 completed_index,
        void (*buf_service)(struct vnic_wq *wq,
        struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
        void *opaque)
{
        struct vnic_wq_buf *buf;

        buf = wq->to_clean;
        while (1) {

                (*buf_service)(wq, cq_desc, buf, opaque);

                wq->ring.desc_avail++;

                wq->to_clean = buf->next;

                if (buf->index == completed_index)
                        break;

                buf = wq->to_clean;
        }
}
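
/*
 * Illustrative sketch of a buf_service callback (hypothetical, not from the
 * original source): vnic_wq_service() walks the completed buffers from
 * to_clean up to completed_index and hands each one to the callback, which
 * typically unmaps the DMA buffer and releases the OS buffer. Here the
 * underlying struct device is assumed to be passed in via 'opaque'.
 *
 *      static void example_wq_buf_free(struct vnic_wq *wq,
 *              struct cq_desc *cq_desc, struct vnic_wq_buf *buf,
 *              void *opaque)
 *      {
 *              struct device *dev = opaque;
 *
 *              dma_unmap_single(dev, buf->dma_addr, buf->len, DMA_TO_DEVICE);
 *              if (buf->os_buf)
 *                      dev_kfree_skb_any(buf->os_buf);
 *      }
 *
 *      vnic_wq_service(wq, cq_desc, completed_index,
 *                      example_wq_buf_free, dev);
 */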

void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size);
int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
                unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
                unsigned int fetch_index, unsigned int posted_index,
                unsigned int error_interrupt_enable,
                unsigned int error_interrupt_offset);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset);
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
        void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));

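/*
 * Illustrative lifecycle sketch (hypothetical sizes and names, not from the
 * original source): a work queue is allocated against its vnic_dev, bound to
 * a completion queue, enabled, and later quiesced, cleaned and freed.
 * 'vdev', 'cq_index', 'intr_offset', 'desc_size' and example_wq_buf_clean()
 * are placeholders supplied by the caller.
 *
 *      err = vnic_wq_alloc(vdev, &wq, 0, 512, desc_size);
 *      if (err)
 *              return err;
 *      vnic_wq_init(&wq, cq_index, 1, intr_offset);
 *      vnic_wq_enable(&wq);
 *      ...
 *      err = vnic_wq_disable(&wq);
 *      vnic_wq_clean(&wq, example_wq_buf_clean);
 *      vnic_wq_free(&wq);
 */
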
#endif /* _VNIC_WQ_H_ */