linux/drivers/net/ethernet/cisco/enic/vnic_wq.h
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_

#include <linux/pci.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Work queue control */
struct vnic_wq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 dca_value;			/* 0x38 */
	u32 pad6;
	u32 error_interrupt_enable;	/* 0x40 */
	u32 pad7;
	u32 error_interrupt_offset;	/* 0x48 */
	u32 pad8;
	u32 error_status;		/* 0x50 */
	u32 pad9;
};
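
/*
 * A minimal sketch (not part of the driver) of reading these registers
 * through the __iomem mapping; the padN fields keep each register on an
 * 8-byte stride in the BAR, matching the offsets noted above.
 * vnic_wq_example_dump_ctrl() is a hypothetical helper, not an enic API.
 */
static inline void vnic_wq_example_dump_ctrl(struct vnic_wq_ctrl __iomem *ctrl)
{
	pr_debug("wq: enable=%u posted=%u fetch=%u err=0x%x\n",
		 ioread32(&ctrl->enable),
		 ioread32(&ctrl->posted_index),
		 ioread32(&ctrl->fetch_index),
		 ioread32(&ctrl->error_status));
}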

struct vnic_wq_buf {
	struct vnic_wq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int len;
	unsigned int index;
	int sop;
	void *desc;
	uint64_t wr_id; /* Cookie */
	uint8_t cq_entry; /* Gets completion event from hw */
	uint8_t desc_skip_cnt; /* Num descs to occupy */
	uint8_t compressed_send; /* Both hdr and payload in one desc */
	struct vnic_wq_buf *prev;
};
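
/*
 * The next/prev pointers chain the per-descriptor bufs into a circular
 * list in descriptor index order: vnic_wq_post() below advances
 * wq->to_use along it, and vnic_wq_service() advances wq->to_clean.
 */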

/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)(((entries) < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_WQ_BUF_BLK_SZ(entries) \
	(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
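
/*
 * Worked example (illustrative): a ring of 4096 descriptors uses
 * 64-entry blocks, so VNIC_WQ_BUF_BLKS_NEEDED(4096) =
 * DIV_ROUND_UP(4096, 64) = 64 blocks of VNIC_WQ_BUF_BLK_SZ(4096) =
 * 64 * sizeof(struct vnic_wq_buf) bytes each; 64 is also
 * VNIC_WQ_BUF_BLKS_MAX, the bound on bufs[] in struct vnic_wq below.
 * A ring with fewer than 64 descriptors uses 32-entry blocks instead.
 */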

struct vnic_wq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
	struct vnic_wq_buf *to_use;
	struct vnic_wq_buf *to_clean;
	unsigned int pkts_outstanding;
};

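/*
 * Context: devcmd2 issues firmware commands by posting them on a
 * dedicated work queue (wq below) and picking up results from
 * results_ring; the color bit in each result flips on every pass of
 * the ring so fresh completions can be told apart from stale entries.
 * (Summary of the mechanism; the implementation lives in vnic_dev.c.)
 */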
struct devcmd2_controller {
	struct vnic_wq_ctrl __iomem *wq_ctrl;
	struct vnic_devcmd2 *cmd_ring;
	struct devcmd2_result *result;
	u16 next_result;
	u16 result_size;
	int color;
	struct vnic_dev_ring results_ring;
	struct vnic_wq wq;
	u32 posted;
};

static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
	/* how many does SW own? */
	return wq->ring.desc_avail;
}

static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
	/* how many does HW own? */
	return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
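
/*
 * Note the "- 1" above: one descriptor is always held back so the
 * posted index can never wrap onto the fetch index, which gives the
 * invariant vnic_wq_desc_avail(wq) + vnic_wq_desc_used(wq) ==
 * desc_count - 1 for any ring state.
 */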

static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
	return wq->to_use->desc;
}

static inline void vnic_wq_doorbell(struct vnic_wq *wq)
{
	/* A write memory barrier keeps the compiler and CPU from
	 * reordering the descriptor stores past the posted_index
	 * write, so a descriptor is never posted before it is fully
	 * initialized.  Otherwise, hardware could read stale
	 * descriptor fields.
	 */
	wmb();
	iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
}

static inline void vnic_wq_post(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr,
	unsigned int len, int sop, int eop,
	uint8_t desc_skip_cnt, uint8_t cq_entry,
	uint8_t compressed_send, uint64_t wrid)
{
	struct vnic_wq_buf *buf = wq->to_use;

	buf->sop = sop;
	buf->cq_entry = cq_entry;
	buf->compressed_send = compressed_send;
	buf->desc_skip_cnt = desc_skip_cnt;
	buf->os_buf = eop ? os_buf : NULL;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf->wr_id = wrid;

	buf = buf->next;
	wq->to_use = buf;

	wq->ring.desc_avail -= desc_skip_cnt;
}
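
/*
 * A minimal usage sketch (hypothetical helper, not an enic API) of the
 * post path for a single-fragment send: check for space, fill the
 * device-specific descriptor returned by vnic_wq_next_desc() (the enic
 * data path encodes a struct wq_enet_desc there), post it, then ring
 * the doorbell.
 */
static inline int vnic_wq_example_post_one(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len, uint64_t wrid)
{
	void *desc;

	if (vnic_wq_desc_avail(wq) < 1)
		return -ENOSPC;	/* ring full; service completions first */

	desc = vnic_wq_next_desc(wq);
	/* ... encode dma_addr/len into *desc in the device format ... */
	(void)desc;

	/* One descriptor carrying a whole packet: sop=1, eop=1,
	 * desc_skip_cnt=1, and request a completion (cq_entry=1).
	 */
	vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1, 1, 1, 0, wrid);

	/* A real xmit path would typically batch doorbells per burst. */
	vnic_wq_doorbell(wq);

	return 0;
}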

static inline void vnic_wq_service(struct vnic_wq *wq,
	struct cq_desc *cq_desc, u16 completed_index,
	void (*buf_service)(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
	void *opaque)
{
	struct vnic_wq_buf *buf;

	buf = wq->to_clean;
	while (1) {

		(*buf_service)(wq, cq_desc, buf, opaque);

		wq->ring.desc_avail++;

		wq->to_clean = buf->next;

		if (buf->index == completed_index)
			break;

		buf = wq->to_clean;
	}
}
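
/*
 * Sketch of a buf_service callback for an skb transmit path, assuming
 * <linux/dma-mapping.h> and <linux/skbuff.h> have been included.  It
 * mirrors what a cleanup handler typically does (unmap the fragment,
 * free the skb on end-of-packet) but is illustrative, not the driver's
 * actual handler.
 */
static inline void vnic_wq_example_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	struct device *dev = opaque;	/* assumes DMA device passed as opaque */

	if (buf->sop)
		dma_unmap_single(dev, buf->dma_addr, buf->len, DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, buf->dma_addr, buf->len, DMA_TO_DEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);	/* os_buf is set only on eop */
}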

void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
			  unsigned int desc_count, unsigned int desc_size);
void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
			unsigned int fetch_index, unsigned int posted_index,
			unsigned int error_interrupt_enable,
			unsigned int error_interrupt_offset);

#endif /* _VNIC_WQ_H_ */