linux/drivers/net/ethernet/amazon/ena/ena_eth_com.h
/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4

struct ena_com_tx_ctx {
        struct ena_com_tx_meta ena_meta;
        struct ena_com_buf *ena_bufs;
        /* For LLQ, header buffer - pushed to the device mem space */
        void *push_header;

        enum ena_eth_io_l3_proto_index l3_proto;
        enum ena_eth_io_l4_proto_index l4_proto;
        u16 num_bufs;
        u16 req_id;
        /* For a regular queue, indicates the size of the header.
         * For LLQ, indicates the size of the pushed buffer.
         */
        u16 header_len;

        u8 meta_valid;
        u8 tso_enable;
        u8 l3_csum_enable;
        u8 l4_csum_enable;
        u8 l4_csum_partial;
        u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
        struct ena_com_rx_buf_info *ena_bufs;
        enum ena_eth_io_l3_proto_index l3_proto;
        enum ena_eth_io_l4_proto_index l4_proto;
        bool l3_csum_err;
        bool l4_csum_err;
        u8 l4_csum_checked;
        /* fragmented packet */
        bool frag;
        u32 hash;
        u16 descs;
        int max_bufs;
};

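/* Prepare the TX descriptors for the packet described by @ena_tx_ctx and
 * post them to the submission queue. On success, the number of hardware
 * descriptors consumed is returned via @nb_hw_desc.
 */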
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                       struct ena_com_tx_ctx *ena_tx_ctx,
                       int *nb_hw_desc);

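/* Process the completion descriptors of the next received packet and fill
 * @ena_rx_ctx with its metadata (protocols, checksum status, hash and
 * descriptor count).
 */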
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
                   struct ena_com_io_sq *io_sq,
                   struct ena_com_rx_ctx *ena_rx_ctx);

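/* Post a single RX buffer descriptor, tagged with @req_id, to the RX
 * submission queue.
 */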
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
                               struct ena_com_buf *ena_buf,
                               u16 req_id);

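/* Return true when the completion queue holds no pending completions. */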
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

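/* Unmask the queue's interrupt by writing the interrupt control word to the
 * CQ's unmask register.
 */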
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
                                       struct ena_eth_io_intr_reg *intr_reg)
{
        writel(intr_reg->intr_control, io_cq->unmask_reg);
}

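/* Return the number of free descriptors in the submission queue; one slot is
 * always kept in reserve.
 */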
static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
        u16 tail, next_to_comp, cnt;

        next_to_comp = io_sq->next_to_comp;
        tail = io_sq->tail;
        cnt = tail - next_to_comp;

        return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
                                                u16 required_buffers)
{
        int temp;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
                return ena_com_free_desc(io_sq) >= required_buffers;

        /* This calculation doesn't need to be 100% accurate. To reduce the
         * calculation overhead, just subtract 2 lines from the free descs
         * (one for the header line and one to compensate for the rounding
         * down of the division).
         */
        temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

        return ena_com_free_desc(io_sq) > temp;
}

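/* Notify the device of newly posted descriptors by writing the SQ tail
 * pointer to the doorbell register.
 */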
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
        u16 tail = io_sq->tail;

        pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
                 io_sq->qid, tail);

        writel(tail, io_sq->db_addr);

        return 0;
}

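/* Report the CQ head back to the device, but only after more than
 * q_depth / ENA_COMP_HEAD_THRESH completions have accumulated since the last
 * update, to limit doorbell writes.
 */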
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
        u16 unreported_comp, head;
        bool need_update;

        head = io_cq->head;
        unreported_comp = head - io_cq->last_head_update;
        need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

        if (io_cq->cq_head_db_reg && need_update) {
                pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
                         io_cq->qid, head);
                writel(head, io_cq->cq_head_db_reg);
                io_cq->last_head_update = head;
        }

        return 0;
}

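/* Program the queue's NUMA node hint, if the device exposes a NUMA
 * configuration register for this CQ.
 */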
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
                                            u8 numa_node)
{
        struct ena_eth_io_numa_node_cfg_reg numa_cfg;

        if (!io_cq->numa_node_cfg_reg)
                return;

        numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
                | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

        writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

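/* Acknowledge @elem completed descriptors by advancing next_to_comp. */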
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
        io_sq->next_to_comp += elem;
}

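/* Advance the CQ head, flipping the phase bit on wrap-around. */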
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
        io_cq->head++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
                io_cq->phase ^= 1;
}

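/* Fetch the req_id of the next TX completion. Returns -EAGAIN when no new
 * completion is available and -EINVAL when the reported req_id is out of
 * range.
 */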
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
                                             u16 *req_id)
{
        u8 expected_phase, cdesc_phase;
        struct ena_eth_io_tx_cdesc *cdesc;
        u16 masked_head;

        masked_head = io_cq->head & (io_cq->q_depth - 1);
        expected_phase = io_cq->phase;

        cdesc = (struct ena_eth_io_tx_cdesc *)
                ((uintptr_t)io_cq->cdesc_addr.virt_addr +
                (masked_head * io_cq->cdesc_entry_size_in_bytes));

        /* When the current completion descriptor phase isn't the same as the
         * expected one, it means that the device hasn't updated this
         * completion entry yet.
         */
        cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
        if (cdesc_phase != expected_phase)
                return -EAGAIN;

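        /* Make sure the rest of the descriptor is read only after the phase
         * bit has been validated.
         */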
        dma_rmb();

        *req_id = READ_ONCE(cdesc->req_id);
        if (unlikely(*req_id >= io_cq->q_depth)) {
                pr_err("Invalid req id %d\n", cdesc->req_id);
                return -EINVAL;
        }

        ena_com_cq_inc_head(io_cq);

        return 0;
}

#endif /* ENA_ETH_COM_H_ */