linux/drivers/net/ethernet/amazon/ena/ena_eth_com.c
/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_eth_com.h"

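/* Return the completion descriptor at the current CQ head if its phase bit
 * matches the phase the driver expects, or NULL if the device has not
 * written a new completion yet.
 */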
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        struct ena_com_io_cq *io_cq)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 expected_phase, head_masked;
        u16 desc_phase;

        head_masked = io_cq->head & (io_cq->q_depth - 1);
        expected_phase = io_cq->phase;

        cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
                        + (head_masked * io_cq->cdesc_entry_size_in_bytes));

        desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

        if (desc_phase != expected_phase)
                return NULL;

        /* Make sure we read the rest of the descriptor after the phase bit
         * has been read
         */
        dma_rmb();

        return cdesc;
}

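/* Return a pointer to the next free submission descriptor in host memory
 * (regular, non-LLQ placement).
 */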
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
        u16 tail_masked;
        u32 offset;

        tail_masked = io_sq->tail & (io_sq->q_depth - 1);

        offset = tail_masked * io_sq->desc_entry_size;

        return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

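/* Copy a completed bounce buffer line into the device memory of an LLQ SQ
 * and advance the SQ tail, flipping the phase bit on wrap around.
 */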
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
                                                     u8 *bounce_buffer)
{
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;

        u16 dst_tail_mask;
        u32 dst_offset;

        dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
        dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

        if (is_llq_max_tx_burst_exists(io_sq)) {
                if (unlikely(!io_sq->entries_in_tx_burst_left)) {
                        pr_err("Error: trying to send more packets than tx burst allows\n");
                        return -ENOSPC;
                }

                io_sq->entries_in_tx_burst_left--;
                pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
                         io_sq->qid, io_sq->entries_in_tx_burst_left);
        }

        /* Make sure everything was written into the bounce buffer before
         * writing the bounce buffer to the device
         */
        wmb();

        /* The line is completed. Copy it to dev */
        __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
                         bounce_buffer, (llq_info->desc_list_entry_size) / 8);

        io_sq->tail++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;

        return 0;
}

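/* Copy the packet header into the current bounce buffer, right after the
 * descriptors that precede it (LLQ placement only; a no-op for host-memory
 * queues).
 */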
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
                                                 u8 *header_src,
                                                 u16 header_len)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
        u16 header_offset;

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
                return 0;

        header_offset =
                llq_info->descs_num_before_header * io_sq->desc_entry_size;

        if (unlikely((header_offset + header_len) >
                     llq_info->desc_list_entry_size)) {
                pr_err("trying to write header larger than llq entry can accommodate\n");
                return -EFAULT;
        }

        if (unlikely(!bounce_buffer)) {
                pr_err("bounce buffer is NULL\n");
                return -EFAULT;
        }

        memcpy(bounce_buffer + header_offset, header_src, header_len);

        return 0;
}

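/* Return the next free descriptor slot inside the current bounce buffer
 * line (LLQ placement).
 */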
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        u8 *bounce_buffer;
        void *sq_desc;

        bounce_buffer = pkt_ctrl->curr_bounce_buf;

        if (unlikely(!bounce_buffer)) {
                pr_err("bounce buffer is NULL\n");
                return NULL;
        }

        sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
        pkt_ctrl->idx++;
        pkt_ctrl->descs_left_in_line--;

        return sq_desc;
}

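/* Flush a partially filled bounce buffer line to the device and start a
 * fresh one (LLQ placement only; a no-op for host-memory queues).
 */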
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        int rc;

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
                return 0;

        /* bounce buffer was used, so write it and get a new one */
        if (pkt_ctrl->idx) {
                rc = ena_com_write_bounce_buffer_to_dev(io_sq,
                                                        pkt_ctrl->curr_bounce_buf);
                if (unlikely(rc))
                        return rc;

                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, llq_info->desc_list_entry_size);
        }

        pkt_ctrl->idx = 0;
        pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
        return 0;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return get_sq_desc_llq(io_sq);

        return get_sq_desc_regular_queue(io_sq);
}

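/* Advance the LLQ SQ tail: when the current bounce buffer line is full,
 * write it to the device and prepare the next line.
 */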
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
        struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
        struct ena_com_llq_info *llq_info = &io_sq->llq_info;
        int rc;

        if (!pkt_ctrl->descs_left_in_line) {
                rc = ena_com_write_bounce_buffer_to_dev(io_sq,
                                                        pkt_ctrl->curr_bounce_buf);
                if (unlikely(rc))
                        return rc;

                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, llq_info->desc_list_entry_size);

                pkt_ctrl->idx = 0;
                if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
                        pkt_ctrl->descs_left_in_line = 1;
                else
                        pkt_ctrl->descs_left_in_line =
                                llq_info->desc_list_entry_size / io_sq->desc_entry_size;
        }

        return 0;
}

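/* Advance the SQ tail for either placement policy, flipping the phase bit
 * on wrap around for host-memory queues.
 */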
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                return ena_com_sq_update_llq_tail(io_sq);

        io_sq->tail++;

        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;

        return 0;
}

static struct ena_eth_io_rx_cdesc_base *
        ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
        idx &= (io_cq->q_depth - 1);
        return (struct ena_eth_io_rx_cdesc_base *)
                ((uintptr_t)io_cq->cdesc_addr.virt_addr +
                idx * io_cq->cdesc_entry_size_in_bytes);
}

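/* Walk the CQ from the current head and count the completion descriptors
 * that belong to the next Rx packet. Once the "last" descriptor is seen,
 * return the total descriptor count and the index of the first one; return
 * 0 if the packet has not been fully completed yet.
 */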
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
                                           u16 *first_cdesc_idx)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;
        u16 count = 0, head_masked;
        u32 last = 0;

        do {
                cdesc = ena_com_get_next_rx_cdesc(io_cq);
                if (!cdesc)
                        break;

                ena_com_cq_inc_head(io_cq);
                count++;
                last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
        } while (!last);

        if (last) {
                *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
                count += io_cq->cur_rx_pkt_cdesc_count;

                head_masked = io_cq->head & (io_cq->q_depth - 1);

                io_cq->cur_rx_pkt_cdesc_count = 0;
                io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

                pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
                         io_cq->qid, *first_cdesc_idx, count);
        } else {
                io_cq->cur_rx_pkt_cdesc_count += count;
                count = 0;
        }

        return count;
}

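/* Build an extended Tx meta descriptor (MSS, L3/L4 header lengths and
 * offsets) on the SQ and cache it so later packets with unchanged meta
 * data can skip it.
 */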
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
                                                        struct ena_com_tx_ctx *ena_tx_ctx)
{
        struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
        struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

        meta_desc = get_sq_desc(io_sq);
        memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

        /* bits 0-9 of the mss */
        meta_desc->word2 |= (ena_meta->mss <<
                ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
        /* bits 10-13 of the mss */
        meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
                ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

        /* Extended meta desc */
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
        meta_desc->len_ctrl |= (io_sq->phase <<
                ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
        meta_desc->word2 |= ena_meta->l3_hdr_len &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
        meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
                ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

        meta_desc->word2 |= (ena_meta->l4_hdr_len <<
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

        /* Cache the meta desc */
        memcpy(&io_sq->cached_tx_meta, ena_meta,
               sizeof(struct ena_com_tx_meta));

        return ena_com_sq_update_tail(io_sq);
}

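/* Translate the status bits of the last completion descriptor of an Rx
 * packet into the fields of ena_com_rx_ctx (protocols, checksum results,
 * hash, fragment flag).
 */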
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
                                        struct ena_eth_io_rx_cdesc_base *cdesc)
{
        ena_rx_ctx->l3_proto = cdesc->status &
                ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
        ena_rx_ctx->l4_proto =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
        ena_rx_ctx->l3_csum_err =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
        ena_rx_ctx->l4_csum_err =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
        ena_rx_ctx->l4_csum_checked =
                !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
        ena_rx_ctx->hash = cdesc->hash;
        ena_rx_ctx->frag =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

        pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %d frag: %d cdesc_status: %x\n",
                 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
                 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
                 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

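/* Convert a Tx context into hardware descriptors on the SQ: optionally a
 * meta descriptor, then one descriptor per buffer. On success *nb_hw_desc
 * holds the number of descriptors consumed. The caller is expected to ring
 * the SQ doorbell afterwards (typically via ena_com_write_sq_doorbell()).
 */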
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                       struct ena_com_tx_ctx *ena_tx_ctx,
                       int *nb_hw_desc)
{
        struct ena_eth_io_tx_desc *desc = NULL;
        struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
        void *buffer_to_push = ena_tx_ctx->push_header;
        u16 header_len = ena_tx_ctx->header_len;
        u16 num_bufs = ena_tx_ctx->num_bufs;
        u16 start_tail = io_sq->tail;
        int i, rc;
        bool have_meta;
        u64 addr_hi;

        WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

        /* num_bufs + 1 for potential meta desc */
        if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
                pr_debug("Not enough space in the tx queue\n");
                return -ENOMEM;
        }

        if (unlikely(header_len > io_sq->tx_max_header_size)) {
                pr_err("header size is too large %d max header: %d\n",
                       header_len, io_sq->tx_max_header_size);
                return -EINVAL;
        }

        if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
                     !buffer_to_push))
                return -EINVAL;

        rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
        if (unlikely(rc))
                return rc;

        have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
                        ena_tx_ctx);
        if (have_meta) {
                rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
                if (unlikely(rc))
                        return rc;
        }

        /* If the caller doesn't want to send packets */
        if (unlikely(!num_bufs && !header_len)) {
                rc = ena_com_close_bounce_buffer(io_sq);
                *nb_hw_desc = io_sq->tail - start_tail;
                return rc;
        }

        desc = get_sq_desc(io_sq);
        if (unlikely(!desc))
                return -EFAULT;
        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

        /* Set first desc when we don't have meta descriptor */
        if (!have_meta)
                desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

        desc->buff_addr_hi_hdr_sz |= (header_len <<
                ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
                ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
        desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_DESC_PHASE_MASK;

        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

        /* Bits 0-9 */
        desc->meta_ctrl |= (ena_tx_ctx->req_id <<
                ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
                ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

        desc->meta_ctrl |= (ena_tx_ctx->df <<
                ENA_ETH_IO_TX_DESC_DF_SHIFT) &
                ENA_ETH_IO_TX_DESC_DF_MASK;

        /* Bits 10-15 */
        desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
                ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
                ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

        if (ena_tx_ctx->meta_valid) {
                desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
                        ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
                desc->meta_ctrl |= ena_tx_ctx->l3_proto &
                        ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
                        ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
                        ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
                        ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
                desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
                        ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
                        ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
        }

        for (i = 0; i < num_bufs; i++) {
                /* The first desc shares the same desc as the header */
                if (likely(i != 0)) {
                        rc = ena_com_sq_update_tail(io_sq);
                        if (unlikely(rc))
                                return rc;

                        desc = get_sq_desc(io_sq);
                        if (unlikely(!desc))
                                return -EFAULT;

                        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

                        desc->len_ctrl |= (io_sq->phase <<
                                ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
                                ENA_ETH_IO_TX_DESC_PHASE_MASK;
                }

                desc->len_ctrl |= ena_bufs->len &
                        ENA_ETH_IO_TX_DESC_LENGTH_MASK;

                addr_hi = ((ena_bufs->paddr &
                        GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

                desc->buff_addr_lo = (u32)ena_bufs->paddr;
                desc->buff_addr_hi_hdr_sz |= addr_hi &
                        ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
                ena_bufs++;
        }

        /* set the last desc indicator */
        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

        rc = ena_com_sq_update_tail(io_sq);
        if (unlikely(rc))
                return rc;

        rc = ena_com_close_bounce_buffer(io_sq);

        *nb_hw_desc = io_sq->tail - start_tail;
        return rc;
}

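/* Retrieve one received packet from the CQ: collect its completion
 * descriptors into ena_rx_ctx->ena_bufs, advance the matching SQ's
 * next_to_comp, and fill in the Rx flags from the last descriptor.
 * ena_rx_ctx->descs is 0 when no complete packet is available.
 */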
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
                   struct ena_com_io_sq *io_sq,
                   struct ena_com_rx_ctx *ena_rx_ctx)
{
        struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
        struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
        u16 cdesc_idx = 0;
        u16 nb_hw_desc;
        u16 i;

        WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

        nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
        if (nb_hw_desc == 0) {
                ena_rx_ctx->descs = nb_hw_desc;
                return 0;
        }

        pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
                 nb_hw_desc);

        if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
                pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
                       ena_rx_ctx->max_bufs);
                return -ENOSPC;
        }

        for (i = 0; i < nb_hw_desc; i++) {
                cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

                ena_buf->len = cdesc->length;
                ena_buf->req_id = cdesc->req_id;
                ena_buf++;
        }

        /* Update SQ head ptr */
        io_sq->next_to_comp += nb_hw_desc;

        pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
                 io_sq->next_to_comp);

        /* Get rx flags from the last pkt */
        ena_com_rx_set_flags(ena_rx_ctx, cdesc);

        ena_rx_ctx->descs = nb_hw_desc;
        return 0;
}

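/* Post a single Rx buffer to the SQ as a standalone (first and last)
 * descriptor. The caller is expected to ring the doorbell once a batch of
 * buffers has been added.
 */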
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
                               struct ena_com_buf *ena_buf,
                               u16 req_id)
{
        struct ena_eth_io_rx_desc *desc;

        WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

        if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
                return -ENOSPC;

        desc = get_sq_desc(io_sq);
        if (unlikely(!desc))
                return -EFAULT;

        memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

        desc->length = ena_buf->len;

        desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
        desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
        desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
        desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

        desc->req_id = req_id;

        desc->buff_addr_lo = (u32)ena_buf->paddr;
        desc->buff_addr_hi =
                ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

        return ena_com_sq_update_tail(io_sq);
}

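/* Return true when the CQ has no pending completion descriptor. */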
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
        struct ena_eth_io_rx_cdesc_base *cdesc;

        cdesc = ena_com_get_next_rx_cdesc(io_cq);
        if (cdesc)
                return false;
        else
                return true;
}