linux/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/irq.h>
#include "en.h"
#include "en/xdp.h"
#include "en/xsk/tx.h"

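/* Check whether the CPU running this NAPI poll is still covered by the
 * channel IRQ's affinity mask, i.e. the IRQ affinity has not been changed
 * underneath us.
 */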
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
        int current_cpu = smp_processor_id();
        const struct cpumask *aff;
        struct irq_data *idata;

        idata = irq_desc_get_irq_data(c->irq_desc);
        aff = irq_data_get_affinity_mask(idata);
        return cpumask_test_cpu(current_cpu, aff);
}

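/* Feed the latest completion-event, packet and byte counters to the DIM
 * engine so it can adapt this SQ's interrupt moderation profile. Only done
 * when adaptive moderation (the AM state bit) is enabled;
 * mlx5e_handle_rx_dim() below does the same for the RQ.
 */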
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        struct dim_sample dim_sample = {};

        if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
                return;

        dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
        net_dim(&sq->dim, dim_sample);
}

static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
        struct mlx5e_rq_stats *stats = rq->stats;
        struct dim_sample dim_sample = {};

        if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
                return;

        dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
        net_dim(&rq->dim, dim_sample);
}

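/* Post a NOP WQE on the internal control SQ (ICOSQ) and ring its doorbell.
 * The resulting completion raises an interrupt on the channel once its CQ is
 * armed, which schedules NAPI again, e.g. on the CPU that now owns the
 * channel's IRQ.
 */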
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_tx_wqe *nopwqe;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
        nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

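/* Channel NAPI poll: reclaim TX and XDP completions, receive up to @budget
 * packets, replenish RX descriptors, and either keep polling (by returning
 * the full budget) while work remains or complete NAPI and re-arm the CQs.
 */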
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
        struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
                                               napi);
        struct mlx5e_ch_stats *ch_stats = c->stats;
        struct mlx5e_xdpsq *xsksq = &c->xsksq;
        struct mlx5e_rq *xskrq = &c->xskrq;
        struct mlx5e_rq *rq = &c->rq;
        bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
        bool aff_change = false;
        bool busy_xsk = false;
        bool busy = false;
        int work_done = 0;
        int i;

        ch_stats->poll++;

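        /* Reclaim completions on the per-TC send queues and the XDP
         * transmit queues.
         */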
        for (i = 0; i < c->num_tc; i++)
                busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

        busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);

        if (c->xdp)
                busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);

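        /* RX: poll the XSK RQ first when AF_XDP is enabled, then the regular
         * RQ, splitting the NAPI budget between them.
         */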
        if (likely(budget)) { /* budget=0 means: don't poll rx rings */
                if (xsk_open)
                        work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);

                if (likely(budget - work_done))
                        work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);

                busy |= work_done == budget;
        }

        mlx5e_poll_ico_cq(&c->icosq.cq);

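        /* Replenish RX descriptors; for XSK channels, also transmit pending
         * AF_XDP frames and refill the XSK RQ.
         */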
        busy |= rq->post_wqes(rq);
        if (xsk_open) {
                mlx5e_poll_ico_cq(&c->xskicosq.cq);
                busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
                busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
                busy_xsk |= xskrq->post_wqes(xskrq);
        }

        busy |= busy_xsk;

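        /* If work remains and the IRQ affinity is unchanged, keep polling by
         * returning the full budget. On an affinity change, fall through and
         * complete NAPI so polling can resume on the new CPU.
         */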
        if (busy) {
                if (likely(mlx5e_channel_no_affinity_change(c)))
                        return budget;
                ch_stats->aff_change++;
                aff_change = true;
                if (budget && work_done == budget)
                        work_done--;
        }

        if (unlikely(!napi_complete_done(napi, work_done)))
                return work_done;

        ch_stats->arm++;

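        /* NAPI is complete: update adaptive moderation with the latest
         * counters and re-arm every CQ so the next completion raises an
         * interrupt.
         */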
        for (i = 0; i < c->num_tc; i++) {
                mlx5e_handle_tx_dim(&c->sq[i]);
                mlx5e_cq_arm(&c->sq[i].cq);
        }

        mlx5e_handle_rx_dim(rq);

        mlx5e_cq_arm(&rq->cq);
        mlx5e_cq_arm(&c->icosq.cq);
        mlx5e_cq_arm(&c->xdpsq.cq);

        if (xsk_open) {
                mlx5e_handle_rx_dim(xskrq);
                mlx5e_cq_arm(&c->xskicosq.cq);
                mlx5e_cq_arm(&xsksq->cq);
                mlx5e_cq_arm(&xskrq->cq);
        }

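        /* XSK queues may still have pending work, but polling must move to
         * the CPU that now owns the IRQ: post a NOP on the ICOSQ to trigger
         * an interrupt there.
         */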
        if (unlikely(aff_change && busy_xsk)) {
                mlx5e_trigger_irq(&c->icosq);
                ch_stats->force_irq++;
        }

        return work_done;
}

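/* CQ completion event callback: count the event and schedule the channel's
 * NAPI context to process it.
 */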
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

        napi_schedule(cq->napi);
        cq->event_ctr++;
        cq->channel->stats->events++;
}

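/* CQ error event callback: the error is only reported; no recovery is
 * attempted here.
 */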
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
        struct mlx5e_channel *c = cq->channel;
        struct net_device *netdev = c->netdev;

        netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
                   __func__, mcq->cqn, event);
}