/* linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h */
   1/*
   2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33
  34#ifndef __MLX5E_EN_ACCEL_H__
  35#define __MLX5E_EN_ACCEL_H__
  36
  37#include <linux/skbuff.h>
  38#include <linux/netdevice.h>
  39#include "en_accel/ipsec_rxtx.h"
  40#include "en_accel/tls.h"
  41#include "en_accel/tls_rxtx.h"
  42#include "en.h"
  43#include "en/txrx.h"
  44
  45#if IS_ENABLED(CONFIG_GENEVE)
  46#include <net/geneve.h>
  47
/* GENEVE TX offload is usable only when the device supports software
 * parser (SWP) offsets in the TX descriptor.
 */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
        return mlx5_tx_swp_supported(mdev);
}
  52
/* Fill the SWP (software parser) offsets in the eth segment for a
 * GENEVE-encapsulated TX skb so HW can locate the inner headers.
 *
 * Silently returns (no SWP programmed) unless the outer packet is
 * IPv4/IPv6 over UDP destined to the GENEVE well-known port.
 *
 * @skb:  packet being transmitted
 * @eseg: eth segment of the TX WQE to fill
 * @ihs:  inline header size; when non-zero and a VLAN tag is present,
 *        the SWP offsets must be shifted to account for the tag
 */
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
        struct mlx5e_swp_spec swp_spec = {};
        unsigned int offset = 0;
        __be16 l3_proto;
        u8 l4_proto;

        /* Outer L3 protocol, VLAN-aware */
        l3_proto = vlan_get_protocol(skb);
        switch (l3_proto) {
        case htons(ETH_P_IP):
                l4_proto = ip_hdr(skb)->protocol;
                break;
        case htons(ETH_P_IPV6):
                /* Walk the IPv6 extension-header chain to the upper-layer
                 * protocol. NOTE(review): ipv6_find_hdr() returns int and can
                 * be negative on error; the value is truncated into u8 here,
                 * which can only make the IPPROTO_UDP test below fail — so the
                 * error path is benign, but worth confirming.
                 */
                l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
                break;
        default:
                return;
        }

        /* Only UDP-encapsulated GENEVE is recognized as an offloadable tunnel */
        if (l4_proto != IPPROTO_UDP ||
            udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
                return;
        swp_spec.l3_proto = l3_proto;
        swp_spec.l4_proto = l4_proto;
        swp_spec.is_tun = true;
        /* Inner L3/L4 protocols, read directly from the inner headers */
        if (inner_ip_hdr(skb)->version == 6) {
                swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
                swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
        } else {
                swp_spec.tun_l3_proto = htons(ETH_P_IP);
                swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
        }

        mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
        /* Inlined headers carrying a VLAN tag shift every offset by the
         * tag length; adjust the SWP offsets accordingly.
         */
        if (skb_vlan_tag_present(skb) && ihs)
                mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
  91
  92#else
/* GENEVE support compiled out: never allow GENEVE TX offload */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
        return false;
}
  97
  98#endif /* CONFIG_GENEVE */
  99
 100static inline void
 101mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 102{
 103        int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
 104
 105        udp_hdr(skb)->len = htons(payload_len);
 106}
 107
/* Per-packet TX accel state, filled by mlx5e_accel_tx_begin() and
 * consumed later in the TX path (mlx5e_accel_tx_finish() and friends).
 * Members exist only when the respective offload is compiled in.
 */
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
        /* TLS offload state for this skb */
        struct mlx5e_accel_tx_tls_state tls;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
        /* IPsec offload state; .xo and .tailen are consulted in
         * mlx5e_accel_tx_finish()
         */
        struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
};
 116
/* Run the TX accel handlers that must execute before the WQE is built.
 * Order matters: UDP GSO length fixup first, then TLS (which may itself
 * post SKBs/WQEs), then IPsec.
 *
 * Return: true to continue transmitting @skb; false when an offload
 * handler failed/took over — the caller must not transmit the skb.
 * NOTE(review): on the false path the skb is presumably owned/freed by
 * the failing handler; confirm in mlx5e_tls_handle_tx_skb() /
 * mlx5e_ipsec_handle_tx_skb().
 */
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
                                        struct mlx5e_txqsq *sq,
                                        struct sk_buff *skb,
                                        struct mlx5e_accel_tx_state *state)
{
        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                mlx5e_udp_gso_handle_tx_skb(skb);

#ifdef CONFIG_MLX5_EN_TLS
        /* May send SKBs and WQEs. */
        if (mlx5e_tls_skb_offloaded(skb))
                if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
                        return false;
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
        /* Only for SQs with IPsec enabled and skbs carrying xfrm state */
        if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
                if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
                        return false;
        }
#endif

        return true;
}
 141
 142static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
 143{
 144#ifdef CONFIG_MLX5_EN_IPSEC
 145        return mlx5e_ipsec_is_tx_flow(&state->ipsec);
 146#else
 147        return false;
 148#endif
 149}
 150
 151static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
 152                                                  struct mlx5e_accel_tx_state *state)
 153{
 154#ifdef CONFIG_MLX5_EN_IPSEC
 155        if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
 156                return mlx5e_ipsec_tx_ids_len(&state->ipsec);
 157#endif
 158
 159        return 0;
 160}
 161
 162/* Part of the eseg touched by TX offloads */
 163#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
 164
/* Fill the offload-related part of the eth segment (the span up to the
 * 'mss' field, see MLX5E_ACCEL_ESEG_LEN): IPsec fields first, then
 * GENEVE tunnel SWP offsets for encapsulated partial-checksum packets.
 */
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
                                       struct sk_buff *skb,
                                       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
        if (xfrm_offload(skb))
                mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
        if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
                mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
}
 179
/* Post-build fixups of the TX WQE: let TLS patch the ctrl segment and,
 * for IPsec packets with a trailer, hand the inline segment to the
 * IPsec handler.
 */
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
                                         struct mlx5e_tx_wqe *wqe,
                                         struct mlx5e_accel_tx_state *state,
                                         struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
        mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
        /* Only when IPsec is active on this SQ and the packet actually
         * carries xfrm offload state and a non-zero trailer length.
         */
        if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
            state->ipsec.xo && state->ipsec.tailen)
                mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}
 195
/* RX accel init: currently only kTLS RX needs per-priv setup.
 * Return: result of mlx5e_ktls_init_rx() (0 or -errno by kernel
 * convention — confirm in its definition).
 */
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
        return mlx5e_ktls_init_rx(priv);
}
 200
/* RX accel teardown: counterpart of mlx5e_accel_init_rx() */
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
        mlx5e_ktls_cleanup_rx(priv);
}
 205#endif /* __MLX5E_EN_ACCEL_H__ */
 206