dpdk/drivers/net/ice/base/ice_flow.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2001-2021 Intel Corporation
   3 */
   4
   5#include "ice_common.h"
   6#include "ice_flow.h"
   7
   8/* Size of known protocol header fields */
   9#define ICE_FLOW_FLD_SZ_ETH_TYPE        2
  10#define ICE_FLOW_FLD_SZ_VLAN            2
  11#define ICE_FLOW_FLD_SZ_IPV4_ADDR       4
  12#define ICE_FLOW_FLD_SZ_IPV6_ADDR       16
  13#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
  14#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
  15#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
  16#define ICE_FLOW_FLD_SZ_IPV4_ID         2
  17#define ICE_FLOW_FLD_SZ_IPV6_ID         4
  18#define ICE_FLOW_FLD_SZ_IP_DSCP         1
  19#define ICE_FLOW_FLD_SZ_IP_TTL          1
  20#define ICE_FLOW_FLD_SZ_IP_PROT         1
  21#define ICE_FLOW_FLD_SZ_PORT            2
  22#define ICE_FLOW_FLD_SZ_TCP_FLAGS       1
  23#define ICE_FLOW_FLD_SZ_ICMP_TYPE       1
  24#define ICE_FLOW_FLD_SZ_ICMP_CODE       1
  25#define ICE_FLOW_FLD_SZ_ARP_OPER        2
  26#define ICE_FLOW_FLD_SZ_GRE_KEYID       4
  27#define ICE_FLOW_FLD_SZ_GTP_TEID        4
  28#define ICE_FLOW_FLD_SZ_GTP_QFI         2
  29#define ICE_FLOW_FLD_SZ_PPPOE_SESS_ID   2
  30#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
  31#define ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID  4
  32#define ICE_FLOW_FLD_SZ_ESP_SPI 4
  33#define ICE_FLOW_FLD_SZ_AH_SPI  4
  34#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI   4
  35#define ICE_FLOW_FLD_SZ_VXLAN_VNI       4
  36#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
  37
  38/* Describe properties of a protocol header field */
  39struct ice_flow_field_info {
  40        enum ice_flow_seg_hdr hdr;
  41        s16 off;        /* Offset from start of a protocol header, in bits */
  42        u16 size;       /* Size of the field in bits */
  43        u16 mask;       /* 16-bit mask for field */
  44};
  45
  46#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
  47        .hdr = _hdr, \
  48        .off = (_offset_bytes) * BITS_PER_BYTE, \
  49        .size = (_size_bytes) * BITS_PER_BYTE, \
  50        .mask = 0, \
  51}
  52
  53#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
  54        .hdr = _hdr, \
  55        .off = (_offset_bytes) * BITS_PER_BYTE, \
  56        .size = (_size_bytes) * BITS_PER_BYTE, \
  57        .mask = _mask, \
  58}
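
/* For example, with BITS_PER_BYTE of 8, the IPv4 source address entry
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR)
 * below expands to { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32,
 * .mask = 0 }.
 */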
  59
  60/* Table containing properties of supported protocol header fields */
  61static const
  62struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
  63        /* Ether */
  64        /* ICE_FLOW_FIELD_IDX_ETH_DA */
  65        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
  66        /* ICE_FLOW_FIELD_IDX_ETH_SA */
  67        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
  68        /* ICE_FLOW_FIELD_IDX_S_VLAN */
  69        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
  70        /* ICE_FLOW_FIELD_IDX_C_VLAN */
  71        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
  72        /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
  73        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
  74        /* IPv4 / IPv6 */
  75        /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
  76        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
  77                              0x00fc),
  78        /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
  79        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP,
  80                              0x0ff0),
  81        /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
  82        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
  83                              ICE_FLOW_FLD_SZ_IP_TTL, 0xff00),
  84        /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
  85        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8,
  86                              ICE_FLOW_FLD_SZ_IP_PROT, 0x00ff),
  87        /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
  88        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
  89                              ICE_FLOW_FLD_SZ_IP_TTL, 0x00ff),
  90        /* ICE_FLOW_FIELD_IDX_IPV6_PROT */
  91        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6,
  92                              ICE_FLOW_FLD_SZ_IP_PROT, 0xff00),
  93        /* ICE_FLOW_FIELD_IDX_IPV4_SA */
  94        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
  95        /* ICE_FLOW_FIELD_IDX_IPV4_DA */
  96        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
  97        /* ICE_FLOW_FIELD_IDX_IPV6_SA */
  98        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
  99        /* ICE_FLOW_FIELD_IDX_IPV6_DA */
 100        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
 101        /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
 102        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
 103                          ICE_FLOW_FLD_SZ_IPV4_ID),
 104        /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
 105        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
 106                          ICE_FLOW_FLD_SZ_IPV6_ID),
 107        /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
 108        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
 109                          ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
 110        /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
 111        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
 112                          ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
 113        /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
 114        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
 115                          ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
 116        /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
 117        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
 118                          ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
 119        /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
 120        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
 121                          ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
 122        /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
 123        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
 124                          ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
 125        /* Transport */
 126        /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
 127        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
 128        /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
 129        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
 130        /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
 131        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
 132        /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
 133        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
 134        /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
 135        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
 136        /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
 137        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
 138        /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
 139        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
 140        /* ARP */
 141        /* ICE_FLOW_FIELD_IDX_ARP_SIP */
 142        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
 143        /* ICE_FLOW_FIELD_IDX_ARP_DIP */
 144        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
 145        /* ICE_FLOW_FIELD_IDX_ARP_SHA */
 146        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
 147        /* ICE_FLOW_FIELD_IDX_ARP_DHA */
 148        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
 149        /* ICE_FLOW_FIELD_IDX_ARP_OP */
 150        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
 151        /* ICMP */
 152        /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
 153        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
 154        /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
 155        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
 156        /* GRE */
 157        /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
 158        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
 159        /* GTP */
 160        /* ICE_FLOW_FIELD_IDX_GTPC_TEID */
 161        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12,
 162                          ICE_FLOW_FLD_SZ_GTP_TEID),
 163        /* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
 164        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12,
 165                          ICE_FLOW_FLD_SZ_GTP_TEID),
 166        /* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
 167        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12,
 168                          ICE_FLOW_FLD_SZ_GTP_TEID),
 169        /* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
 170        ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22,
 171                              ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
 172        /* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
 173        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
 174                          ICE_FLOW_FLD_SZ_GTP_TEID),
 175        /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
 176        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
 177                          ICE_FLOW_FLD_SZ_GTP_TEID),
 178        /* PPPOE */
 179        /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
 180        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2,
 181                          ICE_FLOW_FLD_SZ_PPPOE_SESS_ID),
 182        /* PFCP */
 183        /* ICE_FLOW_FIELD_IDX_PFCP_SEID */
 184        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12,
 185                          ICE_FLOW_FLD_SZ_PFCP_SEID),
 186        /* L2TPV3 */
 187        /* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
 188        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0,
 189                          ICE_FLOW_FLD_SZ_L2TPV3_SESS_ID),
 190        /* ESP */
 191        /* ICE_FLOW_FIELD_IDX_ESP_SPI */
 192        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0,
 193                          ICE_FLOW_FLD_SZ_ESP_SPI),
 194        /* AH */
 195        /* ICE_FLOW_FIELD_IDX_AH_SPI */
 196        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4,
 197                          ICE_FLOW_FLD_SZ_AH_SPI),
 198        /* NAT_T_ESP */
 199        /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
 200        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
 201                          ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
 202        /* ICE_FLOW_FIELD_IDX_VXLAN_VNI */
 203        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VXLAN, 12,
 204                          ICE_FLOW_FLD_SZ_VXLAN_VNI),
 205        /* ECPRI_TP0 */
 206        /* ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID */
 207        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ECPRI_TP0, 4,
 208                          ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
 209        /* UDP_ECPRI_TP0 */
 210        /* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
 211        ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
 212                          ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
 213};
 214
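/* In each ice_ptypes_xxx bitmap below, a set bit at position P marks packet
 * type (PTYPE) P as relevant for the protocol header in question; the bitmaps
 * are ANDed together in ice_flow_proc_seg_hdrs() to find the PTYPEs matching
 * all headers of a flow profile's segments.
 */
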
 215/* Bitmaps indicating relevant packet types for a particular protocol header
 216 *
 217 * Packet types for packets with an Outer/First/Single MAC header
 218 */
 219static const u32 ice_ptypes_mac_ofos[] = {
 220        0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
 221        0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
 222        0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
 223        0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
 224        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 225        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 226        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 227        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 228};
 229
 230/* Packet types for packets with an Innermost/Last MAC VLAN header */
 231static const u32 ice_ptypes_macvlan_il[] = {
 232        0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
 233        0x0000077E, 0x00000000, 0x00000000, 0x00000000,
 234        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 235        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 236        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 237        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 238        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 239        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 240};
 241
 242/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 243 * does NOT include IPV4 other PTYPEs
 244 */
 245static const u32 ice_ptypes_ipv4_ofos[] = {
 246        0x1D800000, 0x24000800, 0x00000000, 0x00000000,
 247        0x00000000, 0x00000155, 0x00000000, 0x00000000,
 248        0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
 249        0x00001500, 0x00000000, 0x00000000, 0x00000000,
 250        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 251        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 252        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 253        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 254};
 255
 256/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 257 * includes IPV4 other PTYPEs
 258 */
 259static const u32 ice_ptypes_ipv4_ofos_all[] = {
 260        0x1D800000, 0x24000800, 0x00000000, 0x00000000,
 261        0x00000000, 0x00000155, 0x00000000, 0x00000000,
 262        0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
 263        0x03FFD500, 0x00000000, 0x00000000, 0x00000000,
 264        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 265        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 266        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 267        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 268};
 269
 270/* Packet types for packets with an Innermost/Last IPv4 header */
 271static const u32 ice_ptypes_ipv4_il[] = {
 272        0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
 273        0x0000000E, 0x00000000, 0x00000000, 0x00000000,
 274        0x00000000, 0x00000000, 0x001FF800, 0x00100000,
 275        0xFC0FC000, 0x00000000, 0x00000000, 0x00000000,
 276        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 277        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 278        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 279        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 280};
 281
 282/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 283 * does NOT include IPV6 other PTYPEs
 284 */
 285static const u32 ice_ptypes_ipv6_ofos[] = {
 286        0x00000000, 0x00000000, 0x76000000, 0x10002000,
 287        0x00000000, 0x000002AA, 0x00000000, 0x00000000,
 288        0x00000000, 0x03F00000, 0x00000540, 0x00000000,
 289        0x00002A00, 0x00000000, 0x00000000, 0x00000000,
 290        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 291        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 292        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 293        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 294};
 295
 296/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 297 * includes IPV6 other PTYPEs
 298 */
 299static const u32 ice_ptypes_ipv6_ofos_all[] = {
 300        0x00000000, 0x00000000, 0x76000000, 0x10002000,
 301        0x00000000, 0x000002AA, 0x00000000, 0x00000000,
 302        0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
 303        0xFC002A00, 0x0000003F, 0x00000000, 0x00000000,
 304        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 305        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 306        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 307        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 308};
 309
 310/* Packet types for packets with an Innermost/Last IPv6 header */
 311static const u32 ice_ptypes_ipv6_il[] = {
 312        0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
 313        0x00000770, 0x00000000, 0x00000000, 0x00000000,
 314        0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
 315        0x03F00000, 0x0000003F, 0x00000000, 0x00000000,
 316        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 317        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 318        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 319        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 320};
 321
 322/* Packet types for packets with an Outer/First/Single
 323 * non-frag IPv4 header - no L4
 324 */
 325static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
 326        0x10800000, 0x04000800, 0x00000000, 0x00000000,
 327        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 328        0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
 329        0x00001500, 0x00000000, 0x00000000, 0x00000000,
 330        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 331        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 332        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 333        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 334};
 335
 336/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
 337static const u32 ice_ptypes_ipv4_il_no_l4[] = {
 338        0x60000000, 0x18043008, 0x80000002, 0x6010c021,
 339        0x00000008, 0x00000000, 0x00000000, 0x00000000,
 340        0x00000000, 0x00000000, 0x00139800, 0x00000000,
 341        0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
 342        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 343        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 344        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 345        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 346};
 347
 348/* Packet types for packets with an Outer/First/Single
 349 * non-frag IPv6 header - no L4
 350 */
 351static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
 352        0x00000000, 0x00000000, 0x42000000, 0x10002000,
 353        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 354        0x00000000, 0x02300000, 0x00000540, 0x00000000,
 355        0x00002A00, 0x00000000, 0x00000000, 0x00000000,
 356        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 357        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 358        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 359        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 360};
 361
 362/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
 363static const u32 ice_ptypes_ipv6_il_no_l4[] = {
 364        0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
 365        0x00000430, 0x00000000, 0x00000000, 0x00000000,
 366        0x00000000, 0x00000000, 0x4e600000, 0x00000000,
 367        0x02300000, 0x00000023, 0x00000000, 0x00000000,
 368        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 369        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 370        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 371        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 372};
 373
 374/* Packet types for packets with an Outermost/First ARP header */
 375static const u32 ice_ptypes_arp_of[] = {
 376        0x00000800, 0x00000000, 0x00000000, 0x00000000,
 377        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 378        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 379        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 380        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 381        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 382        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 383        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 384};
 385
 386/* UDP Packet types for non-tunneled packets or tunneled
 387 * packets with inner UDP.
 388 */
 389static const u32 ice_ptypes_udp_il[] = {
 390        0x81000000, 0x20204040, 0x04000010, 0x80810102,
 391        0x00000040, 0x00000000, 0x00000000, 0x00000000,
 392        0x00000000, 0x00410000, 0x908427E0, 0x00100007,
 393        0x10410000, 0x00000004, 0x00000000, 0x00000000,
 394        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 395        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 396        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 397        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 398};
 399
 400/* Packet types for packets with an Innermost/Last TCP header */
 401static const u32 ice_ptypes_tcp_il[] = {
 402        0x04000000, 0x80810102, 0x10000040, 0x02040408,
 403        0x00000102, 0x00000000, 0x00000000, 0x00000000,
 404        0x00000000, 0x00820000, 0x21084000, 0x00000000,
 405        0x20820000, 0x00000008, 0x00000000, 0x00000000,
 406        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 407        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 408        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 409        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 410};
 411
 412/* Packet types for packets with an Innermost/Last SCTP header */
 413static const u32 ice_ptypes_sctp_il[] = {
 414        0x08000000, 0x01020204, 0x20000081, 0x04080810,
 415        0x00000204, 0x00000000, 0x00000000, 0x00000000,
 416        0x00000000, 0x01040000, 0x00000000, 0x00000000,
 417        0x41040000, 0x00000010, 0x00000000, 0x00000000,
 418        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 419        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 420        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 421        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 422};
 423
 424/* Packet types for packets with an Outermost/First ICMP header */
 425static const u32 ice_ptypes_icmp_of[] = {
 426        0x10000000, 0x00000000, 0x00000000, 0x00000000,
 427        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 428        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 429        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 430        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 431        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 432        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 433        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 434};
 435
 436/* Packet types for packets with an Innermost/Last ICMP header */
 437static const u32 ice_ptypes_icmp_il[] = {
 438        0x00000000, 0x02040408, 0x40000102, 0x08101020,
 439        0x00000408, 0x00000000, 0x00000000, 0x00000000,
 440        0x00000000, 0x00000000, 0x42108000, 0x00000000,
 441        0x82080000, 0x00000020, 0x00000000, 0x00000000,
 442        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 443        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 444        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 445        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 446};
 447
 448/* Packet types for packets with an Outermost/First GRE header */
 449static const u32 ice_ptypes_gre_of[] = {
 450        0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
 451        0x0000017E, 0x00000000, 0x00000000, 0x00000000,
 452        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 453        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 454        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 455        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 456        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 457        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 458};
 459
 460/* Packet types for packets with an Innermost/Last MAC header */
 461static const u32 ice_ptypes_mac_il[] = {
 462        0x00000000, 0x20000000, 0x00000000, 0x00000000,
 463        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 464        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 465        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 466        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 467        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 468        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 469        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 470};
 471
 472/* Packet types for GTPC */
 473static const u32 ice_ptypes_gtpc[] = {
 474        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 475        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 476        0x00000000, 0x00000000, 0x000001E0, 0x00000000,
 477        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 478        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 479        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 480        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 481        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 482};
 483
 484/* Packet types for VXLAN with VNI */
 485static const u32 ice_ptypes_vxlan_vni[] = {
 486        0x00000000, 0xBFBFF800, 0x00EFDFDF, 0xFEFDE000,
 487        0x03BF7F7E, 0x00000000, 0x00000000, 0x00000000,
 488        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 489        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 490        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 491        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 492        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 493        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 494};
 495
 496/* Packet types for GTPC with TEID */
 497static const u32 ice_ptypes_gtpc_tid[] = {
 498        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 499        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 500        0x00000000, 0x00000000, 0x00000060, 0x00000000,
 501        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 502        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 503        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 504        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 505        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 506};
 507
 508/* Packet types for GTPU */
 509static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
 510        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
 511        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
 512        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
 513        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
 514        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
 515        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
 516        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
 517        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
 518        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
 519        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_SESSION },
 520        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
 521        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
 522        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
 523        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
 524        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
 525        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_SESSION },
 526        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_SESSION },
 527        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
 528        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_SESSION },
 529        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_SESSION },
 530};
 531
 532static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
 533        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
 534        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
 535        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
 536        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
 537        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
 538        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
 539        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
 540        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
 541        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
 542        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_PDU_EH },
 543        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
 544        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
 545        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
 546        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
 547        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
 548        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_PDU_EH },
 549        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_PDU_EH },
 550        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
 551        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_PDU_EH },
 552        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_PDU_EH },
 553};
 554
 555static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
 556        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
 557        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
 558        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
 559        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
 560        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
 561        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
 562        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
 563        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
 564        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
 565        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
 566        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
 567        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
 568        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
 569        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
 570        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 571        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_DOWNLINK },
 572        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
 573        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
 574        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_DOWNLINK },
 575        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_DOWNLINK },
 576};
 577
 578static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
 579        { ICE_MAC_IPV4_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
 580        { ICE_MAC_IPV4_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
 581        { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
 582        { ICE_MAC_IPV4_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
 583        { ICE_MAC_IPV4_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
 584        { ICE_MAC_IPV6_GTPU_IPV4_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
 585        { ICE_MAC_IPV6_GTPU_IPV4_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
 586        { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
 587        { ICE_MAC_IPV6_GTPU_IPV4_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
 588        { ICE_MAC_IPV6_GTPU_IPV4_ICMP,    ICE_PTYPE_ATTR_GTP_UPLINK },
 589        { ICE_MAC_IPV4_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
 590        { ICE_MAC_IPV4_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
 591        { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
 592        { ICE_MAC_IPV4_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
 593        { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
 594        { ICE_MAC_IPV6_GTPU_IPV6_FRAG,    ICE_PTYPE_ATTR_GTP_UPLINK },
 595        { ICE_MAC_IPV6_GTPU_IPV6_PAY,     ICE_PTYPE_ATTR_GTP_UPLINK },
 596        { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
 597        { ICE_MAC_IPV6_GTPU_IPV6_TCP,     ICE_PTYPE_ATTR_GTP_UPLINK },
 598        { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6,  ICE_PTYPE_ATTR_GTP_UPLINK },
 599};
 600
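/* Packet types for GTPU, used for the GTPU IP/EH/UP/DWN header cases */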
 601static const u32 ice_ptypes_gtpu[] = {
 602        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 603        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 604        0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
 605        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 606        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 607        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 608        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 609        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 610};
 611
 612/* Packet types for PPPoE */
 613static const u32 ice_ptypes_pppoe[] = {
 614        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 615        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 616        0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
 617        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 618        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 619        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 620        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 621        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 622};
 623
 624/* Packet types for packets with PFCP NODE header */
 625static const u32 ice_ptypes_pfcp_node[] = {
 626        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 627        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 628        0x00000000, 0x00000000, 0x80000000, 0x00000002,
 629        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 630        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 631        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 632        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 633        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 634};
 635
 636/* Packet types for packets with PFCP SESSION header */
 637static const u32 ice_ptypes_pfcp_session[] = {
 638        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 639        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 640        0x00000000, 0x00000000, 0x00000000, 0x00000005,
 641        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 642        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 643        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 644        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 645        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 646};
 647
 648/* Packet types for L2TPv3 */
 649static const u32 ice_ptypes_l2tpv3[] = {
 650        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 651        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 652        0x00000000, 0x00000000, 0x00000000, 0x00000300,
 653        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 654        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 655        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 656        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 657        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 658};
 659
 660/* Packet types for ESP */
 661static const u32 ice_ptypes_esp[] = {
 662        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 663        0x00000000, 0x00000003, 0x00000000, 0x00000000,
 664        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 665        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 666        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 667        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 668        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 669        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 670};
 671
 672/* Packet types for AH */
 673static const u32 ice_ptypes_ah[] = {
 674        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 675        0x00000000, 0x0000000C, 0x00000000, 0x00000000,
 676        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 677        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 678        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 679        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 680        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 681        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 682};
 683
 684/* Packet types for packets with NAT_T ESP header */
 685static const u32 ice_ptypes_nat_t_esp[] = {
 686        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 687        0x00000000, 0x00000030, 0x00000000, 0x00000000,
 688        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 689        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 690        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 691        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 692        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 693        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 694};
 695
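/* Packet types for non-IP packets with an Outer/First/Single MAC header */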
 696static const u32 ice_ptypes_mac_non_ip_ofos[] = {
 697        0x00000846, 0x00000000, 0x00000000, 0x00000000,
 698        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 699        0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
 700        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 701        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 702        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 703        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 704        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 705};
 706
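/* Packet types for GTPU without an inner IP header (GTPU non-IP) */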
 707static const u32 ice_ptypes_gtpu_no_ip[] = {
 708        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 709        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 710        0x00000000, 0x00000000, 0x00000600, 0x00000000,
 711        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 712        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 713        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 714        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 715        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 716};
 717
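/* Packet types for eCPRI TP0 */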
 718static const u32 ice_ptypes_ecpri_tp0[] = {
 719        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 720        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 721        0x00000000, 0x00000000, 0x00000000, 0x00000400,
 722        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 723        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 724        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 725        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 726        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 727};
 728
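/* Packet types for eCPRI TP0 over UDP */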
 729static const u32 ice_ptypes_udp_ecpri_tp0[] = {
 730        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 731        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 732        0x00000000, 0x00000000, 0x00000000, 0x00100000,
 733        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 734        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 735        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 736        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 737        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 738};
 739
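/* Packet types for L2TPv2 */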
 740static const u32 ice_ptypes_l2tpv2[] = {
 741        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 742        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 743        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 744        0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
 745        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 746        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 747        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 748        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 749};
 750
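/* Packet types for PPP */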
 751static const u32 ice_ptypes_ppp[] = {
 752        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 753        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 754        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 755        0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
 756        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 757        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 758        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 759        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 760};
 761
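/* Packet types for fragmented IPv4 packets */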
 762static const u32 ice_ptypes_ipv4_frag[] = {
 763        0x00400000, 0x00000000, 0x00000000, 0x00000000,
 764        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 765        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 766        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 767        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 768        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 769        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 770        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 771};
 772
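/* Packet types for fragmented IPv6 packets */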
 773static const u32 ice_ptypes_ipv6_frag[] = {
 774        0x00000000, 0x00000000, 0x01000000, 0x00000000,
 775        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 776        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 777        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 778        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 779        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 780        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 781        0x00000000, 0x00000000, 0x00000000, 0x00000000,
 782};
 783
 784/* Manage parameters and info used during the creation of a flow profile */
 785struct ice_flow_prof_params {
 786        enum ice_block blk;
 787        u16 entry_length; /* # of bytes a formatted entry will require */
 788        u8 es_cnt;
 789        struct ice_flow_prof *prof;
 790
 791        /* For ACL, es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0,
 792         * which will give us the direction flags.
 793         */
 794        struct ice_fv_word es[ICE_MAX_FV_WORDS];
 795        /* optional attributes to be applied to particular PTYPEs */
 796        const struct ice_ptype_attributes *attr;
 797        u16 attr_cnt;
 798
 799        u16 mask[ICE_MAX_FV_WORDS];
 800        ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
 801};
 802
 803#define ICE_FLOW_RSS_HDRS_INNER_MASK \
 804        (ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
 805        ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
 806        ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
 807        ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
 808        ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
 809        ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
 810        ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP)
 811
 812#define ICE_FLOW_SEG_HDRS_L2_MASK       \
 813        (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
 814#define ICE_FLOW_SEG_HDRS_L3_MASK       \
 815        (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
 816         ICE_FLOW_SEG_HDR_ARP)
 817#define ICE_FLOW_SEG_HDRS_L4_MASK       \
 818        (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
 819         ICE_FLOW_SEG_HDR_SCTP)
 820/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
 821#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER      \
 822        (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
 823
 824/**
 825 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 826 * @segs: array of one or more packet segments that describe the flow
 827 * @segs_cnt: number of packet segments provided
 828 */
 829static enum ice_status
 830ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
 831{
 832        u8 i;
 833
 834        for (i = 0; i < segs_cnt; i++) {
 835                /* Multiple L3 headers */
 836                if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
 837                    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
 838                        return ICE_ERR_PARAM;
 839
 840                /* Multiple L4 headers */
 841                if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
 842                    !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
 843                        return ICE_ERR_PARAM;
 844        }
 845
 846        return ICE_SUCCESS;
 847}
 848
 849/* Sizes of fixed known protocol headers without header options */
 850#define ICE_FLOW_PROT_HDR_SZ_MAC        14
 851#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN   (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
 852#define ICE_FLOW_PROT_HDR_SZ_IPV4       20
 853#define ICE_FLOW_PROT_HDR_SZ_IPV6       40
 854#define ICE_FLOW_PROT_HDR_SZ_ARP        28
 855#define ICE_FLOW_PROT_HDR_SZ_ICMP       8
 856#define ICE_FLOW_PROT_HDR_SZ_TCP        20
 857#define ICE_FLOW_PROT_HDR_SZ_UDP        8
 858#define ICE_FLOW_PROT_HDR_SZ_SCTP       12
 859
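/* For example, a segment with VLAN, IPv4 and TCP headers would be sized by
 * ice_flow_calc_seg_sz() below as ICE_FLOW_PROT_HDR_SZ_MAC_VLAN +
 * ICE_FLOW_PROT_HDR_SZ_IPV4 + ICE_FLOW_PROT_HDR_SZ_TCP = 16 + 20 + 20 = 56
 * bytes.
 */
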
 860/**
 861 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 862 * @params: information about the flow to be processed
 863 * @seg: index of packet segment whose header size is to be determined
 864 */
 865static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
 866{
 867        u16 sz;
 868
 869        /* L2 headers */
 870        sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
 871                ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
 872
 873        /* L3 headers */
 874        if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
 875                sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
 876        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
 877                sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
 878        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
 879                sz += ICE_FLOW_PROT_HDR_SZ_ARP;
 880        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
 881                /* An L3 header is required if an L4 header is specified */
 882                return 0;
 883
 884        /* L4 headers */
 885        if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
 886                sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
 887        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
 888                sz += ICE_FLOW_PROT_HDR_SZ_TCP;
 889        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
 890                sz += ICE_FLOW_PROT_HDR_SZ_UDP;
 891        else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
 892                sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
 893
 894        return sz;
 895}
 896
 897/**
 898 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 899 * @params: information about the flow to be processed
 900 *
 901 * This function identifies the packet types associated with the protocol
 902 * headers present in the packet segments of the specified flow profile.
 903 */
 904static enum ice_status
 905ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
 906{
 907        struct ice_flow_prof *prof;
 908        u8 i;
 909
 910        ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
 911                   ICE_NONDMA_MEM);
 912
 913        prof = params->prof;
 914
 915        for (i = 0; i < params->prof->segs_cnt; i++) {
 916                const ice_bitmap_t *src;
 917                u32 hdrs;
 918
 919                hdrs = prof->segs[i].hdrs;
 920
 921                if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
 922                        src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
 923                                (const ice_bitmap_t *)ice_ptypes_mac_il;
 924                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 925                                       ICE_FLOW_PTYPE_MAX);
 926                }
 927
 928                if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
 929                        src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
 930                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 931                                       ICE_FLOW_PTYPE_MAX);
 932                }
 933
 934                if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
 935                        ice_and_bitmap(params->ptypes, params->ptypes,
 936                                       (const ice_bitmap_t *)ice_ptypes_arp_of,
 937                                       ICE_FLOW_PTYPE_MAX);
 938                }
 939
 940                if (hdrs & ICE_FLOW_SEG_HDR_ECPRI_TP0) {
 941                        src = (const ice_bitmap_t *)ice_ptypes_ecpri_tp0;
 942                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 943                                       ICE_FLOW_PTYPE_MAX);
 944                }
 945                if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
 946                    (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
 947                        src = i ?
 948                                (const ice_bitmap_t *)ice_ptypes_ipv4_il :
 949                                (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
 950                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 951                                       ICE_FLOW_PTYPE_MAX);
 952                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
 953                           (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
 954                        src = i ?
 955                                (const ice_bitmap_t *)ice_ptypes_ipv6_il :
 956                                (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
 957                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 958                                       ICE_FLOW_PTYPE_MAX);
 959                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
 960                                (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
 961                        src = (const ice_bitmap_t *)ice_ptypes_ipv4_frag;
 962                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 963                                       ICE_FLOW_PTYPE_MAX);
 964                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
 965                                (hdrs & ICE_FLOW_SEG_HDR_IPV_FRAG)) {
 966                        src = (const ice_bitmap_t *)ice_ptypes_ipv6_frag;
 967                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 968                                       ICE_FLOW_PTYPE_MAX);
 969                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
 970                           !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
 971                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
 972                                (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
 973                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 974                                       ICE_FLOW_PTYPE_MAX);
 975                } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
 976                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
 977                                (const ice_bitmap_t *)ice_ptypes_ipv4_il;
 978                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 979                                       ICE_FLOW_PTYPE_MAX);
 980                } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
 981                           !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
 982                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
 983                                (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
 984                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 985                                       ICE_FLOW_PTYPE_MAX);
 986                } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
 987                        src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
 988                                (const ice_bitmap_t *)ice_ptypes_ipv6_il;
 989                        ice_and_bitmap(params->ptypes, params->ptypes, src,
 990                                       ICE_FLOW_PTYPE_MAX);
 991                }
 992
 993                if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
 994                        src = (const ice_bitmap_t *)ice_ptypes_mac_non_ip_ofos;
 995                        ice_and_bitmap(params->ptypes, params->ptypes,
 996                                       src, ICE_FLOW_PTYPE_MAX);
 997                } else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
 998                        src = (const ice_bitmap_t *)ice_ptypes_pppoe;
 999                        ice_and_bitmap(params->ptypes, params->ptypes, src,
1000                                       ICE_FLOW_PTYPE_MAX);
1001                } else {
1002                        src = (const ice_bitmap_t *)ice_ptypes_pppoe;
1003                        ice_andnot_bitmap(params->ptypes, params->ptypes, src,
1004                                          ICE_FLOW_PTYPE_MAX);
1005                }
1006
1007                if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
1008                        src = (const ice_bitmap_t *)ice_ptypes_udp_il;
1009                        ice_and_bitmap(params->ptypes, params->ptypes, src,
1010                                       ICE_FLOW_PTYPE_MAX);
1011                } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
1012                        ice_and_bitmap(params->ptypes, params->ptypes,
1013                                       (const ice_bitmap_t *)ice_ptypes_tcp_il,
1014                                       ICE_FLOW_PTYPE_MAX);
1015                } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
1016                        src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
1017                        ice_and_bitmap(params->ptypes, params->ptypes, src,
1018                                       ICE_FLOW_PTYPE_MAX);
1019                }
1020
1021                if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
1022                        src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
1023                                (const ice_bitmap_t *)ice_ptypes_icmp_il;
1024                        ice_and_bitmap(params->ptypes, params->ptypes, src,
1025                                       ICE_FLOW_PTYPE_MAX);
1026                } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
1027                        if (!i) {
1028                                src = (const ice_bitmap_t *)ice_ptypes_gre_of;
1029                                ice_and_bitmap(params->ptypes, params->ptypes,
1030                                               src, ICE_FLOW_PTYPE_MAX);
1031                        }
1032                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
1033                        src = (const ice_bitmap_t *)ice_ptypes_gtpc;
1034                        ice_and_bitmap(params->ptypes, params->ptypes,
1035                                       src, ICE_FLOW_PTYPE_MAX);
1036                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
1037                        src = (const ice_bitmap_t *)ice_ptypes_gtpc_tid;
1038                        ice_and_bitmap(params->ptypes, params->ptypes,
1039                                       src, ICE_FLOW_PTYPE_MAX);
1040                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_NON_IP) {
1041                        src = (const ice_bitmap_t *)ice_ptypes_gtpu_no_ip;
1042                        ice_and_bitmap(params->ptypes, params->ptypes,
1043                                       src, ICE_FLOW_PTYPE_MAX);
1044                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
1045                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1046                        ice_and_bitmap(params->ptypes, params->ptypes,
1047                                       src, ICE_FLOW_PTYPE_MAX);
1048
1049                        /* Attributes for GTP packet with downlink */
1050                        params->attr = ice_attr_gtpu_down;
1051                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
1052                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
1053                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1054                        ice_and_bitmap(params->ptypes, params->ptypes,
1055                                       src, ICE_FLOW_PTYPE_MAX);
1056
1057                        /* Attributes for GTP packet with uplink */
1058                        params->attr = ice_attr_gtpu_up;
1059                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
1060                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
1061                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1062                        ice_and_bitmap(params->ptypes, params->ptypes,
1063                                       src, ICE_FLOW_PTYPE_MAX);
1064
1065                        /* Attributes for GTP packet with Extension Header */
1066                        params->attr = ice_attr_gtpu_eh;
1067                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
1068                } else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
1069                        src = (const ice_bitmap_t *)ice_ptypes_gtpu;
1070                        ice_and_bitmap(params->ptypes, params->ptypes,
1071                                       src, ICE_FLOW_PTYPE_MAX);
1072
1073                        /* Attributes for GTP packet without Extension Header */
1074                        params->attr = ice_attr_gtpu_session;
1075                        params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
1076                } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV2) {
1077                        src = (const ice_bitmap_t *)ice_ptypes_l2tpv2;
1078                        ice_and_bitmap(params->ptypes, params->ptypes,
1079                                       src, ICE_FLOW_PTYPE_MAX);
1080                } else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
1081                        src = (const ice_bitmap_t *)ice_ptypes_l2tpv3;
1082                        ice_and_bitmap(params->ptypes, params->ptypes,
1083                                       src, ICE_FLOW_PTYPE_MAX);
1084                } else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
1085                        src = (const ice_bitmap_t *)ice_ptypes_esp;
1086                        ice_and_bitmap(params->ptypes, params->ptypes,
1087                                       src, ICE_FLOW_PTYPE_MAX);
1088                } else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
1089                        src = (const ice_bitmap_t *)ice_ptypes_ah;
1090                        ice_and_bitmap(params->ptypes, params->ptypes,
1091                                       src, ICE_FLOW_PTYPE_MAX);
1092                } else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
1093                        src = (const ice_bitmap_t *)ice_ptypes_nat_t_esp;
1094                        ice_and_bitmap(params->ptypes, params->ptypes,
1095                                       src, ICE_FLOW_PTYPE_MAX);
1096                } else if (hdrs & ICE_FLOW_SEG_HDR_VXLAN) {
1097                        src = (const ice_bitmap_t *)ice_ptypes_vxlan_vni;
1098                        ice_and_bitmap(params->ptypes, params->ptypes,
1099                                       src, ICE_FLOW_PTYPE_MAX);
1100                } else if (hdrs & ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0) {
1101                        src = (const ice_bitmap_t *)ice_ptypes_udp_ecpri_tp0;
1102                        ice_and_bitmap(params->ptypes, params->ptypes,
1103                                       src, ICE_FLOW_PTYPE_MAX);
1104                }
1105
1106                if (hdrs & ICE_FLOW_SEG_HDR_PPP) {
1107                        src = (const ice_bitmap_t *)ice_ptypes_ppp;
1108                        ice_and_bitmap(params->ptypes, params->ptypes,
1109                                       src, ICE_FLOW_PTYPE_MAX);
1110                }
1111
1112                if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
1113                        if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
1114                                src =
1115                                (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1116                        else
1117                                src =
1118                                (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1119
1120                        ice_and_bitmap(params->ptypes, params->ptypes,
1121                                       src, ICE_FLOW_PTYPE_MAX);
1122                } else {
1123                        src = (const ice_bitmap_t *)ice_ptypes_pfcp_node;
1124                        ice_andnot_bitmap(params->ptypes, params->ptypes,
1125                                          src, ICE_FLOW_PTYPE_MAX);
1126
1127                        src = (const ice_bitmap_t *)ice_ptypes_pfcp_session;
1128                        ice_andnot_bitmap(params->ptypes, params->ptypes,
1129                                          src, ICE_FLOW_PTYPE_MAX);
1130                }
1131        }
1132
1133        return ICE_SUCCESS;
1134}
1135
1136/**
1137 * ice_flow_xtract_pkt_flags - Create an extraction sequence entry for packet flags
1138 * @hw: pointer to the HW struct
1139 * @params: information about the flow to be processed
1140 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
1141 *
1142 * This function allocates an extraction sequence entry for a DWORD-size
1143 * chunk of the packet flags.
1144 */
1145static enum ice_status
1146ice_flow_xtract_pkt_flags(struct ice_hw *hw,
1147                          struct ice_flow_prof_params *params,
1148                          enum ice_flex_mdid_pkt_flags flags)
1149{
1150        u8 fv_words = hw->blk[params->blk].es.fvw;
1151        u8 idx;
1152
1153        /* Make sure the number of extraction sequence entries required does not
1154         * exceed the block's capacity.
1155         */
1156        if (params->es_cnt >= fv_words)
1157                return ICE_ERR_MAX_LIMIT;
1158
1159        /* some blocks require a reversed field vector layout */
1160        if (hw->blk[params->blk].es.reverse)
1161                idx = fv_words - params->es_cnt - 1;
1162        else
1163                idx = params->es_cnt;
1164
1165        params->es[idx].prot_id = ICE_PROT_META_ID;
1166        params->es[idx].off = flags;
1167        params->es_cnt++;
1168
1169        return ICE_SUCCESS;
1170}
1171
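/* Illustrative note (not part of the driver): the reversed-layout handling
 * above simply places the packet-flags entry at the opposite end of the field
 * vector. Assuming, for example, fv_words == 48 and this being the first
 * entry (es_cnt == 0):
 *
 *	reverse == true:  idx = 48 - 0 - 1 = 47
 *	reverse == false: idx = 0
 *
 * Either way the entry extracts ICE_PROT_META_ID at the offset encoded by the
 * requested pkt_flags chunk.
 */
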
1172/**
1173 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
1174 * @hw: pointer to the HW struct
1175 * @params: information about the flow to be processed
1176 * @seg: packet segment index of the field to be extracted
1177 * @fld: ID of field to be extracted
1178 * @match: bit field of all fields matched in the given packet segment
1179 *
1180 * This function determines the protocol ID, offset, and size of the given
1181 * field. It then allocates one or more extraction sequence entries for the
1182 * given field, and fills the entries with protocol ID and offset information.
1183 */
1184static enum ice_status
1185ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
1186                    u8 seg, enum ice_flow_field fld, u64 match)
1187{
1188        enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
1189        enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
1190        u8 fv_words = hw->blk[params->blk].es.fvw;
1191        struct ice_flow_fld_info *flds;
1192        u16 cnt, ese_bits, i;
1193        u16 sib_mask = 0;
1194        u16 mask;
1195        u16 off;
1196
1197        flds = params->prof->segs[seg].fields;
1198
1199        switch (fld) {
1200        case ICE_FLOW_FIELD_IDX_ETH_DA:
1201        case ICE_FLOW_FIELD_IDX_ETH_SA:
1202        case ICE_FLOW_FIELD_IDX_S_VLAN:
1203        case ICE_FLOW_FIELD_IDX_C_VLAN:
1204                prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
1205                break;
1206        case ICE_FLOW_FIELD_IDX_ETH_TYPE:
1207                prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
1208                break;
1209        case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
1210                prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1211                break;
1212        case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
1213                prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1214                break;
1215        case ICE_FLOW_FIELD_IDX_IPV4_TTL:
1216        case ICE_FLOW_FIELD_IDX_IPV4_PROT:
1217                prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1218
1219                /* TTL and PROT share the same extraction seq. entry.
1220                 * Each is considered a sibling to the other in terms of sharing
1221                 * the same extraction sequence entry.
1222                 */
1223                if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
1224                        sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
1225                else
1226                        sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
1227
1228                /* If the sibling field is also included, that field's
1229                 * mask needs to be included.
1230                 */
1231                if (match & BIT(sib))
1232                        sib_mask = ice_flds_info[sib].mask;
1233                break;
1234        case ICE_FLOW_FIELD_IDX_IPV6_TTL:
1235        case ICE_FLOW_FIELD_IDX_IPV6_PROT:
1236                prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1237
1238                /* TTL and PROT share the same extraction seq. entry.
1239                 * Each is considered a sibling to the other in terms of sharing
1240                 * the same extraction sequence entry.
1241                 */
1242                if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
1243                        sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
1244                else
1245                        sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
1246
1247                /* If the sibling field is also included, that field's
1248                 * mask needs to be included.
1249                 */
1250                if (match & BIT(sib))
1251                        sib_mask = ice_flds_info[sib].mask;
1252                break;
1253        case ICE_FLOW_FIELD_IDX_IPV4_SA:
1254        case ICE_FLOW_FIELD_IDX_IPV4_DA:
1255                prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
1256                break;
1257        case ICE_FLOW_FIELD_IDX_IPV4_ID:
1258                prot_id = ICE_PROT_IPV4_OF_OR_S;
1259                break;
1260        case ICE_FLOW_FIELD_IDX_IPV6_SA:
1261        case ICE_FLOW_FIELD_IDX_IPV6_DA:
1262        case ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA:
1263        case ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA:
1264        case ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA:
1265        case ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA:
1266        case ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA:
1267        case ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA:
1268                prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
1269                break;
1270        case ICE_FLOW_FIELD_IDX_IPV6_ID:
1271                prot_id = ICE_PROT_IPV6_FRAG;
1272                break;
1273        case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
1274        case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
1275        case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
1276                prot_id = ICE_PROT_TCP_IL;
1277                break;
1278        case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
1279        case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
1280                prot_id = ICE_PROT_UDP_IL_OR_S;
1281                break;
1282        case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
1283        case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
1284                prot_id = ICE_PROT_SCTP_IL;
1285                break;
1286        case ICE_FLOW_FIELD_IDX_VXLAN_VNI:
1287        case ICE_FLOW_FIELD_IDX_GTPC_TEID:
1288        case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
1289        case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
1290        case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
1291        case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
1292        case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
1293                /* GTP is accessed through UDP OF protocol */
1294                prot_id = ICE_PROT_UDP_OF;
1295                break;
1296        case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
1297                prot_id = ICE_PROT_PPPOE;
1298                break;
1299        case ICE_FLOW_FIELD_IDX_PFCP_SEID:
1300                prot_id = ICE_PROT_UDP_IL_OR_S;
1301                break;
1302        case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
1303                prot_id = ICE_PROT_L2TPV3;
1304                break;
1305        case ICE_FLOW_FIELD_IDX_ESP_SPI:
1306                prot_id = ICE_PROT_ESP_F;
1307                break;
1308        case ICE_FLOW_FIELD_IDX_AH_SPI:
1309                prot_id = ICE_PROT_ESP_2;
1310                break;
1311        case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
1312                prot_id = ICE_PROT_UDP_IL_OR_S;
1313                break;
1314        case ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID:
1315                prot_id = ICE_PROT_ECPRI;
1316                break;
1317        case ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID:
1318                prot_id = ICE_PROT_UDP_IL_OR_S;
1319                break;
1320        case ICE_FLOW_FIELD_IDX_ARP_SIP:
1321        case ICE_FLOW_FIELD_IDX_ARP_DIP:
1322        case ICE_FLOW_FIELD_IDX_ARP_SHA:
1323        case ICE_FLOW_FIELD_IDX_ARP_DHA:
1324        case ICE_FLOW_FIELD_IDX_ARP_OP:
1325                prot_id = ICE_PROT_ARP_OF;
1326                break;
1327        case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
1328        case ICE_FLOW_FIELD_IDX_ICMP_CODE:
1329                /* ICMP type and code share the same extraction seq. entry */
1330                prot_id = (params->prof->segs[seg].hdrs &
1331                           ICE_FLOW_SEG_HDR_IPV4) ?
1332                        ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
1333                sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
1334                        ICE_FLOW_FIELD_IDX_ICMP_CODE :
1335                        ICE_FLOW_FIELD_IDX_ICMP_TYPE;
1336                break;
1337        case ICE_FLOW_FIELD_IDX_GRE_KEYID:
1338                prot_id = ICE_PROT_GRE_OF;
1339                break;
1340        default:
1341                return ICE_ERR_NOT_IMPL;
1342        }
1343
1344        /* Each extraction sequence entry is a word in size, and it extracts a
1345         * word from a word-aligned offset within a protocol header.
1346         */
1347        ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
1348
1349        flds[fld].xtrct.prot_id = prot_id;
1350        flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
1351                ICE_FLOW_FV_EXTRACT_SZ;
1352        flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
1353        flds[fld].xtrct.idx = params->es_cnt;
1354        flds[fld].xtrct.mask = ice_flds_info[fld].mask;
1355
1356        /* Determine the number of word-sized extraction sequence entries this
1357         * field consumes, accounting for its bit displacement within a word
1358         */
1359        cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
1360                                  ice_flds_info[fld].size, ese_bits);
1361
1362        /* Fill in the extraction sequence entries needed for this field */
1363        off = flds[fld].xtrct.off;
1364        mask = flds[fld].xtrct.mask;
1365        for (i = 0; i < cnt; i++) {
1366                /* Only consume an extraction sequence entry if there is no
1367                 * sibling field associated with this field, or the sibling entry
1368                 * does not already extract the word shared with this field.
1369                 */
1370                if (sib == ICE_FLOW_FIELD_IDX_MAX ||
1371                    flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
1372                    flds[sib].xtrct.off != off) {
1373                        u8 idx;
1374
1375                        /* Make sure the number of extraction sequence entries
1376                         * required does not exceed the block's capacity
1377                         */
1378                        if (params->es_cnt >= fv_words)
1379                                return ICE_ERR_MAX_LIMIT;
1380
1381                        /* some blocks require a reversed field vector layout */
1382                        if (hw->blk[params->blk].es.reverse)
1383                                idx = fv_words - params->es_cnt - 1;
1384                        else
1385                                idx = params->es_cnt;
1386
1387                        params->es[idx].prot_id = prot_id;
1388                        params->es[idx].off = off;
1389                        params->mask[idx] = mask | sib_mask;
1390                        params->es_cnt++;
1391                }
1392
1393                off += ICE_FLOW_FV_EXTRACT_SZ;
1394        }
1395
1396        return ICE_SUCCESS;
1397}
1398
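/* Worked example (illustrative only): with word-sized extraction entries,
 * ese_bits is 16. For a hypothetical 1-byte field located at bit offset 72
 * (byte 9) of its header:
 *
 *	xtrct.off  = (72 / 16) * ICE_FLOW_FV_EXTRACT_SZ = 8
 *	xtrct.disp = 72 % 16                            = 8
 *	cnt        = DIVIDE_AND_ROUND_UP(8 + 8, 16)     = 1
 *
 * so a single entry covering header bytes 8-9 serves the field, with the
 * value sitting in the second byte of that word; the 8-bit displacement and
 * the per-entry mask account for that.
 */
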
1399/**
1400 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
1401 * @hw: pointer to the HW struct
1402 * @params: information about the flow to be processed
1403 * @seg: index of packet segment whose raw fields are to be extracted
1404 */
1405static enum ice_status
1406ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
1407                     u8 seg)
1408{
1409        u16 fv_words;
1410        u16 hdrs_sz;
1411        u8 i;
1412
1413        if (!params->prof->segs[seg].raws_cnt)
1414                return ICE_SUCCESS;
1415
1416        if (params->prof->segs[seg].raws_cnt >
1417            ARRAY_SIZE(params->prof->segs[seg].raws))
1418                return ICE_ERR_MAX_LIMIT;
1419
1420        /* Offsets within the segment headers are not supported */
1421        hdrs_sz = ice_flow_calc_seg_sz(params, seg);
1422        if (!hdrs_sz)
1423                return ICE_ERR_PARAM;
1424
1425        fv_words = hw->blk[params->blk].es.fvw;
1426
1427        for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
1428                struct ice_flow_seg_fld_raw *raw;
1429                u16 off, cnt, j;
1430
1431                raw = &params->prof->segs[seg].raws[i];
1432
1433                /* Storing extraction information */
1434                raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
1435                raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
1436                        ICE_FLOW_FV_EXTRACT_SZ;
1437                raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
1438                        BITS_PER_BYTE;
1439                raw->info.xtrct.idx = params->es_cnt;
1440
1441                /* Determine the number of field vector entries this raw field
1442                 * consumes.
1443                 */
1444                cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
1445                                          (raw->info.src.last * BITS_PER_BYTE),
1446                                          (ICE_FLOW_FV_EXTRACT_SZ *
1447                                           BITS_PER_BYTE));
1448                off = raw->info.xtrct.off;
1449                for (j = 0; j < cnt; j++) {
1450                        u16 idx;
1451
1452                        /* Make sure the number of extraction sequence entries
1453                         * required does not exceed the block's capacity
1454                         */
1455                        if (params->es_cnt >= hw->blk[params->blk].es.count ||
1456                            params->es_cnt >= ICE_MAX_FV_WORDS)
1457                                return ICE_ERR_MAX_LIMIT;
1458
1459                        /* some blocks require a reversed field vector layout */
1460                        if (hw->blk[params->blk].es.reverse)
1461                                idx = fv_words - params->es_cnt - 1;
1462                        else
1463                                idx = params->es_cnt;
1464
1465                        params->es[idx].prot_id = raw->info.xtrct.prot_id;
1466                        params->es[idx].off = off;
1467                        params->es_cnt++;
1468                        off += ICE_FLOW_FV_EXTRACT_SZ;
1469                }
1470        }
1471
1472        return ICE_SUCCESS;
1473}
1474
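/* Worked example (illustrative only): a raw match starting at byte offset 3
 * of the segment with src.last == 4 bytes is recorded as
 *
 *	xtrct.off  = (3 / ICE_FLOW_FV_EXTRACT_SZ) * ICE_FLOW_FV_EXTRACT_SZ = 2
 *	xtrct.disp = (3 % ICE_FLOW_FV_EXTRACT_SZ) * BITS_PER_BYTE          = 8
 *	cnt        = DIVIDE_AND_ROUND_UP(8 + 32, 16)                       = 3
 *
 * i.e. three field vector words are consumed to cover the four matched bytes
 * because the match is not word-aligned.
 */
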
1475/**
1476 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
1477 * @hw: pointer to the HW struct
1478 * @params: information about the flow to be processed
1479 *
1480 * This function iterates through all matched fields in the given segments, and
1481 * creates an extraction sequence for the fields.
1482 */
1483static enum ice_status
1484ice_flow_create_xtrct_seq(struct ice_hw *hw,
1485                          struct ice_flow_prof_params *params)
1486{
1487        enum ice_status status = ICE_SUCCESS;
1488        u8 i;
1489
1490        /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
1491         * packet flags
1492         */
1493        if (params->blk == ICE_BLK_ACL) {
1494                status = ice_flow_xtract_pkt_flags(hw, params,
1495                                                   ICE_RX_MDID_PKT_FLAGS_15_0);
1496                if (status)
1497                        return status;
1498        }
1499
1500        for (i = 0; i < params->prof->segs_cnt; i++) {
1501                u64 match = params->prof->segs[i].match;
1502                enum ice_flow_field j;
1503
1504                ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
1505                                     ICE_FLOW_FIELD_IDX_MAX) {
1506                        status = ice_flow_xtract_fld(hw, params, i, j, match);
1507                        if (status)
1508                                return status;
1509                        ice_clear_bit(j, (ice_bitmap_t *)&match);
1510                }
1511
1512                /* Process raw matching bytes */
1513                status = ice_flow_xtract_raws(hw, params, i);
1514                if (status)
1515                        return status;
1516        }
1517
1518        return status;
1519}
1520
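/* Illustrative sketch (not part of the driver): the extraction sequence built
 * above is driven entirely by what the caller placed in each segment's hdrs
 * and match members, e.g. roughly
 *
 *	struct ice_flow_seg_info seg = { 0 };
 *
 *	seg.hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP;
 *	seg.match = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *		    BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
 *
 * Real callers are expected to go through the field-setting helpers declared
 * in ice_flow.h rather than poking match bits directly, since those helpers
 * also record where each field's value and mask live in the entry data.
 */
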
1521/**
1522 * ice_flow_sel_acl_scen - select the ACL scenario best fitting a profile
1523 * @hw: pointer to the hardware structure
1524 * @params: information about the flow to be processed
1525 *
1526 * This function selects the scenario with the smallest effective width
1527 * that can still accommodate the profile's entry length.
1528 */
1529static enum ice_status
1530ice_flow_sel_acl_scen(struct ice_hw *hw, struct ice_flow_prof_params *params)
1531{
1532        /* Find the best-fit scenario for the provided match width */
1533        struct ice_acl_scen *cand_scen = NULL, *scen;
1534
1535        if (!hw->acl_tbl)
1536                return ICE_ERR_DOES_NOT_EXIST;
1537
1538        /* Loop through each scenario and pick the one with the smallest
1539         * effective width that still fits the entry length
1540         */
1541        LIST_FOR_EACH_ENTRY(scen, &hw->acl_tbl->scens, ice_acl_scen, list_entry)
1542                if (scen->eff_width >= params->entry_length &&
1543                    (!cand_scen || cand_scen->eff_width > scen->eff_width))
1544                        cand_scen = scen;
1545        if (!cand_scen)
1546                return ICE_ERR_DOES_NOT_EXIST;
1547
1548        params->prof->cfg.scen = cand_scen;
1549
1550        return ICE_SUCCESS;
1551}
1552
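/* Illustrative example: if the ACL table holds scenarios with effective
 * widths of, say, 24, 32 and 64 bytes and the profile's entry_length is 30,
 * the loop above settles on the 32-byte scenario, the narrowest one that can
 * still hold the entry. If no scenario is wide enough, the function returns
 * ICE_ERR_DOES_NOT_EXIST.
 */
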
1553/**
1554 * ice_flow_acl_def_entry_frmt - Determine the layout of flow entries
1555 * @params: information about the flow to be processed
1556 */
1557static enum ice_status
1558ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
1559{
1560        u16 index, i, range_idx = 0;
1561
1562        index = ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1563
1564        for (i = 0; i < params->prof->segs_cnt; i++) {
1565                struct ice_flow_seg_info *seg = &params->prof->segs[i];
1566                u8 j;
1567
1568                ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
1569                                     ICE_FLOW_FIELD_IDX_MAX) {
1570                        struct ice_flow_fld_info *fld = &seg->fields[j];
1571
1572                        fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1573
1574                        if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
1575                                fld->entry.last = ICE_FLOW_FLD_OFF_INVAL;
1576
1577                                /* Range checking only supported for single
1578                                 * words
1579                                 */
1580                                if (DIVIDE_AND_ROUND_UP(ice_flds_info[j].size +
1581                                                        fld->xtrct.disp,
1582                                                        BITS_PER_BYTE * 2) > 1)
1583                                        return ICE_ERR_PARAM;
1584
1585                                /* Ranges must define low and high values */
1586                                if (fld->src.val == ICE_FLOW_FLD_OFF_INVAL ||
1587                                    fld->src.last == ICE_FLOW_FLD_OFF_INVAL)
1588                                        return ICE_ERR_PARAM;
1589
1590                                fld->entry.val = range_idx++;
1591                        } else {
1592                                /* Store adjusted byte-length of field for later
1593                                 * use, taking into account potential
1594                                 * non-byte-aligned displacement
1595                                 */
1596                                fld->entry.last = DIVIDE_AND_ROUND_UP
1597                                        (ice_flds_info[j].size +
1598                                         (fld->xtrct.disp % BITS_PER_BYTE),
1599                                         BITS_PER_BYTE);
1600                                fld->entry.val = index;
1601                                index += fld->entry.last;
1602                        }
1603                }
1604
1605                for (j = 0; j < seg->raws_cnt; j++) {
1606                        struct ice_flow_seg_fld_raw *raw = &seg->raws[j];
1607
1608                        raw->info.entry.mask = ICE_FLOW_FLD_OFF_INVAL;
1609                        raw->info.entry.val = index;
1610                        raw->info.entry.last = raw->info.src.last;
1611                        index += raw->info.entry.last;
1612                }
1613        }
1614
1615        /* Currently only the byte selection base is supported, which limits
1616         * the effective entry size to 30 bytes. Reject anything
1617         * larger.
1618         */
1619        if (index > ICE_AQC_ACL_PROF_BYTE_SEL_ELEMS)
1620                return ICE_ERR_PARAM;
1621
1622        /* Only 8 range checkers are available per profile; reject anything
1623         * trying to use more
1624         */
1625        if (range_idx > ICE_AQC_ACL_PROF_RANGES_NUM_CFG)
1626                return ICE_ERR_PARAM;
1627
1628        /* Store # bytes required for entry for later use */
1629        params->entry_length = index - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
1630
1631        return ICE_SUCCESS;
1632}
1633
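/* Worked example (illustrative only): with the byte-selection index starting
 * at ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX, a 4-byte non-range field with a
 * byte-aligned displacement is laid out as entry.val = <current index> and
 * entry.last = 4, and the next field starts four bytes later. A range-checked
 * field instead takes one of the range-checker slots (entry.val = range_idx)
 * and consumes no byte-selection bytes. The resulting entry_length is simply
 * the total number of byte-selection bytes used across all segments.
 */
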
1634/**
1635 * ice_flow_proc_segs - process all packet segments associated with a profile
1636 * @hw: pointer to the HW struct
1637 * @params: information about the flow to be processed
1638 */
1639static enum ice_status
1640ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
1641{
1642        enum ice_status status;
1643
1644        status = ice_flow_proc_seg_hdrs(params);
1645        if (status)
1646                return status;
1647
1648        status = ice_flow_create_xtrct_seq(hw, params);
1649        if (status)
1650                return status;
1651
1652        switch (params->blk) {
1653        case ICE_BLK_FD:
1654        case ICE_BLK_RSS:
1655                status = ICE_SUCCESS;
1656                break;
1657        case ICE_BLK_ACL:
1658                status = ice_flow_acl_def_entry_frmt(params);
1659                if (status)
1660                        return status;
1661                status = ice_flow_sel_acl_scen(hw, params);
1662                if (status)
1663                        return status;
1664                break;
1665        default:
1666                return ICE_ERR_NOT_IMPL;
1667        }
1668
1669        return status;
1670}
1671
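/* Note: the processing order above is fixed. Segment headers are first
 * resolved into a PTYPE bitmap, then the extraction sequence is built, and
 * only the ACL block additionally derives the entry layout and selects a
 * scenario; FD and RSS profiles are complete once the extraction sequence
 * exists.
 */
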
1672#define ICE_FLOW_FIND_PROF_CHK_FLDS     0x00000001
1673#define ICE_FLOW_FIND_PROF_CHK_VSI      0x00000002
1674#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR  0x00000004
1675
1676/**
1677 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
1678 * @hw: pointer to the HW struct
1679 * @blk: classification stage
1680 * @dir: flow direction
1681 * @segs: array of one or more packet segments that describe the flow
1682 * @segs_cnt: number of packet segments provided
1683 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
1684 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
1685 */
1686static struct ice_flow_prof *
1687ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
1688                         enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
1689                         u8 segs_cnt, u16 vsi_handle, u32 conds)
1690{
1691        struct ice_flow_prof *p, *prof = NULL;
1692
1693        ice_acquire_lock(&hw->fl_profs_locks[blk]);
1694        LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1695                if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
1696                    segs_cnt && segs_cnt == p->segs_cnt) {
1697                        u8 i;
1698
1699                        /* Check for profile-VSI association if specified */
1700                        if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
1701                            ice_is_vsi_valid(hw, vsi_handle) &&
1702                            !ice_is_bit_set(p->vsis, vsi_handle))
1703                                continue;
1704
1705                        /* Protocol headers must be checked. Matched fields are
1706                         * checked if specified.
1707                         */
1708                        for (i = 0; i < segs_cnt; i++)
1709                                if (segs[i].hdrs != p->segs[i].hdrs ||
1710                                    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
1711                                     segs[i].match != p->segs[i].match))
1712                                        break;
1713
1714                        /* A match is found if all segments are matched */
1715                        if (i == segs_cnt) {
1716                                prof = p;
1717                                break;
1718                        }
1719                }
1720        ice_release_lock(&hw->fl_profs_locks[blk]);
1721
1722        return prof;
1723}
1724
1725/**
1726 * ice_flow_find_prof - Look up a profile matching headers and matched fields
1727 * @hw: pointer to the HW struct
1728 * @blk: classification stage
1729 * @dir: flow direction
1730 * @segs: array of one or more packet segments that describe the flow
1731 * @segs_cnt: number of packet segments provided
1732 */
1733u64
1734ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
1735                   struct ice_flow_seg_info *segs, u8 segs_cnt)
1736{
1737        struct ice_flow_prof *p;
1738
1739        p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
1740                                     ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
1741
1742        return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
1743}
1744
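/* Usage sketch (illustrative only; "dir", "segs" and "segs_cnt" are assumed
 * to have been prepared by the caller): a typical caller checks for an
 * existing profile before creating a new one, e.g.
 *
 *	u64 id = ice_flow_find_prof(hw, ICE_BLK_RSS, dir, segs, segs_cnt);
 *	bool exists = (id != ICE_FLOW_PROF_ID_INVAL);
 *
 * Only the headers and matched fields are compared; the profile ID itself
 * plays no part in the lookup.
 */
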
1745/**
1746 * ice_flow_find_prof_id - Look up a profile with given profile ID
1747 * @hw: pointer to the HW struct
1748 * @blk: classification stage
1749 * @prof_id: unique ID to identify this flow profile
1750 */
1751static struct ice_flow_prof *
1752ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
1753{
1754        struct ice_flow_prof *p;
1755
1756        LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
1757                if (p->id == prof_id)
1758                        return p;
1759
1760        return NULL;
1761}
1762
1763/**
1764 * ice_dealloc_flow_entry - Deallocate flow entry memory
1765 * @hw: pointer to the HW struct
1766 * @entry: flow entry to be removed
1767 */
1768static void
1769ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
1770{
1771        if (!entry)
1772                return;
1773
1774        if (entry->entry)
1775                ice_free(hw, entry->entry);
1776
1777        if (entry->range_buf) {
1778                ice_free(hw, entry->range_buf);
1779                entry->range_buf = NULL;
1780        }
1781
1782        if (entry->acts) {
1783                ice_free(hw, entry->acts);
1784                entry->acts = NULL;
1785                entry->acts_cnt = 0;
1786        }
1787
1788        ice_free(hw, entry);
1789}
1790
1791/**
1792 * ice_flow_get_hw_prof - return the HW profile ID for a specific profile ID handle
1793 * @hw: pointer to the HW struct
1794 * @blk: classification stage
1795 * @prof_id: the profile ID handle
1796 * @hw_prof_id: pointer to variable to receive the HW profile ID
1797 */
1798enum ice_status
1799ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
1800                     u8 *hw_prof_id)
1801{
1802        enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
1803        struct ice_prof_map *map;
1804
1805        ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
1806        map = ice_search_prof_id(hw, blk, prof_id);
1807        if (map) {
1808                *hw_prof_id = map->prof_id;
1809                status = ICE_SUCCESS;
1810        }
1811        ice_release_lock(&hw->blk[blk].es.prof_map_lock);
1812        return status;
1813}
1814
1815#define ICE_ACL_INVALID_SCEN    0x3f
1816
1817/**
1818 * ice_flow_acl_is_prof_in_use - Verify if the profile is associated with any PF
1819 * @hw: pointer to the hardware structure
1820 * @prof: pointer to flow profile
1821 * @buf: destination buffer the function writes the partial extraction sequence to
1822 *
1823 * returns ICE_SUCCESS if no PF is associated with the given profile
1824 * returns ICE_ERR_IN_USE if at least one PF is associated with the given profile
1825 * returns another error code on a real error
1826 */
1827static enum ice_status
1828ice_flow_acl_is_prof_in_use(struct ice_hw *hw, struct ice_flow_prof *prof,
1829                            struct ice_aqc_acl_prof_generic_frmt *buf)
1830{
1831        enum ice_status status;
1832        u8 prof_id = 0;
1833
1834        status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1835        if (status)
1836                return status;
1837
1838        status = ice_query_acl_prof(hw, prof_id, buf, NULL);
1839        if (status)
1840                return status;
1841
1842        /* If the scenarios associated with all PFs are either all 0 or all
1843         * ICE_ACL_INVALID_SCEN (63) for the given profile, then the profile
1844         * has not been configured yet.
1845         */
1846        if (buf->pf_scenario_num[0] == 0 && buf->pf_scenario_num[1] == 0 &&
1847            buf->pf_scenario_num[2] == 0 && buf->pf_scenario_num[3] == 0 &&
1848            buf->pf_scenario_num[4] == 0 && buf->pf_scenario_num[5] == 0 &&
1849            buf->pf_scenario_num[6] == 0 && buf->pf_scenario_num[7] == 0)
1850                return ICE_SUCCESS;
1851
1852        if (buf->pf_scenario_num[0] == ICE_ACL_INVALID_SCEN &&
1853            buf->pf_scenario_num[1] == ICE_ACL_INVALID_SCEN &&
1854            buf->pf_scenario_num[2] == ICE_ACL_INVALID_SCEN &&
1855            buf->pf_scenario_num[3] == ICE_ACL_INVALID_SCEN &&
1856            buf->pf_scenario_num[4] == ICE_ACL_INVALID_SCEN &&
1857            buf->pf_scenario_num[5] == ICE_ACL_INVALID_SCEN &&
1858            buf->pf_scenario_num[6] == ICE_ACL_INVALID_SCEN &&
1859            buf->pf_scenario_num[7] == ICE_ACL_INVALID_SCEN)
1860                return ICE_SUCCESS;
1861
1862        return ICE_ERR_IN_USE;
1863}
1864
1865/**
1866 * ice_flow_acl_free_act_cntr - Free the ACL counters used by the rule's actions
1867 * @hw: pointer to the hardware structure
1868 * @acts: array of actions to be performed on a match
1869 * @acts_cnt: number of actions
1870 */
1871static enum ice_status
1872ice_flow_acl_free_act_cntr(struct ice_hw *hw, struct ice_flow_action *acts,
1873                           u8 acts_cnt)
1874{
1875        int i;
1876
1877        for (i = 0; i < acts_cnt; i++) {
1878                if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
1879                    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
1880                    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
1881                        struct ice_acl_cntrs cntrs = { 0 };
1882                        enum ice_status status;
1883
1884                        /* amount is unused in the dealloc path but the common
1885                         * parameter check routine wants a value set, as zero
1886                         * is invalid for the check. Just set it.
1887                         */
1888                        cntrs.amount = 1;
1889                        cntrs.bank = 0; /* Only bank0 for the moment */
1890                        cntrs.first_cntr =
1891                                        LE16_TO_CPU(acts[i].data.acl_act.value);
1892                        cntrs.last_cntr =
1893                                        LE16_TO_CPU(acts[i].data.acl_act.value);
1894
1895                        if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
1896                                cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
1897                        else
1898                                cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
1899
1900                        status = ice_aq_dealloc_acl_cntrs(hw, &cntrs, NULL);
1901                        if (status)
1902                                return status;
1903                }
1904        }
1905        return ICE_SUCCESS;
1906}
1907
1908/**
1909 * ice_flow_acl_disassoc_scen - Disassociate the scenario from the profile
1910 * @hw: pointer to the hardware structure
1911 * @prof: pointer to flow profile
1912 *
1913 * Disassociate the scenario from the profile for the current PF.
1914 */
1915static enum ice_status
1916ice_flow_acl_disassoc_scen(struct ice_hw *hw, struct ice_flow_prof *prof)
1917{
1918        struct ice_aqc_acl_prof_generic_frmt buf;
1919        enum ice_status status = ICE_SUCCESS;
1920        u8 prof_id = 0;
1921
1922        ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
1923
1924        status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
1925        if (status)
1926                return status;
1927
1928        status = ice_query_acl_prof(hw, prof_id, &buf, NULL);
1929        if (status)
1930                return status;
1931
1932        /* Clear scenario for this PF */
1933        buf.pf_scenario_num[hw->pf_id] = ICE_ACL_INVALID_SCEN;
1934        status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
1935
1936        return status;
1937}
1938
1939/**
1940 * ice_flow_rem_entry_sync - Remove a flow entry
1941 * @hw: pointer to the HW struct
1942 * @blk: classification stage
1943 * @entry: flow entry to be removed
1944 */
1945static enum ice_status
1946ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block blk,
1947                        struct ice_flow_entry *entry)
1948{
1949        if (!entry)
1950                return ICE_ERR_BAD_PTR;
1951
1952        if (blk == ICE_BLK_ACL) {
1953                enum ice_status status;
1954
1955                if (!entry->prof)
1956                        return ICE_ERR_BAD_PTR;
1957
1958                status = ice_acl_rem_entry(hw, entry->prof->cfg.scen,
1959                                           entry->scen_entry_idx);
1960                if (status)
1961                        return status;
1962
1963                /* Release any ACL counters used by this entry's actions. */
1964                if (entry->acts_cnt && entry->acts)
1965                        ice_flow_acl_free_act_cntr(hw, entry->acts,
1966                                                   entry->acts_cnt);
1967        }
1968
1969        LIST_DEL(&entry->l_entry);
1970
1971        ice_dealloc_flow_entry(hw, entry);
1972
1973        return ICE_SUCCESS;
1974}
1975
1976/**
1977 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
1978 * @hw: pointer to the HW struct
1979 * @blk: classification stage
1980 * @dir: flow direction
1981 * @prof_id: unique ID to identify this flow profile
1982 * @segs: array of one or more packet segments that describe the flow
1983 * @segs_cnt: number of packet segments provided
1984 * @acts: array of default actions
1985 * @acts_cnt: number of default actions
1986 * @prof: stores the returned flow profile added
1987 *
1988 * Assumption: the caller has acquired the lock to the profile list
1989 */
1990static enum ice_status
1991ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
1992                       enum ice_flow_dir dir, u64 prof_id,
1993                       struct ice_flow_seg_info *segs, u8 segs_cnt,
1994                       struct ice_flow_action *acts, u8 acts_cnt,
1995                       struct ice_flow_prof **prof)
1996{
1997        struct ice_flow_prof_params *params;
1998        enum ice_status status;
1999        u8 i;
2000
2001        if (!prof || (acts_cnt && !acts))
2002                return ICE_ERR_BAD_PTR;
2003
2004        params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
2005        if (!params)
2006                return ICE_ERR_NO_MEMORY;
2007
2008        params->prof = (struct ice_flow_prof *)
2009                ice_malloc(hw, sizeof(*params->prof));
2010        if (!params->prof) {
2011                status = ICE_ERR_NO_MEMORY;
2012                goto free_params;
2013        }
2014
2015        /* initialize extraction sequence to all invalid (0xff) */
2016        for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
2017                params->es[i].prot_id = ICE_PROT_INVALID;
2018                params->es[i].off = ICE_FV_OFFSET_INVAL;
2019        }
2020
2021        params->blk = blk;
2022        params->prof->id = prof_id;
2023        params->prof->dir = dir;
2024        params->prof->segs_cnt = segs_cnt;
2025
2026        /* Make a copy of the segments that need to be persistent in the flow
2027         * profile instance
2028         */
2029        for (i = 0; i < segs_cnt; i++)
2030                ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
2031                           ICE_NONDMA_TO_NONDMA);
2032
2033        /* Make a copy of the actions that need to be persistent in the flow
2034         * profile instance.
2035         */
2036        if (acts_cnt) {
2037                params->prof->acts = (struct ice_flow_action *)
2038                        ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2039                                   ICE_NONDMA_TO_NONDMA);
2040
2041                if (!params->prof->acts) {
2042                        status = ICE_ERR_NO_MEMORY;
2043                        goto out;
2044                }
2045        }
2046
2047        status = ice_flow_proc_segs(hw, params);
2048        if (status) {
2049                ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
2050                goto out;
2051        }
2052
2053        /* Add a HW profile for this flow profile */
2054        status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
2055                              params->attr, params->attr_cnt, params->es,
2056                              params->mask);
2057        if (status) {
2058                ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
2059                goto out;
2060        }
2061
2062        INIT_LIST_HEAD(&params->prof->entries);
2063        ice_init_lock(&params->prof->entries_lock);
2064        *prof = params->prof;
2065
2066out:
2067        if (status) {
2068                if (params->prof->acts)
2069                        ice_free(hw, params->prof->acts);
2070                ice_free(hw, params->prof);
2071        }
2072free_params:
2073        ice_free(hw, params);
2074
2075        return status;
2076}
2077
2078/**
2079 * ice_flow_rem_prof_sync - remove a flow profile
2080 * @hw: pointer to the hardware structure
2081 * @blk: classification stage
2082 * @prof: pointer to flow profile to remove
2083 *
2084 * Assumption: the caller has acquired the lock to the profile list
2085 */
2086static enum ice_status
2087ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
2088                       struct ice_flow_prof *prof)
2089{
2090        enum ice_status status;
2091
2092        /* Remove all remaining flow entries before removing the flow profile */
2093        if (!LIST_EMPTY(&prof->entries)) {
2094                struct ice_flow_entry *e, *t;
2095
2096                ice_acquire_lock(&prof->entries_lock);
2097
2098                LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
2099                                         l_entry) {
2100                        status = ice_flow_rem_entry_sync(hw, blk, e);
2101                        if (status)
2102                                break;
2103                }
2104
2105                ice_release_lock(&prof->entries_lock);
2106        }
2107
2108        if (blk == ICE_BLK_ACL) {
2109                struct ice_aqc_acl_profile_ranges query_rng_buf;
2110                struct ice_aqc_acl_prof_generic_frmt buf;
2111                u8 prof_id = 0;
2112
2113                /* Disassociate the scenario from the profile for the PF */
2114                status = ice_flow_acl_disassoc_scen(hw, prof);
2115                if (status)
2116                        return status;
2117
2118                /* Clear the range-checker if the profile ID is no longer
2119                 * used by any PF
2120                 */
2121                status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2122                if (status && status != ICE_ERR_IN_USE) {
2123                        return status;
2124                } else if (!status) {
2125                        /* Clear the range-checker value for profile ID */
2126                        ice_memset(&query_rng_buf, 0,
2127                                   sizeof(struct ice_aqc_acl_profile_ranges),
2128                                   ICE_NONDMA_MEM);
2129
2130                        status = ice_flow_get_hw_prof(hw, blk, prof->id,
2131                                                      &prof_id);
2132                        if (status)
2133                                return status;
2134
2135                        status = ice_prog_acl_prof_ranges(hw, prof_id,
2136                                                          &query_rng_buf, NULL);
2137                        if (status)
2138                                return status;
2139                }
2140        }
2141
2142        /* Remove all hardware profiles associated with this flow profile */
2143        status = ice_rem_prof(hw, blk, prof->id);
2144        if (!status) {
2145                LIST_DEL(&prof->l_entry);
2146                ice_destroy_lock(&prof->entries_lock);
2147                if (prof->acts)
2148                        ice_free(hw, prof->acts);
2149                ice_free(hw, prof);
2150        }
2151
2152        return status;
2153}
2154
2155/**
2156 * ice_flow_acl_set_xtrct_seq_fld - Populate the xtrct sequence for one field
2157 * @buf: destination buffer the function writes the partial xtrct sequence to
2158 * @info: info about the field
2159 */
2160static void
2161ice_flow_acl_set_xtrct_seq_fld(struct ice_aqc_acl_prof_generic_frmt *buf,
2162                               struct ice_flow_fld_info *info)
2163{
2164        u16 dst, i;
2165        u8 src;
2166
2167        src = info->xtrct.idx * ICE_FLOW_FV_EXTRACT_SZ +
2168                info->xtrct.disp / BITS_PER_BYTE;
2169        dst = info->entry.val;
2170        for (i = 0; i < info->entry.last; i++)
2171                /* HW stores field vector words in LE, convert words back to BE
2172                 * so constructed entries will end up in network order
2173                 */
2174                buf->byte_selection[dst++] = src++ ^ 1;
2175}
2176
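/* Worked example (illustrative only): because field vector words are stored
 * little-endian, the "src++ ^ 1" above swaps the two bytes of every word.
 * For a field occupying extraction bytes 10..13 the byte selection becomes
 *
 *	byte_selection[dst + 0] = 11
 *	byte_selection[dst + 1] = 10
 *	byte_selection[dst + 2] = 13
 *	byte_selection[dst + 3] = 12
 *
 * which restores network byte order in the constructed ACL entry.
 */
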
2177/**
2178 * ice_flow_acl_set_xtrct_seq - Program ACL extraction sequence
2179 * @hw: pointer to the hardware structure
2180 * @prof: pointer to flow profile
2181 */
2182static enum ice_status
2183ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
2184{
2185        struct ice_aqc_acl_prof_generic_frmt buf;
2186        struct ice_flow_fld_info *info;
2187        enum ice_status status;
2188        u8 prof_id = 0;
2189        u16 i;
2190
2191        ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2192
2193        status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2194        if (status)
2195                return status;
2196
2197        status = ice_flow_acl_is_prof_in_use(hw, prof, &buf);
2198        if (status && status != ICE_ERR_IN_USE)
2199                return status;
2200
2201        if (!status) {
2202                /* Program the profile-dependent configuration. This is done
2203                 * only once regardless of the number of PFs using that profile
2204                 */
2205                ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
2206
2207                for (i = 0; i < prof->segs_cnt; i++) {
2208                        struct ice_flow_seg_info *seg = &prof->segs[i];
2209                        u16 j;
2210
2211                        ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2212                                             ICE_FLOW_FIELD_IDX_MAX) {
2213                                info = &seg->fields[j];
2214
2215                                if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2216                                        buf.word_selection[info->entry.val] =
2217                                                info->xtrct.idx;
2218                                else
2219                                        ice_flow_acl_set_xtrct_seq_fld(&buf,
2220                                                                       info);
2221                        }
2222
2223                        for (j = 0; j < seg->raws_cnt; j++) {
2224                                info = &seg->raws[j].info;
2225                                ice_flow_acl_set_xtrct_seq_fld(&buf, info);
2226                        }
2227                }
2228
2229                ice_memset(&buf.pf_scenario_num[0], ICE_ACL_INVALID_SCEN,
2230                           ICE_AQC_ACL_PROF_PF_SCEN_NUM_ELEMS,
2231                           ICE_NONDMA_MEM);
2232        }
2233
2234        /* Update the current PF */
2235        buf.pf_scenario_num[hw->pf_id] = (u8)prof->cfg.scen->id;
2236        status = ice_prgm_acl_prof_xtrct(hw, prof_id, &buf, NULL);
2237
2238        return status;
2239}
2240
2241/**
2242 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
2243 * @hw: pointer to the hardware structure
2244 * @blk: classification stage
2245 * @vsi_handle: software VSI handle
2246 * @vsig: target VSI group
2247 *
2248 * Assumption: the caller has already verified that the VSI to
2249 * be added has the same characteristics as the VSIG and will
2250 * thereby have access to all resources added to that VSIG.
2251 */
2252enum ice_status
2253ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
2254                        u16 vsig)
2255{
2256        enum ice_status status;
2257
2258        if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
2259                return ICE_ERR_PARAM;
2260
2261        ice_acquire_lock(&hw->fl_profs_locks[blk]);
2262        status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
2263                                  vsig);
2264        ice_release_lock(&hw->fl_profs_locks[blk]);
2265
2266        return status;
2267}
2268
2269/**
2270 * ice_flow_assoc_prof - associate a VSI with a flow profile
2271 * @hw: pointer to the hardware structure
2272 * @blk: classification stage
2273 * @prof: pointer to flow profile
2274 * @vsi_handle: software VSI handle
2275 *
2276 * Assumption: the caller has acquired the lock to the profile list
2277 * and the software VSI handle has been validated
2278 */
2279enum ice_status
2280ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
2281                    struct ice_flow_prof *prof, u16 vsi_handle)
2282{
2283        enum ice_status status = ICE_SUCCESS;
2284
2285        if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
2286                if (blk == ICE_BLK_ACL) {
2287                        status = ice_flow_acl_set_xtrct_seq(hw, prof);
2288                        if (status)
2289                                return status;
2290                }
2291                status = ice_add_prof_id_flow(hw, blk,
2292                                              ice_get_hw_vsi_num(hw,
2293                                                                 vsi_handle),
2294                                              prof->id);
2295                if (!status)
2296                        ice_set_bit(vsi_handle, prof->vsis);
2297                else
2298                        ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
2299                                  status);
2300        }
2301
2302        return status;
2303}
2304
2305/**
2306 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
2307 * @hw: pointer to the hardware structure
2308 * @blk: classification stage
2309 * @prof: pointer to flow profile
2310 * @vsi_handle: software VSI handle
2311 *
2312 * Assumption: the caller has acquired the lock to the profile list
2313 * and the software VSI handle has been validated
2314 */
2315static enum ice_status
2316ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
2317                       struct ice_flow_prof *prof, u16 vsi_handle)
2318{
2319        enum ice_status status = ICE_SUCCESS;
2320
2321        if (ice_is_bit_set(prof->vsis, vsi_handle)) {
2322                status = ice_rem_prof_id_flow(hw, blk,
2323                                              ice_get_hw_vsi_num(hw,
2324                                                                 vsi_handle),
2325                                              prof->id);
2326                if (!status)
2327                        ice_clear_bit(vsi_handle, prof->vsis);
2328                else
2329                        ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
2330                                  status);
2331        }
2332
2333        return status;
2334}
2335
2336/**
2337 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
2338 * @hw: pointer to the HW struct
2339 * @blk: classification stage
2340 * @dir: flow direction
2341 * @prof_id: unique ID to identify this flow profile
2342 * @segs: array of one or more packet segments that describe the flow
2343 * @segs_cnt: number of packet segments provided
2344 * @acts: array of default actions
2345 * @acts_cnt: number of default actions
2346 * @prof: stores the returned flow profile added
2347 */
2348enum ice_status
2349ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
2350                  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
2351                  struct ice_flow_action *acts, u8 acts_cnt,
2352                  struct ice_flow_prof **prof)
2353{
2354        enum ice_status status;
2355
2356        if (segs_cnt > ICE_FLOW_SEG_MAX)
2357                return ICE_ERR_MAX_LIMIT;
2358
2359        if (!segs_cnt)
2360                return ICE_ERR_PARAM;
2361
2362        if (!segs)
2363                return ICE_ERR_BAD_PTR;
2364
2365        status = ice_flow_val_hdrs(segs, segs_cnt);
2366        if (status)
2367                return status;
2368
2369        ice_acquire_lock(&hw->fl_profs_locks[blk]);
2370
2371        status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
2372                                        acts, acts_cnt, prof);
2373        if (!status)
2374                LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
2375
2376        ice_release_lock(&hw->fl_profs_locks[blk]);
2377
2378        return status;
2379}
2380
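/* Usage sketch (illustrative only): "dir", "prof_id", "segs"/"segs_cnt" and
 * "vsi_handle" are assumed to have been prepared by the caller, e.g. roughly
 *
 *	struct ice_flow_prof *prof = NULL;
 *	enum ice_status status;
 *
 *	status = ice_flow_add_prof(hw, ICE_BLK_RSS, dir, prof_id,
 *				   segs, segs_cnt, NULL, 0, &prof);
 *	if (status)
 *		return status;
 *
 *	status = ice_flow_assoc_prof(hw, ICE_BLK_RSS, prof, vsi_handle);
 *	if (status)
 *		ice_flow_rem_prof(hw, ICE_BLK_RSS, prof_id);
 *
 * Note that ice_flow_assoc_prof() expects the caller to hold the profile list
 * lock, so this only sketches the intended call order and is not a drop-in
 * snippet.
 */
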
2381/**
2382 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
2383 * @hw: pointer to the HW struct
2384 * @blk: the block for which the flow profile is to be removed
2385 * @prof_id: unique ID of the flow profile to be removed
2386 */
2387enum ice_status
2388ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
2389{
2390        struct ice_flow_prof *prof;
2391        enum ice_status status;
2392
2393        ice_acquire_lock(&hw->fl_profs_locks[blk]);
2394
2395        prof = ice_flow_find_prof_id(hw, blk, prof_id);
2396        if (!prof) {
2397                status = ICE_ERR_DOES_NOT_EXIST;
2398                goto out;
2399        }
2400
2401        /* prof becomes invalid after the call */
2402        status = ice_flow_rem_prof_sync(hw, blk, prof);
2403
2404out:
2405        ice_release_lock(&hw->fl_profs_locks[blk]);
2406
2407        return status;
2408}
2409
2410/**
2411 * ice_flow_find_entry - look for a flow entry using its unique ID
2412 * @hw: pointer to the HW struct
2413 * @blk: classification stage
2414 * @entry_id: unique ID to identify this flow entry
2415 *
2416 * This function looks for the flow entry with the specified unique ID in all
2417 * flow profiles of the specified classification stage. If the entry is found,
2418 * it returns the handle to the flow entry. Otherwise, it returns
2419 * ICE_FLOW_ENTRY_HANDLE_INVAL.
2420 */
2421u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
2422{
2423        struct ice_flow_entry *found = NULL;
2424        struct ice_flow_prof *p;
2425
2426        ice_acquire_lock(&hw->fl_profs_locks[blk]);
2427
2428        LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
2429                struct ice_flow_entry *e;
2430
2431                ice_acquire_lock(&p->entries_lock);
2432                LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
2433                        if (e->id == entry_id) {
2434                                found = e;
2435                                break;
2436                        }
2437                ice_release_lock(&p->entries_lock);
2438
2439                if (found)
2440                        break;
2441        }
2442
2443        ice_release_lock(&hw->fl_profs_locks[blk]);
2444
2445        return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
2446}
2447
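/* Usage sketch (illustrative only): callers typically use the returned handle
 * to decide whether an entry must be added or can be reused, e.g.
 *
 *	u64 entry_h = ice_flow_find_entry(hw, ICE_BLK_FD, entry_id);
 *	bool found = (entry_h != ICE_FLOW_ENTRY_HANDLE_INVAL);
 *
 * where "entry_id" is whatever unique ID the caller assigned when the entry
 * was added.
 */
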
2448/**
2449 * ice_flow_acl_check_actions - Checks the ACL rule's actions
2450 * @hw: pointer to the hardware structure
2451 * @acts: array of actions to be performed on a match
2452 * @acts_cnt: number of actions
2453 * @cnt_alloc: indicates if an ACL counter has been allocated.
2454 */
2455static enum ice_status
2456ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
2457                           u8 acts_cnt, bool *cnt_alloc)
2458{
2459        ice_declare_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2460        int i;
2461
2462        ice_zero_bitmap(dup_check, ICE_AQC_TBL_MAX_ACTION_PAIRS * 2);
2463        *cnt_alloc = false;
2464
2465        if (acts_cnt > ICE_FLOW_ACL_MAX_NUM_ACT)
2466                return ICE_ERR_OUT_OF_RANGE;
2467
2468        for (i = 0; i < acts_cnt; i++) {
2469                if (acts[i].type != ICE_FLOW_ACT_NOP &&
2470                    acts[i].type != ICE_FLOW_ACT_DROP &&
2471                    acts[i].type != ICE_FLOW_ACT_CNTR_PKT &&
2472                    acts[i].type != ICE_FLOW_ACT_FWD_QUEUE)
2473                        return ICE_ERR_CFG;
2474
2475                /* If the caller wants to add two actions of the same type,
2476                 * then it is considered an invalid configuration.
2477                 */
2478                if (ice_test_and_set_bit(acts[i].type, dup_check))
2479                        return ICE_ERR_PARAM;
2480        }
2481
2482        /* Checks if ACL counters are needed. */
2483        for (i = 0; i < acts_cnt; i++) {
2484                if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT ||
2485                    acts[i].type == ICE_FLOW_ACT_CNTR_BYTES ||
2486                    acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES) {
2487                        struct ice_acl_cntrs cntrs = { 0 };
2488                        enum ice_status status;
2489
2490                        cntrs.amount = 1;
2491                        cntrs.bank = 0; /* Only bank0 for the moment */
2492
2493                        if (acts[i].type == ICE_FLOW_ACT_CNTR_PKT_BYTES)
2494                                cntrs.type = ICE_AQC_ACL_CNT_TYPE_DUAL;
2495                        else
2496                                cntrs.type = ICE_AQC_ACL_CNT_TYPE_SINGLE;
2497
2498                        status = ice_aq_alloc_acl_cntrs(hw, &cntrs, NULL);
2499                        if (status)
2500                                return status;
2501                        /* Counter index within the bank */
2502                        acts[i].data.acl_act.value =
2503                                                CPU_TO_LE16(cntrs.first_cntr);
2504                        *cnt_alloc = true;
2505                }
2506        }
2507
2508        return ICE_SUCCESS;
2509}
2510
2511/**
2512 * ice_flow_acl_frmt_entry_range - Format an ACL range checker for a given field
2513 * @fld: number of the given field
2514 * @info: info about field
2515 * @range_buf: range checker configuration buffer
2516 * @data: pointer to a data buffer containing flow entry's match values/masks
2517 * @range: Input/output param indicating which range checkers are being used
2518 */
2519static void
2520ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
2521                              struct ice_aqc_acl_profile_ranges *range_buf,
2522                              u8 *data, u8 *range)
2523{
2524        u16 new_mask;
2525
2526        /* If not specified, default mask is all bits in field */
2527        new_mask = (info->src.mask == ICE_FLOW_FLD_OFF_INVAL ?
2528                    BIT(ice_flds_info[fld].size) - 1 :
2529                    (*(u16 *)(data + info->src.mask))) << info->xtrct.disp;
2530
2531        /* If the mask is 0, then we don't need to worry about this input
2532         * range checker value.
2533         */
2534        if (new_mask) {
2535                u16 new_high =
2536                        (*(u16 *)(data + info->src.last)) << info->xtrct.disp;
2537                u16 new_low =
2538                        (*(u16 *)(data + info->src.val)) << info->xtrct.disp;
2539                u8 range_idx = info->entry.val;
2540
2541                range_buf->checker_cfg[range_idx].low_boundary =
2542                        CPU_TO_BE16(new_low);
2543                range_buf->checker_cfg[range_idx].high_boundary =
2544                        CPU_TO_BE16(new_high);
2545                range_buf->checker_cfg[range_idx].mask = CPU_TO_BE16(new_mask);
2546
2547                /* Indicate which range checker is being used */
2548                *range |= BIT(range_idx);
2549        }
2550}
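
    /* Worked example (assumed values): a 16-bit destination port field with
     * no extraction displacement, matched against the range 1000..2000 and
     * with src.mask left at ICE_FLOW_FLD_OFF_INVAL. new_mask then defaults to
     * BIT(16) - 1 = 0xFFFF, new_low = 1000 and new_high = 2000 are written
     * big-endian into checker_cfg[info->entry.val], and the matching bit of
     * *range is set so the caller knows this range checker slot is in use.
     */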
2551
2552/**
2553 * ice_flow_acl_frmt_entry_fld - Partially format ACL entry for a given field
2554 * @fld: number of the given field
2555 * @info: info about the field
2556 * @buf: buffer containing the entry
2557 * @dontcare: buffer containing don't care mask for entry
2558 * @data: pointer to a data buffer containing flow entry's match values/masks
2559 */
2560static void
2561ice_flow_acl_frmt_entry_fld(u16 fld, struct ice_flow_fld_info *info, u8 *buf,
2562                            u8 *dontcare, u8 *data)
2563{
2564        u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0;
2565        bool use_mask = false;
2566        u8 disp;
2567
2568        src = info->src.val;
2569        mask = info->src.mask;
2570        dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2571        disp = info->xtrct.disp % BITS_PER_BYTE;
2572
2573        if (mask != ICE_FLOW_FLD_OFF_INVAL)
2574                use_mask = true;
2575
2576        for (k = 0; k < info->entry.last; k++, dst++) {
2577                /* Add overflow bits from previous byte */
2578                buf[dst] = (tmp_s & 0xff00) >> 8;
2579
2580                /* If the mask is not valid, tmp_m is always zero, so this just
2581                 * sets dontcare to 0 (no masked bits). If the mask is valid, this
2582                 * pulls in the mask's overflow bits from the previous byte.
2583                 */
2584                dontcare[dst] = (tmp_m & 0xff00) >> 8;
2585
2586                /* If there is displacement, the last byte contains only
2587                 * displaced data and there is no more data to read from the
2588                 * user buffer, so skip the read to avoid going beyond the end
2589                 * of the user buffer.
2590                 */
2591                if (!disp || k < info->entry.last - 1) {
2592                        /* Store shifted data to use in next byte */
2593                        tmp_s = data[src++] << disp;
2594
2595                        /* Add current (shifted) byte */
2596                        buf[dst] |= tmp_s & 0xff;
2597
2598                        /* Handle mask if valid */
2599                        if (use_mask) {
2600                                tmp_m = (~data[mask++] & 0xff) << disp;
2601                                dontcare[dst] |= tmp_m & 0xff;
2602                        }
2603                }
2604        }
2605
2606        /* Fill in don't care bits at beginning of field */
2607        if (disp) {
2608                dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2609                for (k = 0; k < disp; k++)
2610                        dontcare[dst] |= BIT(k);
2611        }
2612
2613        end_disp = (disp + ice_flds_info[fld].size) % BITS_PER_BYTE;
2614
2615        /* Fill in don't care bits at end of field */
2616        if (end_disp) {
2617                dst = info->entry.val - ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX +
2618                      info->entry.last - 1;
2619                for (k = end_disp; k < BITS_PER_BYTE; k++)
2620                        dontcare[dst] |= BIT(k);
2621        }
2622}
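
    /* Worked example (assumed values): a 16-bit field extracted with
     * xtrct.disp = 3 occupies info->entry.last = 3 key bytes. Byte 0 gets the
     * low 8 bits of data[0] << 3, byte 1 gets data[0]'s 3 spilled high bits
     * plus the low bits of data[1] << 3, and byte 2 gets only data[1]'s 3
     * spilled high bits, so no further user data is read for it. The 3
     * low-order bits of byte 0 and bits 3..7 of byte 2 (end_disp =
     * (3 + 16) % 8 = 3) never carry field data and are marked in the don't
     * care mask.
     */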
2623
2624/**
2625 * ice_flow_acl_frmt_entry - Format ACL entry
2626 * @hw: pointer to the hardware structure
2627 * @prof: pointer to flow profile
2628 * @e: pointer to the flow entry
2629 * @data: pointer to a data buffer containing flow entry's match values/masks
2630 * @acts: array of actions to be performed on a match
2631 * @acts_cnt: number of actions
2632 *
2633 * Formats the key (and key_inverse) to be matched from the data passed in,
2634 * along with data from the flow profile. This key/key_inverse pair makes up
2635 * the 'entry' for an ACL flow entry.
2636 */
2637static enum ice_status
2638ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
2639                        struct ice_flow_entry *e, u8 *data,
2640                        struct ice_flow_action *acts, u8 acts_cnt)
2641{
2642        u8 *buf = NULL, *dontcare = NULL, *key = NULL, range = 0, dir_flag_msk;
2643        struct ice_aqc_acl_profile_ranges *range_buf = NULL;
2644        enum ice_status status;
2645        bool cnt_alloc;
2646        u8 prof_id = 0;
2647        u16 i, buf_sz;
2648
2649        status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id, &prof_id);
2650        if (status)
2651                return status;
2652
2653        /* Format the result action */
2654
2655        status = ice_flow_acl_check_actions(hw, acts, acts_cnt, &cnt_alloc);
2656        if (status)
2657                return status;
2658
2659        status = ICE_ERR_NO_MEMORY;
2660
2661        e->acts = (struct ice_flow_action *)
2662                ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
2663                           ICE_NONDMA_TO_NONDMA);
2664        if (!e->acts)
2665                goto out;
2666
2667        e->acts_cnt = acts_cnt;
2668
2669        /* Format the matching data */
2670        buf_sz = prof->cfg.scen->width;
2671        buf = (u8 *)ice_malloc(hw, buf_sz);
2672        if (!buf)
2673                goto out;
2674
2675        dontcare = (u8 *)ice_malloc(hw, buf_sz);
2676        if (!dontcare)
2677                goto out;
2678
2679        /* 'key' buffer will store both key and key_inverse, so must be twice
2680         * size of buf
2681         */
2682        key = (u8 *)ice_malloc(hw, buf_sz * 2);
2683        if (!key)
2684                goto out;
2685
2686        range_buf = (struct ice_aqc_acl_profile_ranges *)
2687                ice_malloc(hw, sizeof(struct ice_aqc_acl_profile_ranges));
2688        if (!range_buf)
2689                goto out;
2690
2691        /* Set don't care mask to all 1's to start, will zero out used bytes */
2692        ice_memset(dontcare, 0xff, buf_sz, ICE_NONDMA_MEM);
2693
2694        for (i = 0; i < prof->segs_cnt; i++) {
2695                struct ice_flow_seg_info *seg = &prof->segs[i];
2696                u8 j;
2697
2698                ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
2699                                     ICE_FLOW_FIELD_IDX_MAX) {
2700                        struct ice_flow_fld_info *info = &seg->fields[j];
2701
2702                        if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
2703                                ice_flow_acl_frmt_entry_range(j, info,
2704                                                              range_buf, data,
2705                                                              &range);
2706                        else
2707                                ice_flow_acl_frmt_entry_fld(j, info, buf,
2708                                                            dontcare, data);
2709                }
2710
2711                for (j = 0; j < seg->raws_cnt; j++) {
2712                        struct ice_flow_fld_info *info = &seg->raws[j].info;
2713                        u16 dst, src, mask, k;
2714                        bool use_mask = false;
2715
2716                        src = info->src.val;
2717                        dst = info->entry.val -
2718                                        ICE_AQC_ACL_PROF_BYTE_SEL_START_IDX;
2719                        mask = info->src.mask;
2720
2721                        if (mask != ICE_FLOW_FLD_OFF_INVAL)
2722                                use_mask = true;
2723
2724                        for (k = 0; k < info->entry.last; k++, dst++) {
2725                                buf[dst] = data[src++];
2726                                if (use_mask)
2727                                        dontcare[dst] = ~data[mask++];
2728                                else
2729                                        dontcare[dst] = 0;
2730                        }
2731                }
2732        }
2733
2734        buf[prof->cfg.scen->pid_idx] = (u8)prof_id;
2735        dontcare[prof->cfg.scen->pid_idx] = 0;
2736
2737        /* Format the buffer for direction flags */
2738        dir_flag_msk = BIT(ICE_FLG_PKT_DIR);
2739
2740        if (prof->dir == ICE_FLOW_RX)
2741                buf[prof->cfg.scen->pkt_dir_idx] = dir_flag_msk;
2742
2743        if (range) {
2744                buf[prof->cfg.scen->rng_chk_idx] = range;
2745                /* Mark any unused range checkers as don't care */
2746                dontcare[prof->cfg.scen->rng_chk_idx] = ~range;
2747                e->range_buf = range_buf;
2748        } else {
2749                ice_free(hw, range_buf);
2750        }
2751
2752        status = ice_set_key(key, buf_sz * 2, buf, NULL, dontcare, NULL, 0,
2753                             buf_sz);
2754        if (status)
2755                goto out;
2756
2757        e->entry = key;
2758        e->entry_sz = buf_sz * 2;
2759
2760out:
2761        if (buf)
2762                ice_free(hw, buf);
2763
2764        if (dontcare)
2765                ice_free(hw, dontcare);
2766
2767        if (status && key)
2768                ice_free(hw, key);
2769
2770        if (status && range_buf) {
2771                ice_free(hw, range_buf);
2772                e->range_buf = NULL;
2773        }
2774
2775        if (status && e->acts) {
2776                ice_free(hw, e->acts);
2777                e->acts = NULL;
2778                e->acts_cnt = 0;
2779        }
2780
2781        if (status && cnt_alloc)
2782                ice_flow_acl_free_act_cntr(hw, acts, acts_cnt);
2783
2784        return status;
2785}
2786
2787/**
2788 * ice_flow_acl_find_scen_entry_cond - Find an ACL scenario entry that matches
2789 *                                     the compared data.
2790 * @prof: pointer to flow profile
2791 * @e: pointer to the comparing flow entry
2792 * @do_chg_action: decide if we want to change the ACL action
2793 * @do_add_entry: decide if we want to add the new ACL entry
2794 * @do_rem_entry: decide if we want to remove the current ACL entry
2795 *
2796 * Find an ACL scenario entry that matches the compared data. At the same time,
2797 * this function also figures out:
2798 * a/ If we want to change the ACL action
2799 * b/ If we want to add the new ACL entry
2800 * c/ If we want to remove the current ACL entry
2801 */
2802static struct ice_flow_entry *
2803ice_flow_acl_find_scen_entry_cond(struct ice_flow_prof *prof,
2804                                  struct ice_flow_entry *e, bool *do_chg_action,
2805                                  bool *do_add_entry, bool *do_rem_entry)
2806{
2807        struct ice_flow_entry *p, *return_entry = NULL;
2808        u8 i, j;
2809
2810        /* Check if:
2811         * a/ There exists an entry with the same matching data but a different
2812         *    priority, then we remove this existing ACL entry and add the
2813         *    new entry to the ACL scenario.
2814         * b/ There exists an entry with the same matching data, priority, and
2815         *    result action, then we do nothing.
2816         * c/ There exists an entry with the same matching data and priority, but
2817         *    a different action, then we only change the entry's action.
2818         * d/ Else, we add this new entry to the ACL scenario.
2819         */
2820        *do_chg_action = false;
2821        *do_add_entry = true;
2822        *do_rem_entry = false;
2823        LIST_FOR_EACH_ENTRY(p, &prof->entries, ice_flow_entry, l_entry) {
2824                if (memcmp(p->entry, e->entry, p->entry_sz))
2825                        continue;
2826
2827                /* From this point, we have the same matching_data. */
2828                *do_add_entry = false;
2829                return_entry = p;
2830
2831                if (p->priority != e->priority) {
2832                        /* matching data && !priority */
2833                        *do_add_entry = true;
2834                        *do_rem_entry = true;
2835                        break;
2836                }
2837
2838                /* From this point, we will have matching_data && priority */
2839                if (p->acts_cnt != e->acts_cnt)
2840                        *do_chg_action = true;
2841                for (i = 0; i < p->acts_cnt; i++) {
2842                        bool found_not_match = false;
2843
2844                        for (j = 0; j < e->acts_cnt; j++)
2845                                if (memcmp(&p->acts[i], &e->acts[j],
2846                                           sizeof(struct ice_flow_action))) {
2847                                        found_not_match = true;
2848                                        break;
2849                                }
2850
2851                        if (found_not_match) {
2852                                *do_chg_action = true;
2853                                break;
2854                        }
2855                }
2856
2857                /* (do_chg_action = true) means :
2858                 *    matching_data && priority && !result_action
2859                 * (do_chg_action = false) means :
2860                 *    matching_data && priority && result_action
2861                 */
2862                break;
2863        }
2864
2865        return return_entry;
2866}
2867
2868/**
2869 * ice_flow_acl_convert_to_acl_prio - Convert to ACL priority
2870 * @p: flow priority
2871 */
2872static enum ice_acl_entry_prio
2873ice_flow_acl_convert_to_acl_prio(enum ice_flow_priority p)
2874{
2875        enum ice_acl_entry_prio acl_prio;
2876
2877        switch (p) {
2878        case ICE_FLOW_PRIO_LOW:
2879                acl_prio = ICE_ACL_PRIO_LOW;
2880                break;
2881        case ICE_FLOW_PRIO_NORMAL:
2882                acl_prio = ICE_ACL_PRIO_NORMAL;
2883                break;
2884        case ICE_FLOW_PRIO_HIGH:
2885                acl_prio = ICE_ACL_PRIO_HIGH;
2886                break;
2887        default:
2888                acl_prio = ICE_ACL_PRIO_NORMAL;
2889                break;
2890        }
2891
2892        return acl_prio;
2893}
2894
2895/**
2896 * ice_flow_acl_union_rng_chk - Perform union operation between two
2897 *                              range checker buffers
2898 * @dst_buf: pointer to destination range checker buffer
2899 * @src_buf: pointer to source range checker buffer
2900 *
2901 * This function performs the union of the dst_buf and src_buf range
2902 * checker buffers and saves the result back to dst_buf.
2903 */
2904static enum ice_status
2905ice_flow_acl_union_rng_chk(struct ice_aqc_acl_profile_ranges *dst_buf,
2906                           struct ice_aqc_acl_profile_ranges *src_buf)
2907{
2908        u8 i, j;
2909
2910        if (!dst_buf || !src_buf)
2911                return ICE_ERR_BAD_PTR;
2912
2913        for (i = 0; i < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; i++) {
2914                struct ice_acl_rng_data *cfg_data = NULL, *in_data;
2915                bool will_populate = false;
2916
2917                in_data = &src_buf->checker_cfg[i];
2918
2919                if (!in_data->mask)
2920                        break;
2921
2922                for (j = 0; j < ICE_AQC_ACL_PROF_RANGES_NUM_CFG; j++) {
2923                        cfg_data = &dst_buf->checker_cfg[j];
2924
2925                        if (!cfg_data->mask ||
2926                            !memcmp(cfg_data, in_data,
2927                                    sizeof(struct ice_acl_rng_data))) {
2928                                will_populate = true;
2929                                break;
2930                        }
2931                }
2932
2933                if (will_populate) {
2934                        ice_memcpy(cfg_data, in_data,
2935                                   sizeof(struct ice_acl_rng_data),
2936                                   ICE_NONDMA_TO_NONDMA);
2937                } else {
2938                        /* No available slot left to program range checker */
2939                        return ICE_ERR_MAX_LIMIT;
2940                }
2941        }
2942
2943        return ICE_SUCCESS;
2944}
2945
2946/**
2947 * ice_flow_acl_add_scen_entry_sync - Add entry to ACL scenario sync
2948 * @hw: pointer to the hardware structure
2949 * @prof: pointer to flow profile
2950 * @entry: double pointer to the flow entry
2951 *
2952 * This function looks at the entries currently added to the
2953 * corresponding ACL scenario, then performs matching logic to decide
2954 * whether to add, modify, or do nothing with this new entry.
2955 */
2956static enum ice_status
2957ice_flow_acl_add_scen_entry_sync(struct ice_hw *hw, struct ice_flow_prof *prof,
2958                                 struct ice_flow_entry **entry)
2959{
2960        bool do_add_entry, do_rem_entry, do_chg_action, do_chg_rng_chk;
2961        struct ice_aqc_acl_profile_ranges query_rng_buf, cfg_rng_buf;
2962        struct ice_acl_act_entry *acts = NULL;
2963        struct ice_flow_entry *exist;
2964        enum ice_status status = ICE_SUCCESS;
2965        struct ice_flow_entry *e;
2966        u8 i;
2967
2968        if (!entry || !(*entry) || !prof)
2969                return ICE_ERR_BAD_PTR;
2970
2971        e = *entry;
2972
2973        do_chg_rng_chk = false;
2974        if (e->range_buf) {
2975                u8 prof_id = 0;
2976
2977                status = ice_flow_get_hw_prof(hw, ICE_BLK_ACL, prof->id,
2978                                              &prof_id);
2979                if (status)
2980                        return status;
2981
2982                /* Query the current range-checker value in FW */
2983                status = ice_query_acl_prof_ranges(hw, prof_id, &query_rng_buf,
2984                                                   NULL);
2985                if (status)
2986                        return status;
2987                ice_memcpy(&cfg_rng_buf, &query_rng_buf,
2988                           sizeof(struct ice_aqc_acl_profile_ranges),
2989                           ICE_NONDMA_TO_NONDMA);
2990
2991                /* Generate the new range-checker value */
2992                status = ice_flow_acl_union_rng_chk(&cfg_rng_buf, e->range_buf);
2993                if (status)
2994                        return status;
2995
2996                /* Reconfigure the range check if the buffer is changed. */
2997                do_chg_rng_chk = false;
2998                if (memcmp(&query_rng_buf, &cfg_rng_buf,
2999                           sizeof(struct ice_aqc_acl_profile_ranges))) {
3000                        status = ice_prog_acl_prof_ranges(hw, prof_id,
3001                                                          &cfg_rng_buf, NULL);
3002                        if (status)
3003                                return status;
3004
3005                        do_chg_rng_chk = true;
3006                }
3007        }
3008
3009        /* Figure out if we want to (change the ACL action) and/or
3010         * (add the new ACL entry) and/or (remove the current ACL entry)
3011         */
3012        exist = ice_flow_acl_find_scen_entry_cond(prof, e, &do_chg_action,
3013                                                  &do_add_entry, &do_rem_entry);
3014        if (do_rem_entry) {
3015                status = ice_flow_rem_entry_sync(hw, ICE_BLK_ACL, exist);
3016                if (status)
3017                        return status;
3018        }
3019
3020        /* Prepare the result action buffer */
3021        acts = (struct ice_acl_act_entry *)
3022                ice_calloc(hw, e->entry_sz, sizeof(struct ice_acl_act_entry));
3023        if (!acts)
3024                return ICE_ERR_NO_MEMORY;
3025
3026        for (i = 0; i < e->acts_cnt; i++)
3027                ice_memcpy(&acts[i], &e->acts[i].data.acl_act,
3028                           sizeof(struct ice_acl_act_entry),
3029                           ICE_NONDMA_TO_NONDMA);
3030
3031        if (do_add_entry) {
3032                enum ice_acl_entry_prio prio;
3033                u8 *keys, *inverts;
3034                u16 entry_idx;
3035
3036                keys = (u8 *)e->entry;
3037                inverts = keys + (e->entry_sz / 2);
3038                prio = ice_flow_acl_convert_to_acl_prio(e->priority);
3039
3040                status = ice_acl_add_entry(hw, prof->cfg.scen, prio, keys,
3041                                           inverts, acts, e->acts_cnt,
3042                                           &entry_idx);
3043                if (status)
3044                        goto out;
3045
3046                e->scen_entry_idx = entry_idx;
3047                LIST_ADD(&e->l_entry, &prof->entries);
3048        } else {
3049                if (do_chg_action) {
3050                        /* For the action memory info, update the SW's copy of
3051                         * the existing entry with e's action memory info.
3052                         */
3053                        ice_free(hw, exist->acts);
3054                        exist->acts_cnt = e->acts_cnt;
3055                        exist->acts = (struct ice_flow_action *)
3056                                ice_calloc(hw, exist->acts_cnt,
3057                                           sizeof(struct ice_flow_action));
3058                        if (!exist->acts) {
3059                                status = ICE_ERR_NO_MEMORY;
3060                                goto out;
3061                        }
3062
3063                        ice_memcpy(exist->acts, e->acts,
3064                                   sizeof(struct ice_flow_action) * e->acts_cnt,
3065                                   ICE_NONDMA_TO_NONDMA);
3066
3067                        status = ice_acl_prog_act(hw, prof->cfg.scen, acts,
3068                                                  e->acts_cnt,
3069                                                  exist->scen_entry_idx);
3070                        if (status)
3071                                goto out;
3072                }
3073
3074                if (do_chg_rng_chk) {
3075                        /* In this case, we want to update the range checker
3076                         * information of the existing entry.
3077                         */
3078                        status = ice_flow_acl_union_rng_chk(exist->range_buf,
3079                                                            e->range_buf);
3080                        if (status)
3081                                goto out;
3082                }
3083
3084                /* As we don't add the new entry to our SW DB, deallocate its
3085                 * memory and return the existing entry to the caller.
3086                 */
3087                ice_dealloc_flow_entry(hw, e);
3088                *(entry) = exist;
3089        }
3090out:
3091        ice_free(hw, acts);
3092
3093        return status;
3094}
3095
3096/**
3097 * ice_flow_acl_add_scen_entry - Add entry to ACL scenario
3098 * @hw: pointer to the hardware structure
3099 * @prof: pointer to flow profile
3100 * @e: double pointer to the flow entry
3101 */
3102static enum ice_status
3103ice_flow_acl_add_scen_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
3104                            struct ice_flow_entry **e)
3105{
3106        enum ice_status status;
3107
3108        ice_acquire_lock(&prof->entries_lock);
3109        status = ice_flow_acl_add_scen_entry_sync(hw, prof, e);
3110        ice_release_lock(&prof->entries_lock);
3111
3112        return status;
3113}
3114
3115/**
3116 * ice_flow_add_entry - Add a flow entry
3117 * @hw: pointer to the HW struct
3118 * @blk: classification stage
3119 * @prof_id: ID of the profile to add a new flow entry to
3120 * @entry_id: unique ID to identify this flow entry
3121 * @vsi_handle: software VSI handle for the flow entry
3122 * @prio: priority of the flow entry
3123 * @data: pointer to a data buffer containing flow entry's match values/masks
3124 * @acts: array of actions to be performed on a match
3125 * @acts_cnt: number of actions
3126 * @entry_h: pointer to buffer that receives the new flow entry's handle
3127 */
3128enum ice_status
3129ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
3130                   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
3131                   void *data, struct ice_flow_action *acts, u8 acts_cnt,
3132                   u64 *entry_h)
3133{
3134        struct ice_flow_entry *e = NULL;
3135        struct ice_flow_prof *prof;
3136        enum ice_status status = ICE_SUCCESS;
3137
3138        /* ACL entries must indicate an action */
3139        if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
3140                return ICE_ERR_PARAM;
3141
3142        /* No flow entry data is expected for RSS */
3143        if (!entry_h || (!data && blk != ICE_BLK_RSS))
3144                return ICE_ERR_BAD_PTR;
3145
3146        if (!ice_is_vsi_valid(hw, vsi_handle))
3147                return ICE_ERR_PARAM;
3148
3149        ice_acquire_lock(&hw->fl_profs_locks[blk]);
3150
3151        prof = ice_flow_find_prof_id(hw, blk, prof_id);
3152        if (!prof) {
3153                status = ICE_ERR_DOES_NOT_EXIST;
3154        } else {
3155                /* Allocate memory for the entry being added and associate
3156                 * the VSI to the found flow profile
3157                 */
3158                e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
3159                if (!e)
3160                        status = ICE_ERR_NO_MEMORY;
3161                else
3162                        status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3163        }
3164
3165        ice_release_lock(&hw->fl_profs_locks[blk]);
3166        if (status)
3167                goto out;
3168
3169        e->id = entry_id;
3170        e->vsi_handle = vsi_handle;
3171        e->prof = prof;
3172        e->priority = prio;
3173
3174        switch (blk) {
3175        case ICE_BLK_FD:
3176        case ICE_BLK_RSS:
3177                break;
3178        case ICE_BLK_ACL:
3179                /* ACL will handle the entry management */
3180                status = ice_flow_acl_frmt_entry(hw, prof, e, (u8 *)data, acts,
3181                                                 acts_cnt);
3182                if (status)
3183                        goto out;
3184
3185                status = ice_flow_acl_add_scen_entry(hw, prof, &e);
3186                if (status)
3187                        goto out;
3188
3189                break;
3190        default:
3191                status = ICE_ERR_NOT_IMPL;
3192                goto out;
3193        }
3194
3195        if (blk != ICE_BLK_ACL) {
3196                /* ACL will handle the entry management */
3197                ice_acquire_lock(&prof->entries_lock);
3198                LIST_ADD(&e->l_entry, &prof->entries);
3199                ice_release_lock(&prof->entries_lock);
3200        }
3201
3202        *entry_h = ICE_FLOW_ENTRY_HNDL(e);
3203
3204out:
3205        if (status && e) {
3206                if (e->entry)
3207                        ice_free(hw, e->entry);
3208                ice_free(hw, e);
3209        }
3210
3211        return status;
3212}
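
    /* Usage sketch (comment only): adding an FD entry against an existing
     * profile. prof_id, entry_id, vsi_handle and match_buf are placeholders;
     * match_buf must be laid out according to the val_loc/mask_loc offsets
     * used when the profile's segments were built with ice_flow_set_fld().
     *
     *        u64 entry_hndl = ICE_FLOW_ENTRY_HANDLE_INVAL;
     *        enum ice_status status;
     *
     *        status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, entry_id,
     *                                    vsi_handle, ICE_FLOW_PRIO_NORMAL,
     *                                    match_buf, NULL, 0, &entry_hndl);
     *
     * and, when no longer needed, the entry is removed with
     * ice_flow_rem_entry(hw, ICE_BLK_FD, entry_hndl).
     */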
3213
3214/**
3215 * ice_flow_rem_entry - Remove a flow entry
3216 * @hw: pointer to the HW struct
3217 * @blk: classification stage
3218 * @entry_h: handle to the flow entry to be removed
3219 */
3220enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
3221                                   u64 entry_h)
3222{
3223        struct ice_flow_entry *entry;
3224        struct ice_flow_prof *prof;
3225        enum ice_status status = ICE_SUCCESS;
3226
3227        if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
3228                return ICE_ERR_PARAM;
3229
3230        entry = ICE_FLOW_ENTRY_PTR((intptr_t)entry_h);
3231
3232        /* Retain the pointer to the flow profile as the entry will be freed */
3233        prof = entry->prof;
3234
3235        if (prof) {
3236                ice_acquire_lock(&prof->entries_lock);
3237                status = ice_flow_rem_entry_sync(hw, blk, entry);
3238                ice_release_lock(&prof->entries_lock);
3239        }
3240
3241        return status;
3242}
3243
3244/**
3245 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
3246 * @seg: packet segment the field being set belongs to
3247 * @fld: field to be set
3248 * @field_type: type of the field
3249 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3250 *           entry's input buffer
3251 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3252 *            input buffer
3253 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3254 *            entry's input buffer
3255 *
3256 * This helper function stores information of a field being matched, including
3257 * the type of the field and the locations of the value to match, the mask, and
3258 * the upper-bound value in the start of the input buffer for a flow entry.
3259 * This function should only be used for fixed-size data structures.
3260 *
3261 * This function also opportunistically determines the protocol headers to be
3262 * present based on the fields being set. Some fields cannot be used alone to
3263 * determine the protocol headers present. Sometimes, fields for particular
3264 * protocol headers are not matched. In those cases, the protocol headers
3265 * must be explicitly set.
3266 */
3267static void
3268ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3269                     enum ice_flow_fld_match_type field_type, u16 val_loc,
3270                     u16 mask_loc, u16 last_loc)
3271{
3272        u64 bit = BIT_ULL(fld);
3273
3274        seg->match |= bit;
3275        if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
3276                seg->range |= bit;
3277
3278        seg->fields[fld].type = field_type;
3279        seg->fields[fld].src.val = val_loc;
3280        seg->fields[fld].src.mask = mask_loc;
3281        seg->fields[fld].src.last = last_loc;
3282
3283        ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
3284}
3285
3286/**
3287 * ice_flow_set_fld - specifies locations of field from entry's input buffer
3288 * @seg: packet segment the field being set belongs to
3289 * @fld: field to be set
3290 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3291 *           entry's input buffer
3292 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
3293 *            input buffer
3294 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
3295 *            entry's input buffer
3296 * @range: indicate if field being matched is to be in a range
3297 *
3298 * This function specifies the locations, in the form of byte offsets from the
3299 * start of the input buffer for a flow entry, from where the value to match,
3300 * the mask value, and upper value can be extracted. These locations are then
3301 * stored in the flow profile. When adding a flow entry associated with the
3302 * flow profile, these locations will be used to quickly extract the values and
3303 * create the content of a match entry. This function should only be used for
3304 * fixed-size data structures.
3305 */
3306void
3307ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3308                 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
3309{
3310        enum ice_flow_fld_match_type t = range ?
3311                ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
3312
3313        ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
3314}
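
    /* Usage sketch (comment only): describing a match on the IPv4 source and
     * destination addresses of a segment. struct my_fd_buf is a hypothetical
     * caller-defined input buffer; the same offsets must be used later when
     * the buffer is filled for ice_flow_add_entry().
     *
     *        struct my_fd_buf {
     *                u32 src_ip;
     *                u32 dst_ip;
     *        };
     *
     *        ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
     *                         offsetof(struct my_fd_buf, src_ip),
     *                         ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
     *                         false);
     *        ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
     *                         offsetof(struct my_fd_buf, dst_ip),
     *                         ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
     *                         false);
     */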
3315
3316/**
3317 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
3318 * @seg: packet segment the field being set belongs to
3319 * @fld: field to be set
3320 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
3321 *           entry's input buffer
3322 * @pref_loc: location of prefix value from entry's input buffer
3323 * @pref_sz: size of the location holding the prefix value
3324 *
3325 * This function specifies the locations, in the form of byte offsets from the
3326 * start of the input buffer for a flow entry, from where the value to match
3327 * and the IPv4 prefix value can be extracted. These locations are then stored
3328 * in the flow profile. When adding flow entries to the associated flow profile,
3329 * these locations can be used to quickly extract the values to create the
3330 * content of a match entry. This function should only be used for fixed-size
3331 * data structures.
3332 */
3333void
3334ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
3335                        u16 val_loc, u16 pref_loc, u8 pref_sz)
3336{
3337        /* For this type of field, the "mask" location is for the prefix value's
3338         * location and the "last" location is for the size of the location of
3339         * the prefix value.
3340         */
3341        ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
3342                             pref_loc, (u16)pref_sz);
3343}
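
    /* Usage sketch (comment only), reusing the hypothetical my_fd_buf from
     * the ice_flow_set_fld() sketch above with an added u8 'pref' member
     * holding the IPv4 prefix value:
     *
     *        ice_flow_set_fld_prefix(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
     *                                offsetof(struct my_fd_buf, dst_ip),
     *                                offsetof(struct my_fd_buf, pref),
     *                                sizeof(u8));
     */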
3344
3345/**
3346 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
3347 * @seg: packet segment the field being set belongs to
3348 * @off: offset of the raw field from the beginning of the segment in bytes
3349 * @len: length of the raw pattern to be matched
3350 * @val_loc: location of the value to match from entry's input buffer
3351 * @mask_loc: location of mask value from entry's input buffer
3352 *
3353 * This function specifies the offset of the raw field to be matched from the
3354 * beginning of the specified packet segment, and the locations, in the form of
3355 * byte offsets from the start of the input buffer for a flow entry, from where
3356 * the value to match and the mask value are to be extracted. These locations are
3357 * then stored in the flow profile. When adding flow entries to the associated
3358 * flow profile, these locations can be used to quickly extract the values to
3359 * create the content of a match entry. This function should only be used for
3360 * fixed-size data structures.
3361 */
3362void
3363ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
3364                     u16 val_loc, u16 mask_loc)
3365{
3366        if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
3367                seg->raws[seg->raws_cnt].off = off;
3368                seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
3369                seg->raws[seg->raws_cnt].info.src.val = val_loc;
3370                seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
3371                /* The "last" field is used to store the length of the field */
3372                seg->raws[seg->raws_cnt].info.src.last = len;
3373        }
3374
3375        /* Overflows of "raws" will be handled as an error condition later in
3376         * the flow when this information is processed.
3377         */
3378        seg->raws_cnt++;
3379}
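
    /* Usage sketch (comment only): matching 4 raw bytes starting 6 bytes into
     * the segment. The offsets and buffer members are assumptions for the
     * example; val_loc and mask_loc are byte offsets into the entry's input
     * buffer, exactly as for ice_flow_set_fld().
     *
     *        ice_flow_add_fld_raw(seg, 6, 4,
     *                             offsetof(struct my_fd_buf, raw_val),
     *                             offsetof(struct my_fd_buf, raw_msk));
     */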
3380
3381/**
3382 * ice_flow_rem_vsi_prof - remove VSI from flow profile
3383 * @hw: pointer to the hardware structure
3384 * @blk: classification stage
3385 * @vsi_handle: software VSI handle
3386 * @prof_id: unique ID to identify this flow profile
3387 *
3388 * This function removes the flow entries associated with the input
3389 * VSI handle and disassociates the VSI from the flow profile.
3390 */
3391enum ice_status ice_flow_rem_vsi_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
3392                                      u64 prof_id)
3393{
3394        struct ice_flow_prof *prof = NULL;
3395        enum ice_status status = ICE_SUCCESS;
3396
3397        if (blk >= ICE_BLK_COUNT || !ice_is_vsi_valid(hw, vsi_handle))
3398                return ICE_ERR_PARAM;
3399
3400        /* find flow profile pointer with input package block and profile id */
3401        prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
3402        if (!prof) {
3403                ice_debug(hw, ICE_DBG_PKG,
3404                          "Cannot find flow profile id=%" PRIu64 "\n", prof_id);
3405                return ICE_ERR_DOES_NOT_EXIST;
3406        }
3407
3408        /* Remove all remaining flow entries before removing the flow profile */
3409        if (!LIST_EMPTY(&prof->entries)) {
3410                struct ice_flow_entry *e, *t;
3411
3412                ice_acquire_lock(&prof->entries_lock);
3413                LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
3414                                         l_entry) {
3415                        if (e->vsi_handle != vsi_handle)
3416                                continue;
3417
3418                        status = ice_flow_rem_entry_sync(hw, blk, e);
3419                        if (status)
3420                                break;
3421                }
3422                ice_release_lock(&prof->entries_lock);
3423        }
3424        if (status)
3425                return status;
3426
3427        /* disassociate the flow profile from sw vsi handle */
3428        status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3429        if (status)
3430                ice_debug(hw, ICE_DBG_PKG,
3431                          "ice_flow_disassoc_prof() failed with status=%d\n",
3432                          status);
3433        return status;
3434}
3435
3436#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
3437(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_ETH_NON_IP | ICE_FLOW_SEG_HDR_VLAN)
3438
3439#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
3440        (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
3441
3442#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
3443        (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
3444
3445#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
3446        (ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
3447         ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
3448         ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
3449
3450/**
3451 * ice_flow_set_rss_seg_info - setup packet segments for RSS
3452 * @segs: pointer to the flow field segment(s)
3453 * @seg_cnt: segment count
3454 * @cfg: configure parameters
3455 *
3456 * Helper function to extract fields from the hash bitmap and use the
3457 * flow header value to set up the flow field segments for later use in
3458 * flow profile entry addition or removal.
3459 */
3460static enum ice_status
3461ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
3462                          const struct ice_rss_hash_cfg *cfg)
3463{
3464        struct ice_flow_seg_info *seg;
3465        u64 val;
3466        u8 i;
3467
3468        /* set inner most segment */
3469        seg = &segs[seg_cnt - 1];
3470
3471        ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
3472                             ICE_FLOW_FIELD_IDX_MAX)
3473                ice_flow_set_fld(seg, (enum ice_flow_field)i,
3474                                 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
3475                                 ICE_FLOW_FLD_OFF_INVAL, false);
3476
3477        ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
3478
3479        /* set outer most header */
3480        if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
3481                segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
3482                                                   ICE_FLOW_SEG_HDR_IPV_FRAG |
3483                                                   ICE_FLOW_SEG_HDR_IPV_OTHER;
3484        else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
3485                segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
3486                                                   ICE_FLOW_SEG_HDR_IPV_FRAG |
3487                                                   ICE_FLOW_SEG_HDR_IPV_OTHER;
3488
3489        if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
3490            ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER &
3491            ~ICE_FLOW_SEG_HDR_IPV_FRAG)
3492                return ICE_ERR_PARAM;
3493
3494        val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
3495        if (val && !ice_is_pow2(val))
3496                return ICE_ERR_CFG;
3497
3498        val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
3499        if (val && !ice_is_pow2(val))
3500                return ICE_ERR_CFG;
3501
3502        return ICE_SUCCESS;
3503}
3504
3505/**
3506 * ice_rem_vsi_rss_list - remove VSI from RSS list
3507 * @hw: pointer to the hardware structure
3508 * @vsi_handle: software VSI handle
3509 *
3510 * Remove the VSI from all RSS configurations in the list.
3511 */
3512void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
3513{
3514        struct ice_rss_cfg *r, *tmp;
3515
3516        if (LIST_EMPTY(&hw->rss_list_head))
3517                return;
3518
3519        ice_acquire_lock(&hw->rss_locks);
3520        LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3521                                 ice_rss_cfg, l_entry)
3522                if (ice_test_and_clear_bit(vsi_handle, r->vsis))
3523                        if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3524                                LIST_DEL(&r->l_entry);
3525                                ice_free(hw, r);
3526                        }
3527        ice_release_lock(&hw->rss_locks);
3528}
3529
3530/**
3531 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
3532 * @hw: pointer to the hardware structure
3533 * @vsi_handle: software VSI handle
3534 *
3535 * This function will iterate through all flow profiles and disassociate
3536 * the VSI from each profile. If a flow profile has no VSIs left, it
3537 * will be removed.
3538 */
3539enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
3540{
3541        const enum ice_block blk = ICE_BLK_RSS;
3542        struct ice_flow_prof *p, *t;
3543        enum ice_status status = ICE_SUCCESS;
3544
3545        if (!ice_is_vsi_valid(hw, vsi_handle))
3546                return ICE_ERR_PARAM;
3547
3548        if (LIST_EMPTY(&hw->fl_profs[blk]))
3549                return ICE_SUCCESS;
3550
3551        ice_acquire_lock(&hw->rss_locks);
3552        LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
3553                                 l_entry)
3554                if (ice_is_bit_set(p->vsis, vsi_handle)) {
3555                        status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
3556                        if (status)
3557                                break;
3558
3559                        if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
3560                                status = ice_flow_rem_prof(hw, blk, p->id);
3561                                if (status)
3562                                        break;
3563                        }
3564                }
3565        ice_release_lock(&hw->rss_locks);
3566
3567        return status;
3568}
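
    /* Teardown sketch (comment only; the ordering is one reasonable choice,
     * not necessarily what every caller does): when a VSI goes away, release
     * the RSS flow profiles first and then drop the software bookkeeping.
     *
     *        if (!ice_rem_vsi_rss_cfg(hw, vsi_handle))
     *                ice_rem_vsi_rss_list(hw, vsi_handle);
     */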
3569
3570/**
3571 * ice_get_rss_hdr_type - get an RSS profile's header type
3572 * @prof: RSS flow profile
3573 */
3574static enum ice_rss_cfg_hdr_type
3575ice_get_rss_hdr_type(struct ice_flow_prof *prof)
3576{
3577        enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
3578
3579        if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
3580                hdr_type = ICE_RSS_OUTER_HEADERS;
3581        } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
3582                if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
3583                        hdr_type = ICE_RSS_INNER_HEADERS;
3584                if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
3585                        hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
3586                if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
3587                        hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
3588        }
3589
3590        return hdr_type;
3591}
3592
3593/**
3594 * ice_rem_rss_list - remove RSS configuration from list
3595 * @hw: pointer to the hardware structure
3596 * @vsi_handle: software VSI handle
3597 * @prof: pointer to flow profile
3598 *
3599 * Assumption: lock has already been acquired for RSS list
3600 */
3601static void
3602ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3603{
3604        enum ice_rss_cfg_hdr_type hdr_type;
3605        struct ice_rss_cfg *r, *tmp;
3606
3607        /* Search for RSS hash fields associated with the VSI that match the
3608         * hash configurations associated with the flow profile. If found,
3609         * remove it from the RSS entry list of the VSI context and delete it.
3610         */
3611        hdr_type = ice_get_rss_hdr_type(prof);
3612        LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
3613                                 ice_rss_cfg, l_entry)
3614                if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3615                    r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3616                    r->hash.hdr_type == hdr_type) {
3617                        ice_clear_bit(vsi_handle, r->vsis);
3618                        if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
3619                                LIST_DEL(&r->l_entry);
3620                                ice_free(hw, r);
3621                        }
3622                        return;
3623                }
3624}
3625
3626/**
3627 * ice_add_rss_list - add RSS configuration to list
3628 * @hw: pointer to the hardware structure
3629 * @vsi_handle: software VSI handle
3630 * @prof: pointer to flow profile
3631 *
3632 * Assumption: lock has already been acquired for RSS list
3633 */
3634static enum ice_status
3635ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
3636{
3637        enum ice_rss_cfg_hdr_type hdr_type;
3638        struct ice_rss_cfg *r, *rss_cfg;
3639
3640        hdr_type = ice_get_rss_hdr_type(prof);
3641        LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
3642                            ice_rss_cfg, l_entry)
3643                if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
3644                    r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
3645                    r->hash.hdr_type == hdr_type) {
3646                        ice_set_bit(vsi_handle, r->vsis);
3647                        return ICE_SUCCESS;
3648                }
3649
3650        rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
3651        if (!rss_cfg)
3652                return ICE_ERR_NO_MEMORY;
3653
3654        rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
3655        rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
3656        rss_cfg->hash.hdr_type = hdr_type;
3657        rss_cfg->hash.symm = prof->cfg.symm;
3658        ice_set_bit(vsi_handle, rss_cfg->vsis);
3659
3660        LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
3661
3662        return ICE_SUCCESS;
3663}
3664
3665#define ICE_FLOW_PROF_HASH_S    0
3666#define ICE_FLOW_PROF_HASH_M    (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
3667#define ICE_FLOW_PROF_HDR_S     32
3668#define ICE_FLOW_PROF_HDR_M     (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
3669#define ICE_FLOW_PROF_ENCAP_S   62
3670#define ICE_FLOW_PROF_ENCAP_M   (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
3671
3672/* Flow profile ID format:
3673 * [0:31] - Packet match fields
3674 * [32:61] - Protocol header
3675 * [62:63] - Encapsulation flag:
3676 *           0 if non-tunneled
3677 *           1 if tunneled
3678 *           2 for tunneled with outer ipv4
3679 *           3 for tunneled with outer ipv6
3680 */
3681#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
3682        ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
3683               (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
3684               (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)))
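
    /* Worked example (illustrative bit values): ICE_FLOW_GEN_PROFID(0x30, 0xC, 1)
     * evaluates to 0x30ULL | (0xCULL << 32) | (1ULL << 62) ==
     * 0x4000000C00000030ULL, i.e. the match-field bitmap in bits [0:31], the
     * protocol-header bitmap in bits [32:61] and the encapsulation flag in
     * bits [62:63].
     */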
3685
3686static void
3687ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
3688{
3689        u32 s = ((src % 4) << 3); /* byte shift */
3690        u32 v = dst | 0x80; /* value to program */
3691        u8 i = src / 4; /* register index */
3692        u32 reg;
3693
3694        reg = rd32(hw, GLQF_HSYMM(prof_id, i));
3695        reg = (reg & ~(0xff << s)) | (v << s);
3696        wr32(hw, GLQF_HSYMM(prof_id, i), reg);
3697}
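
    /* Worked example (assumed values): src = 10, dst = 3. The register index
     * is i = 10 / 4 = 2 and the byte shift is s = (10 % 4) * 8 = 16, so byte 2
     * of GLQF_HSYMM(prof_id, 2) becomes 0x83 (dst | 0x80). ice_rss_config_xor()
     * below issues this call in both directions so the source and destination
     * field-vector words are paired for the symmetric hash.
     */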
3698
3699static void
3700ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
3701{
3702        int fv_last_word =
3703                ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
3704        int i;
3705
3706        for (i = 0; i < len; i++) {
3707                ice_rss_config_xor_word(hw, prof_id,
3708                                        /* Yes, the field vector in GLQF_HSYMM and
3709                                         * GLQF_HINSET is reversed!
3710                                         */
3711                                        fv_last_word - (src + i),
3712                                        fv_last_word - (dst + i));
3713                ice_rss_config_xor_word(hw, prof_id,
3714                                        fv_last_word - (dst + i),
3715                                        fv_last_word - (src + i));
3716        }
3717}
3718
3719static void
3720ice_rss_update_symm(struct ice_hw *hw,
3721                    struct ice_flow_prof *prof)
3722{
3723        struct ice_prof_map *map;
3724        u8 prof_id, m;
3725
3726        ice_acquire_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3727        map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
3728        if (map)
3729                prof_id = map->prof_id;
3730        ice_release_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
3731        if (!map)
3732                return;
3733        /* clear to default */
3734        for (m = 0; m < 6; m++)
3735                wr32(hw, GLQF_HSYMM(prof_id, m), 0);
3736        if (prof->cfg.symm) {
3737                struct ice_flow_seg_info *seg =
3738                        &prof->segs[prof->segs_cnt - 1];
3739
3740                struct ice_flow_seg_xtrct *ipv4_src =
3741                        &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
3742                struct ice_flow_seg_xtrct *ipv4_dst =
3743                        &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
3744                struct ice_flow_seg_xtrct *ipv6_src =
3745                        &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
3746                struct ice_flow_seg_xtrct *ipv6_dst =
3747                        &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
3748
3749                struct ice_flow_seg_xtrct *tcp_src =
3750                        &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
3751                struct ice_flow_seg_xtrct *tcp_dst =
3752                        &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
3753
3754                struct ice_flow_seg_xtrct *udp_src =
3755                        &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
3756                struct ice_flow_seg_xtrct *udp_dst =
3757                        &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
3758
3759                struct ice_flow_seg_xtrct *sctp_src =
3760                        &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
3761                struct ice_flow_seg_xtrct *sctp_dst =
3762                        &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
3763
3764                /* xor IPv4 */
3765                if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
3766                        ice_rss_config_xor(hw, prof_id,
3767                                           ipv4_src->idx, ipv4_dst->idx, 2);
3768
3769                /* xor IPv6 */
3770                if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
3771                        ice_rss_config_xor(hw, prof_id,
3772                                           ipv6_src->idx, ipv6_dst->idx, 8);
3773
3774                /* xor TCP */
3775                if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
3776                        ice_rss_config_xor(hw, prof_id,
3777                                           tcp_src->idx, tcp_dst->idx, 1);
3778
3779                /* xor UDP */
3780                if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
3781                        ice_rss_config_xor(hw, prof_id,
3782                                           udp_src->idx, udp_dst->idx, 1);
3783
3784                /* xor SCTP */
3785                if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
3786                        ice_rss_config_xor(hw, prof_id,
3787                                           sctp_src->idx, sctp_dst->idx, 1);
3788        }
3789}
3790
3791/**
3792 * ice_add_rss_cfg_sync - add an RSS configuration
3793 * @hw: pointer to the hardware structure
3794 * @vsi_handle: software VSI handle
3795 * @cfg: configure parameters
3796 *
3797 * Assumption: lock has already been acquired for RSS list
3798 */
3799static enum ice_status
3800ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3801                     const struct ice_rss_hash_cfg *cfg)
3802{
3803        const enum ice_block blk = ICE_BLK_RSS;
3804        struct ice_flow_prof *prof = NULL;
3805        struct ice_flow_seg_info *segs;
3806        enum ice_status status;
3807        u8 segs_cnt;
3808
3809        segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3810                        ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3811
3812        segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3813                                                      sizeof(*segs));
3814        if (!segs)
3815                return ICE_ERR_NO_MEMORY;
3816
3817        /* Construct the packet segment info from the hashed fields */
3818        status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3819        if (status)
3820                goto exit;
3821
3822        /* Search for a flow profile that has matching headers and hash
3823         * fields and has the input VSI associated with it. If found, no
3824         * further operations are required, so exit.
3825         */
3826        prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3827                                        vsi_handle,
3828                                        ICE_FLOW_FIND_PROF_CHK_FLDS |
3829                                        ICE_FLOW_FIND_PROF_CHK_VSI);
3830        if (prof) {
3831                if (prof->cfg.symm == cfg->symm)
3832                        goto exit;
3833                prof->cfg.symm = cfg->symm;
3834                goto update_symm;
3835        }
3836
3837        /* Check if a flow profile exists with the same protocol headers and
3838         * is associated with the input VSI. If so, disassociate the VSI
3839         * from this profile. The VSI will be added to a new profile created
3840         * with the protocol headers and the new hash field configuration.
3841         */
3842        prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3843                                        vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
3844        if (prof) {
3845                status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3846                if (!status)
3847                        ice_rem_rss_list(hw, vsi_handle, prof);
3848                else
3849                        goto exit;
3850
3851                /* Remove profile if it has no VSIs associated */
3852                if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
3853                        status = ice_flow_rem_prof(hw, blk, prof->id);
3854                        if (status)
3855                                goto exit;
3856                }
3857        }
3858
3859        /* Search for a profile that matches on the hash fields only. If one
3860         * exists, associate the VSI with it.
3861         */
3862        prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3863                                        vsi_handle,
3864                                        ICE_FLOW_FIND_PROF_CHK_FLDS);
3865        if (prof) {
3866                if (prof->cfg.symm == cfg->symm) {
3867                        status = ice_flow_assoc_prof(hw, blk, prof,
3868                                                     vsi_handle);
3869                        if (!status)
3870                                status = ice_add_rss_list(hw, vsi_handle,
3871                                                          prof);
3872                } else {
3873                        /* if a profile exists but with a different symmetric
3874                         * requirement, just return an error.
3875                         */
3876                        status = ICE_ERR_NOT_SUPPORTED;
3877                }
3878                goto exit;
3879        }
3880
3881        /* Create a new flow profile with the generated profile ID and
3882         * packet segment information.
3883         */
3884        status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
3885                                   ICE_FLOW_GEN_PROFID(cfg->hash_flds,
3886                                                       segs[segs_cnt - 1].hdrs,
3887                                                       cfg->hdr_type),
3888                                   segs, segs_cnt, NULL, 0, &prof);
3889        if (status)
3890                goto exit;
3891
3892        status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
3893        /* If associating the VSI with the new flow profile failed, the
3894         * profile can be removed.
3895         */
3896        if (status) {
3897                ice_flow_rem_prof(hw, blk, prof->id);
3898                goto exit;
3899        }
3900
3901        status = ice_add_rss_list(hw, vsi_handle, prof);
3902
3903        prof->cfg.symm = cfg->symm;
3904update_symm:
3905        ice_rss_update_symm(hw, prof);
3906
3907exit:
3908        ice_free(hw, segs);
3909        return status;
3910}
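
/* Summary of the decision flow above (no new behavior, just a recap):
 *
 *     1. headers + hash fields + VSI match  -> reuse profile, update symm only
 *     2. headers match on this VSI          -> detach VSI, drop profile if empty
 *     3. headers + hash fields match        -> attach VSI if symm also matches,
 *                                              otherwise ICE_ERR_NOT_SUPPORTED
 *     4. no match                           -> create a new profile and attach
 */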
3911
3912/**
3913 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
3914 * @hw: pointer to the hardware structure
3915 * @vsi_handle: software VSI handle
3916 * @cfg: configure parameters
3917 *
3918 * This function generates a flow profile based on the input fields to hash
3919 * on and the flow type, and uses the VSI number to add a flow entry to the
3920 * profile.
3921 */
3922enum ice_status
3923ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
3924                const struct ice_rss_hash_cfg *cfg)
3925{
3926        struct ice_rss_hash_cfg local_cfg;
3927        enum ice_status status;
3928
3929        if (!ice_is_vsi_valid(hw, vsi_handle) ||
3930            !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
3931            cfg->hash_flds == ICE_HASH_INVALID)
3932                return ICE_ERR_PARAM;
3933
3934        local_cfg = *cfg;
3935        if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
3936                ice_acquire_lock(&hw->rss_locks);
3937                status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3938                ice_release_lock(&hw->rss_locks);
3939        } else {
3940                ice_acquire_lock(&hw->rss_locks);
3941                local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3942                status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
3943                if (!status) {
3944                        local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
3945                        status = ice_add_rss_cfg_sync(hw, vsi_handle,
3946                                                      &local_cfg);
3947                }
3948                ice_release_lock(&hw->rss_locks);
3949        }
3950
3951        return status;
3952}
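
/* Usage sketch (illustrative only; the header/hash macros come from
 * ice_flow.h and the exact values below are just an example):
 *
 *     struct ice_rss_hash_cfg cfg = { 0 };
 *     enum ice_status status;
 *
 *     cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
 *     cfg.hash_flds = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT;
 *     cfg.hdr_type  = ICE_RSS_OUTER_HEADERS;
 *     cfg.symm      = false;
 *
 *     status = ice_add_rss_cfg(hw, vsi_handle, &cfg);
 *
 * For ICE_RSS_ANY_HEADERS the function applies the configuration first to
 * the outer headers and then to the inner headers, all under hw->rss_locks.
 */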
3953
3954/**
3955 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
3956 * @hw: pointer to the hardware structure
3957 * @vsi_handle: software VSI handle
3958 * @cfg: configure parameters
3959 *
3960 * Assumption: lock has already been acquired for RSS list
3961 */
3962static enum ice_status
3963ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
3964                     const struct ice_rss_hash_cfg *cfg)
3965{
3966        const enum ice_block blk = ICE_BLK_RSS;
3967        struct ice_flow_seg_info *segs;
3968        struct ice_flow_prof *prof;
3969        enum ice_status status;
3970        u8 segs_cnt;
3971
3972        segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
3973                        ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
3974        segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
3975                                                      sizeof(*segs));
3976        if (!segs)
3977                return ICE_ERR_NO_MEMORY;
3978
3979        /* Construct the packet segment info from the hashed fields */
3980        status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
3981        if (status)
3982                goto out;
3983
3984        prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
3985                                        vsi_handle,
3986                                        ICE_FLOW_FIND_PROF_CHK_FLDS);
3987        if (!prof) {
3988                status = ICE_ERR_DOES_NOT_EXIST;
3989                goto out;
3990        }
3991
3992        status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
3993        if (status)
3994                goto out;
3995
3996        /* Remove RSS configuration from VSI context before deleting
3997         * the flow profile.
3998         */
3999        ice_rem_rss_list(hw, vsi_handle, prof);
4000
4001        if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
4002                status = ice_flow_rem_prof(hw, blk, prof->id);
4003
4004out:
4005        ice_free(hw, segs);
4006        return status;
4007}
4008
4009/**
4010 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
4011 * @hw: pointer to the hardware structure
4012 * @vsi_handle: software VSI handle
4013 * @cfg: configure parameters
4014 *
4015 * This function looks up the flow profile based on the input
4016 * hash field bitmap, iterates through that profile's entry list,
4017 * and finds the entry associated with the input VSI to be
4018 * removed. Calls are made to the underlying flow APIs, which in
4019 * turn build or update buffers for the RSS XLT1 section.
4020 */
4021enum ice_status
4022ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
4023                const struct ice_rss_hash_cfg *cfg)
4024{
4025        struct ice_rss_hash_cfg local_cfg;
4026        enum ice_status status;
4027
4028        if (!ice_is_vsi_valid(hw, vsi_handle) ||
4029            !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
4030            cfg->hash_flds == ICE_HASH_INVALID)
4031                return ICE_ERR_PARAM;
4032
4033        ice_acquire_lock(&hw->rss_locks);
4034        local_cfg = *cfg;
4035        if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
4036                status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4037        } else {
4038                local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
4039                status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
4040
4041                if (!status) {
4042                        local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
4043                        status = ice_rem_rss_cfg_sync(hw, vsi_handle,
4044                                                      &local_cfg);
4045                }
4046        }
4047        ice_release_lock(&hw->rss_locks);
4048
4049        return status;
4050}
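
/* Removal sketch (illustrative only): pass the same cfg that was used when
 * the configuration was added:
 *
 *     status = ice_rem_rss_cfg(hw, vsi_handle, &cfg);
 *
 * If no profile with matching hashed fields is associated with the VSI, the
 * sync helper above returns ICE_ERR_DOES_NOT_EXIST.
 */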
4051
4052/**
4053 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
4054 * @hw: pointer to the hardware structure
4055 * @vsi_handle: software VSI handle
4056 */
4057enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
4058{
4059        enum ice_status status = ICE_SUCCESS;
4060        struct ice_rss_cfg *r;
4061
4062        if (!ice_is_vsi_valid(hw, vsi_handle))
4063                return ICE_ERR_PARAM;
4064
4065        ice_acquire_lock(&hw->rss_locks);
4066        LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4067                            ice_rss_cfg, l_entry) {
4068                if (ice_is_bit_set(r->vsis, vsi_handle)) {
4069                        status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
4070                        if (status)
4071                                break;
4072                }
4073        }
4074        ice_release_lock(&hw->rss_locks);
4075
4076        return status;
4077}
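
/* Replay sketch (illustrative only): re-apply every saved configuration for
 * a VSI, e.g. when its RSS state has to be rebuilt:
 *
 *     status = ice_replay_rss_cfg(hw, vsi_handle);
 *
 * Each ice_rss_cfg on hw->rss_list_head whose VSI bitmap contains vsi_handle
 * is re-added through ice_add_rss_cfg_sync() under hw->rss_locks.
 */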
4078
4079/**
4080 * ice_get_rss_cfg - returns hashed fields for the given header types
4081 * @hw: pointer to the hardware structure
4082 * @vsi_handle: software VSI handle
4083 * @hdrs: protocol header type
4084 *
4085 * This function returns the match fields of the first instance of a flow
4086 * profile that has the given header types and contains the input VSI.
4087 */
4088u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
4089{
4090        u64 rss_hash = ICE_HASH_INVALID;
4091        struct ice_rss_cfg *r;
4092
4093        /* verify that the protocol header is non-zero and the VSI is valid */
4094        if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
4095                return ICE_HASH_INVALID;
4096
4097        ice_acquire_lock(&hw->rss_locks);
4098        LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
4099                            ice_rss_cfg, l_entry)
4100                if (ice_is_bit_set(r->vsis, vsi_handle) &&
4101                    r->hash.addl_hdrs == hdrs) {
4102                        rss_hash = r->hash.hash_flds;
4103                        break;
4104                }
4105        ice_release_lock(&hw->rss_locks);
4106
4107        return rss_hash;
4108}
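
/* Query sketch (illustrative only): retrieve the hashed fields currently
 * configured for a header combination on a VSI:
 *
 *     u64 flds = ice_get_rss_cfg(hw, vsi_handle,
 *                                ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 *
 * A return value of ICE_HASH_INVALID means no stored configuration for this
 * VSI has addl_hdrs equal to the hdrs argument.
 */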
4109