linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Header Parser helpers for Marvell PPv2 Network Controller
   4 *
   5 * Copyright (C) 2014 Marvell
   6 *
   7 * Marcin Wojtas <mw@semihalf.com>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/netdevice.h>
  12#include <linux/etherdevice.h>
  13#include <linux/platform_device.h>
  14#include <uapi/linux/ppp_defs.h>
  15#include <net/ip.h>
  16#include <net/ipv6.h>
  17
  18#include "mvpp2.h"
  19#include "mvpp2_prs.h"
  20
  21/* Update parser tcam and sram hw entries */
  22static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
  23{
  24        int i;
  25
  26        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  27                return -EINVAL;
  28
  29        /* Clear entry invalidation bit */
  30        pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
  31
  32        /* Write tcam index - indirect access */
  33        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  34        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  35                mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
  36
  37        /* Write sram index - indirect access */
  38        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  39        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  40                mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
  41
  42        return 0;
  43}
  44
  45/* Initialize tcam entry from hw */
  46int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
  47                           int tid)
  48{
  49        int i;
  50
  51        if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  52                return -EINVAL;
  53
  54        memset(pe, 0, sizeof(*pe));
  55        pe->index = tid;
  56
  57        /* Write tcam index - indirect access */
  58        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  59
  60        pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
  61                              MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
  62        if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
  63                return MVPP2_PRS_TCAM_ENTRY_INVALID;
  64
  65        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  66                pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
  67
  68        /* Write sram index - indirect access */
  69        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  70        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  71                pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
  72
  73        return 0;
  74}
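
     /* Note: both helpers above use the same indirect access scheme - the
      * entry index is written to the TCAM/SRAM index register first, and the
      * data words are then transferred one at a time through the
      * corresponding data registers.
      */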
  75
  76/* Invalidate tcam hw entry */
  77static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
  78{
  79        /* Write index - indirect access */
  80        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
  81        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
  82                    MVPP2_PRS_TCAM_INV_MASK);
  83}
  84
  85/* Enable shadow table entry and set its lookup ID */
  86static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
  87{
  88        priv->prs_shadow[index].valid = true;
  89        priv->prs_shadow[index].lu = lu;
  90}
  91
  92/* Update ri fields in shadow table entry */
  93static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
  94                                    unsigned int ri, unsigned int ri_mask)
  95{
  96        priv->prs_shadow[index].ri_mask = ri_mask;
  97        priv->prs_shadow[index].ri = ri;
  98}
  99
 100/* Update lookup field in tcam sw entry */
 101static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
 102{
 103        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
 104        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
 105        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
 106        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
 107}
 108
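     /* Note on the port helpers below: the TCAM port-enable field holds an
      * inverted mask, i.e. a set bit disables matching on that port. "add"
      * therefore clears the port's enable bit, a port map of 0 masks the
      * entry for every port, and mvpp2_prs_tcam_port_map_get() inverts the
      * field again when reading it back.
      */
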
 109/* Update mask for single port in tcam sw entry */
 110static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
 111                                    unsigned int port, bool add)
 112{
 113        if (add)
 114                pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
 115        else
 116                pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
 117}
 118
 119/* Update port map in tcam sw entry */
 120static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
 121                                        unsigned int ports)
 122{
 123        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
 124        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
 125        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
 126}
 127
 128/* Obtain port map from tcam sw entry */
 129unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
 130{
 131        return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
 132}
 133
 134/* Set byte of data and its enable bits in tcam sw entry */
 135static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
 136                                         unsigned int offs, unsigned char byte,
 137                                         unsigned char enable)
 138{
 139        int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
 140
 141        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
 142        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
 143        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
 144        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
 145}
 146
 147/* Get byte of data and its enable bits from tcam sw entry */
 148void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
 149                                  unsigned int offs, unsigned char *byte,
 150                                  unsigned char *enable)
 151{
 152        int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
 153
 154        *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
 155        *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
 156}
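
     /* Note: each 32-bit TCAM word packs two data bytes in its low half-word
      * with their per-byte enable bits 16 bits higher, which is why the
      * enable bits are read back from 'pos + 16' above.
      */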
 157
 158/* Compare tcam data bytes with a pattern */
 159static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
 160                                    u16 data)
 161{
 162        u16 tcam_data;
 163
 164        tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
 165        return tcam_data == data;
 166}
 167
 168/* Update ai bits in tcam sw entry */
 169static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
 170                                     unsigned int bits, unsigned int enable)
 171{
 172        int i;
 173
 174        for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
 175                if (!(enable & BIT(i)))
 176                        continue;
 177
 178                if (bits & BIT(i))
 179                        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
 180                else
 181                        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
 182        }
 183
 184        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
 185}
 186
 187/* Get ai bits from tcam sw entry */
 188static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
 189{
 190        return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
 191}
 192
 193/* Set ethertype in tcam sw entry */
 194static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
 195                                  unsigned short ethertype)
 196{
 197        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
 198        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
 199}
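
     /* The ethertype is matched big-endian: high byte at 'offset', low byte
      * at 'offset + 1', with both bytes fully enabled in the TCAM.
      */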
 200
 201/* Set vid in tcam sw entry */
 202static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
 203                                unsigned short vid)
 204{
 205        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
 206        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
 207}
 208
 209/* Set bits in sram sw entry */
 210static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
 211                                    u32 val)
 212{
 213        pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
 214}
 215
 216/* Clear bits in sram sw entry */
 217static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
 218                                      u32 val)
 219{
 220        pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
 221}
 222
 223/* Update ri bits in sram sw entry */
 224static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
 225                                     unsigned int bits, unsigned int mask)
 226{
 227        unsigned int i;
 228
 229        for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
 230                if (!(mask & BIT(i)))
 231                        continue;
 232
 233                if (bits & BIT(i))
 234                        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
 235                                                1);
 236                else
 237                        mvpp2_prs_sram_bits_clear(pe,
 238                                                  MVPP2_PRS_SRAM_RI_OFFS + i,
 239                                                  1);
 240
 241                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
 242        }
 243}
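
     /* Note: for every ri bit touched above, the companion bit at
      * MVPP2_PRS_SRAM_RI_CTRL_OFFS is set as well - presumably a per-bit
      * "update" mask for the hardware. Callers that rebuild the ri from
      * scratch clear both the RI and RI_CTRL words first.
      */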
 244
 245/* Obtain ri bits from sram sw entry */
 246static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
 247{
 248        return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
 249}
 250
 251/* Update ai bits in sram sw entry */
 252static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
 253                                     unsigned int bits, unsigned int mask)
 254{
 255        unsigned int i;
 256
 257        for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
 258                if (!(mask & BIT(i)))
 259                        continue;
 260
 261                if (bits & BIT(i))
 262                        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
 263                                                1);
 264                else
 265                        mvpp2_prs_sram_bits_clear(pe,
 266                                                  MVPP2_PRS_SRAM_AI_OFFS + i,
 267                                                  1);
 268
 269                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
 270        }
 271}
 272
 273/* Read ai bits from sram sw entry */
 274static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
 275{
 276        u8 bits;
 277        /* ai is stored on bits 90->97; so it spreads across two u32 */
 278        int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
 279        int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
 280
 281        bits = (pe->sram[ai_off] >> ai_shift) |
 282               (pe->sram[ai_off + 1] << (32 - ai_shift));
 283
 284        return bits;
 285}
 286
 287/* In sram sw entry set lookup ID field of the tcam key to be used in the next
  288 * lookup iteration
 289 */
 290static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
 291                                       unsigned int lu)
 292{
 293        int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
 294
 295        mvpp2_prs_sram_bits_clear(pe, sram_next_off,
 296                                  MVPP2_PRS_SRAM_NEXT_LU_MASK);
 297        mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
 298}
 299
 300/* In the sram sw entry set sign and value of the next lookup offset
 301 * and the offset value generated to the classifier
 302 */
 303static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
 304                                     unsigned int op)
 305{
 306        /* Set sign */
 307        if (shift < 0) {
 308                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
 309                shift = 0 - shift;
 310        } else {
 311                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
 312        }
 313
 314        /* Set value */
 315        pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
 316                shift & MVPP2_PRS_SRAM_SHIFT_MASK;
 317
 318        /* Reset and set operation */
 319        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
 320                                  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
 321        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
 322
 323        /* Set base offset as current */
 324        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
 325}
 326
 327/* In the sram sw entry set sign and value of the user defined offset
 328 * generated to the classifier
 329 */
 330static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
 331                                      unsigned int type, int offset,
 332                                      unsigned int op)
 333{
 334        /* Set sign */
 335        if (offset < 0) {
 336                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
 337                offset = 0 - offset;
 338        } else {
 339                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
 340        }
 341
 342        /* Set value */
 343        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
 344                                  MVPP2_PRS_SRAM_UDF_MASK);
 345        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
 346                                offset & MVPP2_PRS_SRAM_UDF_MASK);
 347
 348        /* Set offset type */
 349        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
 350                                  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
 351        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
 352
 353        /* Set offset operation */
 354        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
 355                                  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
 356        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
 357                                op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
 358
 359        /* Set base offset as current */
 360        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
 361}
 362
 363/* Find parser flow entry */
 364static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
 365{
 366        struct mvpp2_prs_entry pe;
 367        int tid;
 368
  369        /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
 370        for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
 371                u8 bits;
 372
 373                if (!priv->prs_shadow[tid].valid ||
 374                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
 375                        continue;
 376
 377                mvpp2_prs_init_from_hw(priv, &pe, tid);
 378                bits = mvpp2_prs_sram_ai_get(&pe);
 379
  380                /* Sram stores the classification lookup ID in AI bits [5:0] */
 381                if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
 382                        return tid;
 383        }
 384
 385        return -ENOENT;
 386}
 387
  388/* Return the first free tcam index in the range; start/end may be given in either order */
 389static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
 390                                     unsigned char end)
 391{
 392        int tid;
 393
 394        if (start > end)
 395                swap(start, end);
 396
 397        if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
 398                end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
 399
 400        for (tid = start; tid <= end; tid++) {
 401                if (!priv->prs_shadow[tid].valid)
 402                        return tid;
 403        }
 404
 405        return -EINVAL;
 406}
 407
 408/* Enable/disable dropping all mac da's */
 409static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
 410{
 411        struct mvpp2_prs_entry pe;
 412
 413        if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
  414                /* Entry exists - update port only */
 415                mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
 416        } else {
 417                /* Entry doesn't exist - create new */
 418                memset(&pe, 0, sizeof(pe));
 419                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
 420                pe.index = MVPP2_PE_DROP_ALL;
 421
 422                /* Non-promiscuous mode for all ports - DROP unknown packets */
 423                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
 424                                         MVPP2_PRS_RI_DROP_MASK);
 425
 426                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
 427                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
 428
 429                /* Update shadow table */
 430                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
 431
 432                /* Mask all ports */
 433                mvpp2_prs_tcam_port_map_set(&pe, 0);
 434        }
 435
 436        /* Update port mask */
 437        mvpp2_prs_tcam_port_set(&pe, port, add);
 438
 439        mvpp2_prs_hw_write(priv, &pe);
 440}
 441
 442/* Set port to unicast or multicast promiscuous mode */
 443void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
 444                               enum mvpp2_prs_l2_cast l2_cast, bool add)
 445{
 446        struct mvpp2_prs_entry pe;
 447        unsigned char cast_match;
 448        unsigned int ri;
 449        int tid;
 450
 451        if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
 452                cast_match = MVPP2_PRS_UCAST_VAL;
 453                tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
 454                ri = MVPP2_PRS_RI_L2_UCAST;
 455        } else {
 456                cast_match = MVPP2_PRS_MCAST_VAL;
 457                tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
 458                ri = MVPP2_PRS_RI_L2_MCAST;
 459        }
 460
 461        /* promiscuous mode - Accept unknown unicast or multicast packets */
 462        if (priv->prs_shadow[tid].valid) {
 463                mvpp2_prs_init_from_hw(priv, &pe, tid);
 464        } else {
 465                memset(&pe, 0, sizeof(pe));
 466                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
 467                pe.index = tid;
 468
 469                /* Continue - set next lookup */
 470                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
 471
 472                /* Set result info bits */
 473                mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
 474
 475                /* Match UC or MC addresses */
 476                mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
 477                                             MVPP2_PRS_CAST_MASK);
 478
 479                /* Shift to ethertype */
 480                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
 481                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 482
 483                /* Mask all ports */
 484                mvpp2_prs_tcam_port_map_set(&pe, 0);
 485
 486                /* Update shadow table */
 487                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
 488        }
 489
 490        /* Update port mask */
 491        mvpp2_prs_tcam_port_set(&pe, port, add);
 492
 493        mvpp2_prs_hw_write(priv, &pe);
 494}
 495
 496/* Set entry for dsa packets */
 497static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
 498                                  bool tagged, bool extend)
 499{
 500        struct mvpp2_prs_entry pe;
 501        int tid, shift;
 502
 503        if (extend) {
 504                tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
 505                shift = 8;
 506        } else {
 507                tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
 508                shift = 4;
 509        }
 510
 511        if (priv->prs_shadow[tid].valid) {
  512                /* Entry exists - update port only */
 513                mvpp2_prs_init_from_hw(priv, &pe, tid);
 514        } else {
 515                /* Entry doesn't exist - create new */
 516                memset(&pe, 0, sizeof(pe));
 517                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
 518                pe.index = tid;
 519
 520                /* Update shadow table */
 521                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
 522
 523                if (tagged) {
 524                        /* Set tagged bit in DSA tag */
 525                        mvpp2_prs_tcam_data_byte_set(&pe, 0,
 526                                             MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
 527                                             MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
 528
 529                        /* Set ai bits for next iteration */
 530                        if (extend)
 531                                mvpp2_prs_sram_ai_update(&pe, 1,
 532                                                        MVPP2_PRS_SRAM_AI_MASK);
 533                        else
 534                                mvpp2_prs_sram_ai_update(&pe, 0,
 535                                                        MVPP2_PRS_SRAM_AI_MASK);
 536
 537                        /* Set result info bits to 'single vlan' */
 538                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
 539                                                 MVPP2_PRS_RI_VLAN_MASK);
  540                        /* If packet is tagged, continue with VID filtering */
 541                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
 542                } else {
  543                        /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
 544                        mvpp2_prs_sram_shift_set(&pe, shift,
 545                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 546
 547                        /* Set result info bits to 'no vlans' */
 548                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
 549                                                 MVPP2_PRS_RI_VLAN_MASK);
 550                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
 551                }
 552
 553                /* Mask all ports */
 554                mvpp2_prs_tcam_port_map_set(&pe, 0);
 555        }
 556
 557        /* Update port mask */
 558        mvpp2_prs_tcam_port_set(&pe, port, add);
 559
 560        mvpp2_prs_hw_write(priv, &pe);
 561}
 562
 563/* Set entry for dsa ethertype */
 564static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
 565                                            bool add, bool tagged, bool extend)
 566{
 567        struct mvpp2_prs_entry pe;
 568        int tid, shift, port_mask;
 569
 570        if (extend) {
 571                tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
 572                      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
 573                port_mask = 0;
 574                shift = 8;
 575        } else {
 576                tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
 577                      MVPP2_PE_ETYPE_DSA_UNTAGGED;
 578                port_mask = MVPP2_PRS_PORT_MASK;
 579                shift = 4;
 580        }
 581
 582        if (priv->prs_shadow[tid].valid) {
  583                /* Entry exists - update port only */
 584                mvpp2_prs_init_from_hw(priv, &pe, tid);
 585        } else {
 586                /* Entry doesn't exist - create new */
 587                memset(&pe, 0, sizeof(pe));
 588                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
 589                pe.index = tid;
 590
 591                /* Set ethertype */
 592                mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
 593                mvpp2_prs_match_etype(&pe, 2, 0);
 594
 595                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
 596                                         MVPP2_PRS_RI_DSA_MASK);
  597                /* Shift ethertype + 2 reserved bytes + tag */
 598                mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
 599                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 600
 601                /* Update shadow table */
 602                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
 603
 604                if (tagged) {
 605                        /* Set tagged bit in DSA tag */
 606                        mvpp2_prs_tcam_data_byte_set(&pe,
 607                                                     MVPP2_ETH_TYPE_LEN + 2 + 3,
 608                                                 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
 609                                                 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
 610                        /* Clear all ai bits for next iteration */
 611                        mvpp2_prs_sram_ai_update(&pe, 0,
 612                                                 MVPP2_PRS_SRAM_AI_MASK);
  613                        /* If packet is tagged, continue with VLAN parsing */
 614                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
 615                } else {
 616                        /* Set result info bits to 'no vlans' */
 617                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
 618                                                 MVPP2_PRS_RI_VLAN_MASK);
 619                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
 620                }
 621                /* Mask/unmask all ports, depending on dsa type */
 622                mvpp2_prs_tcam_port_map_set(&pe, port_mask);
 623        }
 624
 625        /* Update port mask */
 626        mvpp2_prs_tcam_port_set(&pe, port, add);
 627
 628        mvpp2_prs_hw_write(priv, &pe);
 629}
 630
 631/* Search for existing single/triple vlan entry */
 632static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
 633{
 634        struct mvpp2_prs_entry pe;
 635        int tid;
 636
  637        /* Go through all the entries with MVPP2_PRS_LU_VLAN */
 638        for (tid = MVPP2_PE_FIRST_FREE_TID;
 639             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
 640                unsigned int ri_bits, ai_bits;
 641                bool match;
 642
 643                if (!priv->prs_shadow[tid].valid ||
 644                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
 645                        continue;
 646
 647                mvpp2_prs_init_from_hw(priv, &pe, tid);
 648                match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
 649                if (!match)
 650                        continue;
 651
 652                /* Get vlan type */
 653                ri_bits = mvpp2_prs_sram_ri_get(&pe);
 654                ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
 655
 656                /* Get current ai value from tcam */
 657                ai_bits = mvpp2_prs_tcam_ai_get(&pe);
 658                /* Clear double vlan bit */
 659                ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
 660
 661                if (ai != ai_bits)
 662                        continue;
 663
 664                if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
 665                    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
 666                        return tid;
 667        }
 668
 669        return -ENOENT;
 670}
 671
 672/* Add/update single/triple vlan entry */
 673static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 674                              unsigned int port_map)
 675{
 676        struct mvpp2_prs_entry pe;
 677        int tid_aux, tid;
 678        int ret = 0;
 679
 680        memset(&pe, 0, sizeof(pe));
 681
 682        tid = mvpp2_prs_vlan_find(priv, tpid, ai);
 683
 684        if (tid < 0) {
 685                /* Create new tcam entry */
 686                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
 687                                                MVPP2_PE_FIRST_FREE_TID);
 688                if (tid < 0)
 689                        return tid;
 690
 691                /* Get last double vlan tid */
 692                for (tid_aux = MVPP2_PE_LAST_FREE_TID;
 693                     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
 694                        unsigned int ri_bits;
 695
 696                        if (!priv->prs_shadow[tid_aux].valid ||
 697                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
 698                                continue;
 699
 700                        mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
 701                        ri_bits = mvpp2_prs_sram_ri_get(&pe);
 702                        if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
 703                            MVPP2_PRS_RI_VLAN_DOUBLE)
 704                                break;
 705                }
 706
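                     /* The new single/triple vlan entry must end up after
                      * (i.e. with a higher tid than) every existing double
                      * vlan entry; bail out if the free slot found above does
                      * not satisfy that ordering.
                      */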
 707                if (tid <= tid_aux)
 708                        return -EINVAL;
 709
 710                memset(&pe, 0, sizeof(pe));
 711                pe.index = tid;
 712                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
 713
 714                mvpp2_prs_match_etype(&pe, 0, tpid);
 715
 716                /* VLAN tag detected, proceed with VID filtering */
 717                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
 718
 719                /* Clear all ai bits for next iteration */
 720                mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
 721
 722                if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
 723                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
 724                                                 MVPP2_PRS_RI_VLAN_MASK);
 725                } else {
 726                        ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
 727                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
 728                                                 MVPP2_PRS_RI_VLAN_MASK);
 729                }
 730                mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
 731
 732                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
 733        } else {
 734                mvpp2_prs_init_from_hw(priv, &pe, tid);
 735        }
 736        /* Update ports' mask */
 737        mvpp2_prs_tcam_port_map_set(&pe, port_map);
 738
 739        mvpp2_prs_hw_write(priv, &pe);
 740
 741        return ret;
 742}
 743
 744/* Get first free double vlan ai number */
 745static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
 746{
 747        int i;
 748
 749        for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
 750                if (!priv->prs_double_vlans[i])
 751                        return i;
 752        }
 753
 754        return -EINVAL;
 755}
 756
 757/* Search for existing double vlan entry */
 758static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
 759                                      unsigned short tpid2)
 760{
 761        struct mvpp2_prs_entry pe;
 762        int tid;
 763
  764        /* Go through all the entries with MVPP2_PRS_LU_VLAN */
 765        for (tid = MVPP2_PE_FIRST_FREE_TID;
 766             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
 767                unsigned int ri_mask;
 768                bool match;
 769
 770                if (!priv->prs_shadow[tid].valid ||
 771                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
 772                        continue;
 773
 774                mvpp2_prs_init_from_hw(priv, &pe, tid);
 775
 776                match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
 777                        mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
 778
 779                if (!match)
 780                        continue;
 781
 782                ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
 783                if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
 784                        return tid;
 785        }
 786
 787        return -ENOENT;
 788}
 789
 790/* Add or update double vlan entry */
 791static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 792                                     unsigned short tpid2,
 793                                     unsigned int port_map)
 794{
 795        int tid_aux, tid, ai, ret = 0;
 796        struct mvpp2_prs_entry pe;
 797
 798        memset(&pe, 0, sizeof(pe));
 799
 800        tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
 801
 802        if (tid < 0) {
 803                /* Create new tcam entry */
 804                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 805                                MVPP2_PE_LAST_FREE_TID);
 806                if (tid < 0)
 807                        return tid;
 808
 809                /* Set ai value for new double vlan entry */
 810                ai = mvpp2_prs_double_vlan_ai_free_get(priv);
 811                if (ai < 0)
 812                        return ai;
 813
 814                /* Get first single/triple vlan tid */
 815                for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
 816                     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
 817                        unsigned int ri_bits;
 818
 819                        if (!priv->prs_shadow[tid_aux].valid ||
 820                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
 821                                continue;
 822
 823                        mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
 824                        ri_bits = mvpp2_prs_sram_ri_get(&pe);
 825                        ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
 826                        if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
 827                            ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
 828                                break;
 829                }
 830
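                     /* Conversely to mvpp2_prs_vlan_add(), a double vlan
                      * entry must sit before (have a lower tid than) the
                      * first single/triple vlan entry; reject the free slot
                      * if that ordering cannot be kept.
                      */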
 831                if (tid >= tid_aux)
 832                        return -ERANGE;
 833
 834                memset(&pe, 0, sizeof(pe));
 835                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
 836                pe.index = tid;
 837
 838                priv->prs_double_vlans[ai] = true;
 839
 840                mvpp2_prs_match_etype(&pe, 0, tpid1);
 841                mvpp2_prs_match_etype(&pe, 4, tpid2);
 842
 843                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
 844                /* Shift 4 bytes - skip outer vlan tag */
 845                mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
 846                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 847                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
 848                                         MVPP2_PRS_RI_VLAN_MASK);
 849                mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
 850                                         MVPP2_PRS_SRAM_AI_MASK);
 851
 852                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
 853        } else {
 854                mvpp2_prs_init_from_hw(priv, &pe, tid);
 855        }
 856
 857        /* Update ports' mask */
 858        mvpp2_prs_tcam_port_map_set(&pe, port_map);
 859        mvpp2_prs_hw_write(priv, &pe);
 860
 861        return ret;
 862}
 863
 864/* IPv4 header parsing for fragmentation and L4 offset */
 865static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
 866                               unsigned int ri, unsigned int ri_mask)
 867{
 868        struct mvpp2_prs_entry pe;
 869        int tid;
 870
 871        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
 872            (proto != IPPROTO_IGMP))
 873                return -EINVAL;
 874
 875        /* Not fragmented packet */
 876        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 877                                        MVPP2_PE_LAST_FREE_TID);
 878        if (tid < 0)
 879                return tid;
 880
 881        memset(&pe, 0, sizeof(pe));
 882        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
 883        pe.index = tid;
 884
 885        /* Set next lu to IPv4 */
 886        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
 887        mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 888        /* Set L4 offset */
 889        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
 890                                  sizeof(struct iphdr) - 4,
 891                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
 892        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
 893                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
 894        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
 895
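             /* Bytes 2 and 3 of the lookup window fall on the IPv4 flags and
              * fragment-offset field (the IP4 lookup starts 4 bytes into the
              * header), so this zero match selects non-fragmented packets
              * only.
              */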
 896        mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
 897                                     MVPP2_PRS_TCAM_PROTO_MASK_L);
 898        mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
 899                                     MVPP2_PRS_TCAM_PROTO_MASK);
 900
 901        mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
 902        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
 903        /* Unmask all ports */
 904        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
 905
 906        /* Update shadow table and hw entry */
 907        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
 908        mvpp2_prs_hw_write(priv, &pe);
 909
 910        /* Fragmented packet */
 911        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 912                                        MVPP2_PE_LAST_FREE_TID);
 913        if (tid < 0)
 914                return tid;
 915
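             /* Reuse the sw entry built above: keep the protocol match, clear
              * the enable bits on bytes 2 and 3 so any flags/fragment-offset
              * value matches, and mark the result info with
              * MVPP2_PRS_RI_IP_FRAG_TRUE.
              */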
 916        pe.index = tid;
 917        /* Clear ri before updating */
 918        pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
 919        pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
 920        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
 921
 922        mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
 923                                 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
 924
 925        mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
 926        mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
 927
 928        /* Update shadow table and hw entry */
 929        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
 930        mvpp2_prs_hw_write(priv, &pe);
 931
 932        return 0;
 933}
 934
 935/* IPv4 L3 multicast or broadcast */
 936static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
 937{
 938        struct mvpp2_prs_entry pe;
 939        int mask, tid;
 940
 941        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 942                                        MVPP2_PE_LAST_FREE_TID);
 943        if (tid < 0)
 944                return tid;
 945
 946        memset(&pe, 0, sizeof(pe));
 947        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
 948        pe.index = tid;
 949
 950        switch (l3_cast) {
 951        case MVPP2_PRS_L3_MULTI_CAST:
 952                mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
 953                                             MVPP2_PRS_IPV4_MC_MASK);
 954                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
 955                                         MVPP2_PRS_RI_L3_ADDR_MASK);
 956                break;
  957        case MVPP2_PRS_L3_BROAD_CAST:
 958                mask = MVPP2_PRS_IPV4_BC_MASK;
 959                mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
 960                mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
 961                mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
 962                mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
 963                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
 964                                         MVPP2_PRS_RI_L3_ADDR_MASK);
 965                break;
 966        default:
 967                return -EINVAL;
 968        }
 969
 970        /* Finished: go to flowid generation */
 971        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
 972        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
 973
 974        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
 975                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
 976        /* Unmask all ports */
 977        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
 978
 979        /* Update shadow table and hw entry */
 980        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
 981        mvpp2_prs_hw_write(priv, &pe);
 982
 983        return 0;
 984}
 985
  986/* Set entries for protocols over IPv6 */
 987static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
 988                               unsigned int ri, unsigned int ri_mask)
 989{
 990        struct mvpp2_prs_entry pe;
 991        int tid;
 992
 993        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
 994            (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
 995                return -EINVAL;
 996
 997        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 998                                        MVPP2_PE_LAST_FREE_TID);
 999        if (tid < 0)
1000                return tid;
1001
1002        memset(&pe, 0, sizeof(pe));
1003        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1004        pe.index = tid;
1005
1006        /* Finished: go to flowid generation */
1007        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1008        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1009        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1010        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1011                                  sizeof(struct ipv6hdr) - 6,
1012                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1013
1014        mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1015        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1016                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1017        /* Unmask all ports */
1018        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1019
1020        /* Write HW */
1021        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1022        mvpp2_prs_hw_write(priv, &pe);
1023
1024        return 0;
1025}
1026
1027/* IPv6 L3 multicast entry */
1028static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
1029{
1030        struct mvpp2_prs_entry pe;
1031        int tid;
1032
1033        if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
1034                return -EINVAL;
1035
1036        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1037                                        MVPP2_PE_LAST_FREE_TID);
1038        if (tid < 0)
1039                return tid;
1040
1041        memset(&pe, 0, sizeof(pe));
1042        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1043        pe.index = tid;
1044
 1045        /* Continue IPv6 parsing in the next lookup iteration */
1046        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1047        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
1048                                 MVPP2_PRS_RI_L3_ADDR_MASK);
1049        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1050                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1051        /* Shift back to IPv6 NH */
1052        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1053
1054        mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
1055                                     MVPP2_PRS_IPV6_MC_MASK);
1056        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1057        /* Unmask all ports */
1058        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1059
1060        /* Update shadow table and hw entry */
1061        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1062        mvpp2_prs_hw_write(priv, &pe);
1063
1064        return 0;
1065}
1066
1067/* Parser per-port initialization */
1068static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1069                                   int lu_max, int offset)
1070{
1071        u32 val;
1072
1073        /* Set lookup ID */
1074        val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1075        val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1076        val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1077        mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1078
1079        /* Set maximum number of loops for packet received from port */
1080        val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1081        val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1082        val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1083        mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1084
1085        /* Set initial offset for packet header extraction for the first
1086         * searching loop
1087         */
1088        val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1089        val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1090        val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1091        mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1092}
1093
1094/* Default flow entries initialization for all ports */
1095static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1096{
1097        struct mvpp2_prs_entry pe;
1098        int port;
1099
1100        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1101                memset(&pe, 0, sizeof(pe));
1102                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1103                pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1104
1105                /* Mask all ports */
1106                mvpp2_prs_tcam_port_map_set(&pe, 0);
1107
 1108                /* Set flow ID */
1109                mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1110                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1111
1112                /* Update shadow table and hw entry */
1113                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1114                mvpp2_prs_hw_write(priv, &pe);
1115        }
1116}
1117
1118/* Set default entry for Marvell Header field */
1119static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1120{
1121        struct mvpp2_prs_entry pe;
1122
1123        memset(&pe, 0, sizeof(pe));
1124
1125        pe.index = MVPP2_PE_MH_DEFAULT;
1126        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1127        mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1128                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1129        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1130
1131        /* Unmask all ports */
1132        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1133
1134        /* Update shadow table and hw entry */
1135        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1136        mvpp2_prs_hw_write(priv, &pe);
1137}
1138
 1139/* Set default entries (placeholders) for promiscuous, non-promiscuous and
1140 * multicast MAC addresses
1141 */
1142static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1143{
1144        struct mvpp2_prs_entry pe;
1145
1146        memset(&pe, 0, sizeof(pe));
1147
1148        /* Non-promiscuous mode for all ports - DROP unknown packets */
1149        pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1150        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1151
1152        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1153                                 MVPP2_PRS_RI_DROP_MASK);
1154        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1155        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1156
1157        /* Unmask all ports */
1158        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1159
1160        /* Update shadow table and hw entry */
1161        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1162        mvpp2_prs_hw_write(priv, &pe);
1163
1164        /* Create dummy entries for drop all and promiscuous modes */
1165        mvpp2_prs_mac_drop_all_set(priv, 0, false);
1166        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
1167        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
1168}
1169
1170/* Set default entries for various types of dsa packets */
1171static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
1172{
1173        struct mvpp2_prs_entry pe;
1174
 1175        /* Untagged EDSA entry - placeholder */
1176        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
1177                              MVPP2_PRS_EDSA);
1178
 1179        /* Tagged EDSA entry - placeholder */
1180        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
1181
 1182        /* Untagged DSA entry - placeholder */
1183        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
1184                              MVPP2_PRS_DSA);
1185
 1186        /* Tagged DSA entry - placeholder */
1187        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
1188
 1189        /* Untagged EDSA ethertype entry - placeholder */
1190        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
1191                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
1192
 1193        /* Tagged EDSA ethertype entry - placeholder */
1194        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
1195                                        MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
1196
 1197        /* Untagged DSA ethertype entry */
1198        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
1199                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
1200
1201        /* Tagged DSA ethertype entry */
1202        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
1203                                        MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
1204
 1205        /* Set default entry, in case the DSA or EDSA tag is not found */
1206        memset(&pe, 0, sizeof(pe));
1207        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1208        pe.index = MVPP2_PE_DSA_DEFAULT;
1209        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1210
1211        /* Shift 0 bytes */
1212        mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1213        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1214
1215        /* Clear all sram ai bits for next iteration */
1216        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1217
1218        /* Unmask all ports */
1219        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1220
1221        mvpp2_prs_hw_write(priv, &pe);
1222}
1223
1224/* Initialize parser entries for VID filtering */
1225static void mvpp2_prs_vid_init(struct mvpp2 *priv)
1226{
1227        struct mvpp2_prs_entry pe;
1228
1229        memset(&pe, 0, sizeof(pe));
1230
1231        /* Set default vid entry */
1232        pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
1233        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1234
1235        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
1236
1237        /* Skip VLAN header - Set offset to 4 bytes */
1238        mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
1239                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1240
1241        /* Clear all ai bits for next iteration */
1242        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1243
1244        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1245
1246        /* Unmask all ports */
1247        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1248
1249        /* Update shadow table and hw entry */
1250        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1251        mvpp2_prs_hw_write(priv, &pe);
1252
 1253        /* Set default vid entry for extended DSA */
1254        memset(&pe, 0, sizeof(pe));
1255
1256        /* Set default vid entry */
1257        pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
1258        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1259
1260        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
1261                                 MVPP2_PRS_EDSA_VID_AI_BIT);
1262
1263        /* Skip VLAN header - Set offset to 8 bytes */
1264        mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
1265                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1266
1267        /* Clear all ai bits for next iteration */
1268        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1269
1270        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1271
1272        /* Unmask all ports */
1273        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1274
1275        /* Update shadow table and hw entry */
1276        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1277        mvpp2_prs_hw_write(priv, &pe);
1278}
1279
1280/* Match basic ethertypes */
1281static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1282{
1283        struct mvpp2_prs_entry pe;
1284        int tid;
1285
1286        /* Ethertype: PPPoE */
1287        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1288                                        MVPP2_PE_LAST_FREE_TID);
1289        if (tid < 0)
1290                return tid;
1291
1292        memset(&pe, 0, sizeof(pe));
1293        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1294        pe.index = tid;
1295
1296        mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
1297
1298        mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1299                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1300        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1301        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1302                                 MVPP2_PRS_RI_PPPOE_MASK);
1303
1304        /* Update shadow table and hw entry */
1305        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1306        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1307        priv->prs_shadow[pe.index].finish = false;
1308        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1309                                MVPP2_PRS_RI_PPPOE_MASK);
1310        mvpp2_prs_hw_write(priv, &pe);
1311
1312        /* Ethertype: ARP */
1313        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1314                                        MVPP2_PE_LAST_FREE_TID);
1315        if (tid < 0)
1316                return tid;
1317
1318        memset(&pe, 0, sizeof(pe));
1319        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1320        pe.index = tid;
1321
1322        mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
1323
 1324        /* Generate flow in the next iteration */
1325        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1326        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1327        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1328                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1329        /* Set L3 offset */
1330        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1331                                  MVPP2_ETH_TYPE_LEN,
1332                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1333
1334        /* Update shadow table and hw entry */
1335        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1336        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1337        priv->prs_shadow[pe.index].finish = true;
1338        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1339                                MVPP2_PRS_RI_L3_PROTO_MASK);
1340        mvpp2_prs_hw_write(priv, &pe);
1341
1342        /* Ethertype: LBTD */
1343        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1344                                        MVPP2_PE_LAST_FREE_TID);
1345        if (tid < 0)
1346                return tid;
1347
1348        memset(&pe, 0, sizeof(pe));
1349        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1350        pe.index = tid;
1351
1352        mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1353
 1354        /* Generate flow in the next iteration */
1355        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1356        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1357        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1358                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1359                                 MVPP2_PRS_RI_CPU_CODE_MASK |
1360                                 MVPP2_PRS_RI_UDF3_MASK);
1361        /* Set L3 offset */
1362        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1363                                  MVPP2_ETH_TYPE_LEN,
1364                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1365
1366        /* Update shadow table and hw entry */
1367        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1368        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1369        priv->prs_shadow[pe.index].finish = true;
1370        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1371                                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1372                                MVPP2_PRS_RI_CPU_CODE_MASK |
1373                                MVPP2_PRS_RI_UDF3_MASK);
1374        mvpp2_prs_hw_write(priv, &pe);
1375
1376        /* Ethertype: IPv4 without options */
1377        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1378                                        MVPP2_PE_LAST_FREE_TID);
1379        if (tid < 0)
1380                return tid;
1381
1382        memset(&pe, 0, sizeof(pe));
1383        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1384        pe.index = tid;
1385
1386        mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
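            /* Also match the IPv4 version/IHL byte so that only headers
             * without options hit this entry.
             */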
1387        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1388                                     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1389                                     MVPP2_PRS_IPV4_HEAD_MASK |
1390                                     MVPP2_PRS_IPV4_IHL_MASK);
1391
1392        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1393        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1394                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1395        /* Skip eth_type + 4 bytes of IP header */
1396        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1397                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1398        /* Set L3 offset */
1399        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1400                                  MVPP2_ETH_TYPE_LEN,
1401                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1402
1403        /* Update shadow table and hw entry */
1404        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1405        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1406        priv->prs_shadow[pe.index].finish = false;
1407        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1408                                MVPP2_PRS_RI_L3_PROTO_MASK);
1409        mvpp2_prs_hw_write(priv, &pe);
1410
1411        /* Ethertype: IPv4 with options */
1412        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1413                                        MVPP2_PE_LAST_FREE_TID);
1414        if (tid < 0)
1415                return tid;
1416
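            /* Reuse the IPv4 entry built above: only the version/IHL byte
             * match and the result info are changed below.
             */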
1417        pe.index = tid;
1418
1419        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1420                                     MVPP2_PRS_IPV4_HEAD,
1421                                     MVPP2_PRS_IPV4_HEAD_MASK);
1422
1423        /* Clear ri before updating */
1424        pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1425        pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1426        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1427                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1428
1429        /* Update shadow table and hw entry */
1430        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1431        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1432        priv->prs_shadow[pe.index].finish = false;
1433        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
1434                                MVPP2_PRS_RI_L3_PROTO_MASK);
1435        mvpp2_prs_hw_write(priv, &pe);
1436
1437        /* Ethertype: IPv6 without options */
1438        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1439                                        MVPP2_PE_LAST_FREE_TID);
1440        if (tid < 0)
1441                return tid;
1442
1443        memset(&pe, 0, sizeof(pe));
1444        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1445        pe.index = tid;
1446
1447        mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
1448
1449        /* Skip DIP of IPV6 header */
1450        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1451                                 MVPP2_MAX_L3_ADDR_SIZE,
1452                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1453        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1454        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1455                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1456        /* Set L3 offset */
1457        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1458                                  MVPP2_ETH_TYPE_LEN,
1459                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1460
1461        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1462        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1463        priv->prs_shadow[pe.index].finish = false;
1464        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
1465                                MVPP2_PRS_RI_L3_PROTO_MASK);
1466        mvpp2_prs_hw_write(priv, &pe);
1467
1468        /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
1469        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1470        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1471        pe.index = MVPP2_PE_ETH_TYPE_UN;
1472
1473        /* Unmask all ports */
1474        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1475
1476        /* Generate flow in the next iteration */
1477        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1478        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1479        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1480                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1481        /* Set L3 offset even if it's unknown L3 */
1482        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1483                                  MVPP2_ETH_TYPE_LEN,
1484                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1485
1486        /* Update shadow table and hw entry */
1487        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1488        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1489        priv->prs_shadow[pe.index].finish = true;
1490        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
1491                                MVPP2_PRS_RI_L3_PROTO_MASK);
1492        mvpp2_prs_hw_write(priv, &pe);
1493
1494        return 0;
1495}
1496
1497/* Configure vlan entries and detect up to 2 successive VLAN tags.
1498 * Possible options:
1499 * 0x8100, 0x88A8
1500 * 0x8100, 0x8100
1501 * 0x8100
1502 * 0x88A8
1503 */
1504static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
1505{
1506        struct mvpp2_prs_entry pe;
1507        int err;
1508
1509        priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
1510                                              MVPP2_PRS_DBL_VLANS_MAX,
1511                                              sizeof(bool), GFP_KERNEL);
1512        if (!priv->prs_double_vlans)
1513                return -ENOMEM;
1514
1515        /* Double VLAN: 0x8100, 0x88A8 */
1516        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
1517                                        MVPP2_PRS_PORT_MASK);
1518        if (err)
1519                return err;
1520
1521        /* Double VLAN: 0x8100, 0x8100 */
1522        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
1523                                        MVPP2_PRS_PORT_MASK);
1524        if (err)
1525                return err;
1526
1527        /* Single VLAN: 0x88a8 */
1528        err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
1529                                 MVPP2_PRS_PORT_MASK);
1530        if (err)
1531                return err;
1532
1533        /* Single VLAN: 0x8100 */
1534        err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
1535                                 MVPP2_PRS_PORT_MASK);
1536        if (err)
1537                return err;
1538
1539        /* Set default double vlan entry */
1540        memset(&pe, 0, sizeof(pe));
1541        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1542        pe.index = MVPP2_PE_VLAN_DBL;
1543
1544        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
1545
1546        /* Clear ai for next iterations */
1547        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1548        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1549                                 MVPP2_PRS_RI_VLAN_MASK);
1550
1551        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
1552                                 MVPP2_PRS_DBL_VLAN_AI_BIT);
1553        /* Unmask all ports */
1554        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1555
1556        /* Update shadow table and hw entry */
1557        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1558        mvpp2_prs_hw_write(priv, &pe);
1559
1560        /* Set default vlan none entry */
1561        memset(&pe, 0, sizeof(pe));
1562        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1563        pe.index = MVPP2_PE_VLAN_NONE;
1564
1565        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1566        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1567                                 MVPP2_PRS_RI_VLAN_MASK);
1568
1569        /* Unmask all ports */
1570        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1571
1572        /* Update shadow table and hw entry */
1573        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1574        mvpp2_prs_hw_write(priv, &pe);
1575
1576        return 0;
1577}
1578
1579/* Set entries for PPPoE ethertype */
1580static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
1581{
1582        struct mvpp2_prs_entry pe;
1583        int tid;
1584
1585        /* IPv4 over PPPoE with options */
1586        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1587                                        MVPP2_PE_LAST_FREE_TID);
1588        if (tid < 0)
1589                return tid;
1590
1591        memset(&pe, 0, sizeof(pe));
1592        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1593        pe.index = tid;
1594
1595        mvpp2_prs_match_etype(&pe, 0, PPP_IP);
1596
1597        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1598        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1599                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1600        /* Skip eth_type + 4 bytes of IP header */
1601        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1602                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1603        /* Set L3 offset */
1604        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1605                                  MVPP2_ETH_TYPE_LEN,
1606                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1607
1608        /* Update shadow table and hw entry */
1609        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1610        mvpp2_prs_hw_write(priv, &pe);
1611
1612        /* IPv4 over PPPoE without options */
1613        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1614                                        MVPP2_PE_LAST_FREE_TID);
1615        if (tid < 0)
1616                return tid;
1617
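            /* Reuse the PPPoE/IPv4 entry built above: a version/IHL byte match
             * is added and the result info is rewritten below.
             */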
1618        pe.index = tid;
1619
1620        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1621                                     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1622                                     MVPP2_PRS_IPV4_HEAD_MASK |
1623                                     MVPP2_PRS_IPV4_IHL_MASK);
1624
1625        /* Clear ri before updating */
1626        pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1627        pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1628        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1629                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1630
1631        /* Update shadow table and hw entry */
1632        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1633        mvpp2_prs_hw_write(priv, &pe);
1634
1635        /* IPv6 over PPPoE */
1636        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1637                                        MVPP2_PE_LAST_FREE_TID);
1638        if (tid < 0)
1639                return tid;
1640
1641        memset(&pe, 0, sizeof(pe));
1642        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1643        pe.index = tid;
1644
1645        mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
1646
1647        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1648        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1649                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1650        /* Skip eth_type + 4 bytes of IPv6 header */
1651        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1652                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1653        /* Set L3 offset */
1654        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1655                                  MVPP2_ETH_TYPE_LEN,
1656                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1657
1658        /* Update shadow table and hw entry */
1659        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1660        mvpp2_prs_hw_write(priv, &pe);
1661
1662        /* Non-IP over PPPoE */
1663        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1664                                        MVPP2_PE_LAST_FREE_TID);
1665        if (tid < 0)
1666                return tid;
1667
1668        memset(&pe, 0, sizeof(pe));
1669        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1670        pe.index = tid;
1671
1672        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1673                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1674
1675        /* Finished: go to flowid generation */
1676        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1677        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1678        /* Set L3 offset even if it's unknown L3 */
1679        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1680                                  MVPP2_ETH_TYPE_LEN,
1681                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1682
1683        /* Update shadow table and hw entry */
1684        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1685        mvpp2_prs_hw_write(priv, &pe);
1686
1687        return 0;
1688}
1689
1690/* Initialize entries for IPv4 */
1691static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
1692{
1693        struct mvpp2_prs_entry pe;
1694        int err;
1695
1696        /* Set entries for TCP, UDP and IGMP over IPv4 */
1697        err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
1698                                  MVPP2_PRS_RI_L4_PROTO_MASK);
1699        if (err)
1700                return err;
1701
1702        err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
1703                                  MVPP2_PRS_RI_L4_PROTO_MASK);
1704        if (err)
1705                return err;
1706
1707        err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
1708                                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1709                                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1710                                  MVPP2_PRS_RI_CPU_CODE_MASK |
1711                                  MVPP2_PRS_RI_UDF3_MASK);
1712        if (err)
1713                return err;
1714
1715        /* IPv4 Broadcast */
1716        err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
1717        if (err)
1718                return err;
1719
1720        /* IPv4 Multicast */
1721        err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1722        if (err)
1723                return err;
1724
1725        /* Default IPv4 entry for unknown protocols */
1726        memset(&pe, 0, sizeof(pe));
1727        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1728        pe.index = MVPP2_PE_IP4_PROTO_UN;
1729
1730        /* Set next lu to IPv4 */
1731        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1732        mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1733        /* Set L4 offset */
1734        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1735                                  sizeof(struct iphdr) - 4,
1736                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1737        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1738                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
1739        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1740                                 MVPP2_PRS_RI_L4_PROTO_MASK);
1741
1742        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1743        /* Unmask all ports */
1744        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1745
1746        /* Update shadow table and hw entry */
1747        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1748        mvpp2_prs_hw_write(priv, &pe);
1749
1750        /* Default IPv4 entry for unicast address */
1751        memset(&pe, 0, sizeof(pe));
1752        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1753        pe.index = MVPP2_PE_IP4_ADDR_UN;
1754
1755        /* Finished: go to flowid generation */
1756        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1757        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1758        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1759                                 MVPP2_PRS_RI_L3_ADDR_MASK);
1760
1761        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1762                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
1763        /* Unmask all ports */
1764        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1765
1766        /* Update shadow table and hw entry */
1767        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1768        mvpp2_prs_hw_write(priv, &pe);
1769
1770        return 0;
1771}
1772
1773/* Initialize entries for IPv6 */
1774static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
1775{
1776        struct mvpp2_prs_entry pe;
1777        int tid, err;
1778
1779        /* Set entries for TCP, UDP and ICMP over IPv6 */
1780        err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
1781                                  MVPP2_PRS_RI_L4_TCP,
1782                                  MVPP2_PRS_RI_L4_PROTO_MASK);
1783        if (err)
1784                return err;
1785
1786        err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
1787                                  MVPP2_PRS_RI_L4_UDP,
1788                                  MVPP2_PRS_RI_L4_PROTO_MASK);
1789        if (err)
1790                return err;
1791
1792        err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
1793                                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1794                                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1795                                  MVPP2_PRS_RI_CPU_CODE_MASK |
1796                                  MVPP2_PRS_RI_UDF3_MASK);
1797        if (err)
1798                return err;
1799
1800        /* IPv4 is the last header. This is a case similar to 6-TCP or 17-UDP */
1801        /* Result Info: UDF7=1, DS lite */
1802        err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
1803                                  MVPP2_PRS_RI_UDF7_IP6_LITE,
1804                                  MVPP2_PRS_RI_UDF7_MASK);
1805        if (err)
1806                return err;
1807
1808        /* IPv6 multicast */
1809        err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1810        if (err)
1811                return err;
1812
1813        /* Entry for checking hop limit */
1814        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1815                                        MVPP2_PE_LAST_FREE_TID);
1816        if (tid < 0)
1817                return tid;
1818
1819        memset(&pe, 0, sizeof(pe));
1820        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1821        pe.index = tid;
1822
1823        /* Finished: go to flowid generation */
1824        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1825        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1826        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
1827                                 MVPP2_PRS_RI_DROP_MASK,
1828                                 MVPP2_PRS_RI_L3_PROTO_MASK |
1829                                 MVPP2_PRS_RI_DROP_MASK);
1830
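            /* Match a zero hop limit; the result info above marks the packet
             * as unknown L3 and to be dropped.
             */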
1831        mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
1832        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1833                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1834
1835        /* Update shadow table and hw entry */
1836        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1837        mvpp2_prs_hw_write(priv, &pe);
1838
1839        /* Default IPv6 entry for unknown protocols */
1840        memset(&pe, 0, sizeof(pe));
1841        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1842        pe.index = MVPP2_PE_IP6_PROTO_UN;
1843
1844        /* Finished: go to flowid generation */
1845        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1846        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1847        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1848                                 MVPP2_PRS_RI_L4_PROTO_MASK);
1849        /* Set L4 offset relative to our current place */
1850        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1851                                  sizeof(struct ipv6hdr) - 4,
1852                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1853
1854        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1855                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1856        /* Unmask all ports */
1857        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1858
1859        /* Update shadow table and hw entry */
1860        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1861        mvpp2_prs_hw_write(priv, &pe);
1862
1863        /* Default IPv6 entry for unknown ext protocols */
1864        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1865        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1866        pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
1867
1868        /* Finished: go to flowid generation */
1869        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1870        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1871        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1872                                 MVPP2_PRS_RI_L4_PROTO_MASK);
1873
1874        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
1875                                 MVPP2_PRS_IPV6_EXT_AI_BIT);
1876        /* Unmask all ports */
1877        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1878
1879        /* Update shadow table and hw entry */
1880        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1881        mvpp2_prs_hw_write(priv, &pe);
1882
1883        /* Default IPv6 entry for unicast address */
1884        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1885        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1886        pe.index = MVPP2_PE_IP6_ADDR_UN;
1887
1888        /* Finished: go to IPv6 again */
1889        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1890        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1891                                 MVPP2_PRS_RI_L3_ADDR_MASK);
1892        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1893                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1894        /* Shift back to IPV6 NH */
1895        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1896
1897        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1898        /* Unmask all ports */
1899        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1900
1901        /* Update shadow table and hw entry */
1902        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1903        mvpp2_prs_hw_write(priv, &pe);
1904
1905        return 0;
1906}
1907
1908/* Find tcam entry with matched pair <vid,port> */
1909static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
1910{
1911        unsigned char byte[2], enable[2];
1912        struct mvpp2_prs_entry pe;
1913        u16 rvid, rmask;
1914        int tid;
1915
1916        /* Go through all the entries with MVPP2_PRS_LU_VID */
1917        for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
1918             tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
1919                if (!port->priv->prs_shadow[tid].valid ||
1920                    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1921                        continue;
1922
1923                mvpp2_prs_init_from_hw(port->priv, &pe, tid);
1924
1925                mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1926                mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1927
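                    /* The 12-bit VID spans two TCAM data bytes: the low nibble
                     * of byte 2 holds bits 11:8, byte 3 holds bits 7:0.
                     */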
1928                rvid = ((byte[0] & 0xf) << 8) + byte[1];
1929                rmask = ((enable[0] & 0xf) << 8) + enable[1];
1930
1931                if (rvid != vid || rmask != mask)
1932                        continue;
1933
1934                return tid;
1935        }
1936
1937        return -ENOENT;
1938}
1939
1940/* Write parser entry for VID filtering */
1941int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
1942{
1943        unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
1944                                 port->id * MVPP2_PRS_VLAN_FILT_MAX;
1945        unsigned int mask = 0xfff, reg_val, shift;
1946        struct mvpp2 *priv = port->priv;
1947        struct mvpp2_prs_entry pe;
1948        int tid;
1949
1950        memset(&pe, 0, sizeof(pe));
1951
1952        /* Scan TCAM and see if an entry with this <vid,port> already exists */
1953        tid = mvpp2_prs_vid_range_find(port, vid, mask);
1954
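            /* Check whether the port uses extended DSA tags to know how many
             * tag bytes to skip below.
             */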
1955        reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
1956        if (reg_val & MVPP2_DSA_EXTENDED)
1957                shift = MVPP2_VLAN_TAG_EDSA_LEN;
1958        else
1959                shift = MVPP2_VLAN_TAG_LEN;
1960
1961        /* No such entry */
1962        if (tid < 0) {
1963
1964                /* Go through all entries from first to last in vlan range */
1965                tid = mvpp2_prs_tcam_first_free(priv, vid_start,
1966                                                vid_start +
1967                                                MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
1968
1969                /* There isn't room for a new VID filter */
1970                if (tid < 0)
1971                        return tid;
1972
1973                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1974                pe.index = tid;
1975
1976                /* Mask all ports */
1977                mvpp2_prs_tcam_port_map_set(&pe, 0);
1978        } else {
1979                mvpp2_prs_init_from_hw(priv, &pe, tid);
1980        }
1981
1982        /* Enable the current port */
1983        mvpp2_prs_tcam_port_set(&pe, port->id, true);
1984
1985        /* Continue - set next lookup */
1986        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1987
1988        /* Skip VLAN header - Set offset to 4 or 8 bytes */
1989        mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1990
1991        /* Set match on VID */
1992        mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
1993
1994        /* Clear all ai bits for next iteration */
1995        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1996
1997        /* Update shadow table */
1998        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1999        mvpp2_prs_hw_write(priv, &pe);
2000
2001        return 0;
2002}
2003
2004/* Remove parser entry for VID filtering */
2005void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
2006{
2007        struct mvpp2 *priv = port->priv;
2008        int tid;
2009
2010        /* Scan TCAM and see if an entry with this <vid,port> already exists */
2011        tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
2012
2013        /* No such entry */
2014        if (tid < 0)
2015                return;
2016
2017        mvpp2_prs_hw_inv(priv, tid);
2018        priv->prs_shadow[tid].valid = false;
2019}
2020
2021/* Remove all existing VID filters on this port */
2022void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2023{
2024        struct mvpp2 *priv = port->priv;
2025        int tid;
2026
2027        for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2028             tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2029                if (priv->prs_shadow[tid].valid) {
2030                        mvpp2_prs_hw_inv(priv, tid);
2031                        priv->prs_shadow[tid].valid = false;
2032                }
2033        }
2034}
2035
2036/* Remove VID filtering entry for this port */
2037void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2038{
2039        unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2040        struct mvpp2 *priv = port->priv;
2041
2042        /* Invalidate the guard entry */
2043        mvpp2_prs_hw_inv(priv, tid);
2044
2045        priv->prs_shadow[tid].valid = false;
2046}
2047
2048/* Add guard entry that drops packets when no VID is matched on this port */
2049void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
2050{
2051        unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2052        struct mvpp2 *priv = port->priv;
2053        unsigned int reg_val, shift;
2054        struct mvpp2_prs_entry pe;
2055
2056        if (priv->prs_shadow[tid].valid)
2057                return;
2058
2059        memset(&pe, 0, sizeof(pe));
2060
2061        pe.index = tid;
2062
2063        reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
2064        if (reg_val & MVPP2_DSA_EXTENDED)
2065                shift = MVPP2_VLAN_TAG_EDSA_LEN;
2066        else
2067                shift = MVPP2_VLAN_TAG_LEN;
2068
2069        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2070
2071        /* Mask all ports */
2072        mvpp2_prs_tcam_port_map_set(&pe, 0);
2073
2074        /* Update port mask */
2075        mvpp2_prs_tcam_port_set(&pe, port->id, true);
2076
2077        /* Continue - set next lookup */
2078        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2079
2080        /* Skip VLAN header - Set offset to 4 or 8 bytes */
2081        mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2082
2083        /* Drop VLAN packets that don't belong to any VIDs on this port */
2084        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2085                                 MVPP2_PRS_RI_DROP_MASK);
2086
2087        /* Clear all ai bits for next iteration */
2088        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2089
2090        /* Update shadow table */
2091        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2092        mvpp2_prs_hw_write(priv, &pe);
2093}
2094
2095/* Parser default initialization */
2096int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
2097{
2098        int err, index, i;
2099
2100        /* Enable tcam table */
2101        mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2102
2103        /* Clear all tcam and sram entries */
2104        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2105                mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2106                for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2107                        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2108
2109                mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2110                for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2111                        mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2112        }
2113
2114        /* Invalidate all tcam entries */
2115        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2116                mvpp2_prs_hw_inv(priv, index);
2117
2118        priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2119                                        sizeof(*priv->prs_shadow),
2120                                        GFP_KERNEL);
2121        if (!priv->prs_shadow)
2122                return -ENOMEM;
2123
2124        /* Always start from lookup = 0 */
2125        for (index = 0; index < MVPP2_MAX_PORTS; index++)
2126                mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2127                                       MVPP2_PRS_PORT_LU_MAX, 0);
2128
2129        mvpp2_prs_def_flow_init(priv);
2130
2131        mvpp2_prs_mh_init(priv);
2132
2133        mvpp2_prs_mac_init(priv);
2134
2135        mvpp2_prs_dsa_init(priv);
2136
2137        mvpp2_prs_vid_init(priv);
2138
2139        err = mvpp2_prs_etype_init(priv);
2140        if (err)
2141                return err;
2142
2143        err = mvpp2_prs_vlan_init(pdev, priv);
2144        if (err)
2145                return err;
2146
2147        err = mvpp2_prs_pppoe_init(priv);
2148        if (err)
2149                return err;
2150
2151        err = mvpp2_prs_ip6_init(priv);
2152        if (err)
2153                return err;
2154
2155        err = mvpp2_prs_ip4_init(priv);
2156        if (err)
2157                return err;
2158
2159        return 0;
2160}
2161
2162/* Compare MAC DA with tcam entry data */
2163static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2164                                       const u8 *da, unsigned char *mask)
2165{
2166        unsigned char tcam_byte, tcam_mask;
2167        int index;
2168
2169        for (index = 0; index < ETH_ALEN; index++) {
2170                mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2171                if (tcam_mask != mask[index])
2172                        return false;
2173
2174                if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2175                        return false;
2176        }
2177
2178        return true;
2179}
2180
2181/* Find tcam entry with matched pair <MAC DA, port> */
2182static int
2183mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2184                            unsigned char *mask, int udf_type)
2185{
2186        struct mvpp2_prs_entry pe;
2187        int tid;
2188
2189        /* Go through all the entries with MVPP2_PRS_LU_MAC */
2190        for (tid = MVPP2_PE_MAC_RANGE_START;
2191             tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2192                unsigned int entry_pmap;
2193
2194                if (!priv->prs_shadow[tid].valid ||
2195                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2196                    (priv->prs_shadow[tid].udf != udf_type))
2197                        continue;
2198
2199                mvpp2_prs_init_from_hw(priv, &pe, tid);
2200                entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
2201
2202                if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
2203                    entry_pmap == pmap)
2204                        return tid;
2205        }
2206
2207        return -ENOENT;
2208}
2209
2210/* Update parser's mac da entry */
2211int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
2212{
2213        unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2214        struct mvpp2 *priv = port->priv;
2215        unsigned int pmap, len, ri;
2216        struct mvpp2_prs_entry pe;
2217        int tid;
2218
2219        memset(&pe, 0, sizeof(pe));
2220
2221        /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
2222        tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
2223                                          MVPP2_PRS_UDF_MAC_DEF);
2224
2225        /* No such entry */
2226        if (tid < 0) {
2227                if (!add)
2228                        return 0;
2229
2230                /* Create new TCAM entry */
2231                /* Go through all the entries from first to last */
2232                tid = mvpp2_prs_tcam_first_free(priv,
2233                                                MVPP2_PE_MAC_RANGE_START,
2234                                                MVPP2_PE_MAC_RANGE_END);
2235                if (tid < 0)
2236                        return tid;
2237
2238                pe.index = tid;
2239
2240                /* Mask all ports */
2241                mvpp2_prs_tcam_port_map_set(&pe, 0);
2242        } else {
2243                mvpp2_prs_init_from_hw(priv, &pe, tid);
2244        }
2245
2246        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2247
2248        /* Update port mask */
2249        mvpp2_prs_tcam_port_set(&pe, port->id, add);
2250
2251        /* Invalidate the entry if no ports are left enabled */
2252        pmap = mvpp2_prs_tcam_port_map_get(&pe);
2253        if (pmap == 0) {
2254                if (add)
2255                        return -EINVAL;
2256
2257                mvpp2_prs_hw_inv(priv, pe.index);
2258                priv->prs_shadow[pe.index].valid = false;
2259                return 0;
2260        }
2261
2262        /* Continue - set next lookup */
2263        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2264
2265        /* Set match on DA */
2266        len = ETH_ALEN;
2267        while (len--)
2268                mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
2269
2270        /* Set result info bits */
2271        if (is_broadcast_ether_addr(da)) {
2272                ri = MVPP2_PRS_RI_L2_BCAST;
2273        } else if (is_multicast_ether_addr(da)) {
2274                ri = MVPP2_PRS_RI_L2_MCAST;
2275        } else {
2276                ri = MVPP2_PRS_RI_L2_UCAST;
2277
2278                if (ether_addr_equal(da, port->dev->dev_addr))
2279                        ri |= MVPP2_PRS_RI_MAC_ME_MASK;
2280        }
2281
2282        mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2283                                 MVPP2_PRS_RI_MAC_ME_MASK);
2284        mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2285                                MVPP2_PRS_RI_MAC_ME_MASK);
2286
2287        /* Shift to ethertype */
2288        mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2289                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2290
2291        /* Update shadow table and hw entry */
2292        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
2293        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2294        mvpp2_prs_hw_write(priv, &pe);
2295
2296        return 0;
2297}
2298
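    /* Replace the parser entry when the interface MAC address is changed */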
2299int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2300{
2301        struct mvpp2_port *port = netdev_priv(dev);
2302        int err;
2303
2304        /* Remove old parser entry */
2305        err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2306        if (err)
2307                return err;
2308
2309        /* Add new parser entry */
2310        err = mvpp2_prs_mac_da_accept(port, da, true);
2311        if (err)
2312                return err;
2313
2314        /* Set addr in the device */
2315        ether_addr_copy(dev->dev_addr, da);
2316
2317        return 0;
2318}
2319
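    /* Remove all MAC DA entries enabled on this port, except the broadcast
     * address and the port's own address.
     */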
2320void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
2321{
2322        struct mvpp2 *priv = port->priv;
2323        struct mvpp2_prs_entry pe;
2324        unsigned long pmap;
2325        int index, tid;
2326
2327        for (tid = MVPP2_PE_MAC_RANGE_START;
2328             tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2329                unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
2330
2331                if (!priv->prs_shadow[tid].valid ||
2332                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2333                    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
2334                        continue;
2335
2336                mvpp2_prs_init_from_hw(priv, &pe, tid);
2337
2338                pmap = mvpp2_prs_tcam_port_map_get(&pe);
2339
2340                /* We only want entries active on this port */
2341                if (!test_bit(port->id, &pmap))
2342                        continue;
2343
2344                /* Read mac addr from entry */
2345                for (index = 0; index < ETH_ALEN; index++)
2346                        mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
2347                                                     &da_mask[index]);
2348
2349                /* Special cases: don't remove the broadcast address or the
2350                 * port's own address
2351                 */
2352                if (is_broadcast_ether_addr(da) ||
2353                    ether_addr_equal(da, port->dev->dev_addr))
2354                        continue;
2355
2356                /* Remove entry from TCAM */
2357                mvpp2_prs_mac_da_accept(port, da, false);
2358        }
2359}
2360
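    /* Set the DSA/EDSA tagging mode handled by the parser for this port */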
2361int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
2362{
2363        switch (type) {
2364        case MVPP2_TAG_TYPE_EDSA:
2365                /* Add port to EDSA entries */
2366                mvpp2_prs_dsa_tag_set(priv, port, true,
2367                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2368                mvpp2_prs_dsa_tag_set(priv, port, true,
2369                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2370                /* Remove port from DSA entries */
2371                mvpp2_prs_dsa_tag_set(priv, port, false,
2372                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2373                mvpp2_prs_dsa_tag_set(priv, port, false,
2374                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2375                break;
2376
2377        case MVPP2_TAG_TYPE_DSA:
2378                /* Add port to DSA entries */
2379                mvpp2_prs_dsa_tag_set(priv, port, true,
2380                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2381                mvpp2_prs_dsa_tag_set(priv, port, true,
2382                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2383                /* Remove port from EDSA entries */
2384                mvpp2_prs_dsa_tag_set(priv, port, false,
2385                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2386                mvpp2_prs_dsa_tag_set(priv, port, false,
2387                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2388                break;
2389
2390        case MVPP2_TAG_TYPE_MH:
2391        case MVPP2_TAG_TYPE_NONE:
2392                /* Remove port from EDSA and DSA entries */
2393                mvpp2_prs_dsa_tag_set(priv, port, false,
2394                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2395                mvpp2_prs_dsa_tag_set(priv, port, false,
2396                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2397                mvpp2_prs_dsa_tag_set(priv, port, false,
2398                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2399                mvpp2_prs_dsa_tag_set(priv, port, false,
2400                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2401                break;
2402
2403        default:
2404                if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
2405                        return -EINVAL;
2406        }
2407
2408        return 0;
2409}
2410
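    /* Add a flow entry matching the given result info bits and assigning the
     * requested flow ID.
     */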
2411int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
2412{
2413        struct mvpp2_prs_entry pe;
2414        u8 *ri_byte, *ri_byte_mask;
2415        int tid, i;
2416
2417        memset(&pe, 0, sizeof(pe));
2418
2419        tid = mvpp2_prs_tcam_first_free(priv,
2420                                        MVPP2_PE_LAST_FREE_TID,
2421                                        MVPP2_PE_FIRST_FREE_TID);
2422        if (tid < 0)
2423                return tid;
2424
2425        pe.index = tid;
2426
2427        ri_byte = (u8 *)&ri;
2428        ri_byte_mask = (u8 *)&ri_mask;
2429
2430        mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
2431        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2432
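            /* Match each byte of the requested result info value and mask */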
2433        for (i = 0; i < 4; i++) {
2434                mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
2435                                             ri_byte_mask[i]);
2436        }
2437
2438        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2439        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2440        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2441        mvpp2_prs_hw_write(priv, &pe);
2442
2443        return 0;
2444}
2445
2446/* Set prs flow for the port */
2447int mvpp2_prs_def_flow(struct mvpp2_port *port)
2448{
2449        struct mvpp2_prs_entry pe;
2450        int tid;
2451
2452        memset(&pe, 0, sizeof(pe));
2453
2454        tid = mvpp2_prs_flow_find(port->priv, port->id);
2455
2456        /* Such entry doesn't exist */
2457        if (tid < 0) {
2458                /* Go through all the entries from last to first */
2459                tid = mvpp2_prs_tcam_first_free(port->priv,
2460                                                MVPP2_PE_LAST_FREE_TID,
2461                                                MVPP2_PE_FIRST_FREE_TID);
2462                if (tid < 0)
2463                        return tid;
2464
2465                pe.index = tid;
2466
2467                /* Set flow ID */
2468                mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2469                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2470
2471                /* Update shadow table */
2472                mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
2473        } else {
2474                mvpp2_prs_init_from_hw(port->priv, &pe, tid);
2475        }
2476
2477        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2478        mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
2479        mvpp2_prs_hw_write(port->priv, &pe);
2480
2481        return 0;
2482}
2483
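    /* Read the hit counter of a TCAM entry via the indirect hit index and
     * counter registers.
     */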
2484int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2485{
2486        u32 val;
2487
2488        if (index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
2489                return -EINVAL;
2490
2491        mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2492
2493        val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2494
2495        val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
2496
2497        return val;
2498}
2499