linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Header Parser helpers for Marvell PPv2 Network Controller
   4 *
   5 * Copyright (C) 2014 Marvell
   6 *
   7 * Marcin Wojtas <mw@semihalf.com>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/netdevice.h>
  12#include <linux/etherdevice.h>
  13#include <linux/platform_device.h>
  14#include <uapi/linux/ppp_defs.h>
  15#include <net/ip.h>
  16#include <net/ipv6.h>
  17
  18#include "mvpp2.h"
  19#include "mvpp2_prs.h"
  20
  21/* Update parser tcam and sram hw entries */
  22static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
  23{
  24        int i;
  25
  26        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  27                return -EINVAL;
  28
  29        /* Clear entry invalidation bit */
  30        pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
  31
  32        /* Write sram index - indirect access */
  33        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  34        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  35                mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
  36
  37        /* Write tcam index - indirect access */
  38        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  39        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  40                mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
  41
  42        return 0;
  43}
  44
  45/* Initialize tcam entry from hw */
  46int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
  47                           int tid)
  48{
  49        int i;
  50
  51        if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  52                return -EINVAL;
  53
  54        memset(pe, 0, sizeof(*pe));
  55        pe->index = tid;
  56
  57        /* Write tcam index - indirect access */
  58        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  59
  60        pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
  61                              MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
  62        if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
  63                return MVPP2_PRS_TCAM_ENTRY_INVALID;
  64
  65        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  66                pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
  67
  68        /* Write sram index - indirect access */
  69        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  70        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  71                pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
  72
  73        return 0;
  74}
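/* Illustrative sketch (not part of the driver): the two helpers above form the
 * usual read-modify-write round trip on one TCAM/SRAM pair; the tid below is
 * assumed to have been obtained elsewhere, e.g. from the shadow table.
 *
 *        struct mvpp2_prs_entry pe;
 *
 *        if (mvpp2_prs_init_from_hw(priv, &pe, tid))
 *                return;        // bad index or invalidated entry
 *        mvpp2_prs_tcam_port_set(&pe, port, true);        // helper defined below
 *        mvpp2_prs_hw_write(priv, &pe);
 */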
  75
  76/* Invalidate tcam hw entry */
  77static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
  78{
  79        /* Write index - indirect access */
  80        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
  81        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
  82                    MVPP2_PRS_TCAM_INV_MASK);
  83}
  84
  85/* Enable shadow table entry and set its lookup ID */
  86static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
  87{
  88        priv->prs_shadow[index].valid = true;
  89        priv->prs_shadow[index].lu = lu;
  90}
  91
  92/* Update ri fields in shadow table entry */
  93static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
  94                                    unsigned int ri, unsigned int ri_mask)
  95{
  96        priv->prs_shadow[index].ri_mask = ri_mask;
  97        priv->prs_shadow[index].ri = ri;
  98}
  99
 100/* Update lookup field in tcam sw entry */
 101static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
 102{
 103        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
 104        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
 105        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
 106        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
 107}
 108
 109/* Update mask for single port in tcam sw entry */
 110static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
 111                                    unsigned int port, bool add)
 112{
 113        if (add)
 114                pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
 115        else
 116                pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
 117}
 118
 119/* Update port map in tcam sw entry */
 120static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
 121                                        unsigned int ports)
 122{
 123        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
 124        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
 125        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
 126}
 127
 128/* Obtain port map from tcam sw entry */
 129unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
 130{
 131        return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
 132}
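/* Note (encoding inferred from the two helpers above): the per-port enable
 * bits are active-low, so the setter stores ~ports in the EN field and this
 * getter inverts them back. Assuming an 8-bit MVPP2_PRS_PORT_MASK:
 *
 *        mvpp2_prs_tcam_port_map_set(&pe, BIT(0) | BIT(2));
 *        // EN field now holds ~0x05 & MVPP2_PRS_PORT_MASK
 *        mvpp2_prs_tcam_port_map_get(&pe);        // returns 0x05 again
 */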
 133
 134/* Set byte of data and its enable bits in tcam sw entry */
 135static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
 136                                         unsigned int offs, unsigned char byte,
 137                                         unsigned char enable)
 138{
 139        int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
 140
 141        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
 142        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
 143        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
 144        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
 145}
 146
 147/* Get byte of data and its enable bits from tcam sw entry */
 148void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
 149                                  unsigned int offs, unsigned char *byte,
 150                                  unsigned char *enable)
 151{
 152        int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
 153
 154        *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
 155        *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
 156}
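/* Note (layout follows from the helpers above): data byte 'offs' sits in word
 * MVPP2_PRS_BYTE_TO_WORD(offs) at bit MVPP2_PRS_BYTE_IN_WORD(offs) * 8, with
 * its enable bits 16 bits higher. For instance, to match one header byte:
 *
 *        mvpp2_prs_tcam_data_byte_set(&pe, 5, 0x11, 0xff);        // byte 5 must be 0x11
 *        mvpp2_prs_tcam_data_byte_get(&pe, 5, &byte, &enable);    // byte == 0x11, enable == 0xff
 */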
 157
 158/* Compare tcam data bytes with a pattern */
 159static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
 160                                    u16 data)
 161{
 162        u16 tcam_data;
 163
 164        tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
 165        return tcam_data == data;
 166}
 167
 168/* Update ai bits in tcam sw entry */
 169static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
 170                                     unsigned int bits, unsigned int enable)
 171{
 172        int i;
 173
 174        for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
 175                if (!(enable & BIT(i)))
 176                        continue;
 177
 178                if (bits & BIT(i))
 179                        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
 180                else
 181                        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
 182        }
 183
 184        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
 185}
 186
 187/* Get ai bits from tcam sw entry */
 188static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
 189{
 190        return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
 191}
 192
 193/* Set ethertype in tcam sw entry */
 194static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
 195                                  unsigned short ethertype)
 196{
 197        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
 198        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
 199}
 200
 201/* Set vid in tcam sw entry */
 202static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
 203                                unsigned short vid)
 204{
 205        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
 206        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
 207}
 208
 209/* Set bits in sram sw entry */
 210static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
 211                                    u32 val)
 212{
 213        pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
 214}
 215
 216/* Clear bits in sram sw entry */
 217static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
 218                                      u32 val)
 219{
 220        pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
 221}
 222
 223/* Update ri bits in sram sw entry */
 224static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
 225                                     unsigned int bits, unsigned int mask)
 226{
 227        unsigned int i;
 228
 229        for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
 230                if (!(mask & BIT(i)))
 231                        continue;
 232
 233                if (bits & BIT(i))
 234                        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
 235                                                1);
 236                else
 237                        mvpp2_prs_sram_bits_clear(pe,
 238                                                  MVPP2_PRS_SRAM_RI_OFFS + i,
 239                                                  1);
 240
 241                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
 242        }
 243}
 244
 245/* Obtain ri bits from sram sw entry */
 246static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
 247{
 248        return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
 249}
 250
 251/* Update ai bits in sram sw entry */
 252static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
 253                                     unsigned int bits, unsigned int mask)
 254{
 255        unsigned int i;
 256
 257        for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
 258                if (!(mask & BIT(i)))
 259                        continue;
 260
 261                if (bits & BIT(i))
 262                        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
 263                                                1);
 264                else
 265                        mvpp2_prs_sram_bits_clear(pe,
 266                                                  MVPP2_PRS_SRAM_AI_OFFS + i,
 267                                                  1);
 268
 269                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
 270        }
 271}
 272
 273/* Read ai bits from sram sw entry */
 274static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
 275{
 276        u8 bits;
 277        /* ai is stored in bits 90->97, so it spans two u32 words */
 278        int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
 279        int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
 280
 281        bits = (pe->sram[ai_off] >> ai_shift) |
 282               (pe->sram[ai_off + 1] << (32 - ai_shift));
 283
 284        return bits;
 285}
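/* Worked example (offset taken from the comment above): with
 * MVPP2_PRS_SRAM_AI_OFFS == 90, ai_off == 2 and ai_shift == 26, so the eight
 * ai bits are assembled from the top six bits of sram[2] and the low two bits
 * of sram[3]:
 *
 *        bits = (pe->sram[2] >> 26) | (pe->sram[3] << 6);        // truncated to u8
 */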
 286
 287/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 288 * lookup iteration
 289 */
 290static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
 291                                       unsigned int lu)
 292{
 293        int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
 294
 295        mvpp2_prs_sram_bits_clear(pe, sram_next_off,
 296                                  MVPP2_PRS_SRAM_NEXT_LU_MASK);
 297        mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
 298}
 299
 300/* In the sram sw entry, set the sign and value of the next lookup offset
 301 * and of the offset value generated for the classifier
 302 */
 303static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
 304                                     unsigned int op)
 305{
 306        /* Set sign */
 307        if (shift < 0) {
 308                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
 309                shift = 0 - shift;
 310        } else {
 311                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
 312        }
 313
 314        /* Set value */
 315        pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
 316                shift & MVPP2_PRS_SRAM_SHIFT_MASK;
 317
 318        /* Reset and set operation */
 319        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
 320                                  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
 321        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
 322
 323        /* Set base offset as current */
 324        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
 325}
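/* Usage sketch (illustrative): the shift is stored as sign + magnitude, so a
 * negative value only flips MVPP2_PRS_SRAM_SHIFT_SIGN_BIT. The IPv4 cast entry
 * later in this file rewinds the header pointer exactly this way:
 *
 *        mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 *        // sign bit set, magnitude 12, added to the current offset
 */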
 326
 327/* In the sram sw entry, set the sign and value of the user defined offset
 328 * generated for the classifier
 329 */
 330static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
 331                                      unsigned int type, int offset,
 332                                      unsigned int op)
 333{
 334        /* Set sign */
 335        if (offset < 0) {
 336                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
 337                offset = 0 - offset;
 338        } else {
 339                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
 340        }
 341
 342        /* Set value */
 343        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
 344                                  MVPP2_PRS_SRAM_UDF_MASK);
 345        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
 346                                offset & MVPP2_PRS_SRAM_UDF_MASK);
 347
 348        /* Set offset type */
 349        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
 350                                  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
 351        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
 352
 353        /* Set offset operation */
 354        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
 355                                  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
 356        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
 357                                op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
 358
 359        /* Set base offset as current */
 360        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
 361}
 362
 363/* Find parser flow entry */
 364static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
 365{
 366        struct mvpp2_prs_entry pe;
 367        int tid;
 368
 369        /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
 370        for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
 371                u8 bits;
 372
 373                if (!priv->prs_shadow[tid].valid ||
 374                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
 375                        continue;
 376
 377                mvpp2_prs_init_from_hw(priv, &pe, tid);
 378                bits = mvpp2_prs_sram_ai_get(&pe);
 379
 380                /* Sram stores the classification lookup ID in AI bits [5:0] */
 381                if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
 382                        return tid;
 383        }
 384
 385        return -ENOENT;
 386}
 387
 388/* Return first free tcam index, seeking from start to end */
 389static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
 390                                     unsigned char end)
 391{
 392        int tid;
 393
 394        if (start > end)
 395                swap(start, end);
 396
 397        if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
 398                end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
 399
 400        for (tid = start; tid <= end; tid++) {
 401                if (!priv->prs_shadow[tid].valid)
 402                        return tid;
 403        }
 404
 405        return -EINVAL;
 406}
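/* Sketch of the allocation pattern used throughout this file: grab a free tid
 * in the dynamic range and bail out before building the entry:
 *
 *        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 *                                        MVPP2_PE_LAST_FREE_TID);
 *        if (tid < 0)
 *                return tid;        // -EINVAL: no free entry in the range
 *        pe.index = tid;
 */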
 407
 408/* Drop flow control pause frames */
 409static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
 410{
 411        unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
 412        struct mvpp2_prs_entry pe;
 413        unsigned int len;
 414
 415        memset(&pe, 0, sizeof(pe));
 416
 417        /* For all ports - drop flow control frames */
 418        pe.index = MVPP2_PE_FC_DROP;
 419        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
 420
 421        /* Set match on DA */
 422        len = ETH_ALEN;
 423        while (len--)
 424                mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
 425
 426        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
 427                                 MVPP2_PRS_RI_DROP_MASK);
 428
 429        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
 430        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
 431
 432        /* Mask all ports */
 433        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
 434
 435        /* Update shadow table and hw entry */
 436        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
 437        mvpp2_prs_hw_write(priv, &pe);
 438}
 439
 440/* Enable/disable dropping of all MAC DAs */
 441static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
 442{
 443        struct mvpp2_prs_entry pe;
 444
 445        if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
 446                /* Entry exists - update port only */
 447                mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
 448        } else {
 449                /* Entry doesn't exist - create new */
 450                memset(&pe, 0, sizeof(pe));
 451                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
 452                pe.index = MVPP2_PE_DROP_ALL;
 453
 454                /* Non-promiscuous mode for all ports - DROP unknown packets */
 455                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
 456                                         MVPP2_PRS_RI_DROP_MASK);
 457
 458                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
 459                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
 460
 461                /* Update shadow table */
 462                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
 463
 464                /* Mask all ports */
 465                mvpp2_prs_tcam_port_map_set(&pe, 0);
 466        }
 467
 468        /* Update port mask */
 469        mvpp2_prs_tcam_port_set(&pe, port, add);
 470
 471        mvpp2_prs_hw_write(priv, &pe);
 472}
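/* Note: entries such as MVPP2_PE_DROP_ALL follow a create-once pattern - the
 * first call builds the entry with all ports masked, and later calls only
 * toggle the caller's port bit. A hypothetical caller would do:
 *
 *        mvpp2_prs_mac_drop_all_set(priv, port_id, true);        // start dropping on this port
 *        mvpp2_prs_mac_drop_all_set(priv, port_id, false);       // stop again
 */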
 473
 474/* Set port to unicast or multicast promiscuous mode */
 475void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
 476                               enum mvpp2_prs_l2_cast l2_cast, bool add)
 477{
 478        struct mvpp2_prs_entry pe;
 479        unsigned char cast_match;
 480        unsigned int ri;
 481        int tid;
 482
 483        if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
 484                cast_match = MVPP2_PRS_UCAST_VAL;
 485                tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
 486                ri = MVPP2_PRS_RI_L2_UCAST;
 487        } else {
 488                cast_match = MVPP2_PRS_MCAST_VAL;
 489                tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
 490                ri = MVPP2_PRS_RI_L2_MCAST;
 491        }
 492
 493        /* Promiscuous mode - accept unknown unicast or multicast packets */
 494        if (priv->prs_shadow[tid].valid) {
 495                mvpp2_prs_init_from_hw(priv, &pe, tid);
 496        } else {
 497                memset(&pe, 0, sizeof(pe));
 498                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
 499                pe.index = tid;
 500
 501                /* Continue - set next lookup */
 502                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
 503
 504                /* Set result info bits */
 505                mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
 506
 507                /* Match UC or MC addresses */
 508                mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
 509                                             MVPP2_PRS_CAST_MASK);
 510
 511                /* Shift to ethertype */
 512                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
 513                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 514
 515                /* Mask all ports */
 516                mvpp2_prs_tcam_port_map_set(&pe, 0);
 517
 518                /* Update shadow table */
 519                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
 520        }
 521
 522        /* Update port mask */
 523        mvpp2_prs_tcam_port_set(&pe, port, add);
 524
 525        mvpp2_prs_hw_write(priv, &pe);
 526}
 527
 528/* Set entry for dsa packets */
 529static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
 530                                  bool tagged, bool extend)
 531{
 532        struct mvpp2_prs_entry pe;
 533        int tid, shift;
 534
 535        if (extend) {
 536                tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
 537                shift = 8;
 538        } else {
 539                tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
 540                shift = 4;
 541        }
 542
 543        if (priv->prs_shadow[tid].valid) {
 544                /* Entry exists - update port only */
 545                mvpp2_prs_init_from_hw(priv, &pe, tid);
 546        } else {
 547                /* Entry doesn't exist - create new */
 548                memset(&pe, 0, sizeof(pe));
 549                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
 550                pe.index = tid;
 551
 552                /* Update shadow table */
 553                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
 554
 555                if (tagged) {
 556                        /* Set tagged bit in DSA tag */
 557                        mvpp2_prs_tcam_data_byte_set(&pe, 0,
 558                                             MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
 559                                             MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
 560
 561                        /* Set ai bits for next iteration */
 562                        if (extend)
 563                                mvpp2_prs_sram_ai_update(&pe, 1,
 564                                                        MVPP2_PRS_SRAM_AI_MASK);
 565                        else
 566                                mvpp2_prs_sram_ai_update(&pe, 0,
 567                                                        MVPP2_PRS_SRAM_AI_MASK);
 568
 569                        /* Set result info bits to 'single vlan' */
 570                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
 571                                                 MVPP2_PRS_RI_VLAN_MASK);
 572                        /* If the packet is tagged, continue with vid filtering */
 573                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
 574                } else {
 575                        /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
 576                        mvpp2_prs_sram_shift_set(&pe, shift,
 577                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 578
 579                        /* Set result info bits to 'no vlans' */
 580                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
 581                                                 MVPP2_PRS_RI_VLAN_MASK);
 582                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
 583                }
 584
 585                /* Mask all ports */
 586                mvpp2_prs_tcam_port_map_set(&pe, 0);
 587        }
 588
 589        /* Update port mask */
 590        mvpp2_prs_tcam_port_set(&pe, port, add);
 591
 592        mvpp2_prs_hw_write(priv, &pe);
 593}
 594
 595/* Set entry for dsa ethertype */
 596static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
 597                                            bool add, bool tagged, bool extend)
 598{
 599        struct mvpp2_prs_entry pe;
 600        int tid, shift, port_mask;
 601
 602        if (extend) {
 603                tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
 604                      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
 605                port_mask = 0;
 606                shift = 8;
 607        } else {
 608                tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
 609                      MVPP2_PE_ETYPE_DSA_UNTAGGED;
 610                port_mask = MVPP2_PRS_PORT_MASK;
 611                shift = 4;
 612        }
 613
 614        if (priv->prs_shadow[tid].valid) {
 615                /* Entry exists - update port only */
 616                mvpp2_prs_init_from_hw(priv, &pe, tid);
 617        } else {
 618                /* Entry doesn't exist - create new */
 619                memset(&pe, 0, sizeof(pe));
 620                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
 621                pe.index = tid;
 622
 623                /* Set ethertype */
 624                mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
 625                mvpp2_prs_match_etype(&pe, 2, 0);
 626
 627                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
 628                                         MVPP2_PRS_RI_DSA_MASK);
 629                /* Shift ethertype + 2 reserved bytes + tag */
 630                mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
 631                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 632
 633                /* Update shadow table */
 634                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
 635
 636                if (tagged) {
 637                        /* Set tagged bit in DSA tag */
 638                        mvpp2_prs_tcam_data_byte_set(&pe,
 639                                                     MVPP2_ETH_TYPE_LEN + 2 + 3,
 640                                                 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
 641                                                 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
 642                        /* Clear all ai bits for next iteration */
 643                        mvpp2_prs_sram_ai_update(&pe, 0,
 644                                                 MVPP2_PRS_SRAM_AI_MASK);
 645                        /* If the packet is tagged, continue checking vlans */
 646                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
 647                } else {
 648                        /* Set result info bits to 'no vlans' */
 649                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
 650                                                 MVPP2_PRS_RI_VLAN_MASK);
 651                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
 652                }
 653                /* Mask/unmask all ports, depending on dsa type */
 654                mvpp2_prs_tcam_port_map_set(&pe, port_mask);
 655        }
 656
 657        /* Update port mask */
 658        mvpp2_prs_tcam_port_set(&pe, port, add);
 659
 660        mvpp2_prs_hw_write(priv, &pe);
 661}
 662
 663/* Search for existing single/triple vlan entry */
 664static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
 665{
 666        struct mvpp2_prs_entry pe;
 667        int tid;
 668
 669        /* Go through all the entries with MVPP2_PRS_LU_VLAN */
 670        for (tid = MVPP2_PE_FIRST_FREE_TID;
 671             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
 672                unsigned int ri_bits, ai_bits;
 673                bool match;
 674
 675                if (!priv->prs_shadow[tid].valid ||
 676                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
 677                        continue;
 678
 679                mvpp2_prs_init_from_hw(priv, &pe, tid);
 680                match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
 681                if (!match)
 682                        continue;
 683
 684                /* Get vlan type */
 685                ri_bits = mvpp2_prs_sram_ri_get(&pe);
 686                ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
 687
 688                /* Get current ai value from tcam */
 689                ai_bits = mvpp2_prs_tcam_ai_get(&pe);
 690                /* Clear double vlan bit */
 691                ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
 692
 693                if (ai != ai_bits)
 694                        continue;
 695
 696                if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
 697                    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
 698                        return tid;
 699        }
 700
 701        return -ENOENT;
 702}
 703
 704/* Add/update single/triple vlan entry */
 705static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
 706                              unsigned int port_map)
 707{
 708        struct mvpp2_prs_entry pe;
 709        int tid_aux, tid;
 710        int ret = 0;
 711
 712        memset(&pe, 0, sizeof(pe));
 713
 714        tid = mvpp2_prs_vlan_find(priv, tpid, ai);
 715
 716        if (tid < 0) {
 717                /* Create new tcam entry */
 718                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
 719                                                MVPP2_PE_FIRST_FREE_TID);
 720                if (tid < 0)
 721                        return tid;
 722
 723                /* Get last double vlan tid */
 724                for (tid_aux = MVPP2_PE_LAST_FREE_TID;
 725                     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
 726                        unsigned int ri_bits;
 727
 728                        if (!priv->prs_shadow[tid_aux].valid ||
 729                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
 730                                continue;
 731
 732                        mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
 733                        ri_bits = mvpp2_prs_sram_ri_get(&pe);
 734                        if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
 735                            MVPP2_PRS_RI_VLAN_DOUBLE)
 736                                break;
 737                }
 738
 739                if (tid <= tid_aux)
 740                        return -EINVAL;
 741
 742                memset(&pe, 0, sizeof(pe));
 743                pe.index = tid;
 744                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
 745
 746                mvpp2_prs_match_etype(&pe, 0, tpid);
 747
 748                /* VLAN tag detected, proceed with VID filtering */
 749                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
 750
 751                /* Clear all ai bits for next iteration */
 752                mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
 753
 754                if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
 755                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
 756                                                 MVPP2_PRS_RI_VLAN_MASK);
 757                } else {
 758                        ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
 759                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
 760                                                 MVPP2_PRS_RI_VLAN_MASK);
 761                }
 762                mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
 763
 764                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
 765        } else {
 766                mvpp2_prs_init_from_hw(priv, &pe, tid);
 767        }
 768        /* Update ports' mask */
 769        mvpp2_prs_tcam_port_map_set(&pe, port_map);
 770
 771        mvpp2_prs_hw_write(priv, &pe);
 772
 773        return ret;
 774}
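/* Note and usage sketch: single/triple vlan entries are allocated from the
 * high end of the free range, and the tid <= tid_aux check keeps them after
 * every double vlan entry, so the more specific double match keeps priority.
 * A typical call (tpid chosen for the example):
 *
 *        err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
 *                                 MVPP2_PRS_PORT_MASK);
 */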
 775
 776/* Get first free double vlan ai number */
 777static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
 778{
 779        int i;
 780
 781        for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
 782                if (!priv->prs_double_vlans[i])
 783                        return i;
 784        }
 785
 786        return -EINVAL;
 787}
 788
 789/* Search for existing double vlan entry */
 790static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
 791                                      unsigned short tpid2)
 792{
 793        struct mvpp2_prs_entry pe;
 794        int tid;
 795
 796        /* Go through all the entries with MVPP2_PRS_LU_VLAN */
 797        for (tid = MVPP2_PE_FIRST_FREE_TID;
 798             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
 799                unsigned int ri_mask;
 800                bool match;
 801
 802                if (!priv->prs_shadow[tid].valid ||
 803                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
 804                        continue;
 805
 806                mvpp2_prs_init_from_hw(priv, &pe, tid);
 807
 808                match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
 809                        mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
 810
 811                if (!match)
 812                        continue;
 813
 814                ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
 815                if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
 816                        return tid;
 817        }
 818
 819        return -ENOENT;
 820}
 821
 822/* Add or update double vlan entry */
 823static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
 824                                     unsigned short tpid2,
 825                                     unsigned int port_map)
 826{
 827        int tid_aux, tid, ai, ret = 0;
 828        struct mvpp2_prs_entry pe;
 829
 830        memset(&pe, 0, sizeof(pe));
 831
 832        tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
 833
 834        if (tid < 0) {
 835                /* Create new tcam entry */
 836                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 837                                MVPP2_PE_LAST_FREE_TID);
 838                if (tid < 0)
 839                        return tid;
 840
 841                /* Set ai value for new double vlan entry */
 842                ai = mvpp2_prs_double_vlan_ai_free_get(priv);
 843                if (ai < 0)
 844                        return ai;
 845
 846                /* Get first single/triple vlan tid */
 847                for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
 848                     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
 849                        unsigned int ri_bits;
 850
 851                        if (!priv->prs_shadow[tid_aux].valid ||
 852                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
 853                                continue;
 854
 855                        mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
 856                        ri_bits = mvpp2_prs_sram_ri_get(&pe);
 857                        ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
 858                        if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
 859                            ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
 860                                break;
 861                }
 862
 863                if (tid >= tid_aux)
 864                        return -ERANGE;
 865
 866                memset(&pe, 0, sizeof(pe));
 867                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
 868                pe.index = tid;
 869
 870                priv->prs_double_vlans[ai] = true;
 871
 872                mvpp2_prs_match_etype(&pe, 0, tpid1);
 873                mvpp2_prs_match_etype(&pe, 4, tpid2);
 874
 875                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
 876                /* Shift 4 bytes - skip outer vlan tag */
 877                mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
 878                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 879                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
 880                                         MVPP2_PRS_RI_VLAN_MASK);
 881                mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
 882                                         MVPP2_PRS_SRAM_AI_MASK);
 883
 884                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
 885        } else {
 886                mvpp2_prs_init_from_hw(priv, &pe, tid);
 887        }
 888
 889        /* Update ports' mask */
 890        mvpp2_prs_tcam_port_map_set(&pe, port_map);
 891        mvpp2_prs_hw_write(priv, &pe);
 892
 893        return ret;
 894}
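/* Usage sketch (tpid pair chosen for the example): registering a QinQ
 * combination for all ports, mirroring the single-vlan case above:
 *
 *        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
 *                                        MVPP2_PRS_PORT_MASK);
 *        if (err)
 *                return err;
 */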
 895
 896/* IPv4 header parsing for fragmentation and L4 offset */
 897static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
 898                               unsigned int ri, unsigned int ri_mask)
 899{
 900        struct mvpp2_prs_entry pe;
 901        int tid;
 902
 903        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
 904            (proto != IPPROTO_IGMP))
 905                return -EINVAL;
 906
 907        /* Not fragmented packet */
 908        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 909                                        MVPP2_PE_LAST_FREE_TID);
 910        if (tid < 0)
 911                return tid;
 912
 913        memset(&pe, 0, sizeof(pe));
 914        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
 915        pe.index = tid;
 916
 917        /* Finished: go to flowid generation */
 918        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
 919        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
 920
 921        /* Set L3 offset */
 922        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
 923                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
 924        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
 925        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
 926
 927        mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
 928                                     MVPP2_PRS_TCAM_PROTO_MASK_L);
 929        mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
 930                                     MVPP2_PRS_TCAM_PROTO_MASK);
 931
 932        mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
 933        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
 934                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
 935        /* Unmask all ports */
 936        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
 937
 938        /* Update shadow table and hw entry */
 939        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
 940        mvpp2_prs_hw_write(priv, &pe);
 941
 942        /* Fragmented packet */
 943        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 944                                        MVPP2_PE_LAST_FREE_TID);
 945        if (tid < 0)
 946                return tid;
 947
 948        pe.index = tid;
 949        /* Clear ri before updating */
 950        pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
 951        pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
 952        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
 953
 954        mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
 955                                 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
 956
 957        mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
 958        mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
 959
 960        /* Update shadow table and hw entry */
 961        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
 962        mvpp2_prs_hw_write(priv, &pe);
 963
 964        return 0;
 965}
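/* Usage sketch (result-info constants assumed to exist in mvpp2.h): the IPv4
 * init code is expected to register each supported L4 protocol along these
 * lines:
 *
 *        err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
 *                                  MVPP2_PRS_RI_L4_PROTO_MASK);
 *        if (err)
 *                return err;
 */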
 966
 967/* IPv4 L3 multicast or broadcast */
 968static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
 969{
 970        struct mvpp2_prs_entry pe;
 971        int mask, tid;
 972
 973        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 974                                        MVPP2_PE_LAST_FREE_TID);
 975        if (tid < 0)
 976                return tid;
 977
 978        memset(&pe, 0, sizeof(pe));
 979        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
 980        pe.index = tid;
 981
 982        switch (l3_cast) {
 983        case MVPP2_PRS_L3_MULTI_CAST:
 984                mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
 985                                             MVPP2_PRS_IPV4_MC_MASK);
 986                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
 987                                         MVPP2_PRS_RI_L3_ADDR_MASK);
 988                break;
 989        case MVPP2_PRS_L3_BROAD_CAST:
 990                mask = MVPP2_PRS_IPV4_BC_MASK;
 991                mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
 992                mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
 993                mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
 994                mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
 995                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
 996                                         MVPP2_PRS_RI_L3_ADDR_MASK);
 997                break;
 998        default:
 999                return -EINVAL;
1000        }
1001
1002        /* Go again to ipv4 */
1003        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1004
1005        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1006                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
1007
1008        /* Shift back to IPv4 proto */
1009        mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1010
1011        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1012
1013        /* Unmask all ports */
1014        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1015
1016        /* Update shadow table and hw entry */
1017        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1018        mvpp2_prs_hw_write(priv, &pe);
1019
1020        return 0;
1021}
1022
1023/* Set entries for protocols over IPv6 */
1024static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
1025                               unsigned int ri, unsigned int ri_mask)
1026{
1027        struct mvpp2_prs_entry pe;
1028        int tid;
1029
1030        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1031            (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
1032                return -EINVAL;
1033
1034        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1035                                        MVPP2_PE_LAST_FREE_TID);
1036        if (tid < 0)
1037                return tid;
1038
1039        memset(&pe, 0, sizeof(pe));
1040        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1041        pe.index = tid;
1042
1043        /* Finished: go to flowid generation */
1044        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1045        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1046        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1047        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1048                                  sizeof(struct ipv6hdr) - 6,
1049                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1050
1051        mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1052        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1053                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1054        /* Unmask all ports */
1055        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1056
1057        /* Write HW */
1058        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1059        mvpp2_prs_hw_write(priv, &pe);
1060
1061        return 0;
1062}
1063
1064/* IPv6 L3 multicast entry */
1065static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
1066{
1067        struct mvpp2_prs_entry pe;
1068        int tid;
1069
1070        if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
1071                return -EINVAL;
1072
1073        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1074                                        MVPP2_PE_LAST_FREE_TID);
1075        if (tid < 0)
1076                return tid;
1077
1078        memset(&pe, 0, sizeof(pe));
1079        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1080        pe.index = tid;
1081
1082        /* Finished: go to flowid generation */
1083        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1084        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
1085                                 MVPP2_PRS_RI_L3_ADDR_MASK);
1086        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1087                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1088        /* Shift back to IPv6 NH */
1089        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1090
1091        mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
1092                                     MVPP2_PRS_IPV6_MC_MASK);
1093        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1094        /* Unmask all ports */
1095        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1096
1097        /* Update shadow table and hw entry */
1098        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1099        mvpp2_prs_hw_write(priv, &pe);
1100
1101        return 0;
1102}
1103
1104/* Parser per-port initialization */
1105static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1106                                   int lu_max, int offset)
1107{
1108        u32 val;
1109
1110        /* Set lookup ID */
1111        val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1112        val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1113        val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1114        mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1115
1116        /* Set maximum number of loops for packet received from port */
1117        val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1118        val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1119        val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1120        mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1121
1122        /* Set initial offset for packet header extraction for the first
1123         * searching loop
1124         */
1125        val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1126        val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1127        val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1128        mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1129}
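/* Usage sketch (loop-count constant assumed from mvpp2.h): each port is
 * expected to start its first lookup at the Marvell Header stage, with a
 * bounded loop count and no initial header offset:
 *
 *        mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *                               MVPP2_PRS_PORT_LU_MAX, 0);
 */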
1130
1131/* Default flow entries initialization for all ports */
1132static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1133{
1134        struct mvpp2_prs_entry pe;
1135        int port;
1136
1137        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1138                memset(&pe, 0, sizeof(pe));
1139                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1140                pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1141
1142                /* Mask all ports */
1143                mvpp2_prs_tcam_port_map_set(&pe, 0);
1144
1145                /* Set flow ID */
1146                mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1147                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1148
1149                /* Update shadow table and hw entry */
1150                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1151                mvpp2_prs_hw_write(priv, &pe);
1152        }
1153}
1154
1155/* Set default entry for Marvell Header field */
1156static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1157{
1158        struct mvpp2_prs_entry pe;
1159
1160        memset(&pe, 0, sizeof(pe));
1161
1162        pe.index = MVPP2_PE_MH_DEFAULT;
1163        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1164        mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1165                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1166        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1167
1168        /* Unmask all ports */
1169        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1170
1171        /* Update shadow table and hw entry */
1172        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1173        mvpp2_prs_hw_write(priv, &pe);
1174
1175        /* Set MH entry that skips the parser */
1176        pe.index = MVPP2_PE_MH_SKIP_PRS;
1177        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1178        mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1179                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1180        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1181        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1182
1183        /* Mask all ports */
1184        mvpp2_prs_tcam_port_map_set(&pe, 0);
1185
1186        /* Update shadow table and hw entry */
1187        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1188        mvpp2_prs_hw_write(priv, &pe);
1189}
1190
1191/* Set default entries (placeholders) for promiscuous, non-promiscuous and
1192 * multicast MAC addresses
1193 */
1194static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1195{
1196        struct mvpp2_prs_entry pe;
1197
1198        memset(&pe, 0, sizeof(pe));
1199
1200        /* Non-promiscuous mode for all ports - DROP unknown packets */
1201        pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1202        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1203
1204        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1205                                 MVPP2_PRS_RI_DROP_MASK);
1206        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1207        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1208
1209        /* Unmask all ports */
1210        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1211
1212        /* Update shadow table and hw entry */
1213        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1214        mvpp2_prs_hw_write(priv, &pe);
1215
1216        /* Create dummy entries for drop all and promiscuous modes */
1217        mvpp2_prs_drop_fc(priv);
1218        mvpp2_prs_mac_drop_all_set(priv, 0, false);
1219        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
1220        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
1221}
1222
1223/* Set default entries for various types of dsa packets */
1224static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
1225{
1226        struct mvpp2_prs_entry pe;
1227
1228        /* Untagged EDSA entry - placeholder */
1229        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
1230                              MVPP2_PRS_EDSA);
1231
1232        /* Tagged EDSA entry - placeholder */
1233        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
1234
1235        /* Untagged DSA entry - placeholder */
1236        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
1237                              MVPP2_PRS_DSA);
1238
1239        /* Tagged DSA entry - placeholder */
1240        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
1241
1242        /* Untagged EDSA ethertype entry - placeholder */
1243        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
1244                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
1245
1246        /* Tagged EDSA ethertype entry - placeholder */
1247        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
1248                                        MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
1249
1250        /* Untagged DSA ethertype entry */
1251        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
1252                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
1253
1254        /* Tagged DSA ethertype entry */
1255        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
1256                                        MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
1257
1258        /* Set default entry, in case DSA or EDSA tag not found */
1259        memset(&pe, 0, sizeof(pe));
1260        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1261        pe.index = MVPP2_PE_DSA_DEFAULT;
1262        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1263
1264        /* Shift 0 bytes */
1265        mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1266        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1267
1268        /* Clear all sram ai bits for next iteration */
1269        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1270
1271        /* Unmask all ports */
1272        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1273
1274        mvpp2_prs_hw_write(priv, &pe);
1275}
1276
1277/* Initialize parser entries for VID filtering */
1278static void mvpp2_prs_vid_init(struct mvpp2 *priv)
1279{
1280        struct mvpp2_prs_entry pe;
1281
1282        memset(&pe, 0, sizeof(pe));
1283
1284        /* Set default vid entry */
1285        pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
1286        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1287
1288        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
1289
1290        /* Skip VLAN header - Set offset to 4 bytes */
1291        mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
1292                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1293
1294        /* Clear all ai bits for next iteration */
1295        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1296
1297        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1298
1299        /* Unmask all ports */
1300        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1301
1302        /* Update shadow table and hw entry */
1303        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1304        mvpp2_prs_hw_write(priv, &pe);
1305
1306        /* Set default vid entry for extended DSA */
1307        memset(&pe, 0, sizeof(pe));
1308
1309        /* Set default vid entry */
1310        pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
1311        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1312
1313        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
1314                                 MVPP2_PRS_EDSA_VID_AI_BIT);
1315
1316        /* Skip VLAN header - Set offset to 8 bytes */
1317        mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
1318                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1319
1320        /* Clear all ai bits for next iteration */
1321        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1322
1323        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1324
1325        /* Unmask all ports */
1326        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1327
1328        /* Update shadow table and hw entry */
1329        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1330        mvpp2_prs_hw_write(priv, &pe);
1331}
1332
1333/* Match basic ethertypes */
1334static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1335{
1336        struct mvpp2_prs_entry pe;
1337        int tid, ihl;
1338
1339        /* Ethertype: PPPoE */
1340        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1341                                        MVPP2_PE_LAST_FREE_TID);
1342        if (tid < 0)
1343                return tid;
1344
1345        memset(&pe, 0, sizeof(pe));
1346        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1347        pe.index = tid;
1348
1349        mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
1350
1351        mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1352                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1353        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1354        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1355                                 MVPP2_PRS_RI_PPPOE_MASK);
1356
1357        /* Update shadow table and hw entry */
1358        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1359        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1360        priv->prs_shadow[pe.index].finish = false;
1361        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1362                                MVPP2_PRS_RI_PPPOE_MASK);
1363        mvpp2_prs_hw_write(priv, &pe);
1364
1365        /* Ethertype: ARP */
1366        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1367                                        MVPP2_PE_LAST_FREE_TID);
1368        if (tid < 0)
1369                return tid;
1370
1371        memset(&pe, 0, sizeof(pe));
1372        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1373        pe.index = tid;
1374
1375        mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
1376
1377        /* Generate flow in the next iteration */
1378        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1379        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1380        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1381                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1382        /* Set L3 offset */
1383        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1384                                  MVPP2_ETH_TYPE_LEN,
1385                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1386
1387        /* Update shadow table and hw entry */
1388        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1389        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1390        priv->prs_shadow[pe.index].finish = true;
1391        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1392                                MVPP2_PRS_RI_L3_PROTO_MASK);
1393        mvpp2_prs_hw_write(priv, &pe);
1394
1395        /* Ethertype: LBTD */
1396        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1397                                        MVPP2_PE_LAST_FREE_TID);
1398        if (tid < 0)
1399                return tid;
1400
1401        memset(&pe, 0, sizeof(pe));
1402        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1403        pe.index = tid;
1404
1405        mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1406
1407        /* Generate flow in the next iteration */
1408        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1409        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1410        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1411                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1412                                 MVPP2_PRS_RI_CPU_CODE_MASK |
1413                                 MVPP2_PRS_RI_UDF3_MASK);
1414        /* Set L3 offset */
1415        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1416                                  MVPP2_ETH_TYPE_LEN,
1417                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1418
1419        /* Update shadow table and hw entry */
1420        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1421        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1422        priv->prs_shadow[pe.index].finish = true;
1423        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1424                                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1425                                MVPP2_PRS_RI_CPU_CODE_MASK |
1426                                MVPP2_PRS_RI_UDF3_MASK);
1427        mvpp2_prs_hw_write(priv, &pe);
1428
1429        /* Ethertype: IPv4 with header length >= 5 */
1430        for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
1431                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1432                                                MVPP2_PE_LAST_FREE_TID);
1433                if (tid < 0)
1434                        return tid;
1435
1436                memset(&pe, 0, sizeof(pe));
1437                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1438                pe.index = tid;
1439
1440                mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
1441                mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1442                                             MVPP2_PRS_IPV4_HEAD | ihl,
1443                                             MVPP2_PRS_IPV4_HEAD_MASK |
1444                                             MVPP2_PRS_IPV4_IHL_MASK);
1445
1446                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1447                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1448                                         MVPP2_PRS_RI_L3_PROTO_MASK);
1449                /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
1450                mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
1451                                         sizeof(struct iphdr) - 4,
1452                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1453                /* Set L4 offset */
1454                mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1455                                          MVPP2_ETH_TYPE_LEN + (ihl * 4),
1456                                          MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1457
1458                /* Update shadow table and hw entry */
1459                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1460                priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1461                priv->prs_shadow[pe.index].finish = false;
1462                mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1463                                        MVPP2_PRS_RI_L3_PROTO_MASK);
1464                mvpp2_prs_hw_write(priv, &pe);
1465        }
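            /* The shift above is MVPP2_ETH_TYPE_LEN (2) + sizeof(struct iphdr)
             * (20) - 4 = 18 bytes, which parks the next lookup on the IPv4
             * destination address (header offset 16, with or without options).
             * The L4 UDF offset is 2 + ihl * 4 from the ethertype, e.g. 22
             * bytes for IHL = 5, i.e. right past a minimal IPv4 header.
             */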
1466
1467        /* Ethertype: IPv6 without options */
1468        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1469                                        MVPP2_PE_LAST_FREE_TID);
1470        if (tid < 0)
1471                return tid;
1472
1473        memset(&pe, 0, sizeof(pe));
1474        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1475        pe.index = tid;
1476
1477        mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
1478
1479        /* Skip DIP of IPV6 header */
1480        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1481                                 MVPP2_MAX_L3_ADDR_SIZE,
1482                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1483        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1484        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1485                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1486        /* Set L3 offset */
1487        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1488                                  MVPP2_ETH_TYPE_LEN,
1489                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1490
1491        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1492        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1493        priv->prs_shadow[pe.index].finish = false;
1494        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
1495                                MVPP2_PRS_RI_L3_PROTO_MASK);
1496        mvpp2_prs_hw_write(priv, &pe);
1497
1498        /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
1499        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1500        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1501        pe.index = MVPP2_PE_ETH_TYPE_UN;
1502
1503        /* Unmask all ports */
1504        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1505
1506        /* Generate flow in the next iteration */
1507        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1508        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1509        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1510                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1511        /* Set L3 offset even if the L3 protocol is unknown */
1512        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1513                                  MVPP2_ETH_TYPE_LEN,
1514                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1515
1516        /* Update shadow table and hw entry */
1517        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1518        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1519        priv->prs_shadow[pe.index].finish = true;
1520        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
1521                                MVPP2_PRS_RI_L3_PROTO_MASK);
1522        mvpp2_prs_hw_write(priv, &pe);
1523
1524        return 0;
1525}
1526
1527/* Configure vlan entries and detect up to 2 successive VLAN tags.
1528 * Possible options:
1529 * 0x8100, 0x88A8
1530 * 0x8100, 0x8100
1531 * 0x8100
1532 * 0x88A8
1533 */
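    /* prs_double_vlans[] is bookkeeping only: one flag per double-VLAN slot,
     * consumed by the mvpp2_prs_double_vlan_add() helper earlier in this file.
     * Two catch-all entries close the VLAN stage: MVPP2_PE_VLAN_DBL flags the
     * frame as double tagged and hands it to the VID lookup, while
     * MVPP2_PE_VLAN_NONE flags it as untagged and continues straight to L2.
     */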
1534static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
1535{
1536        struct mvpp2_prs_entry pe;
1537        int err;
1538
1539        priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
1540                                              MVPP2_PRS_DBL_VLANS_MAX,
1541                                              sizeof(bool), GFP_KERNEL);
1542        if (!priv->prs_double_vlans)
1543                return -ENOMEM;
1544
1545        /* Double VLAN: 0x8100, 0x88A8 */
1546        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
1547                                        MVPP2_PRS_PORT_MASK);
1548        if (err)
1549                return err;
1550
1551        /* Double VLAN: 0x8100, 0x8100 */
1552        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
1553                                        MVPP2_PRS_PORT_MASK);
1554        if (err)
1555                return err;
1556
1557        /* Single VLAN: 0x88A8 */
1558        err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
1559                                 MVPP2_PRS_PORT_MASK);
1560        if (err)
1561                return err;
1562
1563        /* Single VLAN: 0x8100 */
1564        err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
1565                                 MVPP2_PRS_PORT_MASK);
1566        if (err)
1567                return err;
1568
1569        /* Set default double vlan entry */
1570        memset(&pe, 0, sizeof(pe));
1571        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1572        pe.index = MVPP2_PE_VLAN_DBL;
1573
1574        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
1575
1576        /* Clear ai for next iterations */
1577        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1578        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1579                                 MVPP2_PRS_RI_VLAN_MASK);
1580
1581        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
1582                                 MVPP2_PRS_DBL_VLAN_AI_BIT);
1583        /* Unmask all ports */
1584        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1585
1586        /* Update shadow table and hw entry */
1587        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1588        mvpp2_prs_hw_write(priv, &pe);
1589
1590        /* Set default vlan none entry */
1591        memset(&pe, 0, sizeof(pe));
1592        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1593        pe.index = MVPP2_PE_VLAN_NONE;
1594
1595        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1596        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1597                                 MVPP2_PRS_RI_VLAN_MASK);
1598
1599        /* Unmask all ports */
1600        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1601
1602        /* Update shadow table and hw entry */
1603        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1604        mvpp2_prs_hw_write(priv, &pe);
1605
1606        return 0;
1607}
1608
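    /* The ETH_P_PPP_SES entry added in mvpp2_prs_etype_init() already shifted
     * the lookup window onto the 2-byte PPP protocol field, so the entries
     * below simply match PPP_IP / PPP_IPV6 at offset 0 and then mirror the
     * plain-Ethernet IPv4/IPv6 handling.
     */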
1609/* Set entries for PPPoE ethertype */
1610static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
1611{
1612        struct mvpp2_prs_entry pe;
1613        int tid;
1614
1615        /* IPv4 over PPPoE with options */
1616        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1617                                        MVPP2_PE_LAST_FREE_TID);
1618        if (tid < 0)
1619                return tid;
1620
1621        memset(&pe, 0, sizeof(pe));
1622        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1623        pe.index = tid;
1624
1625        mvpp2_prs_match_etype(&pe, 0, PPP_IP);
1626
1627        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1628        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1629                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1630        /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
1631        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
1632                                 sizeof(struct iphdr) - 4,
1633                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1634        /* Set L3 offset */
1635        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1636                                  MVPP2_ETH_TYPE_LEN,
1637                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1638
1639        /* Update shadow table and hw entry */
1640        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1641        mvpp2_prs_hw_write(priv, &pe);
1642
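            /* The "without options" entry below deliberately reuses the TCAM
             * and SRAM contents built above (hence no memset()): it only adds
             * the version/IHL byte match and rewrites the result-info words.
             */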
1643        /* IPv4 over PPPoE without options */
1644        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1645                                        MVPP2_PE_LAST_FREE_TID);
1646        if (tid < 0)
1647                return tid;
1648
1649        pe.index = tid;
1650
1651        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1652                                     MVPP2_PRS_IPV4_HEAD |
1653                                     MVPP2_PRS_IPV4_IHL_MIN,
1654                                     MVPP2_PRS_IPV4_HEAD_MASK |
1655                                     MVPP2_PRS_IPV4_IHL_MASK);
1656
1657        /* Clear ri before updating */
1658        pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1659        pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1660        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1661                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1662
1663        /* Update shadow table and hw entry */
1664        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1665        mvpp2_prs_hw_write(priv, &pe);
1666
1667        /* IPv6 over PPPoE */
1668        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1669                                        MVPP2_PE_LAST_FREE_TID);
1670        if (tid < 0)
1671                return tid;
1672
1673        memset(&pe, 0, sizeof(pe));
1674        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1675        pe.index = tid;
1676
1677        mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
1678
1679        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1680        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1681                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1682        /* Jump to DIP of IPV6 header */
1683        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1684                                 MVPP2_MAX_L3_ADDR_SIZE,
1685                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1686        /* Set L3 offset */
1687        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1688                                  MVPP2_ETH_TYPE_LEN,
1689                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1690
1691        /* Update shadow table and hw entry */
1692        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1693        mvpp2_prs_hw_write(priv, &pe);
1694
1695        /* Non-IP over PPPoE */
1696        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1697                                        MVPP2_PE_LAST_FREE_TID);
1698        if (tid < 0)
1699                return tid;
1700
1701        memset(&pe, 0, sizeof(pe));
1702        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1703        pe.index = tid;
1704
1705        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1706                                 MVPP2_PRS_RI_L3_PROTO_MASK);
1707
1708        /* Finished: go to flowid generation */
1709        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1710        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1711        /* Set L3 offset even if it's unknown L3 */
1712        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1713                                  MVPP2_ETH_TYPE_LEN,
1714                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1715
1716        /* Update shadow table and hw entry */
1717        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1718        mvpp2_prs_hw_write(priv, &pe);
1719
1720        return 0;
1721}
1722
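    /* The IPv4 stage runs in two passes, toggled by MVPP2_PRS_IPV4_DIP_AI_BIT:
     * the first pass (bit clear) looks at the destination address the L2
     * ethertype entry shifted onto, classifies unicast/multicast/broadcast,
     * then shifts back towards the protocol field and sets the bit; the second
     * pass (bit set) classifies L4, via the entries installed by
     * mvpp2_prs_ip4_proto() and the MVPP2_PE_IP4_PROTO_UN fall-through below.
     */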
1723/* Initialize entries for IPv4 */
1724static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
1725{
1726        struct mvpp2_prs_entry pe;
1727        int err;
1728
1729        /* Set entries for TCP, UDP and IGMP over IPv4 */
1730        err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
1731                                  MVPP2_PRS_RI_L4_PROTO_MASK);
1732        if (err)
1733                return err;
1734
1735        err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
1736                                  MVPP2_PRS_RI_L4_PROTO_MASK);
1737        if (err)
1738                return err;
1739
1740        err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
1741                                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1742                                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1743                                  MVPP2_PRS_RI_CPU_CODE_MASK |
1744                                  MVPP2_PRS_RI_UDF3_MASK);
1745        if (err)
1746                return err;
1747
1748        /* IPv4 Broadcast */
1749        err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
1750        if (err)
1751                return err;
1752
1753        /* IPv4 Multicast */
1754        err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1755        if (err)
1756                return err;
1757
1758        /* Default IPv4 entry for unknown protocols */
1759        memset(&pe, 0, sizeof(pe));
1760        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1761        pe.index = MVPP2_PE_IP4_PROTO_UN;
1762
1763        /* Finished: go to flowid generation */
1764        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1765        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1766
1767        /* Set L3 offset */
1768        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
1769                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1770        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1771        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1772                                 MVPP2_PRS_RI_L4_PROTO_MASK);
1773
1774        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1775                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
1776        /* Unmask all ports */
1777        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1778
1779        /* Update shadow table and hw entry */
1780        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1781        mvpp2_prs_hw_write(priv, &pe);
1782
1783        /* Default IPv4 entry for unicast address */
1784        memset(&pe, 0, sizeof(pe));
1785        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1786        pe.index = MVPP2_PE_IP4_ADDR_UN;
1787
1788        /* Go again to ipv4 */
1789        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1790
1791        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1792                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
1793
1794        /* Shift back to IPv4 proto */
1795        mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
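            /* -12 from the destination address (header offset 16) leaves the
             * window at header offset 4, so the protocol byte at offset 9 sits
             * five bytes into the next lookup window.
             */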
1796
1797        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1798                                 MVPP2_PRS_RI_L3_ADDR_MASK);
1799        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1800
1801        /* Unmask all ports */
1802        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1803
1804        /* Update shadow table and hw entry */
1805        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1806        mvpp2_prs_hw_write(priv, &pe);
1807
1808        return 0;
1809}
1810
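    /* IPv6 follows the same two-pass idea, using MVPP2_PRS_IPV6_NO_EXT_AI_BIT:
     * the first pass classifies the destination address and shifts back to the
     * Next Header byte, the second pass matches the L4 protocol there.
     * IPPROTO_IPIP (IPv4-in-IPv6) is tagged with UDF7 for DS-Lite, and a
     * dedicated entry drops packets whose hop limit is already zero.
     */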
1811/* Initialize entries for IPv6 */
1812static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
1813{
1814        struct mvpp2_prs_entry pe;
1815        int tid, err;
1816
1817        /* Set entries for TCP, UDP and ICMP over IPv6 */
1818        err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
1819                                  MVPP2_PRS_RI_L4_TCP,
1820                                  MVPP2_PRS_RI_L4_PROTO_MASK);
1821        if (err)
1822                return err;
1823
1824        err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
1825                                  MVPP2_PRS_RI_L4_UDP,
1826                                  MVPP2_PRS_RI_L4_PROTO_MASK);
1827        if (err)
1828                return err;
1829
1830        err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
1831                                  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1832                                  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1833                                  MVPP2_PRS_RI_CPU_CODE_MASK |
1834                                  MVPP2_PRS_RI_UDF3_MASK);
1835        if (err)
1836                return err;
1837
1838        /* IPv4 is the last header, similar to the 6-TCP or 17-UDP cases */
1839        /* Result Info: UDF7=1, DS lite */
1840        err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
1841                                  MVPP2_PRS_RI_UDF7_IP6_LITE,
1842                                  MVPP2_PRS_RI_UDF7_MASK);
1843        if (err)
1844                return err;
1845
1846        /* IPv6 multicast */
1847        err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1848        if (err)
1849                return err;
1850
1851        /* Entry for checking hop limit */
1852        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1853                                        MVPP2_PE_LAST_FREE_TID);
1854        if (tid < 0)
1855                return tid;
1856
1857        memset(&pe, 0, sizeof(pe));
1858        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1859        pe.index = tid;
1860
1861        /* Finished: go to flowid generation */
1862        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1863        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1864        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
1865                                 MVPP2_PRS_RI_DROP_MASK,
1866                                 MVPP2_PRS_RI_L3_PROTO_MASK |
1867                                 MVPP2_PRS_RI_DROP_MASK);
1868
1869        mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
1870        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1871                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1872
1873        /* Update shadow table and hw entry */
1874        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1875        mvpp2_prs_hw_write(priv, &pe);
1876
1877        /* Default IPv6 entry for unknown protocols */
1878        memset(&pe, 0, sizeof(pe));
1879        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1880        pe.index = MVPP2_PE_IP6_PROTO_UN;
1881
1882        /* Finished: go to flowid generation */
1883        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1884        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1885        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1886                                 MVPP2_PRS_RI_L4_PROTO_MASK);
1887        /* Set L4 offset relative to our current position */
1888        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1889                                  sizeof(struct ipv6hdr) - 4,
1890                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1891
1892        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1893                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1894        /* Unmask all ports */
1895        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1896
1897        /* Update shadow table and hw entry */
1898        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1899        mvpp2_prs_hw_write(priv, &pe);
1900
1901        /* Default IPv6 entry for unknown ext protocols */
1902        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1903        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1904        pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
1905
1906        /* Finished: go to flowid generation */
1907        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1908        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1909        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1910                                 MVPP2_PRS_RI_L4_PROTO_MASK);
1911
1912        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
1913                                 MVPP2_PRS_IPV6_EXT_AI_BIT);
1914        /* Unmask all ports */
1915        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1916
1917        /* Update shadow table and hw entry */
1918        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1919        mvpp2_prs_hw_write(priv, &pe);
1920
1921        /* Default IPv6 entry for unicast address */
1922        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1923        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1924        pe.index = MVPP2_PE_IP6_ADDR_UN;
1925
1926        /* Finished: go to IPv6 again */
1927        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1928        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1929                                 MVPP2_PRS_RI_L3_ADDR_MASK);
1930        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1931                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1932        /* Shift back to IPV6 NH */
1933        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
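            /* -18 from the start of the destination address (header offset 24)
             * lands on header offset 6, i.e. the Next Header byte.
             */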
1934
1935        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1936        /* Unmask all ports */
1937        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1938
1939        /* Update shadow table and hw entry */
1940        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1941        mvpp2_prs_hw_write(priv, &pe);
1942
1943        return 0;
1944}
1945
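    /* A VID filter entry carries the TCI in TCAM data bytes 2-3 of the lookup
     * window, so the 12-bit VID is rebuilt from the low nibble of byte 2 plus
     * all of byte 3, e.g. VID 0x123 reads back as byte[0] & 0xf = 0x1 and
     * byte[1] = 0x23.
     */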
1946/* Find tcam entry with matched pair <vid,port> */
1947static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
1948{
1949        unsigned char byte[2], enable[2];
1950        struct mvpp2_prs_entry pe;
1951        u16 rvid, rmask;
1952        int tid;
1953
1954        /* Go through all entries with MVPP2_PRS_LU_VID */
1955        for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
1956             tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
1957                if (!port->priv->prs_shadow[tid].valid ||
1958                    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1959                        continue;
1960
1961                mvpp2_prs_init_from_hw(port->priv, &pe, tid);
1962
1963                mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1964                mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1965
1966                rvid = ((byte[0] & 0xf) << 8) + byte[1];
1967                rmask = ((enable[0] & 0xf) << 8) + enable[1];
1968
1969                if (rvid != vid || rmask != mask)
1970                        continue;
1971
1972                return tid;
1973        }
1974
1975        return -ENOENT;
1976}
1977
1978/* Write parser entry for VID filtering */
1979int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
1980{
1981        unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
1982                                 port->id * MVPP2_PRS_VLAN_FILT_MAX;
1983        unsigned int mask = 0xfff, reg_val, shift;
1984        struct mvpp2 *priv = port->priv;
1985        struct mvpp2_prs_entry pe;
1986        int tid;
1987
1988        memset(&pe, 0, sizeof(pe));
1989
1990        /* Scan TCAM and see if entry with this <vid,port> already exists */
1991        tid = mvpp2_prs_vid_range_find(port, vid, mask);
1992
1993        reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
1994        if (reg_val & MVPP2_DSA_EXTENDED)
1995                shift = MVPP2_VLAN_TAG_EDSA_LEN;
1996        else
1997                shift = MVPP2_VLAN_TAG_LEN;
1998
1999        /* No such entry */
2000        if (tid < 0) {
2001
2002                /* Go through all entries from first to last in vlan range */
2003                tid = mvpp2_prs_tcam_first_free(priv, vid_start,
2004                                                vid_start +
2005                                                MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
2006
2007                /* There isn't room for a new VID filter */
2008                if (tid < 0)
2009                        return tid;
2010
2011                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2012                pe.index = tid;
2013
2014                /* Mask all ports */
2015                mvpp2_prs_tcam_port_map_set(&pe, 0);
2016        } else {
2017                mvpp2_prs_init_from_hw(priv, &pe, tid);
2018        }
2019
2020        /* Enable the current port */
2021        mvpp2_prs_tcam_port_set(&pe, port->id, true);
2022
2023        /* Continue - set next lookup */
2024        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2025
2026        /* Skip VLAN header - Set offset to 4 or 8 bytes */
2027        mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2028
2029        /* Set match on VID */
2030        mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
2031
2032        /* Clear all ai bits for next iteration */
2033        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2034
2035        /* Update shadow table */
2036        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2037        mvpp2_prs_hw_write(priv, &pe);
2038
2039        return 0;
2040}
2041
2042/* Remove parser entry for VID filtering */
2043void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
2044{
2045        struct mvpp2 *priv = port->priv;
2046        int tid;
2047
2048        /* Scan TCAM and see if entry with this <vid,port> already exists */
2049        tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
2050
2051        /* No such entry */
2052        if (tid < 0)
2053                return;
2054
2055        mvpp2_prs_hw_inv(priv, tid);
2056        priv->prs_shadow[tid].valid = false;
2057}
2058
2059/* Remove all existing VID filters on this port */
2060void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2061{
2062        struct mvpp2 *priv = port->priv;
2063        int tid;
2064
2065        for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2066             tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2067                if (priv->prs_shadow[tid].valid) {
2068                        mvpp2_prs_hw_inv(priv, tid);
2069                        priv->prs_shadow[tid].valid = false;
2070                }
2071        }
2072}
2073
2074/* Remove the VID filtering guard entry for this port */
2075void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2076{
2077        unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2078        struct mvpp2 *priv = port->priv;
2079
2080        /* Invalidate the guard entry */
2081        mvpp2_prs_hw_inv(priv, tid);
2082
2083        priv->prs_shadow[tid].valid = false;
2084}
2085
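    /* The guard entry sits at the per-port MVPP2_PRS_VID_PORT_DFLT index,
     * behind the per-port VID filter range, so explicit matches added by
     * mvpp2_prs_vid_entry_add() are expected to win and the guard only drops
     * VLAN frames whose VID was never added on this port.
     */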
2086/* Add guard entry that drops packets when no VID is matched on this port */
2087void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
2088{
2089        unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2090        struct mvpp2 *priv = port->priv;
2091        unsigned int reg_val, shift;
2092        struct mvpp2_prs_entry pe;
2093
2094        if (priv->prs_shadow[tid].valid)
2095                return;
2096
2097        memset(&pe, 0, sizeof(pe));
2098
2099        pe.index = tid;
2100
2101        reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
2102        if (reg_val & MVPP2_DSA_EXTENDED)
2103                shift = MVPP2_VLAN_TAG_EDSA_LEN;
2104        else
2105                shift = MVPP2_VLAN_TAG_LEN;
2106
2107        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2108
2109        /* Mask all ports */
2110        mvpp2_prs_tcam_port_map_set(&pe, 0);
2111
2112        /* Update port mask */
2113        mvpp2_prs_tcam_port_set(&pe, port->id, true);
2114
2115        /* Continue - set next lookup */
2116        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2117
2118        /* Skip VLAN header - Set offset to 4 or 8 bytes */
2119        mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2120
2121        /* Drop VLAN packets that don't belong to any VIDs on this port */
2122        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2123                                 MVPP2_PRS_RI_DROP_MASK);
2124
2125        /* Clear all ai bits for next iteration */
2126        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2127
2128        /* Update shadow table */
2129        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2130        mvpp2_prs_hw_write(priv, &pe);
2131}
2132
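    /* The whole TCAM/SRAM is zeroed and invalidated first, every port is set
     * to start lookups at MVPP2_PRS_LU_MH, and the stages are then initialized
     * roughly in the order a frame traverses them: MH -> MAC -> DSA -> VID ->
     * L2 ethertype -> VLAN -> PPPoE -> IP.
     */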
2133/* Parser default initialization */
2134int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
2135{
2136        int err, index, i;
2137
2138        /* Enable tcam table */
2139        mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2140
2141        /* Clear all tcam and sram entries */
2142        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2143                mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2144                for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2145                        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2146
2147                mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2148                for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2149                        mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2150        }
2151
2152        /* Invalidate all tcam entries */
2153        for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2154                mvpp2_prs_hw_inv(priv, index);
2155
2156        priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2157                                        sizeof(*priv->prs_shadow),
2158                                        GFP_KERNEL);
2159        if (!priv->prs_shadow)
2160                return -ENOMEM;
2161
2162        /* Always start from lookup = 0 */
2163        for (index = 0; index < MVPP2_MAX_PORTS; index++)
2164                mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2165                                       MVPP2_PRS_PORT_LU_MAX, 0);
2166
2167        mvpp2_prs_def_flow_init(priv);
2168
2169        mvpp2_prs_mh_init(priv);
2170
2171        mvpp2_prs_mac_init(priv);
2172
2173        mvpp2_prs_dsa_init(priv);
2174
2175        mvpp2_prs_vid_init(priv);
2176
2177        err = mvpp2_prs_etype_init(priv);
2178        if (err)
2179                return err;
2180
2181        err = mvpp2_prs_vlan_init(pdev, priv);
2182        if (err)
2183                return err;
2184
2185        err = mvpp2_prs_pppoe_init(priv);
2186        if (err)
2187                return err;
2188
2189        err = mvpp2_prs_ip6_init(priv);
2190        if (err)
2191                return err;
2192
2193        err = mvpp2_prs_ip4_init(priv);
2194        if (err)
2195                return err;
2196
2197        return 0;
2198}
2199
2200/* Compare MAC DA with tcam entry data */
2201static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2202                                       const u8 *da, unsigned char *mask)
2203{
2204        unsigned char tcam_byte, tcam_mask;
2205        int index;
2206
2207        for (index = 0; index < ETH_ALEN; index++) {
2208                mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2209                if (tcam_mask != mask[index])
2210                        return false;
2211
2212                if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2213                        return false;
2214        }
2215
2216        return true;
2217}
2218
2219/* Find tcam entry with matched pair <MAC DA, port> */
2220static int
2221mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2222                            unsigned char *mask, int udf_type)
2223{
2224        struct mvpp2_prs_entry pe;
2225        int tid;
2226
2227        /* Go through all entries with MVPP2_PRS_LU_MAC */
2228        for (tid = MVPP2_PE_MAC_RANGE_START;
2229             tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2230                unsigned int entry_pmap;
2231
2232                if (!priv->prs_shadow[tid].valid ||
2233                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2234                    (priv->prs_shadow[tid].udf != udf_type))
2235                        continue;
2236
2237                mvpp2_prs_init_from_hw(priv, &pe, tid);
2238                entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
2239
2240                if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
2241                    entry_pmap == pmap)
2242                        return tid;
2243        }
2244
2245        return -ENOENT;
2246}
2247
2248/* Update parser's mac da entry */
2249int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
2250{
2251        unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2252        struct mvpp2 *priv = port->priv;
2253        unsigned int pmap, len, ri;
2254        struct mvpp2_prs_entry pe;
2255        int tid;
2256
2257        memset(&pe, 0, sizeof(pe));
2258
2259        /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
2260        tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
2261                                          MVPP2_PRS_UDF_MAC_DEF);
2262
2263        /* No such entry */
2264        if (tid < 0) {
2265                if (!add)
2266                        return 0;
2267
2268                /* Create new TCAM entry */
2269                /* Go through all entries from first to last */
2270                tid = mvpp2_prs_tcam_first_free(priv,
2271                                                MVPP2_PE_MAC_RANGE_START,
2272                                                MVPP2_PE_MAC_RANGE_END);
2273                if (tid < 0)
2274                        return tid;
2275
2276                pe.index = tid;
2277
2278                /* Mask all ports */
2279                mvpp2_prs_tcam_port_map_set(&pe, 0);
2280        } else {
2281                mvpp2_prs_init_from_hw(priv, &pe, tid);
2282        }
2283
2284        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2285
2286        /* Update port mask */
2287        mvpp2_prs_tcam_port_set(&pe, port->id, add);
2288
2289        /* Invalidate the entry if no ports are left enabled */
2290        pmap = mvpp2_prs_tcam_port_map_get(&pe);
2291        if (pmap == 0) {
2292                if (add)
2293                        return -EINVAL;
2294
2295                mvpp2_prs_hw_inv(priv, pe.index);
2296                priv->prs_shadow[pe.index].valid = false;
2297                return 0;
2298        }
2299
2300        /* Continue - set next lookup */
2301        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2302
2303        /* Set match on DA */
2304        len = ETH_ALEN;
2305        while (len--)
2306                mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
2307
2308        /* Set result info bits */
2309        if (is_broadcast_ether_addr(da)) {
2310                ri = MVPP2_PRS_RI_L2_BCAST;
2311        } else if (is_multicast_ether_addr(da)) {
2312                ri = MVPP2_PRS_RI_L2_MCAST;
2313        } else {
2314                ri = MVPP2_PRS_RI_L2_UCAST;
2315
2316                if (ether_addr_equal(da, port->dev->dev_addr))
2317                        ri |= MVPP2_PRS_RI_MAC_ME_MASK;
2318        }
2319
2320        mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2321                                 MVPP2_PRS_RI_MAC_ME_MASK);
2322        mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2323                                MVPP2_PRS_RI_MAC_ME_MASK);
2324
2325        /* Shift to ethertype */
2326        mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2327                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2328
2329        /* Update shadow table and hw entry */
2330        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
2331        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2332        mvpp2_prs_hw_write(priv, &pe);
2333
2334        return 0;
2335}
2336
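    /* Swap the parser's own-MAC entry when the interface address changes: drop
     * the entry for the old address, install one for the new address, then
     * record the new address in the netdev.
     */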
2337int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2338{
2339        struct mvpp2_port *port = netdev_priv(dev);
2340        int err;
2341
2342        /* Remove old parser entry */
2343        err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2344        if (err)
2345                return err;
2346
2347        /* Add new parser entry */
2348        err = mvpp2_prs_mac_da_accept(port, da, true);
2349        if (err)
2350                return err;
2351
2352        /* Set addr in the device */
2353        ether_addr_copy(dev->dev_addr, da);
2354
2355        return 0;
2356}
2357
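    /* Flush every MAC filter entry learned for this port, keeping only the
     * broadcast entry and the port's own unicast address.
     */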
2358void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
2359{
2360        struct mvpp2 *priv = port->priv;
2361        struct mvpp2_prs_entry pe;
2362        unsigned long pmap;
2363        int index, tid;
2364
2365        for (tid = MVPP2_PE_MAC_RANGE_START;
2366             tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2367                unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
2368
2369                if (!priv->prs_shadow[tid].valid ||
2370                    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2371                    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
2372                        continue;
2373
2374                mvpp2_prs_init_from_hw(priv, &pe, tid);
2375
2376                pmap = mvpp2_prs_tcam_port_map_get(&pe);
2377
2378                /* We only want entries active on this port */
2379                if (!test_bit(port->id, &pmap))
2380                        continue;
2381
2382                /* Read mac addr from entry */
2383                for (index = 0; index < ETH_ALEN; index++)
2384                        mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
2385                                                     &da_mask[index]);
2386
2387                /* Special cases: don't remove the broadcast entry or the
2388                 * port's own address
2389                 */
2390                if (is_broadcast_ether_addr(da) ||
2391                    ether_addr_equal(da, port->dev->dev_addr))
2392                        continue;
2393
2394                /* Remove entry from TCAM */
2395                mvpp2_prs_mac_da_accept(port, da, false);
2396        }
2397}
2398
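    /* Switch a port between DSA, EDSA and plain (MH / no tag) operation by
     * moving it between the corresponding tagged and untagged DSA parser
     * entries.
     */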
2399int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
2400{
2401        switch (type) {
2402        case MVPP2_TAG_TYPE_EDSA:
2403                /* Add port to EDSA entries */
2404                mvpp2_prs_dsa_tag_set(priv, port, true,
2405                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2406                mvpp2_prs_dsa_tag_set(priv, port, true,
2407                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2408                /* Remove port from DSA entries */
2409                mvpp2_prs_dsa_tag_set(priv, port, false,
2410                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2411                mvpp2_prs_dsa_tag_set(priv, port, false,
2412                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2413                break;
2414
2415        case MVPP2_TAG_TYPE_DSA:
2416                /* Add port to DSA entries */
2417                mvpp2_prs_dsa_tag_set(priv, port, true,
2418                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2419                mvpp2_prs_dsa_tag_set(priv, port, true,
2420                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2421                /* Remove port from EDSA entries */
2422                mvpp2_prs_dsa_tag_set(priv, port, false,
2423                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2424                mvpp2_prs_dsa_tag_set(priv, port, false,
2425                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2426                break;
2427
2428        case MVPP2_TAG_TYPE_MH:
2429        case MVPP2_TAG_TYPE_NONE:
2430                /* Remove port from EDSA and DSA entries */
2431                mvpp2_prs_dsa_tag_set(priv, port, false,
2432                                      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2433                mvpp2_prs_dsa_tag_set(priv, port, false,
2434                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2435                mvpp2_prs_dsa_tag_set(priv, port, false,
2436                                      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2437                mvpp2_prs_dsa_tag_set(priv, port, false,
2438                                      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2439                break;
2440
2441        default:
2442                if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
2443                        return -EINVAL;
2444        }
2445
2446        return 0;
2447}
2448
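    /* Add a classification flow entry: match the four result-info bytes
     * accumulated by the earlier lookup stages and report 'flow' as the flow
     * ID, allocating the TID from the top of the free range downwards (the
     * opposite end from the ethertype entries).
     */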
2449int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
2450{
2451        struct mvpp2_prs_entry pe;
2452        u8 *ri_byte, *ri_byte_mask;
2453        int tid, i;
2454
2455        memset(&pe, 0, sizeof(pe));
2456
2457        tid = mvpp2_prs_tcam_first_free(priv,
2458                                        MVPP2_PE_LAST_FREE_TID,
2459                                        MVPP2_PE_FIRST_FREE_TID);
2460        if (tid < 0)
2461                return tid;
2462
2463        pe.index = tid;
2464
2465        ri_byte = (u8 *)&ri;
2466        ri_byte_mask = (u8 *)&ri_mask;
2467
2468        mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
2469        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2470
2471        for (i = 0; i < 4; i++) {
2472                mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
2473                                             ri_byte_mask[i]);
2474        }
2475
2476        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2477        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2478        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2479        mvpp2_prs_hw_write(priv, &pe);
2480
2481        return 0;
2482}
2483
2484/* Set prs flow for the port */
2485int mvpp2_prs_def_flow(struct mvpp2_port *port)
2486{
2487        struct mvpp2_prs_entry pe;
2488        int tid;
2489
2490        memset(&pe, 0, sizeof(pe));
2491
2492        tid = mvpp2_prs_flow_find(port->priv, port->id);
2493
2494        /* No such entry exists */
2495        if (tid < 0) {
2496                /* Go through all entries from last to first */
2497                tid = mvpp2_prs_tcam_first_free(port->priv,
2498                                                MVPP2_PE_LAST_FREE_TID,
2499                                               MVPP2_PE_FIRST_FREE_TID);
2500                if (tid < 0)
2501                        return tid;
2502
2503                pe.index = tid;
2504
2505                /* Set flow ID */
2506                mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2507                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2508
2509                /* Update shadow table */
2510                mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
2511        } else {
2512                mvpp2_prs_init_from_hw(port->priv, &pe, tid);
2513        }
2514
2515        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2516        mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
2517        mvpp2_prs_hw_write(port->priv, &pe);
2518
2519        return 0;
2520}
2521
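    /* Read the hit counter of one TCAM entry through the indirect hit-index /
     * hit-count register pair.
     */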
2522int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2523{
2524        u32 val;
2525
2526        if (index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
2527                return -EINVAL;
2528
2529        mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2530
2531        val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2532
2533        val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
2534
2535        return val;
2536}
2537