uboot/arch/mips/mach-octeon/include/mach/cvmx-pki.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the hardware Packet Input Data unit.
 */

#ifndef __CVMX_PKI_H__
#define __CVMX_PKI_H__

#include "cvmx-fpa3.h"
#include "cvmx-helper-util.h"
#include "cvmx-helper-cfg.h"
#include "cvmx-error.h"

/* PKI AURA and BPID count are equal to FPA AURA count */
#define CVMX_PKI_NUM_AURA              (cvmx_fpa3_num_auras())
#define CVMX_PKI_NUM_BPID              (cvmx_fpa3_num_auras())
#define CVMX_PKI_NUM_SSO_GROUP         (cvmx_sso_num_xgrp())
#define CVMX_PKI_NUM_CLUSTER_GROUP_MAX 1
#define CVMX_PKI_NUM_CLUSTER_GROUP     (cvmx_pki_num_cl_grp())
#define CVMX_PKI_NUM_CLUSTER           (cvmx_pki_num_clusters())

/* FIXME: Reduce some of these values, convert to routines XXX */
#define CVMX_PKI_NUM_CHANNEL        4096
#define CVMX_PKI_NUM_PKIND          64
#define CVMX_PKI_NUM_INTERNAL_STYLE 256
#define CVMX_PKI_NUM_FINAL_STYLE    64
#define CVMX_PKI_NUM_QPG_ENTRY      2048
#define CVMX_PKI_NUM_MTAG_IDX       (32 / 4) /* 32 registers grouped by 4 */
#define CVMX_PKI_NUM_LTYPE          32
#define CVMX_PKI_NUM_PCAM_BANK      2
#define CVMX_PKI_NUM_PCAM_ENTRY     192
#define CVMX_PKI_NUM_FRAME_CHECK    2
#define CVMX_PKI_NUM_BELTYPE        32
#define CVMX_PKI_MAX_FRAME_SIZE     65535
#define CVMX_PKI_FIND_AVAL_ENTRY    (-1)
#define CVMX_PKI_CLUSTER_ALL        0xf

#ifdef CVMX_SUPPORT_SEPARATE_CLUSTER_CONFIG
#define CVMX_PKI_TOTAL_PCAM_ENTRY                                                                  \
	((CVMX_PKI_NUM_CLUSTER) * (CVMX_PKI_NUM_PCAM_BANK) * (CVMX_PKI_NUM_PCAM_ENTRY))
#else
#define CVMX_PKI_TOTAL_PCAM_ENTRY (CVMX_PKI_NUM_PCAM_BANK * CVMX_PKI_NUM_PCAM_ENTRY)
#endif

static inline unsigned int cvmx_pki_num_clusters(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 2;
	return 4;
}

static inline unsigned int cvmx_pki_num_cl_grp(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 1;
	return 0;
}

enum cvmx_pki_pkind_parse_mode {
	CVMX_PKI_PARSE_LA_TO_LG = 0,  /* Parse LA(L2) to LG */
	CVMX_PKI_PARSE_LB_TO_LG = 1,  /* Parse LB(custom) to LG */
	CVMX_PKI_PARSE_LC_TO_LG = 3,  /* Parse LC(L3) to LG */
	CVMX_PKI_PARSE_LG = 0x3f,     /* Parse LG */
	CVMX_PKI_PARSE_NOTHING = 0x7f /* Parse nothing */
};

enum cvmx_pki_parse_mode_chg {
	CVMX_PKI_PARSE_NO_CHG = 0x0,
	CVMX_PKI_PARSE_SKIP_TO_LB = 0x1,
	CVMX_PKI_PARSE_SKIP_TO_LC = 0x3,
	CVMX_PKI_PARSE_SKIP_TO_LD = 0x7,
	CVMX_PKI_PARSE_SKIP_TO_LG = 0x3f,
	CVMX_PKI_PARSE_SKIP_ALL = 0x7f,
};

enum cvmx_pki_l2_len_mode { PKI_L2_LENCHK_EQUAL_GREATER = 0, PKI_L2_LENCHK_EQUAL_ONLY };

enum cvmx_pki_cache_mode {
	CVMX_PKI_OPC_MODE_STT = 0LL,      /* All blocks write through to DRAM */
	CVMX_PKI_OPC_MODE_STF = 1LL,      /* All blocks into L2 */
	CVMX_PKI_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
	CVMX_PKI_OPC_MODE_STF2_STT = 3LL  /* 1st, 2nd blocks L2, rest DRAM */
};

/**
 * Tag type definitions
 */
enum cvmx_sso_tag_type {
	CVMX_SSO_TAG_TYPE_ORDERED = 0L,
	CVMX_SSO_TAG_TYPE_ATOMIC = 1L,
	CVMX_SSO_TAG_TYPE_UNTAGGED = 2L,
	CVMX_SSO_TAG_TYPE_EMPTY = 3L
};

enum cvmx_pki_qpg_qos {
	CVMX_PKI_QPG_QOS_NONE = 0,
	CVMX_PKI_QPG_QOS_VLAN,
	CVMX_PKI_QPG_QOS_MPLS,
	CVMX_PKI_QPG_QOS_DSA_SRC,
	CVMX_PKI_QPG_QOS_DIFFSERV,
	CVMX_PKI_QPG_QOS_HIGIG,
};

enum cvmx_pki_wqe_vlan { CVMX_PKI_USE_FIRST_VLAN = 0, CVMX_PKI_USE_SECOND_VLAN };

/**
 * Controls how the PKI statistics counters are handled.
 * The PKI_STAT*_X registers can be indexed either by port kind (pkind) or by
 * final style. (Does not apply to the PKI_STAT_INB* registers.)
 *    0 = X represents the packet's pkind
 *    1 = X represents the low 6 bits of the packet's final style
 */
enum cvmx_pki_stats_mode { CVMX_PKI_STAT_MODE_PKIND, CVMX_PKI_STAT_MODE_STYLE };

enum cvmx_pki_fpa_wait { CVMX_PKI_DROP_PKT, CVMX_PKI_WAIT_PKT };

#define PKI_BELTYPE_E__NONE_M 0x0
#define PKI_BELTYPE_E__MISC_M 0x1
#define PKI_BELTYPE_E__IP4_M  0x2
#define PKI_BELTYPE_E__IP6_M  0x3
#define PKI_BELTYPE_E__TCP_M  0x4
#define PKI_BELTYPE_E__UDP_M  0x5
#define PKI_BELTYPE_E__SCTP_M 0x6
#define PKI_BELTYPE_E__SNAP_M 0x7

/* PKI_BELTYPE_E_t */
enum cvmx_pki_beltype {
	CVMX_PKI_BELTYPE_NONE = PKI_BELTYPE_E__NONE_M,
	CVMX_PKI_BELTYPE_MISC = PKI_BELTYPE_E__MISC_M,
	CVMX_PKI_BELTYPE_IP4 = PKI_BELTYPE_E__IP4_M,
	CVMX_PKI_BELTYPE_IP6 = PKI_BELTYPE_E__IP6_M,
	CVMX_PKI_BELTYPE_TCP = PKI_BELTYPE_E__TCP_M,
	CVMX_PKI_BELTYPE_UDP = PKI_BELTYPE_E__UDP_M,
	CVMX_PKI_BELTYPE_SCTP = PKI_BELTYPE_E__SCTP_M,
	CVMX_PKI_BELTYPE_SNAP = PKI_BELTYPE_E__SNAP_M,
	CVMX_PKI_BELTYPE_MAX = CVMX_PKI_BELTYPE_SNAP
};

struct cvmx_pki_frame_len {
	u16 maxlen;
	u16 minlen;
};

struct cvmx_pki_tag_fields {
	u64 layer_g_src : 1;
	u64 layer_f_src : 1;
	u64 layer_e_src : 1;
	u64 layer_d_src : 1;
	u64 layer_c_src : 1;
	u64 layer_b_src : 1;
	u64 layer_g_dst : 1;
	u64 layer_f_dst : 1;
	u64 layer_e_dst : 1;
	u64 layer_d_dst : 1;
	u64 layer_c_dst : 1;
	u64 layer_b_dst : 1;
	u64 input_port : 1;
	u64 mpls_label : 1;
	u64 first_vlan : 1;
	u64 second_vlan : 1;
	u64 ip_prot_nexthdr : 1;
	u64 tag_sync : 1;
	u64 tag_spi : 1;
	u64 tag_gtp : 1;
	u64 tag_vni : 1;
};

struct cvmx_pki_pkind_parse {
	u64 mpls_en : 1;
	u64 inst_hdr : 1;
	u64 lg_custom : 1;
	u64 fulc_en : 1;
	u64 dsa_en : 1;
	u64 hg2_en : 1;
	u64 hg_en : 1;
};

struct cvmx_pki_pool_config {
	int pool_num;
	cvmx_fpa3_pool_t pool;
	u64 buffer_size;
	u64 buffer_count;
};

struct cvmx_pki_qpg_config {
	int qpg_base;
	int port_add;
	int aura_num;
	int grp_ok;
	int grp_bad;
	int grptag_ok;
	int grptag_bad;
};

struct cvmx_pki_aura_config {
	int aura_num;
	int pool_num;
	cvmx_fpa3_pool_t pool;
	cvmx_fpa3_gaura_t aura;
	int buffer_count;
};

struct cvmx_pki_cluster_grp_config {
	int grp_num;
	u64 cluster_mask; /* Bit mask of clusters assigned to this cluster group */
};

struct cvmx_pki_sso_grp_config {
	int group;
	int priority;
	int weight;
	int affinity;
	u64 core_mask;
	u8 core_mask_set;
};

/* This is a per-style structure for configuring port parameters.
 * It is a kind of profile which can be assigned to any port.
 * If multiple ports are assigned the same style, be aware that modifying
 * that style will modify the respective parameters for all the ports
 * which are using this style.
 */
struct cvmx_pki_style_parm {
	bool ip6_udp_opt;
	bool lenerr_en;
	bool maxerr_en;
	bool minerr_en;
	u8 lenerr_eqpad;
	u8 minmax_sel;
	bool qpg_dis_grptag;
	bool fcs_strip;
	bool fcs_chk;
	bool rawdrp;
	bool force_drop;
	bool nodrop;
	bool qpg_dis_padd;
	bool qpg_dis_grp;
	bool qpg_dis_aura;
	u16 qpg_base;
	enum cvmx_pki_qpg_qos qpg_qos;
	u8 qpg_port_sh;
	u8 qpg_port_msb;
	u8 apad_nip;
	u8 wqe_vs;
	enum cvmx_sso_tag_type tag_type;
	bool pkt_lend;
	u8 wqe_hsz;
	u16 wqe_skip;
	u16 first_skip;
	u16 later_skip;
	enum cvmx_pki_cache_mode cache_mode;
	u8 dis_wq_dat;
	u64 mbuff_size;
	bool len_lg;
	bool len_lf;
	bool len_le;
	bool len_ld;
	bool len_lc;
	bool len_lb;
	bool csum_lg;
	bool csum_lf;
	bool csum_le;
	bool csum_ld;
	bool csum_lc;
	bool csum_lb;
};

/* This is a per-style structure for configuring a port's tag configuration.
 * It is a kind of profile which can be assigned to any port.
 * If multiple ports are assigned the same style, be aware that modifying that
 * style will modify the respective parameters for all the ports which are
 * using this style.
 */
enum cvmx_pki_mtag_ptrsel {
	CVMX_PKI_MTAG_PTRSEL_SOP = 0,
	CVMX_PKI_MTAG_PTRSEL_LA = 8,
	CVMX_PKI_MTAG_PTRSEL_LB = 9,
	CVMX_PKI_MTAG_PTRSEL_LC = 10,
	CVMX_PKI_MTAG_PTRSEL_LD = 11,
	CVMX_PKI_MTAG_PTRSEL_LE = 12,
	CVMX_PKI_MTAG_PTRSEL_LF = 13,
	CVMX_PKI_MTAG_PTRSEL_LG = 14,
	CVMX_PKI_MTAG_PTRSEL_VL = 15,
};

struct cvmx_pki_mask_tag {
	bool enable;
	int base;   /* CVMX_PKI_MTAG_PTRSEL_XXX */
	int offset; /* Offset from base. */
	u64 val;    /* Bitmask: 1 = enable, 0 = disable, for each byte
		     * in the 64-byte array.
		     */
};

struct cvmx_pki_style_tag_cfg {
	struct cvmx_pki_tag_fields tag_fields;
	struct cvmx_pki_mask_tag mask_tag[4];
};

struct cvmx_pki_style_config {
	struct cvmx_pki_style_parm parm_cfg;
	struct cvmx_pki_style_tag_cfg tag_cfg;
};

struct cvmx_pki_pkind_config {
	u8 cluster_grp;
	bool fcs_pres;
	struct cvmx_pki_pkind_parse parse_en;
	enum cvmx_pki_pkind_parse_mode initial_parse_mode;
	u8 fcs_skip;
	u8 inst_skip;
	int initial_style;
	bool custom_l2_hdr;
	u8 l2_scan_offset;
	u64 lg_scan_offset;
};

struct cvmx_pki_port_config {
	struct cvmx_pki_pkind_config pkind_cfg;
	struct cvmx_pki_style_config style_cfg;
};

struct cvmx_pki_global_parse {
	u64 virt_pen : 1;
	u64 clg_pen : 1;
	u64 cl2_pen : 1;
	u64 l4_pen : 1;
	u64 il3_pen : 1;
	u64 l3_pen : 1;
	u64 mpls_pen : 1;
	u64 fulc_pen : 1;
	u64 dsa_pen : 1;
	u64 hg_pen : 1;
};

struct cvmx_pki_tag_sec {
	u16 dst6;
	u16 src6;
	u16 dst;
	u16 src;
};

struct cvmx_pki_global_config {
	u64 cluster_mask[CVMX_PKI_NUM_CLUSTER_GROUP_MAX];
	enum cvmx_pki_stats_mode stat_mode;
	enum cvmx_pki_fpa_wait fpa_wait;
	struct cvmx_pki_global_parse gbl_pen;
	struct cvmx_pki_tag_sec tag_secret;
	struct cvmx_pki_frame_len frm_len[CVMX_PKI_NUM_FRAME_CHECK];
	enum cvmx_pki_beltype ltype_map[CVMX_PKI_NUM_BELTYPE];
	int pki_enable;
};

#define CVMX_PKI_PCAM_TERM_E_NONE_M      0x0
#define CVMX_PKI_PCAM_TERM_E_L2_CUSTOM_M 0x2
#define CVMX_PKI_PCAM_TERM_E_HIGIGD_M    0x4
#define CVMX_PKI_PCAM_TERM_E_HIGIG_M     0x5
#define CVMX_PKI_PCAM_TERM_E_SMACH_M     0x8
#define CVMX_PKI_PCAM_TERM_E_SMACL_M     0x9
#define CVMX_PKI_PCAM_TERM_E_DMACH_M     0xA
#define CVMX_PKI_PCAM_TERM_E_DMACL_M     0xB
#define CVMX_PKI_PCAM_TERM_E_GLORT_M     0x12
#define CVMX_PKI_PCAM_TERM_E_DSA_M       0x13
#define CVMX_PKI_PCAM_TERM_E_ETHTYPE0_M  0x18
#define CVMX_PKI_PCAM_TERM_E_ETHTYPE1_M  0x19
#define CVMX_PKI_PCAM_TERM_E_ETHTYPE2_M  0x1A
#define CVMX_PKI_PCAM_TERM_E_ETHTYPE3_M  0x1B
#define CVMX_PKI_PCAM_TERM_E_MPLS0_M     0x1E
#define CVMX_PKI_PCAM_TERM_E_L3_SIPHH_M  0x1F
#define CVMX_PKI_PCAM_TERM_E_L3_SIPMH_M  0x20
#define CVMX_PKI_PCAM_TERM_E_L3_SIPML_M  0x21
#define CVMX_PKI_PCAM_TERM_E_L3_SIPLL_M  0x22
#define CVMX_PKI_PCAM_TERM_E_L3_FLAGS_M  0x23
#define CVMX_PKI_PCAM_TERM_E_L3_DIPHH_M  0x24
#define CVMX_PKI_PCAM_TERM_E_L3_DIPMH_M  0x25
#define CVMX_PKI_PCAM_TERM_E_L3_DIPML_M  0x26
#define CVMX_PKI_PCAM_TERM_E_L3_DIPLL_M  0x27
#define CVMX_PKI_PCAM_TERM_E_LD_VNI_M    0x28
#define CVMX_PKI_PCAM_TERM_E_IL3_FLAGS_M 0x2B
#define CVMX_PKI_PCAM_TERM_E_LF_SPI_M    0x2E
#define CVMX_PKI_PCAM_TERM_E_L4_SPORT_M  0x2f
#define CVMX_PKI_PCAM_TERM_E_L4_PORT_M   0x30
#define CVMX_PKI_PCAM_TERM_E_LG_CUSTOM_M 0x39

enum cvmx_pki_term {
	CVMX_PKI_PCAM_TERM_NONE = CVMX_PKI_PCAM_TERM_E_NONE_M,
	CVMX_PKI_PCAM_TERM_L2_CUSTOM = CVMX_PKI_PCAM_TERM_E_L2_CUSTOM_M,
	CVMX_PKI_PCAM_TERM_HIGIGD = CVMX_PKI_PCAM_TERM_E_HIGIGD_M,
	CVMX_PKI_PCAM_TERM_HIGIG = CVMX_PKI_PCAM_TERM_E_HIGIG_M,
	CVMX_PKI_PCAM_TERM_SMACH = CVMX_PKI_PCAM_TERM_E_SMACH_M,
	CVMX_PKI_PCAM_TERM_SMACL = CVMX_PKI_PCAM_TERM_E_SMACL_M,
	CVMX_PKI_PCAM_TERM_DMACH = CVMX_PKI_PCAM_TERM_E_DMACH_M,
	CVMX_PKI_PCAM_TERM_DMACL = CVMX_PKI_PCAM_TERM_E_DMACL_M,
	CVMX_PKI_PCAM_TERM_GLORT = CVMX_PKI_PCAM_TERM_E_GLORT_M,
	CVMX_PKI_PCAM_TERM_DSA = CVMX_PKI_PCAM_TERM_E_DSA_M,
	CVMX_PKI_PCAM_TERM_ETHTYPE0 = CVMX_PKI_PCAM_TERM_E_ETHTYPE0_M,
	CVMX_PKI_PCAM_TERM_ETHTYPE1 = CVMX_PKI_PCAM_TERM_E_ETHTYPE1_M,
	CVMX_PKI_PCAM_TERM_ETHTYPE2 = CVMX_PKI_PCAM_TERM_E_ETHTYPE2_M,
	CVMX_PKI_PCAM_TERM_ETHTYPE3 = CVMX_PKI_PCAM_TERM_E_ETHTYPE3_M,
	CVMX_PKI_PCAM_TERM_MPLS0 = CVMX_PKI_PCAM_TERM_E_MPLS0_M,
	CVMX_PKI_PCAM_TERM_L3_SIPHH = CVMX_PKI_PCAM_TERM_E_L3_SIPHH_M,
	CVMX_PKI_PCAM_TERM_L3_SIPMH = CVMX_PKI_PCAM_TERM_E_L3_SIPMH_M,
	CVMX_PKI_PCAM_TERM_L3_SIPML = CVMX_PKI_PCAM_TERM_E_L3_SIPML_M,
	CVMX_PKI_PCAM_TERM_L3_SIPLL = CVMX_PKI_PCAM_TERM_E_L3_SIPLL_M,
	CVMX_PKI_PCAM_TERM_L3_FLAGS = CVMX_PKI_PCAM_TERM_E_L3_FLAGS_M,
	CVMX_PKI_PCAM_TERM_L3_DIPHH = CVMX_PKI_PCAM_TERM_E_L3_DIPHH_M,
	CVMX_PKI_PCAM_TERM_L3_DIPMH = CVMX_PKI_PCAM_TERM_E_L3_DIPMH_M,
	CVMX_PKI_PCAM_TERM_L3_DIPML = CVMX_PKI_PCAM_TERM_E_L3_DIPML_M,
	CVMX_PKI_PCAM_TERM_L3_DIPLL = CVMX_PKI_PCAM_TERM_E_L3_DIPLL_M,
	CVMX_PKI_PCAM_TERM_LD_VNI = CVMX_PKI_PCAM_TERM_E_LD_VNI_M,
	CVMX_PKI_PCAM_TERM_IL3_FLAGS = CVMX_PKI_PCAM_TERM_E_IL3_FLAGS_M,
	CVMX_PKI_PCAM_TERM_LF_SPI = CVMX_PKI_PCAM_TERM_E_LF_SPI_M,
	CVMX_PKI_PCAM_TERM_L4_PORT = CVMX_PKI_PCAM_TERM_E_L4_PORT_M,
	CVMX_PKI_PCAM_TERM_L4_SPORT = CVMX_PKI_PCAM_TERM_E_L4_SPORT_M,
	CVMX_PKI_PCAM_TERM_LG_CUSTOM = CVMX_PKI_PCAM_TERM_E_LG_CUSTOM_M
};

#define CVMX_PKI_DMACH_SHIFT      32
#define CVMX_PKI_DMACH_MASK       cvmx_build_mask(16)
#define CVMX_PKI_DMACL_MASK       CVMX_PKI_DATA_MASK_32
#define CVMX_PKI_DATA_MASK_32     cvmx_build_mask(32)
#define CVMX_PKI_DATA_MASK_16     cvmx_build_mask(16)
#define CVMX_PKI_DMAC_MATCH_EXACT cvmx_build_mask(48)

struct cvmx_pki_pcam_input {
	u64 style;
	u64 style_mask; /* bits: 1 = match, 0 = don't care */
	enum cvmx_pki_term field;
	u32 field_mask; /* bits: 1 = match, 0 = don't care */
	u64 data;
	u64 data_mask; /* bits: 1 = match, 0 = don't care */
};

struct cvmx_pki_pcam_action {
	enum cvmx_pki_parse_mode_chg parse_mode_chg;
	enum cvmx_pki_layer_type layer_type_set;
	int style_add;
	int parse_flag_set;
	int pointer_advance;
};

struct cvmx_pki_pcam_config {
	int in_use;
	int entry_num;
	u64 cluster_mask;
	struct cvmx_pki_pcam_input pcam_input;
	struct cvmx_pki_pcam_action pcam_action;
};

/**
 * Status statistics for a port
 */
struct cvmx_pki_port_stats {
	u64 dropped_octets;
	u64 dropped_packets;
	u64 pci_raw_packets;
	u64 octets;
	u64 packets;
	u64 multicast_packets;
	u64 broadcast_packets;
	u64 len_64_packets;
	u64 len_65_127_packets;
	u64 len_128_255_packets;
	u64 len_256_511_packets;
	u64 len_512_1023_packets;
	u64 len_1024_1518_packets;
	u64 len_1519_max_packets;
	u64 fcs_align_err_packets;
	u64 runt_packets;
	u64 runt_crc_packets;
	u64 oversize_packets;
	u64 oversize_crc_packets;
	u64 inb_packets;
	u64 inb_octets;
	u64 inb_errors;
	u64 mcast_l2_red_packets;
	u64 bcast_l2_red_packets;
	u64 mcast_l3_red_packets;
	u64 bcast_l3_red_packets;
};

/**
 * PKI Packet Instruction Header Structure (PKI_INST_HDR_S)
 */
typedef union {
	u64 u64;
	struct {
		u64 w : 1;    /* INST_HDR size: 0 = 2 bytes, 1 = 4 or 8 bytes */
		u64 raw : 1;  /* RAW packet indicator in WQE[RAW]: 1 = enable */
		u64 utag : 1; /* Use INST_HDR[TAG] to compute WQE[TAG]: 1 = enable */
		u64 uqpg : 1; /* Use INST_HDR[QPG] to compute QPG: 1 = enable */
		u64 rsvd1 : 1;
		u64 pm : 3; /* Packet parsing mode. Legal values = 0x0..0x7 */
		u64 sl : 8; /* Number of bytes in INST_HDR. */
		/* The following fields are not present, if INST_HDR[W] = 0: */
		u64 utt : 1; /* Use INST_HDR[TT] to compute WQE[TT]: 1 = enable */
		u64 tt : 2;  /* INST_HDR[TT] => WQE[TT], if INST_HDR[UTT] = 1 */
		u64 rsvd2 : 2;
		u64 qpg : 11; /* INST_HDR[QPG] => QPG, if INST_HDR[UQPG] = 1 */
		u64 tag : 32; /* INST_HDR[TAG] => WQE[TAG], if INST_HDR[UTAG] = 1 */
	} s;
} cvmx_pki_inst_hdr_t;
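
/*
 * Illustrative sketch (not part of the SDK API): building a long-form
 * instruction header word through the bit-field view declared above. The
 * values chosen here (tag, QPG, tag type) are arbitrary example values.
 *
 *	cvmx_pki_inst_hdr_t hdr;
 *
 *	hdr.u64 = 0;
 *	hdr.s.w = 1;			// long (4/8-byte) INST_HDR form
 *	hdr.s.utag = 1;			// take WQE[TAG] from INST_HDR[TAG]
 *	hdr.s.tag = 0x12345678;
 *	hdr.s.uqpg = 1;			// take QPG from INST_HDR[QPG]
 *	hdr.s.qpg = 0;
 *	hdr.s.utt = 1;			// take WQE[TT] from INST_HDR[TT]
 *	hdr.s.tt = CVMX_SSO_TAG_TYPE_ORDERED;
 */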

/**
 * This function assigns clusters to a cluster group; a pkind can later be
 * configured to use that group, depending on how many clusters the pkind
 * needs. A given cluster can only be enabled in a single cluster group.
 * The number of clusters assigned to the group determines how many engines
 * can work in parallel to process the packet. Each cluster can process
 * x MPPS.
 *
 * @param node  Node
 * @param cluster_group Group to attach clusters to.
 * @param cluster_mask The mask of clusters which need to be assigned to the group.
 */
static inline int cvmx_pki_attach_cluster_to_group(int node, u64 cluster_group, u64 cluster_mask)
{
	cvmx_pki_icgx_cfg_t pki_cl_grp;

	if (cluster_group >= CVMX_PKI_NUM_CLUSTER_GROUP) {
		debug("ERROR: config cluster group %d", (int)cluster_group);
		return -1;
	}
	pki_cl_grp.u64 = cvmx_read_csr_node(node, CVMX_PKI_ICGX_CFG(cluster_group));
	pki_cl_grp.s.clusters = cluster_mask;
	cvmx_write_csr_node(node, CVMX_PKI_ICGX_CFG(cluster_group), pki_cl_grp.u64);
	return 0;
}

static inline void cvmx_pki_write_global_parse(int node, struct cvmx_pki_global_parse gbl_pen)
{
	cvmx_pki_gbl_pen_t gbl_pen_reg;

	gbl_pen_reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_GBL_PEN);
	gbl_pen_reg.s.virt_pen = gbl_pen.virt_pen;
	gbl_pen_reg.s.clg_pen = gbl_pen.clg_pen;
	gbl_pen_reg.s.cl2_pen = gbl_pen.cl2_pen;
	gbl_pen_reg.s.l4_pen = gbl_pen.l4_pen;
	gbl_pen_reg.s.il3_pen = gbl_pen.il3_pen;
	gbl_pen_reg.s.l3_pen = gbl_pen.l3_pen;
	gbl_pen_reg.s.mpls_pen = gbl_pen.mpls_pen;
	gbl_pen_reg.s.fulc_pen = gbl_pen.fulc_pen;
	gbl_pen_reg.s.dsa_pen = gbl_pen.dsa_pen;
	gbl_pen_reg.s.hg_pen = gbl_pen.hg_pen;
	cvmx_write_csr_node(node, CVMX_PKI_GBL_PEN, gbl_pen_reg.u64);
}

static inline void cvmx_pki_write_tag_secret(int node, struct cvmx_pki_tag_sec tag_secret)
{
	cvmx_pki_tag_secret_t tag_secret_reg;

	tag_secret_reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_TAG_SECRET);
	tag_secret_reg.s.dst6 = tag_secret.dst6;
	tag_secret_reg.s.src6 = tag_secret.src6;
	tag_secret_reg.s.dst = tag_secret.dst;
	tag_secret_reg.s.src = tag_secret.src;
	cvmx_write_csr_node(node, CVMX_PKI_TAG_SECRET, tag_secret_reg.u64);
}

static inline void cvmx_pki_write_ltype_map(int node, enum cvmx_pki_layer_type layer,
					    enum cvmx_pki_beltype backend)
{
	cvmx_pki_ltypex_map_t ltype_map;

	if (layer > CVMX_PKI_LTYPE_E_MAX || backend > CVMX_PKI_BELTYPE_MAX) {
		debug("ERROR: invalid ltype beltype mapping\n");
		return;
	}
	ltype_map.u64 = cvmx_read_csr_node(node, CVMX_PKI_LTYPEX_MAP(layer));
	ltype_map.s.beltype = backend;
	cvmx_write_csr_node(node, CVMX_PKI_LTYPEX_MAP(layer), ltype_map.u64);
}
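
/*
 * Usage sketch (illustrative only): remap a layer type to the back-end type
 * used for checksum/length handling. The CVMX_PKI_LTYPE_E_* values come from
 * the register definitions elsewhere in the SDK headers, so the exact
 * constant name used below is an assumption.
 *
 *	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_TCP,
 *				 CVMX_PKI_BELTYPE_TCP);
 */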

/**
 * This function enables the cluster group to start parsing.
 *
 * @param node    Node number.
 * @param cl_grp  Cluster group to enable parsing.
 */
static inline int cvmx_pki_parse_enable(int node, unsigned int cl_grp)
{
	cvmx_pki_icgx_cfg_t pki_cl_grp;

	if (cl_grp >= CVMX_PKI_NUM_CLUSTER_GROUP) {
		debug("ERROR: pki parse en group %d", (int)cl_grp);
		return -1;
	}
	pki_cl_grp.u64 = cvmx_read_csr_node(node, CVMX_PKI_ICGX_CFG(cl_grp));
	pki_cl_grp.s.pena = 1;
	cvmx_write_csr_node(node, CVMX_PKI_ICGX_CFG(cl_grp), pki_cl_grp.u64);
	return 0;
}
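
/*
 * Bring-up sketch (illustrative only, not the canonical init sequence):
 * attach clusters 0-1 to cluster group 0, then enable parsing on that group.
 * The cluster mask and group number are arbitrary example values.
 *
 *	int node = 0;
 *
 *	if (cvmx_pki_attach_cluster_to_group(node, 0, 0x3) == 0)
 *		cvmx_pki_parse_enable(node, 0);
 */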

/**
 * This function enables the PKI to send bpid level backpressure to CN78XX inputs.
 *
 * @param node Node number.
 */
static inline void cvmx_pki_enable_backpressure(int node)
{
	cvmx_pki_buf_ctl_t pki_buf_ctl;

	pki_buf_ctl.u64 = cvmx_read_csr_node(node, CVMX_PKI_BUF_CTL);
	pki_buf_ctl.s.pbp_en = 1;
	cvmx_write_csr_node(node, CVMX_PKI_BUF_CTL, pki_buf_ctl.u64);
}

/**
 * Clear the statistics counters for a port.
 *
 * @param node Node number.
 * @param port Port number (ipd_port) to clear statistics for.
 *    Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats.
 */
void cvmx_pki_clear_port_stats(int node, u64 port);

/**
 * Get the statistics counters for an index from the PKI.
 *
 * @param node    Node number.
 * @param index   PKIND number if PKI_STATS_CTL:mode = 0, or
 *     style (flow) number if PKI_STATS_CTL:mode = 1.
 * @param status  Where to put the results.
 */
void cvmx_pki_get_stats(int node, int index, struct cvmx_pki_port_stats *status);

/**
 * Get the statistics counters for a port.
 *
 * @param node   Node number
 * @param port   Port number (ipd_port) to get statistics for.
 *    Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats.
 * @param status Where to put the results.
 */
static inline void cvmx_pki_get_port_stats(int node, u64 port, struct cvmx_pki_port_stats *status)
{
	int xipd = cvmx_helper_node_to_ipd_port(node, port);
	int xiface = cvmx_helper_get_interface_num(xipd);
	int index = cvmx_helper_get_interface_index_num(port);
	int pknd = cvmx_helper_get_pknd(xiface, index);

	cvmx_pki_get_stats(node, pknd, status);
}
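
/*
 * Usage sketch (illustrative only): read the per-pkind counters for a port
 * and inspect a few fields. Assumes PKI_STATS_CTL:mode = 0 and that "port"
 * is a valid ipd_port on this node.
 *
 *	struct cvmx_pki_port_stats stats;
 *
 *	cvmx_pki_get_port_stats(node, port, &stats);
 *	printf("rx packets: %llu, dropped: %llu\n",
 *	       (unsigned long long)stats.packets,
 *	       (unsigned long long)stats.dropped_packets);
 */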

/**
 * Get the statistics counters for a flow represented by style in PKI.
 *
 * @param node Node number.
 * @param style_num Style number to get statistics for.
 *    Make sure PKI_STATS_CTL:mode is set to 1 for collecting per style/flow stats.
 * @param status Where to put the results.
 */
static inline void cvmx_pki_get_flow_stats(int node, u64 style_num,
					   struct cvmx_pki_port_stats *status)
{
	cvmx_pki_get_stats(node, style_num, status);
}

/**
 * Show integrated PKI configuration.
 *
 * @param node     node number
 */
int cvmx_pki_config_dump(unsigned int node);

/**
 * Show integrated PKI statistics.
 *
 * @param node     node number
 */
int cvmx_pki_stats_dump(unsigned int node);

/**
 * Clear PKI statistics.
 *
 * @param node     node number
 */
void cvmx_pki_stats_clear(unsigned int node);

/**
 * This function enables PKI.
 *
 * @param node   node to enable pki in.
 */
void cvmx_pki_enable(int node);

/**
 * This function disables PKI.
 *
 * @param node  node to disable pki in.
 */
void cvmx_pki_disable(int node);

/**
 * This function soft resets PKI.
 *
 * @param node  node to reset pki in.
 */
void cvmx_pki_reset(int node);

/**
 * This function sets up the clusters in PKI.
 *
 * @param node  node to set clusters in.
 */
int cvmx_pki_setup_clusters(int node);

/**
 * This function reads the global configuration of the PKI block.
 *
 * @param node    Node number.
 * @param gbl_cfg Pointer to struct to read the global configuration into.
 */
void cvmx_pki_read_global_config(int node, struct cvmx_pki_global_config *gbl_cfg);

/**
 * This function writes the global configuration of PKI into hw.
 *
 * @param node    Node number.
 * @param gbl_cfg Pointer to struct containing the global configuration to write.
 */
void cvmx_pki_write_global_config(int node, struct cvmx_pki_global_config *gbl_cfg);
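
/*
 * Usage sketch (illustrative only): read-modify-write of the global PKI
 * configuration, switching the statistics counters to per-style indexing.
 *
 *	struct cvmx_pki_global_config gbl_cfg;
 *
 *	cvmx_pki_read_global_config(node, &gbl_cfg);
 *	gbl_cfg.stat_mode = CVMX_PKI_STAT_MODE_STYLE;
 *	cvmx_pki_write_global_config(node, &gbl_cfg);
 */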

/**
 * This function reads the per-pkind parameters in hardware which define how
 * the incoming packet is processed.
 *
 * @param node   Node number.
 * @param pkind  PKI supports a large number of incoming interfaces, and packets
 *     arriving on different interfaces or channels may need to be processed
 *     differently. PKI uses the pkind to determine how the incoming packet
 *     is processed.
 * @param pkind_cfg     Pointer to struct containing the pkind configuration read
 *     from hardware.
 */
int cvmx_pki_read_pkind_config(int node, int pkind, struct cvmx_pki_pkind_config *pkind_cfg);

/**
 * This function writes the per-pkind parameters in hardware which define how
 * the incoming packet is processed.
 *
 * @param node   Node number.
 * @param pkind  PKI supports a large number of incoming interfaces, and packets
 *     arriving on different interfaces or channels may need to be processed
 *     differently. PKI uses the pkind to determine how the incoming packet
 *     is processed.
 * @param pkind_cfg     Pointer to struct containing the pkind configuration to
 *     be written to hardware.
 */
int cvmx_pki_write_pkind_config(int node, int pkind, struct cvmx_pki_pkind_config *pkind_cfg);

/**
 * This function reads parameters associated with tag configuration in hardware.
 *
 * @param node   Node number.
 * @param style  Style to read the tag configuration for.
 * @param cluster_mask  Mask of clusters to read the style from.
 * @param tag_cfg  Pointer to tag configuration struct.
 */
void cvmx_pki_read_tag_config(int node, int style, u64 cluster_mask,
			      struct cvmx_pki_style_tag_cfg *tag_cfg);

/**
 * This function writes/configures parameters associated with tag
 * configuration in hardware.
 *
 * @param node  Node number.
 * @param style  Style to configure tag for.
 * @param cluster_mask  Mask of clusters to configure the style for.
 * @param tag_cfg  Pointer to tag configuration struct.
 */
void cvmx_pki_write_tag_config(int node, int style, u64 cluster_mask,
			       struct cvmx_pki_style_tag_cfg *tag_cfg);

/**
 * This function reads parameters associated with a style in hardware.
 *
 * @param node  Node number.
 * @param style  Style to read from.
 * @param cluster_mask  Mask of clusters the style belongs to.
 * @param style_cfg  Pointer to style config struct.
 */
void cvmx_pki_read_style_config(int node, int style, u64 cluster_mask,
				struct cvmx_pki_style_config *style_cfg);

/**
 * This function writes/configures parameters associated with a style in hardware.
 *
 * @param node  Node number.
 * @param style  Style to configure.
 * @param cluster_mask  Mask of clusters to configure the style for.
 * @param style_cfg  Pointer to style config struct.
 */
void cvmx_pki_write_style_config(int node, u64 style, u64 cluster_mask,
				 struct cvmx_pki_style_config *style_cfg);
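
/*
 * Usage sketch (illustrative only): read-modify-write of a style, enabling
 * FCS checking and stripping on all clusters. The style number is assumed to
 * be a valid final style on this node.
 *
 *	struct cvmx_pki_style_config style_cfg;
 *
 *	cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &style_cfg);
 *	style_cfg.parm_cfg.fcs_chk = true;
 *	style_cfg.parm_cfg.fcs_strip = true;
 *	cvmx_pki_write_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &style_cfg);
 */
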
/**
 * This function reads the qpg entry at the specified offset from the qpg table.
 *
 * @param node  Node number.
 * @param offset  Offset in qpg table to read from.
 * @param qpg_cfg  Pointer to structure containing qpg values.
 */
int cvmx_pki_read_qpg_entry(int node, int offset, struct cvmx_pki_qpg_config *qpg_cfg);

/**
 * This function writes the qpg entry at the specified offset in the qpg table.
 *
 * @param node  Node number.
 * @param offset  Offset in qpg table to write to.
 * @param qpg_cfg  Pointer to structure containing qpg values.
 */
void cvmx_pki_write_qpg_entry(int node, int offset, struct cvmx_pki_qpg_config *qpg_cfg);
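
/*
 * Usage sketch (illustrative only): program one QPG entry. The aura and SSO
 * group numbers are arbitrary example values; the exact meaning of each
 * field follows the QPG table layout described in the hardware manual.
 *
 *	struct cvmx_pki_qpg_config qpg_cfg = {};
 *
 *	qpg_cfg.aura_num = 0;	// example aura
 *	qpg_cfg.grp_ok = 0;	// SSO group for good packets
 *	qpg_cfg.grp_bad = 0;	// SSO group for errored packets
 *	cvmx_pki_write_qpg_entry(node, qpg_offset, &qpg_cfg);
 */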

/**
 * This function writes a pcam entry at the given offset in the pcam table in hardware.
 *
 * @param node  Node number.
 * @param index  Offset in pcam table.
 * @param cluster_mask  Mask of clusters in which to write the pcam entry.
 * @param input  Input keys to pcam match passed as struct.
 * @param action  PCAM match action passed as struct.
 */
int cvmx_pki_pcam_write_entry(int node, int index, u64 cluster_mask,
			      struct cvmx_pki_pcam_input input, struct cvmx_pki_pcam_action action);
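
/*
 * Usage sketch (illustrative only): add a PCAM rule on all clusters that
 * matches Ethertype term 0 and bumps the final style. The data/data_mask
 * encoding of the matched bytes is term-specific and hardware-defined, so
 * the values below are placeholders rather than a verified encoding.
 *
 *	struct cvmx_pki_pcam_input input = {};
 *	struct cvmx_pki_pcam_action action = {};
 *
 *	input.style = base_style;
 *	input.style_mask = 0xff;		// match this style exactly
 *	input.field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
 *	input.field_mask = 0xff;
 *	input.data = ethtype_match_data;	// term-specific encoding
 *	input.data_mask = ethtype_match_mask;
 *	action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
 *	action.style_add = 1;			// matching packets use base_style + 1
 *	cvmx_pki_pcam_write_entry(node, entry_index, CVMX_PKI_CLUSTER_ALL,
 *				  input, action);
 */
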
/**
 * Configures the channel which will receive backpressure from the specified bpid.
 * Each channel listens for backpressure on a specific bpid.
 * Each bpid can backpressure multiple channels.
 *
 * @param node  Node number.
 * @param bpid  BPID from which the channel will receive backpressure.
 * @param channel  Channel number to receive backpressure.
 */
int cvmx_pki_write_channel_bpid(int node, int channel, int bpid);

/**
 * Configures the bpid on which the specified aura will assert backpressure.
 * Each bpid receives backpressure from auras.
 * Multiple auras can backpressure a single bpid.
 *
 * @param node  Node number.
 * @param aura  Aura which will assert backpressure on that bpid.
 * @param bpid  BPID to assert backpressure on.
 */
int cvmx_pki_write_aura_bpid(int node, int aura, int bpid);

/**
 * Enables/disables QoS (RED drop, tail drop & backpressure) for the PKI aura.
 *
 * @param node  Node number
 * @param aura  Aura to enable/disable QoS on.
 * @param ena_red  Enable/disable RED drop between pass and drop level
 *    1-enable 0-disable
 * @param ena_drop  Enable/disable tail drop when max drop level is exceeded
 *    1-enable 0-disable
 * @param ena_bp  Enable/disable asserting backpressure on bpid when
 *    max DROP level is exceeded.
 *    1-enable 0-disable
 */
int cvmx_pki_enable_aura_qos(int node, int aura, bool ena_red, bool ena_drop, bool ena_bp);

/**
 * This function gives the initial style used by that pkind.
 *
 * @param node  Node number.
 * @param pkind  PKIND number.
 */
int cvmx_pki_get_pkind_style(int node, int pkind);

/**
 * This function sets the wqe buffer mode. The first packet data buffer can
 * reside either in the same buffer as the wqe OR it can go in a separate
 * buffer. If the latter mode is used, make sure software allocates enough
 * buffers to keep the wqe separate from the packet data.
 *
 * @param node  Node number.
 * @param style  Style to configure.
 * @param pkt_outside_wqe
 *    0 = The packet link pointer will be at word [FIRST_SKIP] immediately
 *    followed by packet data, in the same buffer as the work queue entry.
 *    1 = The packet link pointer will be at word [FIRST_SKIP] in a new
 *    buffer separate from the work queue entry. Words following the
 *    WQE in the same cache line will be zeroed, other lines in the
 *    buffer will not be modified and will retain stale data (from the
 *    buffer's previous use). This setting may decrease the peak PKI
 *    performance by up to half on small packets.
 */
void cvmx_pki_set_wqe_mode(int node, u64 style, bool pkt_outside_wqe);

/**
 * This function sets the packet mode of all ports and styles to little-endian.
 * It changes write operations of packet data to L2C to
 * be in little-endian. It does not change the WQE header format, which is
 * properly endian neutral.
 *
 * @param node  Node number.
 * @param style  Style to configure.
 */
void cvmx_pki_set_little_endian(int node, u64 style);

/**
 * Enables/disables the L2 length error check and the max & min frame length checks.
 *
 * @param node  Node number.
 * @param pknd  PKIND to enable/disable the checks for.
 * @param l2len_err     L2 length error check enable.
 * @param maxframe_err  Max frame error check enable.
 * @param minframe_err  Min frame error check enable.
 *    1 -- Enable error checks
 *    0 -- Disable error checks
 */
void cvmx_pki_endis_l2_errs(int node, int pknd, bool l2len_err, bool maxframe_err,
			    bool minframe_err);

/**
 * Enables/disables fcs check and fcs stripping on the pkind.
 *
 * @param node  Node number.
 * @param pknd  PKIND to apply settings on.
 * @param fcs_chk  Enable/disable fcs check.
 *    1 -- enable fcs error check.
 *    0 -- disable fcs error check.
 * @param fcs_strip  Strip L2 FCS bytes from packet, decrease WQE[LEN] by 4 bytes
 *    1 -- strip L2 FCS.
 *    0 -- Do not strip L2 FCS.
 */
void cvmx_pki_endis_fcs_check(int node, int pknd, bool fcs_chk, bool fcs_strip);

/**
 * This function shows the qpg table entries, read directly from hardware.
 *
 * @param node  Node number.
 * @param num_entry  Number of entries to print.
 */
void cvmx_pki_show_qpg_entries(int node, u16 num_entry);

/**
 * This function shows the pcam table in raw format, read directly from hardware.
 *
 * @param node  Node number.
 */
void cvmx_pki_show_pcam_entries(int node);

/**
 * This function shows the valid entries in readable format,
 * read directly from hardware.
 *
 * @param node  Node number.
 */
void cvmx_pki_show_valid_pcam_entries(int node);

/**
 * This function shows the pkind attributes in readable format,
 * read directly from hardware.
 *
 * @param node  Node number.
 * @param pkind  PKIND number to print.
 */
void cvmx_pki_show_pkind_attributes(int node, int pkind);
 958/**
 959 * @INTERNAL
 960 * This function is called by cvmx_helper_shutdown() to extract all FPA buffers
 961 * out of the PKI. After this function completes, all FPA buffers that were
 962 * prefetched by PKI will be in the appropriate FPA pool.
 963 * This functions does not reset the PKI.
 964 * WARNING: It is very important that PKI be reset soon after a call to this function.
 965 *
 966 * @param node  Node number.
 967 */
 968void __cvmx_pki_free_ptr(int node);

#endif