linux/drivers/net/ethernet/mediatek/mtk_ppe.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

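/* MMIO register helpers: ppe_m32() is the read-modify-write primitive,
 * ppe_set() and ppe_clear() are convenience wrappers that only set or
 * only clear the given bits.
 */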
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy\n");

	return ret;
}

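/* Pulse the CLEAR bit to invalidate the PPE lookup cache; this is done
 * whenever the FOE table is modified so that stale cached entries are
 * never used.
 */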
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

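/* Fold the three per-type hash inputs into a table index. The bitwise
 * select (hv1 & hv2) | (~hv1 & hv3) picks each bit from hv2 or hv3
 * depending on hv1, the result is rotated left by 8 and XOR-folded, and
 * the final left shift by one makes every hash even: each hash addresses
 * a bucket of two consecutive entries, which mtk_foe_entry_commit()
 * exploits by falling back to hash + 1 on collision.
 */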
static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
	case MTK_PPE_PKT_TYPE_BRIDGE:
		hv1 = e->bridge.src_mac_lo;
		hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
		hv2 = e->bridge.src_mac_hi >> 16;
		hv2 ^= e->bridge.dest_mac_lo;
		hv3 = e->bridge.dest_mac_hi;
		break;
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= 1;
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

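/* Packet types at or above MTK_PPE_PKT_TYPE_IPV4_DSLITE use the larger
 * IPv6 entry layout, so the L2 info and the ib2 word live at different
 * offsets; these helpers hide that union layout from the callers below.
 */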
static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

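/* Build the invariant parts of a FOE entry: state, packet type, L2
 * addresses and the ib2 forwarding word. A typical caller chains the
 * helpers in this file roughly as follows (illustrative sketch only;
 * the variables are hypothetical and error handling is omitted):
 *
 *	struct mtk_foe_entry foe;
 *	int hash;
 *
 *	mtk_foe_entry_prepare(&foe, MTK_PPE_PKT_TYPE_IPV4_HNAPT, IPPROTO_TCP,
 *			      pse_port, src_mac, dest_mac);
 *	mtk_foe_entry_set_ipv4_tuple(&foe, false, src_ip, src_port,
 *				     dest_ip, dest_port);
 *	mtk_foe_entry_set_ipv4_tuple(&foe, true, new_src_ip, new_src_port,
 *				     new_dest_ip, new_dest_port);
 *	hash = mtk_foe_entry_commit(ppe, &foe, timestamp);
 */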
int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
			  u8 pse_port, u8 *src_mac, u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
	      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
	      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
	      MTK_FOE_IB1_BIND_TTL |
	      MTK_FOE_IB1_BIND_CACHE;
	entry->ib1 = val;

	val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
	      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
	      FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);

	if (is_multicast_ether_addr(dest_mac))
		val |= MTK_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(entry);
	u32 val;

	val = *ib2;
	val &= ~MTK_FOE_IB2_DEST_PORT;
	val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	*ib2 = val;

	return 0;
}

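/* For HNAPT entries the egress flag selects the translated (post-NAT)
 * tuple in ipv4.new; all other IPv4 types only carry the original tuple.
 * Plain IPv4 route entries have no L4 ports, and for 6RD the IPv4 pair
 * describes the tunnel endpoints instead.
 */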
int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

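/* DS-Lite entries store the IPv6 pair as tunnel endpoints; 5-tuple and
 * 6RD entries additionally record the L4 ports before falling through
 * to the shared address copy.
 */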
int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

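/* With DSA, l2->etype carries the MTK special tag (BIT(port)) rather
 * than an ethertype. If no VLAN layer is bound yet, one layer is
 * reserved for the tag; otherwise BIT(8) is set in the tag, which
 * presumably signals a following VLAN header.
 */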
int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;

	return 0;
}

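/* Bind up to two VLAN layers: layer 0 fills vlan1; layer 1 either fills
 * vlan1 (when the layer was only reserved by mtk_foe_entry_set_dsa())
 * or fills vlan2 and bumps the layer count; anything beyond that
 * returns -ENOSPC.
 */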
int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
	case 0:
		entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
			      FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

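/* PPPoE encapsulation: record the session id and, unless a VLAN layer
 * was reserved without a tag (the DSA case, where etype holds the
 * special tag), switch the ethertype to ETH_P_PPP_SES.
 */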
int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
	    (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
	l2->pppoe_id = sid;

	return 0;
}

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}

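/* Publish an entry to the hardware table. The data words are written
 * first, then a write barrier, then ib1 with its BIND state, so the PPE
 * can never observe a bound entry with stale data. Returns the hash
 * (table index) actually used, or -ENOSPC if both slots of the bucket
 * are taken.
 */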
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
			 u16 timestamp)
{
	struct mtk_foe_entry *hwe;
	u32 hash;

	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

	hash = mtk_ppe_hash_entry(entry);
	hwe = &ppe->foe_table[hash];
	if (!mtk_foe_entry_usable(hwe)) {
		hwe++;
		hash++;

		if (!mtk_foe_entry_usable(hwe))
			return -ENOSPC;
	}

	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);

	return hash;
}

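/* One-time setup: record the MMIO base and allocate the FOE table from
 * coherent DMA memory, since the hardware walks the table directly. A
 * hypothetical caller (error handling omitted) would do:
 *
 *	mtk_ppe_init(ppe, dev, base, version);
 *	mtk_ppe_start(ppe);
 */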
int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
		 int version)
{
	struct mtk_foe_entry *foe;

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		return -ENOMEM;

	ppe->foe_table = foe;

	mtk_ppe_debugfs_init(ppe);

	return 0;
}

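/* Zero the FOE table. MT7621 hardware reportedly cannot use entries
 * that cross a 1024-byte boundary, so the affected slots are permanently
 * marked static; the skip pattern repeats every 128 entries.
 */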
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
		for (k = 0; k < ARRAY_SIZE(skip); k++)
			ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}

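/* Bring-up sequence: point the hardware at the FOE table, configure
 * table geometry, hashing, aging and keepalive behaviour, enable the
 * lookup cache, select which flow types may be offloaded, program the
 * bind/unbind rate limits, and finally set the enable bit in GLO_CFG.
 */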
int mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
	      MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
	      MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_L2_BRIDGE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	return 0;
}

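/* Teardown mirrors mtk_ppe_start(): invalidate every FOE entry, disable
 * the cache, the engine and all aging, then wait for the hardware to
 * drop its busy flag.
 */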
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	for (i = 0; i < MTK_PPE_ENTRIES; i++)
		ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
						   MTK_FOE_STATE_INVALID);

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}