linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */


#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <brcmu_utils.h>

#include "core.h"
#include "debug.h"
#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
#include "common.h"

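/* Flow control watermarks for a flowring's skb backlog: the netif queue of
 * the owning interface is stopped once more than BRCMF_FLOWRING_HIGH packets
 * are queued and started again when the backlog drains below
 * BRCMF_FLOWRING_LOW, giving some hysteresis between the two events.
 */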
#define BRCMF_FLOWRING_HIGH             1024
#define BRCMF_FLOWRING_LOW              (BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX    0xff

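/* Hash bucket selection for the flowring hash table. AP (direct) mode keys
 * on the last byte of the destination MAC plus fifo and interface index;
 * STA (indirect) mode ignores the destination, since all traffic goes via
 * the associated AP. Callers mask the result to BRCMF_FLOWRING_HASHSIZE.
 */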
#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)

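/* Map 802.1D priority (0-7) to one of four transmit fifos, following the
 * usual access category ordering (0 BE, 1 BK, 2 VI, 3 VO).
 */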
static const u8 brcmf_flowring_prio2fifo[] = {
        0,
        1,
        1,
        0,
        2,
        2,
        3,
        3
};

static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };


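/* Return true if @mac matches one of the registered TDLS peers. */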
static bool
brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *search;

        search = flow->tdls_entry;

        while (search) {
                if (memcmp(search->mac, mac, ETH_ALEN) == 0)
                        return true;
                search = search->next;
        }

        return false;
}


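/* Look up the flowring servicing the given destination, priority and
 * interface. Multicast frames in AP mode share the ALLFFMAC ring on fifo 0,
 * and traffic towards a TDLS peer is treated as directly addressed even in
 * STA mode. Returns the flowring id, or BRCMF_FLOWRING_INVALID_ID when no
 * matching ring exists.
 */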
u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_hash *hash;
        u16 hash_idx;
        u32 i;
        bool found;
        bool sta;
        u8 fifo;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                          BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
                    (hash[hash_idx].fifo == fifo) &&
                    (hash[hash_idx].ifidx == ifidx)) {
                        found = true;
                        break;
                }
                hash_idx++;
                hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        }
        if (found)
                return hash[hash_idx].flowid;

        return BRCMF_FLOWRING_INVALID_ID;
}


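/* Create a flowring for the given destination, priority and interface:
 * claim a free hash slot and allocate the ring in RING_CLOSED state.
 * Returns the new flowring id, BRCMF_FLOWRING_INVALID_ID when the hash
 * table is full, or -ENOMEM (despite the u32 return type) when no ring
 * slot or memory is available.
 */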
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_flowring_hash *hash;
        u16 hash_idx;
        u32 i;
        bool found;
        u8 fifo;
        bool sta;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                          BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
                    (is_zero_ether_addr(hash[hash_idx].mac))) {
                        found = true;
                        break;
                }
                hash_idx++;
                hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        }
        if (found) {
                for (i = 0; i < flow->nrofrings; i++) {
                        if (flow->rings[i] == NULL)
                                break;
                }
                if (i == flow->nrofrings)
                        return -ENOMEM;

                ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
                if (!ring)
                        return -ENOMEM;

                memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
                hash[hash_idx].fifo = fifo;
                hash[hash_idx].ifidx = ifidx;
                hash[hash_idx].flowid = i;

                ring->hash_id = hash_idx;
                ring->status = RING_CLOSED;
                skb_queue_head_init(&ring->skblist);
                flow->rings[i] = ring;

                return i;
        }
        return BRCMF_FLOWRING_INVALID_ID;
}


u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        return flow->hash[ring->hash_id].fifo;
}


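/* Update the blocked state of a flowring and, when appropriate, stop or
 * restart transmission on the owning interface. The netif queue is only
 * toggled if no other open flowring on the same interface is still blocked.
 */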
static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
                                 bool blocked)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_bus *bus_if;
        struct brcmf_pub *drvr;
        struct brcmf_if *ifp;
        bool currently_blocked;
        int i;
        u8 ifidx;
        unsigned long flags;

        spin_lock_irqsave(&flow->block_lock, flags);

        ring = flow->rings[flowid];
        if (ring->blocked == blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }
        ifidx = brcmf_flowring_ifidx_get(flow, flowid);

        currently_blocked = false;
        for (i = 0; i < flow->nrofrings; i++) {
                if ((flow->rings[i]) && (i != flowid)) {
                        ring = flow->rings[i];
                        if ((ring->status == RING_OPEN) &&
                            (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
                                if (ring->blocked) {
                                        currently_blocked = true;
                                        break;
                                }
                        }
                }
        }
        flow->rings[flowid]->blocked = blocked;
        if (currently_blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }

        bus_if = dev_get_drvdata(flow->dev);
        drvr = bus_if->drvr;
        ifp = brcmf_get_ifp(drvr, ifidx);
        brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);

        spin_unlock_irqrestore(&flow->block_lock, flags);
}


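/* Tear down a flowring: release its hash slot, drop any queued packets by
 * finalizing them as failed transmissions, and free the ring.
 */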
void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_flowring_ring *ring;
        struct brcmf_if *ifp;
        u16 hash_idx;
        u8 ifidx;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (!ring)
                return;

        ifidx = brcmf_flowring_ifidx_get(flow, flowid);
        ifp = brcmf_get_ifp(bus_if->drvr, ifidx);

        brcmf_flowring_block(flow, flowid, false);
        hash_idx = ring->hash_id;
        flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
        eth_zero_addr(flow->hash[hash_idx].mac);
        flow->rings[flowid] = NULL;

        skb = skb_dequeue(&ring->skblist);
        while (skb) {
                brcmf_txfinalize(ifp, skb, false);
                skb = skb_dequeue(&ring->skblist);
        }

        kfree(ring);
}


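/* Queue @skb on the flowring and apply flow control: the interface is
 * blocked once the backlog exceeds BRCMF_FLOWRING_HIGH. Returns the
 * resulting queue length.
 */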
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
                           struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_tail(&ring->skblist, skb);

        if (!ring->blocked &&
            (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
                brcmf_flowring_block(flow, flowid, true);
                brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
                /* Re-check the queue length to work around a possible race
                 * with the dequeue path. Locking would also close the race,
                 * but taking a lock on every enqueue and dequeue is
                 * undesirable; this simple re-check is sufficient if the
                 * race does occur.
                 */
                if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
                        brcmf_flowring_block(flow, flowid, false);
        }
        return skb_queue_len(&ring->skblist);
}


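/* Take the next packet from an open flowring and lift flow control once the
 * backlog drops below BRCMF_FLOWRING_LOW. Returns NULL when the ring is not
 * open or its queue is empty.
 */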
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (ring->status != RING_OPEN)
                return NULL;

        skb = skb_dequeue(&ring->skblist);

        if (ring->blocked &&
            (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
                brcmf_flowring_block(flow, flowid, false);
                brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
        }

        return skb;
}


void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
                             struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_head(&ring->skblist, skb);
}


u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring)
                return 0;

        if (ring->status != RING_OPEN)
                return 0;

        return skb_queue_len(&ring->skblist);
}


void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring) {
                brcmf_err("Ring NULL, for flowid %d\n", flowid);
                return;
        }

        ring->status = RING_OPEN;
}


u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;
        u16 hash_idx;

        ring = flow->rings[flowid];
        hash_idx = ring->hash_id;

        return flow->hash[hash_idx].ifidx;
}


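/* Allocate the flowring bookkeeping for @nrofrings rings. All interfaces
 * start out in ADDR_INDIRECT mode and all hash slots are marked unused.
 * Returns NULL on allocation failure.
 */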
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
{
        struct brcmf_flowring *flow;
        u32 i;

        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        if (flow) {
                flow->dev = dev;
                flow->nrofrings = nrofrings;
                spin_lock_init(&flow->block_lock);
                for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
                        flow->addr_mode[i] = ADDR_INDIRECT;
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
                        flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
                flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
                                      GFP_KERNEL);
                if (!flow->rings) {
                        kfree(flow);
                        flow = NULL;
                }
        }

        return flow;
}


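/* Delete all remaining flowrings through the msgbuf layer, free the TDLS
 * peer list and release the flowring bookkeeping.
 */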
void brcmf_flowring_detach(struct brcmf_flowring *flow)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_tdls_entry *search;
        struct brcmf_flowring_tdls_entry *remove;
        u16 flowid;

        for (flowid = 0; flowid < flow->nrofrings; flowid++) {
                if (flow->rings[flowid])
                        brcmf_msgbuf_delete_flowring(drvr, flowid);
        }

        search = flow->tdls_entry;
        while (search) {
                remove = search;
                search = search->next;
                kfree(remove);
        }
        kfree(flow->rings);
        kfree(flow);
}


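/* Switch an interface to a new addressing mode. When the mode actually
 * changes, every open flowring on that interface is marked RING_CLOSING and
 * deleted, so that subsequent traffic creates rings under the new mode.
 */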
void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
                                        enum proto_addr_mode addr_mode)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        u32 i;
        u16 flowid;

        if (flow->addr_mode[ifidx] != addr_mode) {
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
                        if (flow->hash[i].ifidx == ifidx) {
                                flowid = flow->hash[i].flowid;
                                if (flow->rings[flowid]->status != RING_OPEN)
                                        continue;
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
                flow->addr_mode[ifidx] = addr_mode;
        }
}


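/* Handle a departing peer: close and delete the flowrings associated with
 * @peer on @ifidx (all rings of the interface when it operates in indirect
 * mode), and drop the peer's TDLS entry if it has one.
 */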
void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
                                u8 peer[ETH_ALEN])
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_hash *hash;
        struct brcmf_flowring_tdls_entry *prev;
        struct brcmf_flowring_tdls_entry *search;
        u32 i;
        u16 flowid;
        bool sta;

        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);

        search = flow->tdls_entry;
        prev = NULL;
        while (search) {
                if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
                        sta = false;
                        break;
                }
                prev = search;
                search = search->next;
        }

        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
                    (hash[i].ifidx == ifidx)) {
                        flowid = flow->hash[i].flowid;
                        if (flow->rings[flowid]->status == RING_OPEN) {
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
        }

        if (search) {
                if (prev)
                        prev->next = search->next;
                else
                        flow->tdls_entry = search->next;
                kfree(search);
                if (flow->tdls_entry == NULL)
                        flow->tdls_active = false;
        }
}


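/* Register a TDLS peer so that its traffic is treated as directly addressed
 * and gets its own flowring; duplicate entries are ignored.
 */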
void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
                                  u8 peer[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *tdls_entry;
        struct brcmf_flowring_tdls_entry *search;

        tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
        if (tdls_entry == NULL)
                return;

        memcpy(tdls_entry->mac, peer, ETH_ALEN);
        tdls_entry->next = NULL;
        if (flow->tdls_entry == NULL) {
                flow->tdls_entry = tdls_entry;
        } else {
                search = flow->tdls_entry;
                if (memcmp(search->mac, peer, ETH_ALEN) == 0)
                        goto free_entry;
                while (search->next) {
                        search = search->next;
                        if (memcmp(search->mac, peer, ETH_ALEN) == 0)
                                goto free_entry;
                }
                search->next = tdls_entry;
        }

        flow->tdls_active = true;
        return;

free_entry:
        kfree(tdls_entry);
}