linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (c) 2014 Broadcom Corporation
   4 */
   5
   6/*******************************************************************************
   7 * Communicates with the dongle by using dcmd codes.
   8 * For certain dcmd codes, the dongle interprets string data from the host.
   9 ******************************************************************************/
  10
  11#include <linux/types.h>
  12#include <linux/netdevice.h>
  13#include <linux/etherdevice.h>
  14
  15#include <brcmu_utils.h>
  16#include <brcmu_wifi.h>
  17
  18#include "core.h"
  19#include "debug.h"
  20#include "proto.h"
  21#include "msgbuf.h"
  22#include "commonring.h"
  23#include "flowring.h"
  24#include "bus.h"
  25#include "tracepoint.h"
  26
  27
  28#define MSGBUF_IOCTL_RESP_TIMEOUT               msecs_to_jiffies(2000)
  29
  30#define MSGBUF_TYPE_GEN_STATUS                  0x1
  31#define MSGBUF_TYPE_RING_STATUS                 0x2
  32#define MSGBUF_TYPE_FLOW_RING_CREATE            0x3
  33#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT      0x4
  34#define MSGBUF_TYPE_FLOW_RING_DELETE            0x5
  35#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT      0x6
  36#define MSGBUF_TYPE_FLOW_RING_FLUSH             0x7
  37#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT       0x8
  38#define MSGBUF_TYPE_IOCTLPTR_REQ                0x9
  39#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK            0xA
  40#define MSGBUF_TYPE_IOCTLRESP_BUF_POST          0xB
  41#define MSGBUF_TYPE_IOCTL_CMPLT                 0xC
  42#define MSGBUF_TYPE_EVENT_BUF_POST              0xD
  43#define MSGBUF_TYPE_WL_EVENT                    0xE
  44#define MSGBUF_TYPE_TX_POST                     0xF
  45#define MSGBUF_TYPE_TX_STATUS                   0x10
  46#define MSGBUF_TYPE_RXBUF_POST                  0x11
  47#define MSGBUF_TYPE_RX_CMPLT                    0x12
  48#define MSGBUF_TYPE_LPBK_DMAXFER                0x13
  49#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT          0x14
  50
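/*
 * Message types 0x1 - 0x14 cover both directions of the shared rings:
 * the host submits IOCTLPTR_REQ, TX_POST, RXBUF_POST, the ioctl
 * response/event buffer posts and the flow ring create/delete/flush
 * requests, while the dongle answers with the corresponding
 * *_CMPLT/*_ACK messages plus WL_EVENT, TX_STATUS and RX_CMPLT.
 * The device-to-host side is dispatched in
 * brcmf_msgbuf_process_msgtype() below.
 */
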
  51#define NR_TX_PKTIDS                            2048
  52#define NR_RX_PKTIDS                            1024
  53
  54#define BRCMF_IOCTL_REQ_PKTID                   0xFFFE
  55
  56#define BRCMF_MSGBUF_MAX_PKT_SIZE               2048
  57#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE           8192
  58#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD        32
  59#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST      8
  60#define BRCMF_MSGBUF_MAX_EVENTBUF_POST          8
  61
  62#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3      0x01
  63#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11     0x02
  64#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK       0x07
  65#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT       5
  66
  67#define BRCMF_MSGBUF_TX_FLUSH_CNT1              32
  68#define BRCMF_MSGBUF_TX_FLUSH_CNT2              96
  69
  70#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS        96
  71#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS      32
  72#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS         48
  73
  74
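/*
 * Every message in either direction starts with msgbuf_common_hdr. For
 * buffer posts and TX_POST the request_id field carries a host-assigned
 * packet id (see the pktid allocator below), which the dongle echoes
 * back in the matching completion so the original skb and its DMA
 * mapping can be looked up again.
 */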
  75struct msgbuf_common_hdr {
  76        u8                              msgtype;
  77        u8                              ifidx;
  78        u8                              flags;
  79        u8                              rsvd0;
  80        __le32                          request_id;
  81};
  82
  83struct msgbuf_ioctl_req_hdr {
  84        struct msgbuf_common_hdr        msg;
  85        __le32                          cmd;
  86        __le16                          trans_id;
  87        __le16                          input_buf_len;
  88        __le16                          output_buf_len;
  89        __le16                          rsvd0[3];
  90        struct msgbuf_buf_addr          req_buf_addr;
  91        __le32                          rsvd1[2];
  92};
  93
  94struct msgbuf_tx_msghdr {
  95        struct msgbuf_common_hdr        msg;
  96        u8                              txhdr[ETH_HLEN];
  97        u8                              flags;
  98        u8                              seg_cnt;
  99        struct msgbuf_buf_addr          metadata_buf_addr;
 100        struct msgbuf_buf_addr          data_buf_addr;
 101        __le16                          metadata_buf_len;
 102        __le16                          data_len;
 103        __le32                          rsvd0;
 104};
 105
 106struct msgbuf_rx_bufpost {
 107        struct msgbuf_common_hdr        msg;
 108        __le16                          metadata_buf_len;
 109        __le16                          data_buf_len;
 110        __le32                          rsvd0;
 111        struct msgbuf_buf_addr          metadata_buf_addr;
 112        struct msgbuf_buf_addr          data_buf_addr;
 113};
 114
 115struct msgbuf_rx_ioctl_resp_or_event {
 116        struct msgbuf_common_hdr        msg;
 117        __le16                          host_buf_len;
 118        __le16                          rsvd0[3];
 119        struct msgbuf_buf_addr          host_buf_addr;
 120        __le32                          rsvd1[4];
 121};
 122
 123struct msgbuf_completion_hdr {
 124        __le16                          status;
 125        __le16                          flow_ring_id;
 126};
 127
 128/* Data struct for the MSGBUF_TYPE_GEN_STATUS */
 129struct msgbuf_gen_status {
 130        struct msgbuf_common_hdr        msg;
 131        struct msgbuf_completion_hdr    compl_hdr;
 132        __le16                          write_idx;
 133        __le32                          rsvd0[3];
 134};
 135
 136/* Data struct for the MSGBUF_TYPE_RING_STATUS */
 137struct msgbuf_ring_status {
 138        struct msgbuf_common_hdr        msg;
 139        struct msgbuf_completion_hdr    compl_hdr;
 140        __le16                          write_idx;
 141        __le16                          rsvd0[5];
 142};
 143
 144struct msgbuf_rx_event {
 145        struct msgbuf_common_hdr        msg;
 146        struct msgbuf_completion_hdr    compl_hdr;
 147        __le16                          event_data_len;
 148        __le16                          seqnum;
 149        __le16                          rsvd0[4];
 150};
 151
 152struct msgbuf_ioctl_resp_hdr {
 153        struct msgbuf_common_hdr        msg;
 154        struct msgbuf_completion_hdr    compl_hdr;
 155        __le16                          resp_len;
 156        __le16                          trans_id;
 157        __le32                          cmd;
 158        __le32                          rsvd0;
 159};
 160
 161struct msgbuf_tx_status {
 162        struct msgbuf_common_hdr        msg;
 163        struct msgbuf_completion_hdr    compl_hdr;
 164        __le16                          metadata_len;
 165        __le16                          tx_status;
 166};
 167
 168struct msgbuf_rx_complete {
 169        struct msgbuf_common_hdr        msg;
 170        struct msgbuf_completion_hdr    compl_hdr;
 171        __le16                          metadata_len;
 172        __le16                          data_len;
 173        __le16                          data_offset;
 174        __le16                          flags;
 175        __le32                          rx_status_0;
 176        __le32                          rx_status_1;
 177        __le32                          rsvd0;
 178};
 179
 180struct msgbuf_tx_flowring_create_req {
 181        struct msgbuf_common_hdr        msg;
 182        u8                              da[ETH_ALEN];
 183        u8                              sa[ETH_ALEN];
 184        u8                              tid;
 185        u8                              if_flags;
 186        __le16                          flow_ring_id;
 187        u8                              tc;
 188        u8                              priority;
 189        __le16                          int_vector;
 190        __le16                          max_items;
 191        __le16                          len_item;
 192        struct msgbuf_buf_addr          flow_ring_addr;
 193};
 194
 195struct msgbuf_tx_flowring_delete_req {
 196        struct msgbuf_common_hdr        msg;
 197        __le16                          flow_ring_id;
 198        __le16                          reason;
 199        __le32                          rsvd0[7];
 200};
 201
 202struct msgbuf_flowring_create_resp {
 203        struct msgbuf_common_hdr        msg;
 204        struct msgbuf_completion_hdr    compl_hdr;
 205        __le32                          rsvd0[3];
 206};
 207
 208struct msgbuf_flowring_delete_resp {
 209        struct msgbuf_common_hdr        msg;
 210        struct msgbuf_completion_hdr    compl_hdr;
 211        __le32                          rsvd0[3];
 212};
 213
 214struct msgbuf_flowring_flush_resp {
 215        struct msgbuf_common_hdr        msg;
 216        struct msgbuf_completion_hdr    compl_hdr;
 217        __le32                          rsvd0[3];
 218};
 219
 220struct brcmf_msgbuf_work_item {
 221        struct list_head queue;
 222        u32 flowid;
 223        int ifidx;
 224        u8 sa[ETH_ALEN];
 225        u8 da[ETH_ALEN];
 226};
 227
 228struct brcmf_msgbuf {
 229        struct brcmf_pub *drvr;
 230
 231        struct brcmf_commonring **commonrings;
 232        struct brcmf_commonring **flowrings;
 233        dma_addr_t *flowring_dma_handle;
 234
 235        u16 max_flowrings;
 236        u16 max_submissionrings;
 237        u16 max_completionrings;
 238
 239        u16 rx_dataoffset;
 240        u32 max_rxbufpost;
 241        u16 rx_metadata_offset;
 242        u32 rxbufpost;
 243
 244        u32 max_ioctlrespbuf;
 245        u32 cur_ioctlrespbuf;
 246        u32 max_eventbuf;
 247        u32 cur_eventbuf;
 248
 249        void *ioctbuf;
 250        dma_addr_t ioctbuf_handle;
 251        u32 ioctbuf_phys_hi;
 252        u32 ioctbuf_phys_lo;
 253        int ioctl_resp_status;
 254        u32 ioctl_resp_ret_len;
 255        u32 ioctl_resp_pktid;
 256
 257        u16 data_seq_no;
 258        u16 ioctl_seq_no;
 259        u32 reqid;
 260        wait_queue_head_t ioctl_resp_wait;
 261        bool ctl_completed;
 262
 263        struct brcmf_msgbuf_pktids *tx_pktids;
 264        struct brcmf_msgbuf_pktids *rx_pktids;
 265        struct brcmf_flowring *flow;
 266
 267        struct workqueue_struct *txflow_wq;
 268        struct work_struct txflow_work;
 269        unsigned long *flow_map;
 270        unsigned long *txstatus_done_map;
 271
 272        struct work_struct flowring_work;
 273        spinlock_t flowring_work_lock;
 274        struct list_head work_queue;
 275};
 276
 277struct brcmf_msgbuf_pktid {
 278        atomic_t  allocated;
 279        u16 data_offset;
 280        struct sk_buff *skb;
 281        dma_addr_t physaddr;
 282};
 283
 284struct brcmf_msgbuf_pktids {
 285        u32 array_size;
 286        u32 last_allocated_idx;
 287        enum dma_data_direction direction;
 288        struct brcmf_msgbuf_pktid *array;
 289};
 290
 291static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
 292
 293
 294static struct brcmf_msgbuf_pktids *
 295brcmf_msgbuf_init_pktids(u32 nr_array_entries,
 296                         enum dma_data_direction direction)
 297{
 298        struct brcmf_msgbuf_pktid *array;
 299        struct brcmf_msgbuf_pktids *pktids;
 300
 301        array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
 302        if (!array)
 303                return NULL;
 304
 305        pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
 306        if (!pktids) {
 307                kfree(array);
 308                return NULL;
 309        }
 310        pktids->array = array;
 311        pktids->array_size = nr_array_entries;
 312
 313        return pktids;
 314}
 315
 316
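/*
 * Packet id allocation: the id is an index into a fixed-size array
 * (NR_TX_PKTIDS/NR_RX_PKTIDS entries). A free slot is claimed with
 * atomic_cmpxchg() starting from the last allocated index, so no lock
 * is needed; the slot keeps the skb pointer, its DMA address and the
 * data offset until the dongle completes the transfer and the id is
 * looked up again in brcmf_msgbuf_get_pktid().
 */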
 317static int
 318brcmf_msgbuf_alloc_pktid(struct device *dev,
 319                         struct brcmf_msgbuf_pktids *pktids,
 320                         struct sk_buff *skb, u16 data_offset,
 321                         dma_addr_t *physaddr, u32 *idx)
 322{
 323        struct brcmf_msgbuf_pktid *array;
 324        u32 count;
 325
 326        array = pktids->array;
 327
 328        *physaddr = dma_map_single(dev, skb->data + data_offset,
 329                                   skb->len - data_offset, pktids->direction);
 330
 331        if (dma_mapping_error(dev, *physaddr)) {
 332                brcmf_err("dma_map_single failed !!\n");
 333                return -ENOMEM;
 334        }
 335
 336        *idx = pktids->last_allocated_idx;
 337
 338        count = 0;
 339        do {
 340                (*idx)++;
 341                if (*idx == pktids->array_size)
 342                        *idx = 0;
 343                if (array[*idx].allocated.counter == 0)
 344                        if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
 345                                break;
 346                count++;
 347        } while (count < pktids->array_size);
 348
 349        if (count == pktids->array_size)
 350                return -ENOMEM;
 351
 352        array[*idx].data_offset = data_offset;
 353        array[*idx].physaddr = *physaddr;
 354        array[*idx].skb = skb;
 355
 356        pktids->last_allocated_idx = *idx;
 357
 358        return 0;
 359}
 360
 361
 362static struct sk_buff *
 363brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
 364                       u32 idx)
 365{
 366        struct brcmf_msgbuf_pktid *pktid;
 367        struct sk_buff *skb;
 368
 369        if (idx >= pktids->array_size) {
 370                brcmf_err("Invalid packet id %d (max %d)\n", idx,
 371                          pktids->array_size);
 372                return NULL;
 373        }
 374        if (pktids->array[idx].allocated.counter) {
 375                pktid = &pktids->array[idx];
 376                dma_unmap_single(dev, pktid->physaddr,
 377                                 pktid->skb->len - pktid->data_offset,
 378                                 pktids->direction);
 379                skb = pktid->skb;
 380                pktid->allocated.counter = 0;
 381                return skb;
 382        } else {
 383                brcmf_err("Invalid packet id %d (not in use)\n", idx);
 384        }
 385
 386        return NULL;
 387}
 388
 389
 390static void
 391brcmf_msgbuf_release_array(struct device *dev,
 392                           struct brcmf_msgbuf_pktids *pktids)
 393{
 394        struct brcmf_msgbuf_pktid *array;
 395        struct brcmf_msgbuf_pktid *pktid;
 396        u32 count;
 397
 398        array = pktids->array;
 399        count = 0;
 400        do {
 401                if (array[count].allocated.counter) {
 402                        pktid = &array[count];
 403                        dma_unmap_single(dev, pktid->physaddr,
 404                                         pktid->skb->len - pktid->data_offset,
 405                                         pktids->direction);
 406                        brcmu_pkt_buf_free_skb(pktid->skb);
 407                }
 408                count++;
 409        } while (count < pktids->array_size);
 410
 411        kfree(array);
 412        kfree(pktids);
 413}
 414
 415
 416static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
 417{
 418        if (msgbuf->rx_pktids)
 419                brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
 420                                           msgbuf->rx_pktids);
 421        if (msgbuf->tx_pktids)
 422                brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
 423                                           msgbuf->tx_pktids);
 424}
 425
 426
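/*
 * Ioctl/dcmd handling: the request header is placed on the control
 * submit ring and the payload is copied into the preallocated
 * DMA-coherent ioctbuf whose address is passed in req_buf_addr. The
 * request_id is the fixed BRCMF_IOCTL_REQ_PKTID; the caller then waits
 * in brcmf_msgbuf_ioctl_resp_wait() until MSGBUF_TYPE_IOCTL_CMPLT
 * arrives or MSGBUF_IOCTL_RESP_TIMEOUT expires.
 */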
 427static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
 428                                 uint cmd, void *buf, uint len)
 429{
 430        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 431        struct brcmf_commonring *commonring;
 432        struct msgbuf_ioctl_req_hdr *request;
 433        u16 buf_len;
 434        void *ret_ptr;
 435        int err;
 436
 437        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
 438        brcmf_commonring_lock(commonring);
 439        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 440        if (!ret_ptr) {
 441                bphy_err(drvr, "Failed to reserve space in commonring\n");
 442                brcmf_commonring_unlock(commonring);
 443                return -ENOMEM;
 444        }
 445
 446        msgbuf->reqid++;
 447
 448        request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
 449        request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
 450        request->msg.ifidx = (u8)ifidx;
 451        request->msg.flags = 0;
 452        request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
 453        request->cmd = cpu_to_le32(cmd);
 454        request->output_buf_len = cpu_to_le16(len);
 455        request->trans_id = cpu_to_le16(msgbuf->reqid);
 456
 457        buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
 458        request->input_buf_len = cpu_to_le16(buf_len);
 459        request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
 460        request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
 461        if (buf)
 462                memcpy(msgbuf->ioctbuf, buf, buf_len);
 463        else
 464                memset(msgbuf->ioctbuf, 0, buf_len);
 465
 466        err = brcmf_commonring_write_complete(commonring);
 467        brcmf_commonring_unlock(commonring);
 468
 469        return err;
 470}
 471
 472
 473static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
 474{
 475        return wait_event_timeout(msgbuf->ioctl_resp_wait,
 476                                  msgbuf->ctl_completed,
 477                                  MSGBUF_IOCTL_RESP_TIMEOUT);
 478}
 479
 480
 481static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
 482{
 483        msgbuf->ctl_completed = true;
 484        wake_up(&msgbuf->ioctl_resp_wait);
 485}
 486
 487
 488static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
 489                                   uint cmd, void *buf, uint len, int *fwerr)
 490{
 491        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 492        struct sk_buff *skb = NULL;
 493        int timeout;
 494        int err;
 495
 496        brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
 497        *fwerr = 0;
 498        msgbuf->ctl_completed = false;
 499        err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
 500        if (err)
 501                return err;
 502
 503        timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
 504        if (!timeout) {
 505                bphy_err(drvr, "Timeout on response for query command\n");
 506                return -EIO;
 507        }
 508
 509        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 510                                     msgbuf->rx_pktids,
 511                                     msgbuf->ioctl_resp_pktid);
 512        if (msgbuf->ioctl_resp_ret_len != 0) {
 513                if (!skb)
 514                        return -EBADF;
 515
 516                memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
 517                                       len : msgbuf->ioctl_resp_ret_len);
 518        }
 519        brcmu_pkt_buf_free_skb(skb);
 520
 521        *fwerr = msgbuf->ioctl_resp_status;
 522        return 0;
 523}
 524
 525
 526static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
 527                                 uint cmd, void *buf, uint len, int *fwerr)
 528{
 529        return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
 530}
 531
 532
 533static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
 534                                struct sk_buff *skb, struct brcmf_if **ifp)
 535{
 536        return -ENODEV;
 537}
 538
 539static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
 540                                   bool inirq)
 541{
 542}
 543
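/*
 * A flowring is a per-(interface, destination, priority) host-to-device
 * TX ring backed by a DMA-coherent buffer of
 * BRCMF_H2D_TXFLOWRING_MAX_ITEM entries. Removing it frees that buffer
 * and drops the corresponding brcmf_flowring state.
 */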
 544static void
 545brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
 546{
 547        u32 dma_sz;
 548        void *dma_buf;
 549
 550        brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);
 551
 552        dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
 553        dma_buf = msgbuf->flowrings[flowid]->buf_addr;
 554        dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
 555                          msgbuf->flowring_dma_handle[flowid]);
 556
 557        brcmf_flowring_delete(msgbuf->flow, flowid);
 558}
 559
 560
 561static struct brcmf_msgbuf_work_item *
 562brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
 563{
 564        struct brcmf_msgbuf_work_item *work = NULL;
 565        ulong flags;
 566
 567        spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
 568        if (!list_empty(&msgbuf->work_queue)) {
 569                work = list_first_entry(&msgbuf->work_queue,
 570                                        struct brcmf_msgbuf_work_item, queue);
 571                list_del(&work->queue);
 572        }
 573        spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
 574
 575        return work;
 576}
 577
 578
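/*
 * Flowring creation is split in two: the data path only queues a
 * brcmf_msgbuf_work_item (see brcmf_msgbuf_flowring_create() below) and
 * the real work happens here in process context, since allocating the
 * DMA-coherent ring buffer with GFP_KERNEL may sleep, which the
 * (possibly atomic) TX path must not do.
 */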
 579static u32
 580brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
 581                                    struct brcmf_msgbuf_work_item *work)
 582{
 583        struct brcmf_pub *drvr = msgbuf->drvr;
 584        struct msgbuf_tx_flowring_create_req *create;
 585        struct brcmf_commonring *commonring;
 586        void *ret_ptr;
 587        u32 flowid;
 588        void *dma_buf;
 589        u32 dma_sz;
 590        u64 address;
 591        int err;
 592
 593        flowid = work->flowid;
 594        dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
 595        dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
 596                                     &msgbuf->flowring_dma_handle[flowid],
 597                                     GFP_KERNEL);
 598        if (!dma_buf) {
 599                bphy_err(drvr, "dma_alloc_coherent failed\n");
 600                brcmf_flowring_delete(msgbuf->flow, flowid);
 601                return BRCMF_FLOWRING_INVALID_ID;
 602        }
 603
 604        brcmf_commonring_config(msgbuf->flowrings[flowid],
 605                                BRCMF_H2D_TXFLOWRING_MAX_ITEM,
 606                                BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);
 607
 608        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
 609        brcmf_commonring_lock(commonring);
 610        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 611        if (!ret_ptr) {
 612                bphy_err(drvr, "Failed to reserve space in commonring\n");
 613                brcmf_commonring_unlock(commonring);
 614                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
 615                return BRCMF_FLOWRING_INVALID_ID;
 616        }
 617
 618        create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
 619        create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
 620        create->msg.ifidx = work->ifidx;
 621        create->msg.request_id = 0;
 622        create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
 623        create->flow_ring_id = cpu_to_le16(flowid +
 624                                           BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
 625        memcpy(create->sa, work->sa, ETH_ALEN);
 626        memcpy(create->da, work->da, ETH_ALEN);
 627        address = (u64)msgbuf->flowring_dma_handle[flowid];
 628        create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
 629        create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
 630        create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
 631        create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);
 632
 633        brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
 634                  flowid, work->da, create->tid, work->ifidx);
 635
 636        err = brcmf_commonring_write_complete(commonring);
 637        brcmf_commonring_unlock(commonring);
 638        if (err) {
 639                bphy_err(drvr, "Failed to write commonring\n");
 640                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
 641                return BRCMF_FLOWRING_INVALID_ID;
 642        }
 643
 644        return flowid;
 645}
 646
 647
 648static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
 649{
 650        struct brcmf_msgbuf *msgbuf;
 651        struct brcmf_msgbuf_work_item *create;
 652
 653        msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);
 654
 655        while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
 656                brcmf_msgbuf_flowring_create_worker(msgbuf, create);
 657                kfree(create);
 658        }
 659}
 660
 661
 662static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
 663                                        struct sk_buff *skb)
 664{
 665        struct brcmf_msgbuf_work_item *create;
 666        struct ethhdr *eh = (struct ethhdr *)(skb->data);
 667        u32 flowid;
 668        ulong flags;
 669
 670        create = kzalloc(sizeof(*create), GFP_ATOMIC);
 671        if (create == NULL)
 672                return BRCMF_FLOWRING_INVALID_ID;
 673
 674        flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
 675                                       skb->priority, ifidx);
 676        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
 677                kfree(create);
 678                return flowid;
 679        }
 680
 681        create->flowid = flowid;
 682        create->ifidx = ifidx;
 683        memcpy(create->sa, eh->h_source, ETH_ALEN);
 684        memcpy(create->da, eh->h_dest, ETH_ALEN);
 685
 686        spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
 687        list_add_tail(&create->queue, &msgbuf->work_queue);
 688        spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
 689        schedule_work(&msgbuf->flowring_work);
 690
 691        return flowid;
 692}
 693
 694
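/*
 * TX data path: packets queued on a flowring by
 * brcmf_msgbuf_tx_queue_data() are drained here. Each skb gets a TX
 * packet id and is posted as a MSGBUF_TYPE_TX_POST item; the write
 * pointer is published to the dongle in batches rather than per packet
 * to limit doorbell traffic.
 */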
 695static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
 696{
 697        struct brcmf_flowring *flow = msgbuf->flow;
 698        struct brcmf_pub *drvr = msgbuf->drvr;
 699        struct brcmf_commonring *commonring;
 700        void *ret_ptr;
 701        u32 count;
 702        struct sk_buff *skb;
 703        dma_addr_t physaddr;
 704        u32 pktid;
 705        struct msgbuf_tx_msghdr *tx_msghdr;
 706        u64 address;
 707
 708        commonring = msgbuf->flowrings[flowid];
 709        if (!brcmf_commonring_write_available(commonring))
 710                return;
 711
 712        brcmf_commonring_lock(commonring);
 713
 714        count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
 715        while (brcmf_flowring_qlen(flow, flowid)) {
 716                skb = brcmf_flowring_dequeue(flow, flowid);
 717                if (skb == NULL) {
 718                        bphy_err(drvr, "No SKB, but qlen %d\n",
 719                                 brcmf_flowring_qlen(flow, flowid));
 720                        break;
 721                }
 722                skb_orphan(skb);
 723                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
 724                                             msgbuf->tx_pktids, skb, ETH_HLEN,
 725                                             &physaddr, &pktid)) {
 726                        brcmf_flowring_reinsert(flow, flowid, skb);
 727                        bphy_err(drvr, "No PKTID available !!\n");
 728                        break;
 729                }
 730                ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 731                if (!ret_ptr) {
 732                        brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 733                                               msgbuf->tx_pktids, pktid);
 734                        brcmf_flowring_reinsert(flow, flowid, skb);
 735                        break;
 736                }
 737                count++;
 738
 739                tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
 740
 741                tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
 742                tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
 743                tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
 744                tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
 745                tx_msghdr->flags |= (skb->priority & 0x07) <<
 746                                    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
 747                tx_msghdr->seg_cnt = 1;
 748                memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
 749                tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
 750                address = (u64)physaddr;
 751                tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
 752                tx_msghdr->data_buf_addr.low_addr =
 753                        cpu_to_le32(address & 0xffffffff);
 754                tx_msghdr->metadata_buf_len = 0;
 755                tx_msghdr->metadata_buf_addr.high_addr = 0;
 756                tx_msghdr->metadata_buf_addr.low_addr = 0;
 757                atomic_inc(&commonring->outstanding_tx);
 758                if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
 759                        brcmf_commonring_write_complete(commonring);
 760                        count = 0;
 761                }
 762        }
 763        if (count)
 764                brcmf_commonring_write_complete(commonring);
 765        brcmf_commonring_unlock(commonring);
 766}
 767
 768
 769static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
 770{
 771        struct brcmf_msgbuf *msgbuf;
 772        u32 flowid;
 773
 774        msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
 775        for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
 776                clear_bit(flowid, msgbuf->flow_map);
 777                brcmf_msgbuf_txflow(msgbuf, flowid);
 778        }
 779}
 780
 781
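/*
 * The txflow worker is kicked immediately when 'force' is set or when
 * the number of TX posts still outstanding on the ring is below
 * BRCMF_MSGBUF_DELAY_TXWORKER_THRS; otherwise scheduling is deferred
 * until TX status processing in brcmf_proto_msgbuf_rx_trigger() drains
 * the backlog.
 */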
 782static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
 783                                        bool force)
 784{
 785        struct brcmf_commonring *commonring;
 786
 787        set_bit(flowid, msgbuf->flow_map);
 788        commonring = msgbuf->flowrings[flowid];
 789        if ((force) || (atomic_read(&commonring->outstanding_tx) <
 790                        BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
 791                queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
 792
 793        return 0;
 794}
 795
 796
 797static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
 798                                      struct sk_buff *skb)
 799{
 800        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 801        struct brcmf_flowring *flow = msgbuf->flow;
 802        struct ethhdr *eh = (struct ethhdr *)(skb->data);
 803        u32 flowid;
 804        u32 queue_count;
 805        bool force;
 806
 807        flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
 808        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
 809                flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
 810                if (flowid == BRCMF_FLOWRING_INVALID_ID)
 811                        return -ENOMEM;
 812        }
 813        queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
 814        force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
 815        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
 816
 817        return 0;
 818}
 819
 820
 821static void
 822brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
 823                                 enum proto_addr_mode addr_mode)
 824{
 825        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 826
 827        brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
 828}
 829
 830
 831static void
 832brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
 833{
 834        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 835
 836        brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
 837}
 838
 839
 840static void
 841brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
 842{
 843        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 844
 845        brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
 846}
 847
 848
 849static void
 850brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 851{
 852        struct msgbuf_ioctl_resp_hdr *ioctl_resp;
 853
 854        ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;
 855
 856        msgbuf->ioctl_resp_status =
 857                        (s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
 858        msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
 859        msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);
 860
 861        brcmf_msgbuf_ioctl_resp_wake(msgbuf);
 862
 863        if (msgbuf->cur_ioctlrespbuf)
 864                msgbuf->cur_ioctlrespbuf--;
 865        brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
 866}
 867
 868
 869static void
 870brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
 871{
 872        struct brcmf_commonring *commonring;
 873        struct msgbuf_tx_status *tx_status;
 874        u32 idx;
 875        struct sk_buff *skb;
 876        u16 flowid;
 877
 878        tx_status = (struct msgbuf_tx_status *)buf;
 879        idx = le32_to_cpu(tx_status->msg.request_id) - 1;
 880        flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
 881        flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
 882        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 883                                     msgbuf->tx_pktids, idx);
 884        if (!skb)
 885                return;
 886
 887        set_bit(flowid, msgbuf->txstatus_done_map);
 888        commonring = msgbuf->flowrings[flowid];
 889        atomic_dec(&commonring->outstanding_tx);
 890
 891        brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
 892                         skb, true);
 893}
 894
 895
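/*
 * RX buffer posting: the host keeps up to max_rxbufpost empty skbs of
 * BRCMF_MSGBUF_MAX_PKT_SIZE bytes posted on the RXPOST submit ring. The
 * dongle fills them and hands them back through MSGBUF_TYPE_RX_CMPLT;
 * the pool is topped up again once it has dropped
 * BRCMF_MSGBUF_RXBUFPOST_THRESHOLD buffers below the maximum (see
 * brcmf_msgbuf_update_rxbufpost_count()).
 */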
 896static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
 897{
 898        struct brcmf_pub *drvr = msgbuf->drvr;
 899        struct brcmf_commonring *commonring;
 900        void *ret_ptr;
 901        struct sk_buff *skb;
 902        u16 alloced;
 903        u32 pktlen;
 904        dma_addr_t physaddr;
 905        struct msgbuf_rx_bufpost *rx_bufpost;
 906        u64 address;
 907        u32 pktid;
 908        u32 i;
 909
 910        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
 911        ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
 912                                                              count,
 913                                                              &alloced);
 914        if (!ret_ptr) {
 915                brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
 916                return 0;
 917        }
 918
 919        for (i = 0; i < alloced; i++) {
 920                rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
 921                memset(rx_bufpost, 0, sizeof(*rx_bufpost));
 922
 923                skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
 924
 925                if (skb == NULL) {
 926                        bphy_err(drvr, "Failed to alloc SKB\n");
 927                        brcmf_commonring_write_cancel(commonring, alloced - i);
 928                        break;
 929                }
 930
 931                pktlen = skb->len;
 932                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
 933                                             msgbuf->rx_pktids, skb, 0,
 934                                             &physaddr, &pktid)) {
 935                        dev_kfree_skb_any(skb);
 936                        bphy_err(drvr, "No PKTID available !!\n");
 937                        brcmf_commonring_write_cancel(commonring, alloced - i);
 938                        break;
 939                }
 940
 941                if (msgbuf->rx_metadata_offset) {
 942                        address = (u64)physaddr;
 943                        rx_bufpost->metadata_buf_len =
 944                                cpu_to_le16(msgbuf->rx_metadata_offset);
 945                        rx_bufpost->metadata_buf_addr.high_addr =
 946                                cpu_to_le32(address >> 32);
 947                        rx_bufpost->metadata_buf_addr.low_addr =
 948                                cpu_to_le32(address & 0xffffffff);
 949
 950                        skb_pull(skb, msgbuf->rx_metadata_offset);
 951                        pktlen = skb->len;
 952                        physaddr += msgbuf->rx_metadata_offset;
 953                }
 954                rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
 955                rx_bufpost->msg.request_id = cpu_to_le32(pktid);
 956
 957                address = (u64)physaddr;
 958                rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
 959                rx_bufpost->data_buf_addr.high_addr =
 960                        cpu_to_le32(address >> 32);
 961                rx_bufpost->data_buf_addr.low_addr =
 962                        cpu_to_le32(address & 0xffffffff);
 963
 964                ret_ptr += brcmf_commonring_len_item(commonring);
 965        }
 966
 967        if (i)
 968                brcmf_commonring_write_complete(commonring);
 969
 970        return i;
 971}
 972
 973
 974static void
 975brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
 976{
 977        u32 fillbufs;
 978        u32 retcount;
 979
 980        fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;
 981
 982        while (fillbufs) {
 983                retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
 984                if (!retcount)
 985                        break;
 986                msgbuf->rxbufpost += retcount;
 987                fillbufs -= retcount;
 988        }
 989}
 990
 991
 992static void
 993brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
 994{
 995        msgbuf->rxbufpost -= rxcnt;
 996        if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
 997                                  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
 998                brcmf_msgbuf_rxbuf_data_fill(msgbuf);
 999}
1000
1001
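/*
 * Ioctl response and event buffers are posted the same way but on the
 * control submit ring, sized BRCMF_MSGBUF_MAX_CTL_PKT_SIZE, with at
 * most BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST resp.
 * BRCMF_MSGBUF_MAX_EVENTBUF_POST of each outstanding. A consumed buffer
 * is reposted right away by the corresponding completion handler.
 */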
1002static u32
1003brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
1004                             u32 count)
1005{
1006        struct brcmf_pub *drvr = msgbuf->drvr;
1007        struct brcmf_commonring *commonring;
1008        void *ret_ptr;
1009        struct sk_buff *skb;
1010        u16 alloced;
1011        u32 pktlen;
1012        dma_addr_t physaddr;
1013        struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
1014        u64 address;
1015        u32 pktid;
1016        u32 i;
1017
1018        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1019        brcmf_commonring_lock(commonring);
1020        ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
1021                                                              count,
1022                                                              &alloced);
1023        if (!ret_ptr) {
1024                bphy_err(drvr, "Failed to reserve space in commonring\n");
1025                brcmf_commonring_unlock(commonring);
1026                return 0;
1027        }
1028
1029        for (i = 0; i < alloced; i++) {
1030                rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
1031                memset(rx_bufpost, 0, sizeof(*rx_bufpost));
1032
1033                skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE);
1034
1035                if (skb == NULL) {
1036                        bphy_err(drvr, "Failed to alloc SKB\n");
1037                        brcmf_commonring_write_cancel(commonring, alloced - i);
1038                        break;
1039                }
1040
1041                pktlen = skb->len;
1042                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
1043                                             msgbuf->rx_pktids, skb, 0,
1044                                             &physaddr, &pktid)) {
1045                        dev_kfree_skb_any(skb);
1046                        bphy_err(drvr, "No PKTID available !!\n");
1047                        brcmf_commonring_write_cancel(commonring, alloced - i);
1048                        break;
1049                }
1050                if (event_buf)
1051                        rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
1052                else
1053                        rx_bufpost->msg.msgtype =
1054                                MSGBUF_TYPE_IOCTLRESP_BUF_POST;
1055                rx_bufpost->msg.request_id = cpu_to_le32(pktid);
1056
1057                address = (u64)physaddr;
1058                rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
1059                rx_bufpost->host_buf_addr.high_addr =
1060                        cpu_to_le32(address >> 32);
1061                rx_bufpost->host_buf_addr.low_addr =
1062                        cpu_to_le32(address & 0xffffffff);
1063
1064                ret_ptr += brcmf_commonring_len_item(commonring);
1065        }
1066
1067        if (i)
1068                brcmf_commonring_write_complete(commonring);
1069
1070        brcmf_commonring_unlock(commonring);
1071
1072        return i;
1073}
1074
1075
1076static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
1077{
1078        u32 count;
1079
1080        count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
1081        count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
1082        msgbuf->cur_ioctlrespbuf += count;
1083}
1084
1085
1086static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
1087{
1088        u32 count;
1089
1090        count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
1091        count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
1092        msgbuf->cur_eventbuf += count;
1093}
1094
1095
1096static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
1097{
1098        struct brcmf_pub *drvr = msgbuf->drvr;
1099        struct msgbuf_rx_event *event;
1100        u32 idx;
1101        u16 buflen;
1102        struct sk_buff *skb;
1103        struct brcmf_if *ifp;
1104
1105        event = (struct msgbuf_rx_event *)buf;
1106        idx = le32_to_cpu(event->msg.request_id);
1107        buflen = le16_to_cpu(event->event_data_len);
1108
1109        if (msgbuf->cur_eventbuf)
1110                msgbuf->cur_eventbuf--;
1111        brcmf_msgbuf_rxbuf_event_post(msgbuf);
1112
1113        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1114                                     msgbuf->rx_pktids, idx);
1115        if (!skb)
1116                return;
1117
1118        if (msgbuf->rx_dataoffset)
1119                skb_pull(skb, msgbuf->rx_dataoffset);
1120
1121        skb_trim(skb, buflen);
1122
1123        ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
1124        if (!ifp || !ifp->ndev) {
1125                bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
1126                         event->msg.ifidx);
1127                goto exit;
1128        }
1129
1130        skb->protocol = eth_type_trans(skb, ifp->ndev);
1131
1132        brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);
1133
1134exit:
1135        brcmu_pkt_buf_free_skb(skb);
1136}
1137
1138
1139static void
1140brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
1141{
1142        struct brcmf_pub *drvr = msgbuf->drvr;
1143        struct msgbuf_rx_complete *rx_complete;
1144        struct sk_buff *skb;
1145        u16 data_offset;
1146        u16 buflen;
1147        u16 flags;
1148        u32 idx;
1149        struct brcmf_if *ifp;
1150
1151        brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
1152
1153        rx_complete = (struct msgbuf_rx_complete *)buf;
1154        data_offset = le16_to_cpu(rx_complete->data_offset);
1155        buflen = le16_to_cpu(rx_complete->data_len);
1156        idx = le32_to_cpu(rx_complete->msg.request_id);
1157        flags = le16_to_cpu(rx_complete->flags);
1158
1159        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1160                                     msgbuf->rx_pktids, idx);
1161        if (!skb)
1162                return;
1163
1164        if (data_offset)
1165                skb_pull(skb, data_offset);
1166        else if (msgbuf->rx_dataoffset)
1167                skb_pull(skb, msgbuf->rx_dataoffset);
1168
1169        skb_trim(skb, buflen);
1170
1171        if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
1172            BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
1173                ifp = msgbuf->drvr->mon_if;
1174
1175                if (!ifp) {
1176                        bphy_err(drvr, "Received unexpected monitor pkt\n");
1177                        brcmu_pkt_buf_free_skb(skb);
1178                        return;
1179                }
1180
1181                brcmf_netif_mon_rx(ifp, skb);
1182                return;
1183        }
1184
1185        ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
1186        if (!ifp || !ifp->ndev) {
1187                bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
1188                         rx_complete->msg.ifidx);
1189                brcmu_pkt_buf_free_skb(skb);
1190                return;
1191        }
1192
1193        skb->protocol = eth_type_trans(skb, ifp->ndev);
1194        brcmf_netif_rx(ifp, skb, false);
1195}
1196
1197static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
1198                                            void *buf)
1199{
1200        struct msgbuf_gen_status *gen_status = buf;
1201        struct brcmf_pub *drvr = msgbuf->drvr;
1202        int err;
1203
1204        err = le16_to_cpu(gen_status->compl_hdr.status);
1205        if (err)
1206                bphy_err(drvr, "Firmware reported general error: %d\n", err);
1207}
1208
1209static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
1210                                             void *buf)
1211{
1212        struct msgbuf_ring_status *ring_status = buf;
1213        struct brcmf_pub *drvr = msgbuf->drvr;
1214        int err;
1215
1216        err = le16_to_cpu(ring_status->compl_hdr.status);
1217        if (err) {
1218                int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);
1219
1220                bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
1221                         err);
1222        }
1223}
1224
1225static void
1226brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
1227                                               void *buf)
1228{
1229        struct brcmf_pub *drvr = msgbuf->drvr;
1230        struct msgbuf_flowring_create_resp *flowring_create_resp;
1231        u16 status;
1232        u16 flowid;
1233
1234        flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
1235
1236        flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
1237        flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1238        status =  le16_to_cpu(flowring_create_resp->compl_hdr.status);
1239
1240        if (status) {
1241                bphy_err(drvr, "Flowring creation failed, code %d\n", status);
1242                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1243                return;
1244        }
1245        brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
1246                  status);
1247
1248        brcmf_flowring_open(msgbuf->flow, flowid);
1249
1250        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1251}
1252
1253
1254static void
1255brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
1256                                               void *buf)
1257{
1258        struct brcmf_pub *drvr = msgbuf->drvr;
1259        struct msgbuf_flowring_delete_resp *flowring_delete_resp;
1260        u16 status;
1261        u16 flowid;
1262
1263        flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
1264
1265        flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
1266        flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1267        status =  le16_to_cpu(flowring_delete_resp->compl_hdr.status);
1268
1269        if (status) {
1270                bphy_err(drvr, "Flowring deletion failed, code %d\n", status);
1271                brcmf_flowring_delete(msgbuf->flow, flowid);
1272                return;
1273        }
1274        brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
1275                  status);
1276
1277        brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1278}
1279
1280
1281static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
1282{
1283        struct brcmf_pub *drvr = msgbuf->drvr;
1284        struct msgbuf_common_hdr *msg;
1285
1286        msg = (struct msgbuf_common_hdr *)buf;
1287        switch (msg->msgtype) {
1288        case MSGBUF_TYPE_GEN_STATUS:
1289                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
1290                brcmf_msgbuf_process_gen_status(msgbuf, buf);
1291                break;
1292        case MSGBUF_TYPE_RING_STATUS:
1293                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
1294                brcmf_msgbuf_process_ring_status(msgbuf, buf);
1295                break;
1296        case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1297                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
1298                brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
1299                break;
1300        case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1301                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
1302                brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
1303                break;
1304        case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1305                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
1306                break;
1307        case MSGBUF_TYPE_IOCTL_CMPLT:
1308                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
1309                brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
1310                break;
1311        case MSGBUF_TYPE_WL_EVENT:
1312                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
1313                brcmf_msgbuf_process_event(msgbuf, buf);
1314                break;
1315        case MSGBUF_TYPE_TX_STATUS:
1316                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
1317                brcmf_msgbuf_process_txstatus(msgbuf, buf);
1318                break;
1319        case MSGBUF_TYPE_RX_CMPLT:
1320                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
1321                brcmf_msgbuf_process_rx_complete(msgbuf, buf);
1322                break;
1323        default:
1324                bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
1325                break;
1326        }
1327}
1328
1329
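/*
 * D2H completion processing: brcmf_msgbuf_process_rx() walks one
 * completion ring and dispatches every item through
 * brcmf_msgbuf_process_msgtype(). The read pointer is pushed back to
 * the dongle every BRCMF_MSGBUF_UPDATE_RX_PTR_THRS items so ring slots
 * can be recycled while a large batch is still being handled.
 *
 * brcmf_proto_msgbuf_rx_trigger() below is the entry point for the bus
 * layer; a minimal, illustrative sketch of how a bus driver might call
 * it from its D2H interrupt path (all names here are hypothetical):
 *
 *	static void mybus_handle_d2h_irq(struct mybus_devinfo *devinfo)
 *	{
 *		brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
 *	}
 */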
1330static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
1331                                    struct brcmf_commonring *commonring)
1332{
1333        void *buf;
1334        u16 count;
1335        u16 processed;
1336
1337again:
1338        buf = brcmf_commonring_get_read_ptr(commonring, &count);
1339        if (buf == NULL)
1340                return;
1341
1342        processed = 0;
1343        while (count) {
1344                brcmf_msgbuf_process_msgtype(msgbuf,
1345                                             buf + msgbuf->rx_dataoffset);
1346                buf += brcmf_commonring_len_item(commonring);
1347                processed++;
1348                if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
1349                        brcmf_commonring_read_complete(commonring, processed);
1350                        processed = 0;
1351                }
1352                count--;
1353        }
1354        if (processed)
1355                brcmf_commonring_read_complete(commonring, processed);
1356
1357        if (commonring->r_ptr == 0)
1358                goto again;
1359}
1360
1361
1362int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
1363{
1364        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1365        struct brcmf_pub *drvr = bus_if->drvr;
1366        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1367        struct brcmf_commonring *commonring;
1368        void *buf;
1369        u32 flowid;
1370        int qlen;
1371
1372        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1373        brcmf_msgbuf_process_rx(msgbuf, buf);
1374        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
1375        brcmf_msgbuf_process_rx(msgbuf, buf);
1376        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
1377        brcmf_msgbuf_process_rx(msgbuf, buf);
1378
1379        for_each_set_bit(flowid, msgbuf->txstatus_done_map,
1380                         msgbuf->max_flowrings) {
1381                clear_bit(flowid, msgbuf->txstatus_done_map);
1382                commonring = msgbuf->flowrings[flowid];
1383                qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
1384                if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
1385                    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
1386                                BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
1387                        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1388        }
1389
1390        return 0;
1391}
1392
1393
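/*
 * Flowring teardown mirrors creation: a MSGBUF_TYPE_FLOW_RING_DELETE
 * request is submitted on the control ring and the ring's DMA memory is
 * only released once the dongle acknowledges with
 * MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT. If the bus is down or the request
 * cannot be submitted, the ring is removed right away.
 */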
1394void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
1395{
1396        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1397        struct msgbuf_tx_flowring_delete_req *delete;
1398        struct brcmf_commonring *commonring;
1399        void *ret_ptr;
1400        u8 ifidx;
1401        int err;
1402
1403        /* no need to submit if firmware can not be reached */
1404        if (drvr->bus_if->state != BRCMF_BUS_UP) {
1405                brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n");
1406                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1407                return;
1408        }
1409
1410        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1411        brcmf_commonring_lock(commonring);
1412        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
1413        if (!ret_ptr) {
1414                bphy_err(drvr, "FW unaware, flowring will be removed !!\n");
1415                brcmf_commonring_unlock(commonring);
1416                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1417                return;
1418        }
1419
1420        delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;
1421
1422        ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);
1423
1424        delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1425        delete->msg.ifidx = ifidx;
1426        delete->msg.request_id = 0;
1427
1428        delete->flow_ring_id = cpu_to_le16(flowid +
1429                                           BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
1430        delete->reason = 0;
1431
1432        brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
1433                  flowid, ifidx);
1434
1435        err = brcmf_commonring_write_complete(commonring);
1436        brcmf_commonring_unlock(commonring);
1437        if (err) {
1438                bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n");
1439                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1440        }
1441}
1442
1443#ifdef DEBUG
1444static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
1445{
1446        struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
1447        struct brcmf_pub *drvr = bus_if->drvr;
1448        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1449        struct brcmf_commonring *commonring;
1450        u16 i;
1451        struct brcmf_flowring_ring *ring;
1452        struct brcmf_flowring_hash *hash;
1453
1454        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1455        seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
1456                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1457        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
1458        seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
1459                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1460        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
1461        seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
1462                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1463        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
1464        seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
1465                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1466        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1467        seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
1468                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1469
1470        seq_printf(seq, "\nh2d_flowrings: depth %u\n",
1471                   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
1472        seq_puts(seq, "Active flowrings:\n");
1473        for (i = 0; i < msgbuf->flow->nrofrings; i++) {
1474                if (!msgbuf->flow->rings[i])
1475                        continue;
1476                ring = msgbuf->flow->rings[i];
1477                if (ring->status != RING_OPEN)
1478                        continue;
1479                commonring = msgbuf->flowrings[i];
1480                hash = &msgbuf->flow->hash[ring->hash_id];
1481                seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
1482                                "        ifidx %u, fifo %u, da %pM\n",
1483                                i, commonring->r_ptr, commonring->w_ptr,
1484                                skb_queue_len(&ring->skblist), ring->blocked,
1485                                hash->ifidx, hash->fifo, hash->mac);
1486        }
1487
1488        return 0;
1489}
1490#else
1491static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
1492{
1493        return 0;
1494}
1495#endif
1496
1497static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr)
1498{
1499        brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
1500}
1501
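    /*
     * brcmf_proto_msgbuf_attach() - set up the msgbuf protocol layer.
     *
     * Allocates the flow bitmaps, the DMA-coherent ioctl buffer and the packet
     * id maps, hooks the msgbuf handlers into drvr->proto, attaches the
     * flowring manager and pre-posts rx data, event and ioctl response buffers
     * to the firmware. Returns 0 on success or -ENOMEM after undoing any
     * partial setup.
     */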
1502int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
1503{
1504        struct brcmf_bus_msgbuf *if_msgbuf;
1505        struct brcmf_msgbuf *msgbuf;
1506        u64 address;
1507        u32 count;
1508
1509        if_msgbuf = drvr->bus_if->msgbuf;
1510
1511        if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
1512                bphy_err(drvr, "driver not configured for %d flowrings\n",
1513                         if_msgbuf->max_flowrings);
1514                if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
1515        }
1516
1517        msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
1518        if (!msgbuf)
1519                goto fail;
1520
1521        msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
1522        if (!msgbuf->txflow_wq) {
1523                bphy_err(drvr, "workqueue creation failed\n");
1524                goto fail;
1525        }
1526        INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
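            /*
             * flow_map and txstatus_done_map are bitmaps holding one bit per
             * flowring, so BITS_TO_LONGS() longs are allocated for each.
             */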
1527        count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
1528        count *= sizeof(unsigned long);
1529        msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
1530        if (!msgbuf->flow_map)
1531                goto fail;
1532
1533        msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
1534        if (!msgbuf->txstatus_done_map)
1535                goto fail;
1536
1537        msgbuf->drvr = drvr;
1538        msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
1539                                             BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1540                                             &msgbuf->ioctbuf_handle,
1541                                             GFP_KERNEL);
1542        if (!msgbuf->ioctbuf)
1543                goto fail;
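            /*
             * Split the 64-bit DMA address of the ioctl buffer into the 32-bit
             * high/low words used in the ioctl request messages to the
             * firmware.
             */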
1544        address = (u64)msgbuf->ioctbuf_handle;
1545        msgbuf->ioctbuf_phys_hi = address >> 32;
1546        msgbuf->ioctbuf_phys_lo = address & 0xffffffff;
1547
1548        drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
1549        drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
1550        drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
1551        drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
1552        drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
1553        drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
1554        drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
1555        drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
1556        drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create;
1557        drvr->proto->pd = msgbuf;
1558
1559        init_waitqueue_head(&msgbuf->ioctl_resp_wait);
1560
1561        msgbuf->commonrings =
1562                (struct brcmf_commonring **)if_msgbuf->commonrings;
1563        msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
1564        msgbuf->max_flowrings = if_msgbuf->max_flowrings;
1565        msgbuf->flowring_dma_handle =
1566                kcalloc(msgbuf->max_flowrings,
1567                        sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
1568        if (!msgbuf->flowring_dma_handle)
1569                goto fail;
1570
1571        msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
1572        msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;
1573
1574        msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
1575        msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;
1576
1577        msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
1578                                                     DMA_TO_DEVICE);
1579        if (!msgbuf->tx_pktids)
1580                goto fail;
1581        msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
1582                                                     DMA_FROM_DEVICE);
1583        if (!msgbuf->rx_pktids)
1584                goto fail;
1585
1586        msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
1587                                             if_msgbuf->max_flowrings);
1588        if (!msgbuf->flow)
1589                goto fail;
1590
1591
1592        brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
1593                  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
1594                  msgbuf->max_ioctlrespbuf);
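            /*
             * Pre-fill the rx data buffer ring; if not all buffers could be
             * posted at once, retry for at most ten 10 ms intervals before
             * moving on to event and ioctl response buffer posting.
             */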
1595        count = 0;
1596        do {
1597                brcmf_msgbuf_rxbuf_data_fill(msgbuf);
1598                if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
1599                        msleep(10);
1600                else
1601                        break;
1602                count++;
1603        } while (count < 10);
1604        brcmf_msgbuf_rxbuf_event_post(msgbuf);
1605        brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
1606
1607        INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
1608        spin_lock_init(&msgbuf->flowring_work_lock);
1609        INIT_LIST_HEAD(&msgbuf->work_queue);
1610
1611        return 0;
1612
1613fail:
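            /* Release whatever was set up before the failure occurred. */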
1614        if (msgbuf) {
1615                kfree(msgbuf->flow_map);
1616                kfree(msgbuf->txstatus_done_map);
1617                brcmf_msgbuf_release_pktids(msgbuf);
1618                kfree(msgbuf->flowring_dma_handle);
1619                if (msgbuf->ioctbuf)
1620                        dma_free_coherent(drvr->bus_if->dev,
1621                                          BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1622                                          msgbuf->ioctbuf,
1623                                          msgbuf->ioctbuf_handle);
1624                if (msgbuf->txflow_wq)
1625                        destroy_workqueue(msgbuf->txflow_wq);
1626                kfree(msgbuf);
1627        }
1628        return -ENOMEM;
1629}
1630
1631
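    /*
     * brcmf_proto_msgbuf_detach() - tear down the msgbuf protocol layer.
     *
     * Cancels pending flowring work, drains the flowring work queue and
     * releases the bitmaps, tx flow workqueue, flowring manager, ioctl DMA
     * buffer, packet id maps and DMA handle array allocated by
     * brcmf_proto_msgbuf_attach().
     */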
1632void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
1633{
1634        struct brcmf_msgbuf *msgbuf;
1635        struct brcmf_msgbuf_work_item *work;
1636
1637        brcmf_dbg(TRACE, "Enter\n");
1638        if (drvr->proto->pd) {
1639                msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1640                cancel_work_sync(&msgbuf->flowring_work);
1641                while (!list_empty(&msgbuf->work_queue)) {
1642                        work = list_first_entry(&msgbuf->work_queue,
1643                                                struct brcmf_msgbuf_work_item,
1644                                                queue);
1645                        list_del(&work->queue);
1646                        kfree(work);
1647                }
1648                kfree(msgbuf->flow_map);
1649                kfree(msgbuf->txstatus_done_map);
1650                if (msgbuf->txflow_wq)
1651                        destroy_workqueue(msgbuf->txflow_wq);
1652
1653                brcmf_flowring_detach(msgbuf->flow);
1654                dma_free_coherent(drvr->bus_if->dev,
1655                                  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1656                                  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
1657                brcmf_msgbuf_release_pktids(msgbuf);
1658                kfree(msgbuf->flowring_dma_handle);
1659                kfree(msgbuf);
1660                drvr->proto->pd = NULL;
1661        }
1662}
1663