linux/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
   1/* Copyright (c) 2014 Broadcom Corporation
   2 *
   3 * Permission to use, copy, modify, and/or distribute this software for any
   4 * purpose with or without fee is hereby granted, provided that the above
   5 * copyright notice and this permission notice appear in all copies.
   6 *
   7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14 */
  15
  16/*******************************************************************************
  17 * Communicates with the dongle by using dcmd codes.
  18 * For certain dcmd codes, the dongle interprets string data from the host.
  19 ******************************************************************************/
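/*
 * Overview (annotation, assuming the standard brcmfmac msgbuf design): the
 * msgbuf protocol exchanges fixed-layout messages with the firmware over
 * DMA-coherent rings shared with the device (typically over PCIe):
 *
 *   host -> device: control submit ring, RX buffer post ring and one
 *                   TX flowring per traffic flow
 *   device -> host: control, TX and RX completion rings
 *
 * Host buffers referenced by ring entries are identified by small packet
 * IDs (see brcmf_msgbuf_pktids below) carried in the request_id field.
 */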
  20
  21#include <linux/types.h>
  22#include <linux/netdevice.h>
  23
  24#include <brcmu_utils.h>
  25#include <brcmu_wifi.h>
  26
  27#include "core.h"
  28#include "debug.h"
  29#include "proto.h"
  30#include "msgbuf.h"
  31#include "commonring.h"
  32#include "flowring.h"
  33#include "bus.h"
  34#include "tracepoint.h"
  35
  36
  37#define MSGBUF_IOCTL_RESP_TIMEOUT               2000
  38
  39#define MSGBUF_TYPE_GEN_STATUS                  0x1
  40#define MSGBUF_TYPE_RING_STATUS                 0x2
  41#define MSGBUF_TYPE_FLOW_RING_CREATE            0x3
  42#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT      0x4
  43#define MSGBUF_TYPE_FLOW_RING_DELETE            0x5
  44#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT      0x6
  45#define MSGBUF_TYPE_FLOW_RING_FLUSH             0x7
  46#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT       0x8
  47#define MSGBUF_TYPE_IOCTLPTR_REQ                0x9
  48#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK            0xA
  49#define MSGBUF_TYPE_IOCTLRESP_BUF_POST          0xB
  50#define MSGBUF_TYPE_IOCTL_CMPLT                 0xC
  51#define MSGBUF_TYPE_EVENT_BUF_POST              0xD
  52#define MSGBUF_TYPE_WL_EVENT                    0xE
  53#define MSGBUF_TYPE_TX_POST                     0xF
  54#define MSGBUF_TYPE_TX_STATUS                   0x10
  55#define MSGBUF_TYPE_RXBUF_POST                  0x11
  56#define MSGBUF_TYPE_RX_CMPLT                    0x12
  57#define MSGBUF_TYPE_LPBK_DMAXFER                0x13
  58#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT          0x14
  59
  60#define NR_TX_PKTIDS                            2048
  61#define NR_RX_PKTIDS                            1024
  62
  63#define BRCMF_IOCTL_REQ_PKTID                   0xFFFE
  64
  65#define BRCMF_MSGBUF_MAX_PKT_SIZE               2048
  66#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD        32
  67#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST      8
  68#define BRCMF_MSGBUF_MAX_EVENTBUF_POST          8
  69
  70#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3      0x01
  71#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT       5
  72
  73#define BRCMF_MSGBUF_TX_FLUSH_CNT1              32
  74#define BRCMF_MSGBUF_TX_FLUSH_CNT2              96
  75
  76#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS        96
  77#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS      32
  78#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS         48
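/*
 * Tuning notes: the TX worker is queued immediately while fewer than
 * BRCMF_MSGBUF_DELAY_TXWORKER_THRS transmits are outstanding, and is forced
 * whenever a flowring queue length reaches a multiple of
 * BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS. Completion rings acknowledge their
 * read pointer every BRCMF_MSGBUF_UPDATE_RX_PTR_THRS processed items.
 */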
  79
  80
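/*
 * The structures below mirror the firmware's on-the-wire message formats:
 * multi-byte fields are little endian and DMA addresses are carried as
 * 32-bit high/low halves (struct msgbuf_buf_addr).
 */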
  81struct msgbuf_common_hdr {
  82        u8                              msgtype;
  83        u8                              ifidx;
  84        u8                              flags;
  85        u8                              rsvd0;
  86        __le32                          request_id;
  87};
  88
  89struct msgbuf_buf_addr {
  90        __le32                          low_addr;
  91        __le32                          high_addr;
  92};
  93
  94struct msgbuf_ioctl_req_hdr {
  95        struct msgbuf_common_hdr        msg;
  96        __le32                          cmd;
  97        __le16                          trans_id;
  98        __le16                          input_buf_len;
  99        __le16                          output_buf_len;
 100        __le16                          rsvd0[3];
 101        struct msgbuf_buf_addr          req_buf_addr;
 102        __le32                          rsvd1[2];
 103};
 104
 105struct msgbuf_tx_msghdr {
 106        struct msgbuf_common_hdr        msg;
 107        u8                              txhdr[ETH_HLEN];
 108        u8                              flags;
 109        u8                              seg_cnt;
 110        struct msgbuf_buf_addr          metadata_buf_addr;
 111        struct msgbuf_buf_addr          data_buf_addr;
 112        __le16                          metadata_buf_len;
 113        __le16                          data_len;
 114        __le32                          rsvd0;
 115};
 116
 117struct msgbuf_rx_bufpost {
 118        struct msgbuf_common_hdr        msg;
 119        __le16                          metadata_buf_len;
 120        __le16                          data_buf_len;
 121        __le32                          rsvd0;
 122        struct msgbuf_buf_addr          metadata_buf_addr;
 123        struct msgbuf_buf_addr          data_buf_addr;
 124};
 125
 126struct msgbuf_rx_ioctl_resp_or_event {
 127        struct msgbuf_common_hdr        msg;
 128        __le16                          host_buf_len;
 129        __le16                          rsvd0[3];
 130        struct msgbuf_buf_addr          host_buf_addr;
 131        __le32                          rsvd1[4];
 132};
 133
 134struct msgbuf_completion_hdr {
 135        __le16                          status;
 136        __le16                          flow_ring_id;
 137};
 138
 139struct msgbuf_rx_event {
 140        struct msgbuf_common_hdr        msg;
 141        struct msgbuf_completion_hdr    compl_hdr;
 142        __le16                          event_data_len;
 143        __le16                          seqnum;
 144        __le16                          rsvd0[4];
 145};
 146
 147struct msgbuf_ioctl_resp_hdr {
 148        struct msgbuf_common_hdr        msg;
 149        struct msgbuf_completion_hdr    compl_hdr;
 150        __le16                          resp_len;
 151        __le16                          trans_id;
 152        __le32                          cmd;
 153        __le32                          rsvd0;
 154};
 155
 156struct msgbuf_tx_status {
 157        struct msgbuf_common_hdr        msg;
 158        struct msgbuf_completion_hdr    compl_hdr;
 159        __le16                          metadata_len;
 160        __le16                          tx_status;
 161};
 162
 163struct msgbuf_rx_complete {
 164        struct msgbuf_common_hdr        msg;
 165        struct msgbuf_completion_hdr    compl_hdr;
 166        __le16                          metadata_len;
 167        __le16                          data_len;
 168        __le16                          data_offset;
 169        __le16                          flags;
 170        __le32                          rx_status_0;
 171        __le32                          rx_status_1;
 172        __le32                          rsvd0;
 173};
 174
 175struct msgbuf_tx_flowring_create_req {
 176        struct msgbuf_common_hdr        msg;
 177        u8                              da[ETH_ALEN];
 178        u8                              sa[ETH_ALEN];
 179        u8                              tid;
 180        u8                              if_flags;
 181        __le16                          flow_ring_id;
 182        u8                              tc;
 183        u8                              priority;
 184        __le16                          int_vector;
 185        __le16                          max_items;
 186        __le16                          len_item;
 187        struct msgbuf_buf_addr          flow_ring_addr;
 188};
 189
 190struct msgbuf_tx_flowring_delete_req {
 191        struct msgbuf_common_hdr        msg;
 192        __le16                          flow_ring_id;
 193        __le16                          reason;
 194        __le32                          rsvd0[7];
 195};
 196
 197struct msgbuf_flowring_create_resp {
 198        struct msgbuf_common_hdr        msg;
 199        struct msgbuf_completion_hdr    compl_hdr;
 200        __le32                          rsvd0[3];
 201};
 202
 203struct msgbuf_flowring_delete_resp {
 204        struct msgbuf_common_hdr        msg;
 205        struct msgbuf_completion_hdr    compl_hdr;
 206        __le32                          rsvd0[3];
 207};
 208
 209struct msgbuf_flowring_flush_resp {
 210        struct msgbuf_common_hdr        msg;
 211        struct msgbuf_completion_hdr    compl_hdr;
 212        __le32                          rsvd0[3];
 213};
 214
 215struct brcmf_msgbuf_work_item {
 216        struct list_head queue;
 217        u32 flowid;
 218        int ifidx;
 219        u8 sa[ETH_ALEN];
 220        u8 da[ETH_ALEN];
 221};
 222
 223struct brcmf_msgbuf {
 224        struct brcmf_pub *drvr;
 225
 226        struct brcmf_commonring **commonrings;
 227        struct brcmf_commonring **flowrings;
 228        dma_addr_t *flowring_dma_handle;
 229        u16 nrof_flowrings;
 230
 231        u16 rx_dataoffset;
 232        u32 max_rxbufpost;
 233        u16 rx_metadata_offset;
 234        u32 rxbufpost;
 235
 236        u32 max_ioctlrespbuf;
 237        u32 cur_ioctlrespbuf;
 238        u32 max_eventbuf;
 239        u32 cur_eventbuf;
 240
 241        void *ioctbuf;
 242        dma_addr_t ioctbuf_handle;
 243        u32 ioctbuf_phys_hi;
 244        u32 ioctbuf_phys_lo;
 245        int ioctl_resp_status;
 246        u32 ioctl_resp_ret_len;
 247        u32 ioctl_resp_pktid;
 248
 249        u16 data_seq_no;
 250        u16 ioctl_seq_no;
 251        u32 reqid;
 252        wait_queue_head_t ioctl_resp_wait;
 253        bool ctl_completed;
 254
 255        struct brcmf_msgbuf_pktids *tx_pktids;
 256        struct brcmf_msgbuf_pktids *rx_pktids;
 257        struct brcmf_flowring *flow;
 258
 259        struct workqueue_struct *txflow_wq;
 260        struct work_struct txflow_work;
 261        unsigned long *flow_map;
 262        unsigned long *txstatus_done_map;
 263
 264        struct work_struct flowring_work;
 265        spinlock_t flowring_work_lock;
 266        struct list_head work_queue;
 267};
 268
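/*
 * Packet ID bookkeeping: every skb handed to the device gets a slot in a
 * fixed-size array; the slot index is sent to firmware as request_id and
 * used to look the skb and its DMA mapping back up on completion. Slots
 * are claimed lock-free with an atomic compare-and-exchange.
 */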
 269struct brcmf_msgbuf_pktid {
 270        atomic_t  allocated;
 271        u16 data_offset;
 272        struct sk_buff *skb;
 273        dma_addr_t physaddr;
 274};
 275
 276struct brcmf_msgbuf_pktids {
 277        u32 array_size;
 278        u32 last_allocated_idx;
 279        enum dma_data_direction direction;
 280        struct brcmf_msgbuf_pktid *array;
 281};
 282
 283static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
 284
 285
 286static struct brcmf_msgbuf_pktids *
 287brcmf_msgbuf_init_pktids(u32 nr_array_entries,
 288                         enum dma_data_direction direction)
 289{
 290        struct brcmf_msgbuf_pktid *array;
 291        struct brcmf_msgbuf_pktids *pktids;
 292
 293        array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
 294        if (!array)
 295                return NULL;
 296
 297        pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
 298        if (!pktids) {
 299                kfree(array);
 300                return NULL;
 301        }
 302        pktids->array = array;
 303        pktids->array_size = nr_array_entries;
 304
 305        return pktids;
 306}
 307
 308
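/*
 * Map the skb for DMA and claim a free packet ID, scanning round-robin from
 * the slot after the last allocation. Returns -ENOMEM when the mapping
 * fails or all IDs are in use.
 */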
 309static int
 310brcmf_msgbuf_alloc_pktid(struct device *dev,
 311                         struct brcmf_msgbuf_pktids *pktids,
 312                         struct sk_buff *skb, u16 data_offset,
 313                         dma_addr_t *physaddr, u32 *idx)
 314{
 315        struct brcmf_msgbuf_pktid *array;
 316        u32 count;
 317
 318        array = pktids->array;
 319
 320        *physaddr = dma_map_single(dev, skb->data + data_offset,
 321                                   skb->len - data_offset, pktids->direction);
 322
 323        if (dma_mapping_error(dev, *physaddr)) {
 324                brcmf_err("dma_map_single failed !!\n");
 325                return -ENOMEM;
 326        }
 327
 328        *idx = pktids->last_allocated_idx;
 329
 330        count = 0;
 331        do {
 332                (*idx)++;
 333                if (*idx == pktids->array_size)
 334                        *idx = 0;
 335                if (array[*idx].allocated.counter == 0)
 336                        if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
 337                                break;
 338                count++;
 339        } while (count < pktids->array_size);
 340
        if (count == pktids->array_size) {
                /* No free packet ID slot was found; undo the DMA mapping
                 * taken out above so it is not leaked.
                 */
                dma_unmap_single(dev, *physaddr, skb->len - data_offset,
                                 pktids->direction);
                return -ENOMEM;
        }
 343
 344        array[*idx].data_offset = data_offset;
 345        array[*idx].physaddr = *physaddr;
 346        array[*idx].skb = skb;
 347
 348        pktids->last_allocated_idx = *idx;
 349
 350        return 0;
 351}
 352
 353
 354static struct sk_buff *
 355brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
 356                       u32 idx)
 357{
 358        struct brcmf_msgbuf_pktid *pktid;
 359        struct sk_buff *skb;
 360
 361        if (idx >= pktids->array_size) {
 362                brcmf_err("Invalid packet id %d (max %d)\n", idx,
 363                          pktids->array_size);
 364                return NULL;
 365        }
 366        if (pktids->array[idx].allocated.counter) {
 367                pktid = &pktids->array[idx];
 368                dma_unmap_single(dev, pktid->physaddr,
 369                                 pktid->skb->len - pktid->data_offset,
 370                                 pktids->direction);
 371                skb = pktid->skb;
 372                pktid->allocated.counter = 0;
 373                return skb;
 374        } else {
 375                brcmf_err("Invalid packet id %d (not in use)\n", idx);
 376        }
 377
 378        return NULL;
 379}
 380
 381
 382static void
 383brcmf_msgbuf_release_array(struct device *dev,
 384                           struct brcmf_msgbuf_pktids *pktids)
 385{
 386        struct brcmf_msgbuf_pktid *array;
 387        struct brcmf_msgbuf_pktid *pktid;
 388        u32 count;
 389
 390        array = pktids->array;
 391        count = 0;
 392        do {
 393                if (array[count].allocated.counter) {
 394                        pktid = &array[count];
 395                        dma_unmap_single(dev, pktid->physaddr,
 396                                         pktid->skb->len - pktid->data_offset,
 397                                         pktids->direction);
 398                        brcmu_pkt_buf_free_skb(pktid->skb);
 399                }
 400                count++;
 401        } while (count < pktids->array_size);
 402
 403        kfree(array);
 404        kfree(pktids);
 405}
 406
 407
 408static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
 409{
 410        if (msgbuf->rx_pktids)
 411                brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
 412                                           msgbuf->rx_pktids);
 413        if (msgbuf->tx_pktids)
 414                brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
 415                                           msgbuf->tx_pktids);
 416}
 417
 418
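/*
 * Submit an ioctl request to firmware: reserve one slot on the H2D control
 * submit ring, copy (at most BRCMF_TX_IOCTL_MAX_MSG_SIZE bytes of) the
 * payload into the pre-mapped ioctl buffer and publish the descriptor.
 * The result arrives asynchronously as MSGBUF_TYPE_IOCTL_CMPLT.
 */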
 419static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
 420                                 uint cmd, void *buf, uint len)
 421{
 422        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 423        struct brcmf_commonring *commonring;
 424        struct msgbuf_ioctl_req_hdr *request;
 425        u16 buf_len;
 426        void *ret_ptr;
 427        int err;
 428
 429        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
 430        brcmf_commonring_lock(commonring);
 431        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 432        if (!ret_ptr) {
 433                brcmf_err("Failed to reserve space in commonring\n");
 434                brcmf_commonring_unlock(commonring);
 435                return -ENOMEM;
 436        }
 437
 438        msgbuf->reqid++;
 439
 440        request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
 441        request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
 442        request->msg.ifidx = (u8)ifidx;
 443        request->msg.flags = 0;
 444        request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
 445        request->cmd = cpu_to_le32(cmd);
 446        request->output_buf_len = cpu_to_le16(len);
 447        request->trans_id = cpu_to_le16(msgbuf->reqid);
 448
 449        buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
 450        request->input_buf_len = cpu_to_le16(buf_len);
 451        request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
 452        request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
 453        if (buf)
 454                memcpy(msgbuf->ioctbuf, buf, buf_len);
 455        else
 456                memset(msgbuf->ioctbuf, 0, buf_len);
 457
 458        err = brcmf_commonring_write_complete(commonring);
 459        brcmf_commonring_unlock(commonring);
 460
 461        return err;
 462}
 463
 464
 465static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
 466{
 467        return wait_event_timeout(msgbuf->ioctl_resp_wait,
 468                                  msgbuf->ctl_completed,
 469                                  msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT));
 470}
 471
 472
 473static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
 474{
 475        msgbuf->ctl_completed = true;
 476        if (waitqueue_active(&msgbuf->ioctl_resp_wait))
 477                wake_up(&msgbuf->ioctl_resp_wait);
 478}
 479
 480
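/*
 * Issue an ioctl and wait up to MSGBUF_IOCTL_RESP_TIMEOUT ms for the
 * completion; the response payload, if any, lives in one of the posted
 * ioctl response buffers and is copied back into the caller's buffer.
 */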
 481static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
 482                                   uint cmd, void *buf, uint len)
 483{
 484        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 485        struct sk_buff *skb = NULL;
 486        int timeout;
 487        int err;
 488
 489        brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
 490        msgbuf->ctl_completed = false;
 491        err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
 492        if (err)
 493                return err;
 494
 495        timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
 496        if (!timeout) {
 497                brcmf_err("Timeout on response for query command\n");
 498                return -EIO;
 499        }
 500
 501        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 502                                     msgbuf->rx_pktids,
 503                                     msgbuf->ioctl_resp_pktid);
 504        if (msgbuf->ioctl_resp_ret_len != 0) {
 505                if (!skb)
 506                        return -EBADF;
 507
 508                memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
 509                                       len : msgbuf->ioctl_resp_ret_len);
 510        }
 511        brcmu_pkt_buf_free_skb(skb);
 512
 513        return msgbuf->ioctl_resp_status;
 514}
 515
 516
 517static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
 518                                 uint cmd, void *buf, uint len)
 519{
 520        return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
 521}
 522
 523
 524static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
 525                                struct sk_buff *skb, struct brcmf_if **ifp)
 526{
 527        return -ENODEV;
 528}
 529
 530
 531static void
 532brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
 533{
 534        u32 dma_sz;
 535        void *dma_buf;
 536
 537        brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);
 538
 539        dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
 540        dma_buf = msgbuf->flowrings[flowid]->buf_addr;
 541        dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
 542                          msgbuf->flowring_dma_handle[flowid]);
 543
 544        brcmf_flowring_delete(msgbuf->flow, flowid);
 545}
 546
 547
 548static struct brcmf_msgbuf_work_item *
 549brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
 550{
 551        struct brcmf_msgbuf_work_item *work = NULL;
 552        ulong flags;
 553
 554        spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
 555        if (!list_empty(&msgbuf->work_queue)) {
 556                work = list_first_entry(&msgbuf->work_queue,
 557                                        struct brcmf_msgbuf_work_item, queue);
 558                list_del(&work->queue);
 559        }
 560        spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
 561
 562        return work;
 563}
 564
 565
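/*
 * Worker half of flowring creation: allocate the ring's DMA memory,
 * configure the commonring and send MSGBUF_TYPE_FLOW_RING_CREATE over the
 * control submit ring. Ring IDs on the wire are offset by
 * BRCMF_NROF_H2D_COMMON_MSGRINGS.
 */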
 566static u32
 567brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
 568                                    struct brcmf_msgbuf_work_item *work)
 569{
 570        struct msgbuf_tx_flowring_create_req *create;
 571        struct brcmf_commonring *commonring;
 572        void *ret_ptr;
 573        u32 flowid;
 574        void *dma_buf;
 575        u32 dma_sz;
 576        u64 address;
 577        int err;
 578
 579        flowid = work->flowid;
 580        dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
 581        dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
 582                                     &msgbuf->flowring_dma_handle[flowid],
 583                                     GFP_KERNEL);
 584        if (!dma_buf) {
 585                brcmf_err("dma_alloc_coherent failed\n");
 586                brcmf_flowring_delete(msgbuf->flow, flowid);
 587                return BRCMF_FLOWRING_INVALID_ID;
 588        }
 589
 590        brcmf_commonring_config(msgbuf->flowrings[flowid],
 591                                BRCMF_H2D_TXFLOWRING_MAX_ITEM,
 592                                BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);
 593
 594        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
 595        brcmf_commonring_lock(commonring);
 596        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 597        if (!ret_ptr) {
 598                brcmf_err("Failed to reserve space in commonring\n");
 599                brcmf_commonring_unlock(commonring);
 600                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
 601                return BRCMF_FLOWRING_INVALID_ID;
 602        }
 603
 604        create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
 605        create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
 606        create->msg.ifidx = work->ifidx;
 607        create->msg.request_id = 0;
 608        create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
 609        create->flow_ring_id = cpu_to_le16(flowid +
 610                                           BRCMF_NROF_H2D_COMMON_MSGRINGS);
 611        memcpy(create->sa, work->sa, ETH_ALEN);
 612        memcpy(create->da, work->da, ETH_ALEN);
 613        address = (u64)msgbuf->flowring_dma_handle[flowid];
 614        create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
 615        create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
 616        create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
 617        create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);
 618
 619        brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
 620                  flowid, work->da, create->tid, work->ifidx);
 621
 622        err = brcmf_commonring_write_complete(commonring);
 623        brcmf_commonring_unlock(commonring);
 624        if (err) {
 625                brcmf_err("Failed to write commonring\n");
 626                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
 627                return BRCMF_FLOWRING_INVALID_ID;
 628        }
 629
 630        return flowid;
 631}
 632
 633
 634static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
 635{
 636        struct brcmf_msgbuf *msgbuf;
 637        struct brcmf_msgbuf_work_item *create;
 638
 639        msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);
 640
 641        while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
 642                brcmf_msgbuf_flowring_create_worker(msgbuf, create);
 643                kfree(create);
 644        }
 645}
 646
 647
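/*
 * Called from the data path, possibly in atomic context, so only reserve
 * the flowring and queue a work item; the DMA allocation and the firmware
 * request are handled later by brcmf_msgbuf_flowring_worker().
 */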
 648static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
 649                                        struct sk_buff *skb)
 650{
 651        struct brcmf_msgbuf_work_item *create;
 652        struct ethhdr *eh = (struct ethhdr *)(skb->data);
 653        u32 flowid;
 654        ulong flags;
 655
 656        create = kzalloc(sizeof(*create), GFP_ATOMIC);
 657        if (create == NULL)
 658                return BRCMF_FLOWRING_INVALID_ID;
 659
 660        flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
 661                                       skb->priority, ifidx);
 662        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
 663                kfree(create);
 664                return flowid;
 665        }
 666
 667        create->flowid = flowid;
 668        create->ifidx = ifidx;
 669        memcpy(create->sa, eh->h_source, ETH_ALEN);
 670        memcpy(create->da, eh->h_dest, ETH_ALEN);
 671
 672        spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
 673        list_add_tail(&create->queue, &msgbuf->work_queue);
 674        spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
 675        schedule_work(&msgbuf->flowring_work);
 676
 677        return flowid;
 678}
 679
 680
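/*
 * Drain a flowring's queue into its TX ring: each skb is mapped, given a
 * packet ID and described by a MSGBUF_TYPE_TX_POST entry. The doorbell is
 * rung after the first BRCMF_MSGBUF_TX_FLUSH_CNT1 packets, then every
 * BRCMF_MSGBUF_TX_FLUSH_CNT2 packets, plus once more for any remainder.
 */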
 681static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
 682{
 683        struct brcmf_flowring *flow = msgbuf->flow;
 684        struct brcmf_commonring *commonring;
 685        void *ret_ptr;
 686        u32 count;
 687        struct sk_buff *skb;
 688        dma_addr_t physaddr;
 689        u32 pktid;
 690        struct msgbuf_tx_msghdr *tx_msghdr;
 691        u64 address;
 692
 693        commonring = msgbuf->flowrings[flowid];
 694        if (!brcmf_commonring_write_available(commonring))
 695                return;
 696
 697        brcmf_commonring_lock(commonring);
 698
 699        count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
 700        while (brcmf_flowring_qlen(flow, flowid)) {
 701                skb = brcmf_flowring_dequeue(flow, flowid);
 702                if (skb == NULL) {
 703                        brcmf_err("No SKB, but qlen %d\n",
 704                                  brcmf_flowring_qlen(flow, flowid));
 705                        break;
 706                }
 707                skb_orphan(skb);
 708                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
 709                                             msgbuf->tx_pktids, skb, ETH_HLEN,
 710                                             &physaddr, &pktid)) {
 711                        brcmf_flowring_reinsert(flow, flowid, skb);
 712                        brcmf_err("No PKTID available !!\n");
 713                        break;
 714                }
 715                ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 716                if (!ret_ptr) {
 717                        brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 718                                               msgbuf->tx_pktids, pktid);
 719                        brcmf_flowring_reinsert(flow, flowid, skb);
 720                        break;
 721                }
 722                count++;
 723
 724                tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
 725
 726                tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
 727                tx_msghdr->msg.request_id = cpu_to_le32(pktid);
 728                tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
 729                tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
 730                tx_msghdr->flags |= (skb->priority & 0x07) <<
 731                                    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
 732                tx_msghdr->seg_cnt = 1;
 733                memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
 734                tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
 735                address = (u64)physaddr;
 736                tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
 737                tx_msghdr->data_buf_addr.low_addr =
 738                        cpu_to_le32(address & 0xffffffff);
 739                tx_msghdr->metadata_buf_len = 0;
 740                tx_msghdr->metadata_buf_addr.high_addr = 0;
 741                tx_msghdr->metadata_buf_addr.low_addr = 0;
 742                atomic_inc(&commonring->outstanding_tx);
 743                if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
 744                        brcmf_commonring_write_complete(commonring);
 745                        count = 0;
 746                }
 747        }
 748        if (count)
 749                brcmf_commonring_write_complete(commonring);
 750        brcmf_commonring_unlock(commonring);
 751}
 752
 753
 754static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
 755{
 756        struct brcmf_msgbuf *msgbuf;
 757        u32 flowid;
 758
 759        msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
 760        for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) {
 761                clear_bit(flowid, msgbuf->flow_map);
 762                brcmf_msgbuf_txflow(msgbuf, flowid);
 763        }
 764}
 765
 766
 767static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
 768                                        bool force)
 769{
 770        struct brcmf_commonring *commonring;
 771
 772        set_bit(flowid, msgbuf->flow_map);
 773        commonring = msgbuf->flowrings[flowid];
 774        if ((force) || (atomic_read(&commonring->outstanding_tx) <
 775                        BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
 776                queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
 777
 778        return 0;
 779}
 780
 781
 782static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
 783                               u8 offset, struct sk_buff *skb)
 784{
 785        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 786        struct brcmf_flowring *flow = msgbuf->flow;
 787        struct ethhdr *eh = (struct ethhdr *)(skb->data);
 788        u32 flowid;
 789        u32 queue_count;
 790        bool force;
 791
 792        flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
 793        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
 794                flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
 795                if (flowid == BRCMF_FLOWRING_INVALID_ID)
 796                        return -ENOMEM;
 797        }
 798        queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
 799        force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
 800        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
 801
 802        return 0;
 803}
 804
 805
 806static void
 807brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
 808                                 enum proto_addr_mode addr_mode)
 809{
 810        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 811
 812        brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
 813}
 814
 815
 816static void
 817brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
 818{
 819        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 820
 821        brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
 822}
 823
 824
 825static void
 826brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
 827{
 828        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 829
 830        brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
 831}
 832
 833
 834static void
 835brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 836{
 837        struct msgbuf_ioctl_resp_hdr *ioctl_resp;
 838
 839        ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;
 840
 841        msgbuf->ioctl_resp_status =
 842                        (s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
 843        msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
 844        msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);
 845
 846        brcmf_msgbuf_ioctl_resp_wake(msgbuf);
 847
 848        if (msgbuf->cur_ioctlrespbuf)
 849                msgbuf->cur_ioctlrespbuf--;
 850        brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
 851}
 852
 853
 854static void
 855brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
 856{
 857        struct brcmf_commonring *commonring;
 858        struct msgbuf_tx_status *tx_status;
 859        u32 idx;
 860        struct sk_buff *skb;
 861        u16 flowid;
 862
 863        tx_status = (struct msgbuf_tx_status *)buf;
 864        idx = le32_to_cpu(tx_status->msg.request_id);
 865        flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
 866        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
 867        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 868                                     msgbuf->tx_pktids, idx);
 869        if (!skb)
 870                return;
 871
 872        set_bit(flowid, msgbuf->txstatus_done_map);
 873        commonring = msgbuf->flowrings[flowid];
 874        atomic_dec(&commonring->outstanding_tx);
 875
 876        brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
 877                         skb, true);
 878}
 879
 880
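/*
 * Post up to 'count' receive buffers on the H2D RX post ring. Each buffer
 * is a freshly allocated skb of BRCMF_MSGBUF_MAX_PKT_SIZE bytes, mapped for
 * DMA and identified by a packet ID. Returns the number actually posted.
 */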
 881static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
 882{
 883        struct brcmf_commonring *commonring;
 884        void *ret_ptr;
 885        struct sk_buff *skb;
 886        u16 alloced;
 887        u32 pktlen;
 888        dma_addr_t physaddr;
 889        struct msgbuf_rx_bufpost *rx_bufpost;
 890        u64 address;
 891        u32 pktid;
 892        u32 i;
 893
 894        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
 895        ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
 896                                                              count,
 897                                                              &alloced);
 898        if (!ret_ptr) {
 899                brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
 900                return 0;
 901        }
 902
 903        for (i = 0; i < alloced; i++) {
 904                rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
 905                memset(rx_bufpost, 0, sizeof(*rx_bufpost));
 906
 907                skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
 908
 909                if (skb == NULL) {
 910                        brcmf_err("Failed to alloc SKB\n");
 911                        brcmf_commonring_write_cancel(commonring, alloced - i);
 912                        break;
 913                }
 914
 915                pktlen = skb->len;
 916                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
 917                                             msgbuf->rx_pktids, skb, 0,
 918                                             &physaddr, &pktid)) {
 919                        dev_kfree_skb_any(skb);
 920                        brcmf_err("No PKTID available !!\n");
 921                        brcmf_commonring_write_cancel(commonring, alloced - i);
 922                        break;
 923                }
 924
 925                if (msgbuf->rx_metadata_offset) {
 926                        address = (u64)physaddr;
 927                        rx_bufpost->metadata_buf_len =
 928                                cpu_to_le16(msgbuf->rx_metadata_offset);
 929                        rx_bufpost->metadata_buf_addr.high_addr =
 930                                cpu_to_le32(address >> 32);
 931                        rx_bufpost->metadata_buf_addr.low_addr =
 932                                cpu_to_le32(address & 0xffffffff);
 933
 934                        skb_pull(skb, msgbuf->rx_metadata_offset);
 935                        pktlen = skb->len;
 936                        physaddr += msgbuf->rx_metadata_offset;
 937                }
 938                rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
 939                rx_bufpost->msg.request_id = cpu_to_le32(pktid);
 940
 941                address = (u64)physaddr;
 942                rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
 943                rx_bufpost->data_buf_addr.high_addr =
 944                        cpu_to_le32(address >> 32);
 945                rx_bufpost->data_buf_addr.low_addr =
 946                        cpu_to_le32(address & 0xffffffff);
 947
 948                ret_ptr += brcmf_commonring_len_item(commonring);
 949        }
 950
 951        if (i)
 952                brcmf_commonring_write_complete(commonring);
 953
 954        return i;
 955}
 956
 957
 958static void
 959brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
 960{
 961        u32 fillbufs;
 962        u32 retcount;
 963
 964        fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;
 965
 966        while (fillbufs) {
 967                retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
 968                if (!retcount)
 969                        break;
 970                msgbuf->rxbufpost += retcount;
 971                fillbufs -= retcount;
 972        }
 973}
 974
 975
 976static void
 977brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
 978{
 979        msgbuf->rxbufpost -= rxcnt;
 980        if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
 981                                  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
 982                brcmf_msgbuf_rxbuf_data_fill(msgbuf);
 983}
 984
 985
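/*
 * Same as the data buffer post above, but the buffers are posted on the
 * control submit ring for firmware events (event_buf) or ioctl responses.
 */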
 986static u32
 987brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
 988                             u32 count)
 989{
 990        struct brcmf_commonring *commonring;
 991        void *ret_ptr;
 992        struct sk_buff *skb;
 993        u16 alloced;
 994        u32 pktlen;
 995        dma_addr_t physaddr;
 996        struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
 997        u64 address;
 998        u32 pktid;
 999        u32 i;
1000
1001        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1002        brcmf_commonring_lock(commonring);
1003        ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
1004                                                              count,
1005                                                              &alloced);
1006        if (!ret_ptr) {
1007                brcmf_err("Failed to reserve space in commonring\n");
1008                brcmf_commonring_unlock(commonring);
1009                return 0;
1010        }
1011
1012        for (i = 0; i < alloced; i++) {
1013                rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
1014                memset(rx_bufpost, 0, sizeof(*rx_bufpost));
1015
1016                skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
1017
1018                if (skb == NULL) {
1019                        brcmf_err("Failed to alloc SKB\n");
1020                        brcmf_commonring_write_cancel(commonring, alloced - i);
1021                        break;
1022                }
1023
1024                pktlen = skb->len;
1025                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
1026                                             msgbuf->rx_pktids, skb, 0,
1027                                             &physaddr, &pktid)) {
1028                        dev_kfree_skb_any(skb);
1029                        brcmf_err("No PKTID available !!\n");
1030                        brcmf_commonring_write_cancel(commonring, alloced - i);
1031                        break;
1032                }
1033                if (event_buf)
1034                        rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
1035                else
1036                        rx_bufpost->msg.msgtype =
1037                                MSGBUF_TYPE_IOCTLRESP_BUF_POST;
1038                rx_bufpost->msg.request_id = cpu_to_le32(pktid);
1039
1040                address = (u64)physaddr;
1041                rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
1042                rx_bufpost->host_buf_addr.high_addr =
1043                        cpu_to_le32(address >> 32);
1044                rx_bufpost->host_buf_addr.low_addr =
1045                        cpu_to_le32(address & 0xffffffff);
1046
1047                ret_ptr += brcmf_commonring_len_item(commonring);
1048        }
1049
1050        if (i)
1051                brcmf_commonring_write_complete(commonring);
1052
1053        brcmf_commonring_unlock(commonring);
1054
1055        return i;
1056}
1057
1058
1059static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
1060{
1061        u32 count;
1062
1063        count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
1064        count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
1065        msgbuf->cur_ioctlrespbuf += count;
1066}
1067
1068
1069static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
1070{
1071        u32 count;
1072
1073        count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
1074        count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
1075        msgbuf->cur_eventbuf += count;
1076}
1077
1078
1079static void
1080brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
1081                    u8 ifidx)
1082{
1083        struct brcmf_if *ifp;
1084
1085        ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
1086        if (!ifp || !ifp->ndev) {
1087                brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
1088                brcmu_pkt_buf_free_skb(skb);
1089                return;
1090        }
1091        brcmf_netif_rx(ifp, skb);
1092}
1093
1094
1095static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
1096{
1097        struct msgbuf_rx_event *event;
1098        u32 idx;
1099        u16 buflen;
1100        struct sk_buff *skb;
1101
1102        event = (struct msgbuf_rx_event *)buf;
1103        idx = le32_to_cpu(event->msg.request_id);
1104        buflen = le16_to_cpu(event->event_data_len);
1105
1106        if (msgbuf->cur_eventbuf)
1107                msgbuf->cur_eventbuf--;
1108        brcmf_msgbuf_rxbuf_event_post(msgbuf);
1109
1110        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1111                                     msgbuf->rx_pktids, idx);
1112        if (!skb)
1113                return;
1114
1115        if (msgbuf->rx_dataoffset)
1116                skb_pull(skb, msgbuf->rx_dataoffset);
1117
1118        skb_trim(skb, buflen);
1119
1120        brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
1121}
1122
1123
1124static void
1125brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
1126{
1127        struct msgbuf_rx_complete *rx_complete;
1128        struct sk_buff *skb;
1129        u16 data_offset;
1130        u16 buflen;
1131        u32 idx;
1132
1133        brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
1134
1135        rx_complete = (struct msgbuf_rx_complete *)buf;
1136        data_offset = le16_to_cpu(rx_complete->data_offset);
1137        buflen = le16_to_cpu(rx_complete->data_len);
1138        idx = le32_to_cpu(rx_complete->msg.request_id);
1139
1140        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1141                                     msgbuf->rx_pktids, idx);
1142        if (!skb)
1143                return;
1144
1145        if (data_offset)
1146                skb_pull(skb, data_offset);
1147        else if (msgbuf->rx_dataoffset)
1148                skb_pull(skb, msgbuf->rx_dataoffset);
1149
1150        skb_trim(skb, buflen);
1151
1152        brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
1153}
1154
1155
1156static void
1157brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
1158                                               void *buf)
1159{
1160        struct msgbuf_flowring_create_resp *flowring_create_resp;
1161        u16 status;
1162        u16 flowid;
1163
1164        flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
1165
1166        flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
1167        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
1168        status =  le16_to_cpu(flowring_create_resp->compl_hdr.status);
1169
1170        if (status) {
1171                brcmf_err("Flowring creation failed, code %d\n", status);
1172                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1173                return;
1174        }
1175        brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
1176                  status);
1177
1178        brcmf_flowring_open(msgbuf->flow, flowid);
1179
1180        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1181}
1182
1183
1184static void
1185brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
1186                                               void *buf)
1187{
1188        struct msgbuf_flowring_delete_resp *flowring_delete_resp;
1189        u16 status;
1190        u16 flowid;
1191
1192        flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
1193
1194        flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
1195        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
1196        status =  le16_to_cpu(flowring_delete_resp->compl_hdr.status);
1197
1198        if (status) {
1199                brcmf_err("Flowring deletion failed, code %d\n", status);
1200                brcmf_flowring_delete(msgbuf->flow, flowid);
1201                return;
1202        }
1203        brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
1204                  status);
1205
1206        brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1207}
1208
1209
1210static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
1211{
1212        struct msgbuf_common_hdr *msg;
1213
1214        msg = (struct msgbuf_common_hdr *)buf;
1215        switch (msg->msgtype) {
1216        case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1217                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
1218                brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
1219                break;
1220        case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1221                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
1222                brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
1223                break;
1224        case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1225                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
1226                break;
1227        case MSGBUF_TYPE_IOCTL_CMPLT:
1228                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
1229                brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
1230                break;
1231        case MSGBUF_TYPE_WL_EVENT:
1232                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
1233                brcmf_msgbuf_process_event(msgbuf, buf);
1234                break;
1235        case MSGBUF_TYPE_TX_STATUS:
1236                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
1237                brcmf_msgbuf_process_txstatus(msgbuf, buf);
1238                break;
1239        case MSGBUF_TYPE_RX_CMPLT:
1240                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
1241                brcmf_msgbuf_process_rx_complete(msgbuf, buf);
1242                break;
1243        default:
1244                brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
1245                break;
1246        }
1247}
1248
1249
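/*
 * Drain one D2H completion ring: dispatch every item by message type and
 * acknowledge the read pointer every BRCMF_MSGBUF_UPDATE_RX_PTR_THRS items.
 * When the ring has wrapped (read pointer back at 0), check once more for
 * newly arrived items.
 */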
1250static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
1251                                    struct brcmf_commonring *commonring)
1252{
1253        void *buf;
1254        u16 count;
1255        u16 processed;
1256
1257again:
1258        buf = brcmf_commonring_get_read_ptr(commonring, &count);
1259        if (buf == NULL)
1260                return;
1261
1262        processed = 0;
1263        while (count) {
1264                brcmf_msgbuf_process_msgtype(msgbuf,
1265                                             buf + msgbuf->rx_dataoffset);
1266                buf += brcmf_commonring_len_item(commonring);
1267                processed++;
1268                if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
1269                        brcmf_commonring_read_complete(commonring, processed);
1270                        processed = 0;
1271                }
1272                count--;
1273        }
1274        if (processed)
1275                brcmf_commonring_read_complete(commonring, processed);
1276
1277        if (commonring->r_ptr == 0)
1278                goto again;
1279}
1280
1281
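/*
 * Entry point for the bus driver (typically PCIe) on device-to-host
 * interrupts: process all D2H completion rings, then reschedule the TX
 * worker for flowrings that just received TX status.
 */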
1282int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
1283{
1284        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1285        struct brcmf_pub *drvr = bus_if->drvr;
1286        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1287        struct brcmf_commonring *commonring;
1288        void *buf;
1289        u32 flowid;
1290        int qlen;
1291
1292        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1293        brcmf_msgbuf_process_rx(msgbuf, buf);
1294        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
1295        brcmf_msgbuf_process_rx(msgbuf, buf);
1296        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
1297        brcmf_msgbuf_process_rx(msgbuf, buf);
1298
1299        for_each_set_bit(flowid, msgbuf->txstatus_done_map,
1300                         msgbuf->nrof_flowrings) {
1301                clear_bit(flowid, msgbuf->txstatus_done_map);
1302                commonring = msgbuf->flowrings[flowid];
1303                qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
1304                if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
1305                    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
1306                                BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
1307                        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1308        }
1309
1310        return 0;
1311}
1312
1313
1314void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
1315{
1316        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1317        struct msgbuf_tx_flowring_delete_req *delete;
1318        struct brcmf_commonring *commonring;
1319        void *ret_ptr;
1320        u8 ifidx;
1321        int err;
1322
1323        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1324        brcmf_commonring_lock(commonring);
1325        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
1326        if (!ret_ptr) {
1327                brcmf_err("FW unaware, flowring will be removed !!\n");
1328                brcmf_commonring_unlock(commonring);
1329                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1330                return;
1331        }
1332
1333        delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;
1334
1335        ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);
1336
1337        delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1338        delete->msg.ifidx = ifidx;
1339        delete->msg.request_id = 0;
1340
1341        delete->flow_ring_id = cpu_to_le16(flowid +
1342                                           BRCMF_NROF_H2D_COMMON_MSGRINGS);
1343        delete->reason = 0;
1344
1345        brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
1346                  flowid, ifidx);
1347
1348        err = brcmf_commonring_write_complete(commonring);
1349        brcmf_commonring_unlock(commonring);
1350        if (err) {
1351                brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
1352                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1353        }
1354}
1355
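/*
 * debugfs "msgbuf_stats": dump read/write pointers and depth of the common
 * rings plus the state of all open flowrings (DEBUG builds only).
 */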
1356#ifdef DEBUG
1357static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
1358{
1359        struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
1360        struct brcmf_pub *drvr = bus_if->drvr;
1361        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1362        struct brcmf_commonring *commonring;
1363        u16 i;
1364        struct brcmf_flowring_ring *ring;
1365        struct brcmf_flowring_hash *hash;
1366
1367        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1368        seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
1369                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1370        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
1371        seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
1372                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1373        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
1374        seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
1375                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1376        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
1377        seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
1378                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1379        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1380        seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
1381                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1382
1383        seq_printf(seq, "\nh2d_flowrings: depth %u\n",
1384                   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
1385        seq_puts(seq, "Active flowrings:\n");
1386        hash = msgbuf->flow->hash;
1387        for (i = 0; i < msgbuf->flow->nrofrings; i++) {
1388                if (!msgbuf->flow->rings[i])
1389                        continue;
1390                ring = msgbuf->flow->rings[i];
1391                if (ring->status != RING_OPEN)
1392                        continue;
1393                commonring = msgbuf->flowrings[i];
1394                hash = &msgbuf->flow->hash[ring->hash_id];
1395                seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
1396                                "        ifidx %u, fifo %u, da %pM\n",
1397                                i, commonring->r_ptr, commonring->w_ptr,
1398                                skb_queue_len(&ring->skblist), ring->blocked,
1399                                hash->ifidx, hash->fifo, hash->mac);
1400        }
1401
1402        return 0;
1403}
1404#else
1405static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
1406{
1407        return 0;
1408}
1409#endif
1410
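/*
 * Attach the msgbuf protocol layer: allocate per-instance state and the
 * DMA-coherent ioctl buffer, hook up the brcmf_proto callbacks, pre-post
 * the RX data, event and ioctl response buffers and register the debugfs
 * entry.
 */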
1411int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
1412{
1413        struct brcmf_bus_msgbuf *if_msgbuf;
1414        struct brcmf_msgbuf *msgbuf;
1415        u64 address;
1416        u32 count;
1417
1418        if_msgbuf = drvr->bus_if->msgbuf;
1419        msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
1420        if (!msgbuf)
1421                goto fail;
1422
1423        msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
1424        if (msgbuf->txflow_wq == NULL) {
1425                brcmf_err("workqueue creation failed\n");
1426                goto fail;
1427        }
1428        INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
1429        count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
1430        count = count * sizeof(unsigned long);
1431        msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
1432        if (!msgbuf->flow_map)
1433                goto fail;
1434
1435        msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
1436        if (!msgbuf->txstatus_done_map)
1437                goto fail;
1438
1439        msgbuf->drvr = drvr;
1440        msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
1441                                             BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1442                                             &msgbuf->ioctbuf_handle,
1443                                             GFP_KERNEL);
1444        if (!msgbuf->ioctbuf)
1445                goto fail;
1446        address = (u64)msgbuf->ioctbuf_handle;
1447        msgbuf->ioctbuf_phys_hi = address >> 32;
1448        msgbuf->ioctbuf_phys_lo = address & 0xffffffff;
1449
1450        drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
1451        drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
1452        drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
1453        drvr->proto->txdata = brcmf_msgbuf_txdata;
1454        drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
1455        drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
1456        drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
1457        drvr->proto->pd = msgbuf;
1458
1459        init_waitqueue_head(&msgbuf->ioctl_resp_wait);
1460
1461        msgbuf->commonrings =
1462                (struct brcmf_commonring **)if_msgbuf->commonrings;
1463        msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
1464        msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
1465        msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
1466                sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
1467        if (!msgbuf->flowring_dma_handle)
1468                goto fail;
1469
1470        msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
1471        msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;
1472
1473        msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
1474        msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;
1475
1476        msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
1477                                                     DMA_TO_DEVICE);
1478        if (!msgbuf->tx_pktids)
1479                goto fail;
1480        msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
1481                                                     DMA_FROM_DEVICE);
1482        if (!msgbuf->rx_pktids)
1483                goto fail;
1484
1485        msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
1486                                             if_msgbuf->nrof_flowrings);
1487        if (!msgbuf->flow)
1488                goto fail;
1489
1490
1491        brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
1492                  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
1493                  msgbuf->max_ioctlrespbuf);
1494        count = 0;
1495        do {
1496                brcmf_msgbuf_rxbuf_data_fill(msgbuf);
1497                if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
1498                        msleep(10);
1499                else
1500                        break;
1501                count++;
1502        } while (count < 10);
1503        brcmf_msgbuf_rxbuf_event_post(msgbuf);
1504        brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
1505
1506        INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
1507        spin_lock_init(&msgbuf->flowring_work_lock);
1508        INIT_LIST_HEAD(&msgbuf->work_queue);
1509
1510        brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
1511
1512        return 0;
1513
1514fail:
1515        if (msgbuf) {
1516                kfree(msgbuf->flow_map);
1517                kfree(msgbuf->txstatus_done_map);
1518                brcmf_msgbuf_release_pktids(msgbuf);
1519                kfree(msgbuf->flowring_dma_handle);
1520                if (msgbuf->ioctbuf)
1521                        dma_free_coherent(drvr->bus_if->dev,
1522                                          BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1523                                          msgbuf->ioctbuf,
1524                                          msgbuf->ioctbuf_handle);
                /* the txflow workqueue may already have been created above */
                if (msgbuf->txflow_wq)
                        destroy_workqueue(msgbuf->txflow_wq);
                kfree(msgbuf);
1526        }
1527        return -ENOMEM;
1528}
1529
1530
1531void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
1532{
1533        struct brcmf_msgbuf *msgbuf;
1534        struct brcmf_msgbuf_work_item *work;
1535
1536        brcmf_dbg(TRACE, "Enter\n");
1537        if (drvr->proto->pd) {
1538                msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1539                cancel_work_sync(&msgbuf->flowring_work);
1540                while (!list_empty(&msgbuf->work_queue)) {
1541                        work = list_first_entry(&msgbuf->work_queue,
1542                                                struct brcmf_msgbuf_work_item,
1543                                                queue);
1544                        list_del(&work->queue);
1545                        kfree(work);
1546                }
1547                kfree(msgbuf->flow_map);
1548                kfree(msgbuf->txstatus_done_map);
1549                if (msgbuf->txflow_wq)
1550                        destroy_workqueue(msgbuf->txflow_wq);
1551
1552                brcmf_flowring_detach(msgbuf->flow);
1553                dma_free_coherent(drvr->bus_if->dev,
1554                                  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1555                                  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
1556                brcmf_msgbuf_release_pktids(msgbuf);
1557                kfree(msgbuf->flowring_dma_handle);
1558                kfree(msgbuf);
1559                drvr->proto->pd = NULL;
1560        }
1561}
1562