linux/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
   1/* Copyright (c) 2014 Broadcom Corporation
   2 *
   3 * Permission to use, copy, modify, and/or distribute this software for any
   4 * purpose with or without fee is hereby granted, provided that the above
   5 * copyright notice and this permission notice appear in all copies.
   6 *
   7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14 */
  15
  16/*******************************************************************************
  17 * Communicates with the dongle by using dcmd codes.
  18 * For certain dcmd codes, the dongle interprets string data from the host.
  19 ******************************************************************************/
  20
  21#include <linux/types.h>
  22#include <linux/netdevice.h>
  23
  24#include <brcmu_utils.h>
  25#include <brcmu_wifi.h>
  26
  27#include "core.h"
  28#include "debug.h"
  29#include "proto.h"
  30#include "msgbuf.h"
  31#include "commonring.h"
  32#include "flowring.h"
  33#include "bus.h"
  34#include "tracepoint.h"
  35
  36
  37#define MSGBUF_IOCTL_RESP_TIMEOUT               2000
  38
  39#define MSGBUF_TYPE_GEN_STATUS                  0x1
  40#define MSGBUF_TYPE_RING_STATUS                 0x2
  41#define MSGBUF_TYPE_FLOW_RING_CREATE            0x3
  42#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT      0x4
  43#define MSGBUF_TYPE_FLOW_RING_DELETE            0x5
  44#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT      0x6
  45#define MSGBUF_TYPE_FLOW_RING_FLUSH             0x7
  46#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT       0x8
  47#define MSGBUF_TYPE_IOCTLPTR_REQ                0x9
  48#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK            0xA
  49#define MSGBUF_TYPE_IOCTLRESP_BUF_POST          0xB
  50#define MSGBUF_TYPE_IOCTL_CMPLT                 0xC
  51#define MSGBUF_TYPE_EVENT_BUF_POST              0xD
  52#define MSGBUF_TYPE_WL_EVENT                    0xE
  53#define MSGBUF_TYPE_TX_POST                     0xF
  54#define MSGBUF_TYPE_TX_STATUS                   0x10
  55#define MSGBUF_TYPE_RXBUF_POST                  0x11
  56#define MSGBUF_TYPE_RX_CMPLT                    0x12
  57#define MSGBUF_TYPE_LPBK_DMAXFER                0x13
  58#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT          0x14
  59
  60#define NR_TX_PKTIDS                            2048
  61#define NR_RX_PKTIDS                            1024
  62
  63#define BRCMF_IOCTL_REQ_PKTID                   0xFFFE
  64
  65#define BRCMF_MSGBUF_MAX_PKT_SIZE               2048
  66#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD        32
  67#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST      8
  68#define BRCMF_MSGBUF_MAX_EVENTBUF_POST          8
  69
  70#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3      0x01
  71#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT       5
  72
  73#define BRCMF_MSGBUF_TX_FLUSH_CNT1              32
  74#define BRCMF_MSGBUF_TX_FLUSH_CNT2              96
  75
  76#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS        96
  77#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS      32
  78#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS         48
  79
  80
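/* The structures below describe the messages exchanged with the dongle over
 * the DMA-able common rings. All multi-byte fields are little-endian
 * (__le16/__le32) as expected by the dongle firmware; host values must be
 * converted with cpu_to_le*() before being written into a ring slot and
 * with le*_to_cpu() when read back from a completion.
 */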
  81struct msgbuf_common_hdr {
  82        u8                              msgtype;
  83        u8                              ifidx;
  84        u8                              flags;
  85        u8                              rsvd0;
  86        __le32                          request_id;
  87};
  88
  89struct msgbuf_buf_addr {
  90        __le32                          low_addr;
  91        __le32                          high_addr;
  92};
  93
  94struct msgbuf_ioctl_req_hdr {
  95        struct msgbuf_common_hdr        msg;
  96        __le32                          cmd;
  97        __le16                          trans_id;
  98        __le16                          input_buf_len;
  99        __le16                          output_buf_len;
 100        __le16                          rsvd0[3];
 101        struct msgbuf_buf_addr          req_buf_addr;
 102        __le32                          rsvd1[2];
 103};
 104
 105struct msgbuf_tx_msghdr {
 106        struct msgbuf_common_hdr        msg;
 107        u8                              txhdr[ETH_HLEN];
 108        u8                              flags;
 109        u8                              seg_cnt;
 110        struct msgbuf_buf_addr          metadata_buf_addr;
 111        struct msgbuf_buf_addr          data_buf_addr;
 112        __le16                          metadata_buf_len;
 113        __le16                          data_len;
 114        __le32                          rsvd0;
 115};
 116
 117struct msgbuf_rx_bufpost {
 118        struct msgbuf_common_hdr        msg;
 119        __le16                          metadata_buf_len;
 120        __le16                          data_buf_len;
 121        __le32                          rsvd0;
 122        struct msgbuf_buf_addr          metadata_buf_addr;
 123        struct msgbuf_buf_addr          data_buf_addr;
 124};
 125
 126struct msgbuf_rx_ioctl_resp_or_event {
 127        struct msgbuf_common_hdr        msg;
 128        __le16                          host_buf_len;
 129        __le16                          rsvd0[3];
 130        struct msgbuf_buf_addr          host_buf_addr;
 131        __le32                          rsvd1[4];
 132};
 133
 134struct msgbuf_completion_hdr {
 135        __le16                          status;
 136        __le16                          flow_ring_id;
 137};
 138
 139struct msgbuf_rx_event {
 140        struct msgbuf_common_hdr        msg;
 141        struct msgbuf_completion_hdr    compl_hdr;
 142        __le16                          event_data_len;
 143        __le16                          seqnum;
 144        __le16                          rsvd0[4];
 145};
 146
 147struct msgbuf_ioctl_resp_hdr {
 148        struct msgbuf_common_hdr        msg;
 149        struct msgbuf_completion_hdr    compl_hdr;
 150        __le16                          resp_len;
 151        __le16                          trans_id;
 152        __le32                          cmd;
 153        __le32                          rsvd0;
 154};
 155
 156struct msgbuf_tx_status {
 157        struct msgbuf_common_hdr        msg;
 158        struct msgbuf_completion_hdr    compl_hdr;
 159        __le16                          metadata_len;
 160        __le16                          tx_status;
 161};
 162
 163struct msgbuf_rx_complete {
 164        struct msgbuf_common_hdr        msg;
 165        struct msgbuf_completion_hdr    compl_hdr;
 166        __le16                          metadata_len;
 167        __le16                          data_len;
 168        __le16                          data_offset;
 169        __le16                          flags;
 170        __le32                          rx_status_0;
 171        __le32                          rx_status_1;
 172        __le32                          rsvd0;
 173};
 174
 175struct msgbuf_tx_flowring_create_req {
 176        struct msgbuf_common_hdr        msg;
 177        u8                              da[ETH_ALEN];
 178        u8                              sa[ETH_ALEN];
 179        u8                              tid;
 180        u8                              if_flags;
 181        __le16                          flow_ring_id;
 182        u8                              tc;
 183        u8                              priority;
 184        __le16                          int_vector;
 185        __le16                          max_items;
 186        __le16                          len_item;
 187        struct msgbuf_buf_addr          flow_ring_addr;
 188};
 189
 190struct msgbuf_tx_flowring_delete_req {
 191        struct msgbuf_common_hdr        msg;
 192        __le16                          flow_ring_id;
 193        __le16                          reason;
 194        __le32                          rsvd0[7];
 195};
 196
 197struct msgbuf_flowring_create_resp {
 198        struct msgbuf_common_hdr        msg;
 199        struct msgbuf_completion_hdr    compl_hdr;
 200        __le32                          rsvd0[3];
 201};
 202
 203struct msgbuf_flowring_delete_resp {
 204        struct msgbuf_common_hdr        msg;
 205        struct msgbuf_completion_hdr    compl_hdr;
 206        __le32                          rsvd0[3];
 207};
 208
 209struct msgbuf_flowring_flush_resp {
 210        struct msgbuf_common_hdr        msg;
 211        struct msgbuf_completion_hdr    compl_hdr;
 212        __le32                          rsvd0[3];
 213};
 214
 215struct brcmf_msgbuf_work_item {
 216        struct list_head queue;
 217        u32 flowid;
 218        int ifidx;
 219        u8 sa[ETH_ALEN];
 220        u8 da[ETH_ALEN];
 221};
 222
 223struct brcmf_msgbuf {
 224        struct brcmf_pub *drvr;
 225
 226        struct brcmf_commonring **commonrings;
 227        struct brcmf_commonring **flowrings;
 228        dma_addr_t *flowring_dma_handle;
 229        u16 nrof_flowrings;
 230
 231        u16 rx_dataoffset;
 232        u32 max_rxbufpost;
 233        u16 rx_metadata_offset;
 234        u32 rxbufpost;
 235
 236        u32 max_ioctlrespbuf;
 237        u32 cur_ioctlrespbuf;
 238        u32 max_eventbuf;
 239        u32 cur_eventbuf;
 240
 241        void *ioctbuf;
 242        dma_addr_t ioctbuf_handle;
 243        u32 ioctbuf_phys_hi;
 244        u32 ioctbuf_phys_lo;
 245        int ioctl_resp_status;
 246        u32 ioctl_resp_ret_len;
 247        u32 ioctl_resp_pktid;
 248
 249        u16 data_seq_no;
 250        u16 ioctl_seq_no;
 251        u32 reqid;
 252        wait_queue_head_t ioctl_resp_wait;
 253        bool ctl_completed;
 254
 255        struct brcmf_msgbuf_pktids *tx_pktids;
 256        struct brcmf_msgbuf_pktids *rx_pktids;
 257        struct brcmf_flowring *flow;
 258
 259        struct workqueue_struct *txflow_wq;
 260        struct work_struct txflow_work;
 261        unsigned long *flow_map;
 262        unsigned long *txstatus_done_map;
 263
 264        struct work_struct flowring_work;
 265        spinlock_t flowring_work_lock;
 266        struct list_head work_queue;
 267};
 268
 269struct brcmf_msgbuf_pktid {
 270        atomic_t  allocated;
 271        u16 data_offset;
 272        struct sk_buff *skb;
 273        dma_addr_t physaddr;
 274};
 275
 276struct brcmf_msgbuf_pktids {
 277        u32 array_size;
 278        u32 last_allocated_idx;
 279        enum dma_data_direction direction;
 280        struct brcmf_msgbuf_pktid *array;
 281};
 282
 283static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
 284
 285
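/* Allocate a packet id table. Each entry pairs a DMA-mapped skb with the
 * 32-bit request_id carried in ring messages, and the table remembers the
 * DMA direction to use when mapping and unmapping its entries.
 */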
 286static struct brcmf_msgbuf_pktids *
 287brcmf_msgbuf_init_pktids(u32 nr_array_entries,
 288                         enum dma_data_direction direction)
 289{
 290        struct brcmf_msgbuf_pktid *array;
 291        struct brcmf_msgbuf_pktids *pktids;
 292
 293        array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
 294        if (!array)
 295                return NULL;
 296
 297        pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
 298        if (!pktids) {
 299                kfree(array);
 300                return NULL;
 301        }
 302        pktids->array = array;
  303        pktids->array = array;
             pktids->array_size = nr_array_entries;
             pktids->direction = direction;
 304
 305        return pktids;
 306}
 307
 308
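/* DMA-map an skb and claim a free slot in the pktid table. Slots are scanned
 * round-robin starting after the last allocation and claimed with an atomic
 * cmpxchg so concurrent callers cannot grab the same index. The returned
 * index is what ends up in the request_id field of the ring message.
 */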
 309static int
 310brcmf_msgbuf_alloc_pktid(struct device *dev,
 311                         struct brcmf_msgbuf_pktids *pktids,
 312                         struct sk_buff *skb, u16 data_offset,
 313                         dma_addr_t *physaddr, u32 *idx)
 314{
 315        struct brcmf_msgbuf_pktid *array;
 316        u32 count;
 317
 318        array = pktids->array;
 319
 320        *physaddr = dma_map_single(dev, skb->data + data_offset,
 321                                   skb->len - data_offset, pktids->direction);
 322
 323        if (dma_mapping_error(dev, *physaddr)) {
 324                brcmf_err("dma_map_single failed !!\n");
 325                return -ENOMEM;
 326        }
 327
 328        *idx = pktids->last_allocated_idx;
 329
 330        count = 0;
 331        do {
 332                (*idx)++;
 333                if (*idx == pktids->array_size)
 334                        *idx = 0;
 335                if (array[*idx].allocated.counter == 0)
 336                        if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
 337                                break;
 338                count++;
 339        } while (count < pktids->array_size);
 340
  341        if (count == pktids->array_size) {
                     dma_unmap_single(dev, *physaddr, skb->len - data_offset,
                                      pktids->direction);
  342                return -ENOMEM;
             }
 343
 344        array[*idx].data_offset = data_offset;
 345        array[*idx].physaddr = *physaddr;
 346        array[*idx].skb = skb;
 347
 348        pktids->last_allocated_idx = *idx;
 349
 350        return 0;
 351}
 352
 353
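/* Look up a packet id reported by the dongle, unmap the associated DMA
 * buffer and return the skb. The slot is released for reuse; NULL is
 * returned (and an error logged) for out-of-range or unused ids.
 */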
 354static struct sk_buff *
 355brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
 356                       u32 idx)
 357{
 358        struct brcmf_msgbuf_pktid *pktid;
 359        struct sk_buff *skb;
 360
 361        if (idx >= pktids->array_size) {
 362                brcmf_err("Invalid packet id %d (max %d)\n", idx,
 363                          pktids->array_size);
 364                return NULL;
 365        }
 366        if (pktids->array[idx].allocated.counter) {
 367                pktid = &pktids->array[idx];
 368                dma_unmap_single(dev, pktid->physaddr,
 369                                 pktid->skb->len - pktid->data_offset,
 370                                 pktids->direction);
 371                skb = pktid->skb;
 372                pktid->allocated.counter = 0;
 373                return skb;
 374        } else {
 375                brcmf_err("Invalid packet id %d (not in use)\n", idx);
 376        }
 377
 378        return NULL;
 379}
 380
 381
 382static void
 383brcmf_msgbuf_release_array(struct device *dev,
 384                           struct brcmf_msgbuf_pktids *pktids)
 385{
 386        struct brcmf_msgbuf_pktid *array;
 387        struct brcmf_msgbuf_pktid *pktid;
 388        u32 count;
 389
 390        array = pktids->array;
 391        count = 0;
 392        do {
 393                if (array[count].allocated.counter) {
 394                        pktid = &array[count];
 395                        dma_unmap_single(dev, pktid->physaddr,
 396                                         pktid->skb->len - pktid->data_offset,
 397                                         pktids->direction);
 398                        brcmu_pkt_buf_free_skb(pktid->skb);
 399                }
 400                count++;
 401        } while (count < pktids->array_size);
 402
 403        kfree(array);
 404        kfree(pktids);
 405}
 406
 407
 408static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
 409{
 410        if (msgbuf->rx_pktids)
 411                brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
 412                                           msgbuf->rx_pktids);
 413        if (msgbuf->tx_pktids)
 414                brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
 415                                           msgbuf->tx_pktids);
 416}
 417
 418
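/* Submit an ioctl/dcmd request to the dongle. The payload is copied into the
 * pre-allocated coherent ioctl buffer (msgbuf->ioctbuf) and only a pointer
 * message (MSGBUF_TYPE_IOCTLPTR_REQ) is written to the control submit ring;
 * payloads larger than BRCMF_TX_IOCTL_MAX_MSG_SIZE are truncated.
 */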
 419static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
 420                                 uint cmd, void *buf, uint len)
 421{
 422        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 423        struct brcmf_commonring *commonring;
 424        struct msgbuf_ioctl_req_hdr *request;
 425        u16 buf_len;
 426        void *ret_ptr;
 427        int err;
 428
 429        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
 430        brcmf_commonring_lock(commonring);
 431        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 432        if (!ret_ptr) {
 433                brcmf_err("Failed to reserve space in commonring\n");
 434                brcmf_commonring_unlock(commonring);
 435                return -ENOMEM;
 436        }
 437
 438        msgbuf->reqid++;
 439
 440        request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
 441        request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
 442        request->msg.ifidx = (u8)ifidx;
 443        request->msg.flags = 0;
 444        request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
 445        request->cmd = cpu_to_le32(cmd);
 446        request->output_buf_len = cpu_to_le16(len);
 447        request->trans_id = cpu_to_le16(msgbuf->reqid);
 448
 449        buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
 450        request->input_buf_len = cpu_to_le16(buf_len);
 451        request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
 452        request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
 453        if (buf)
 454                memcpy(msgbuf->ioctbuf, buf, buf_len);
 455        else
 456                memset(msgbuf->ioctbuf, 0, buf_len);
 457
 458        err = brcmf_commonring_write_complete(commonring);
 459        brcmf_commonring_unlock(commonring);
 460
 461        return err;
 462}
 463
 464
 465static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
 466{
 467        return wait_event_timeout(msgbuf->ioctl_resp_wait,
 468                                  msgbuf->ctl_completed,
 469                                  msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT));
 470}
 471
 472
 473static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
 474{
 475        msgbuf->ctl_completed = true;
 476        if (waitqueue_active(&msgbuf->ioctl_resp_wait))
 477                wake_up(&msgbuf->ioctl_resp_wait);
 478}
 479
 480
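/* Issue a dcmd and wait (up to MSGBUF_IOCTL_RESP_TIMEOUT ms) for the matching
 * MSGBUF_TYPE_IOCTL_CMPLT. Any response payload is copied back into the
 * caller's buffer from the posted ioctl response buffer identified by the
 * completion's packet id; the firmware status code is returned.
 */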
 481static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
 482                                   uint cmd, void *buf, uint len)
 483{
 484        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 485        struct sk_buff *skb = NULL;
 486        int timeout;
 487        int err;
 488
 489        brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
 490        msgbuf->ctl_completed = false;
 491        err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
 492        if (err)
 493                return err;
 494
 495        timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
 496        if (!timeout) {
 497                brcmf_err("Timeout on response for query command\n");
 498                return -EIO;
 499        }
 500
 501        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 502                                     msgbuf->rx_pktids,
 503                                     msgbuf->ioctl_resp_pktid);
 504        if (msgbuf->ioctl_resp_ret_len != 0) {
 505                if (!skb)
 506                        return -EBADF;
 507
 508                memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
 509                                       len : msgbuf->ioctl_resp_ret_len);
 510        }
 511        brcmu_pkt_buf_free_skb(skb);
 512
 513        return msgbuf->ioctl_resp_status;
 514}
 515
 516
 517static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
 518                                 uint cmd, void *buf, uint len)
 519{
 520        return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
 521}
 522
 523
 524static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
 525                                u8 *ifidx, struct sk_buff *skb)
 526{
 527        return -ENODEV;
 528}
 529
 530
 531static void
 532brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
 533{
 534        u32 dma_sz;
 535        void *dma_buf;
 536
 537        brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);
 538
 539        dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
 540        dma_buf = msgbuf->flowrings[flowid]->buf_addr;
 541        dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
 542                          msgbuf->flowring_dma_handle[flowid]);
 543
 544        brcmf_flowring_delete(msgbuf->flow, flowid);
 545}
 546
 547
 548static struct brcmf_msgbuf_work_item *
 549brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
 550{
 551        struct brcmf_msgbuf_work_item *work = NULL;
 552        ulong flags;
 553
 554        spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
 555        if (!list_empty(&msgbuf->work_queue)) {
 556                work = list_first_entry(&msgbuf->work_queue,
 557                                        struct brcmf_msgbuf_work_item, queue);
 558                list_del(&work->queue);
 559        }
 560        spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
 561
 562        return work;
 563}
 564
 565
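/* Worker-side part of flowring creation: allocate the ring's DMA memory
 * (GFP_KERNEL, hence deferred to process context), configure the host-side
 * commonring and send MSGBUF_TYPE_FLOW_RING_CREATE on the control submit
 * ring. Returns the flowid, or BRCMF_FLOWRING_INVALID_ID on failure. The id
 * sent to firmware is offset by BRCMF_NROF_H2D_COMMON_MSGRINGS.
 */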
 566static u32
 567brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
 568                                    struct brcmf_msgbuf_work_item *work)
 569{
 570        struct msgbuf_tx_flowring_create_req *create;
 571        struct brcmf_commonring *commonring;
 572        void *ret_ptr;
 573        u32 flowid;
 574        void *dma_buf;
 575        u32 dma_sz;
 576        u64 address;
 577        int err;
 578
 579        flowid = work->flowid;
 580        dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
 581        dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
 582                                     &msgbuf->flowring_dma_handle[flowid],
 583                                     GFP_KERNEL);
 584        if (!dma_buf) {
 585                brcmf_err("dma_alloc_coherent failed\n");
 586                brcmf_flowring_delete(msgbuf->flow, flowid);
 587                return BRCMF_FLOWRING_INVALID_ID;
 588        }
 589
 590        brcmf_commonring_config(msgbuf->flowrings[flowid],
 591                                BRCMF_H2D_TXFLOWRING_MAX_ITEM,
 592                                BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);
 593
 594        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
 595        brcmf_commonring_lock(commonring);
 596        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 597        if (!ret_ptr) {
 598                brcmf_err("Failed to reserve space in commonring\n");
 599                brcmf_commonring_unlock(commonring);
 600                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
 601                return BRCMF_FLOWRING_INVALID_ID;
 602        }
 603
 604        create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
 605        create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
 606        create->msg.ifidx = work->ifidx;
 607        create->msg.request_id = 0;
 608        create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
 609        create->flow_ring_id = cpu_to_le16(flowid +
 610                                           BRCMF_NROF_H2D_COMMON_MSGRINGS);
 611        memcpy(create->sa, work->sa, ETH_ALEN);
 612        memcpy(create->da, work->da, ETH_ALEN);
 613        address = (u64)msgbuf->flowring_dma_handle[flowid];
 614        create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
 615        create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
 616        create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
 617        create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);
 618
 619        brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
 620                  flowid, work->da, create->tid, work->ifidx);
 621
 622        err = brcmf_commonring_write_complete(commonring);
 623        brcmf_commonring_unlock(commonring);
 624        if (err) {
 625                brcmf_err("Failed to write commonring\n");
 626                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
 627                return BRCMF_FLOWRING_INVALID_ID;
 628        }
 629
 630        return flowid;
 631}
 632
 633
 634static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
 635{
 636        struct brcmf_msgbuf *msgbuf;
 637        struct brcmf_msgbuf_work_item *create;
 638
 639        msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);
 640
 641        while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
 642                brcmf_msgbuf_flowring_create_worker(msgbuf, create);
 643                kfree(create);
 644        }
 645}
 646
 647
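/* Called from the data path in atomic context: reserve a flowid for this
 * destination/priority pair and queue a work item so that the DMA allocation
 * and the actual create request run later in brcmf_msgbuf_flowring_worker().
 */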
 648static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
 649                                        struct sk_buff *skb)
 650{
 651        struct brcmf_msgbuf_work_item *create;
 652        struct ethhdr *eh = (struct ethhdr *)(skb->data);
 653        u32 flowid;
 654        ulong flags;
 655
 656        create = kzalloc(sizeof(*create), GFP_ATOMIC);
 657        if (create == NULL)
 658                return BRCMF_FLOWRING_INVALID_ID;
 659
 660        flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
 661                                       skb->priority, ifidx);
 662        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
 663                kfree(create);
 664                return flowid;
 665        }
 666
 667        create->flowid = flowid;
 668        create->ifidx = ifidx;
 669        memcpy(create->sa, eh->h_source, ETH_ALEN);
 670        memcpy(create->da, eh->h_dest, ETH_ALEN);
 671
 672        spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
 673        list_add_tail(&create->queue, &msgbuf->work_queue);
 674        spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
 675        schedule_work(&msgbuf->flowring_work);
 676
 677        return flowid;
 678}
 679
 680
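/* Drain queued tx packets for one flowring into its h2d commonring. Each
 * packet is DMA-mapped, assigned a pktid and written as a TX_POST message;
 * tx_msghdr->flags carries BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 in bit 0 and
 * the 802.1d priority in bits 5-7. The write pointer is pushed to the dongle
 * after the first BRCMF_MSGBUF_TX_FLUSH_CNT1 items, every
 * BRCMF_MSGBUF_TX_FLUSH_CNT2 items thereafter, and once more for any
 * remainder before the ring is unlocked.
 */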
 681static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
 682{
 683        struct brcmf_flowring *flow = msgbuf->flow;
 684        struct brcmf_commonring *commonring;
 685        void *ret_ptr;
 686        u32 count;
 687        struct sk_buff *skb;
 688        dma_addr_t physaddr;
 689        u32 pktid;
 690        struct msgbuf_tx_msghdr *tx_msghdr;
 691        u64 address;
 692
 693        commonring = msgbuf->flowrings[flowid];
 694        if (!brcmf_commonring_write_available(commonring))
 695                return;
 696
 697        brcmf_commonring_lock(commonring);
 698
 699        count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
 700        while (brcmf_flowring_qlen(flow, flowid)) {
 701                skb = brcmf_flowring_dequeue(flow, flowid);
 702                if (skb == NULL) {
 703                        brcmf_err("No SKB, but qlen %d\n",
 704                                  brcmf_flowring_qlen(flow, flowid));
 705                        break;
 706                }
 707                skb_orphan(skb);
 708                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
 709                                             msgbuf->tx_pktids, skb, ETH_HLEN,
 710                                             &physaddr, &pktid)) {
 711                        brcmf_flowring_reinsert(flow, flowid, skb);
 712                        brcmf_err("No PKTID available !!\n");
 713                        break;
 714                }
 715                ret_ptr = brcmf_commonring_reserve_for_write(commonring);
 716                if (!ret_ptr) {
 717                        brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 718                                               msgbuf->tx_pktids, pktid);
 719                        brcmf_flowring_reinsert(flow, flowid, skb);
 720                        break;
 721                }
 722                count++;
 723
 724                tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
 725
 726                tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
 727                tx_msghdr->msg.request_id = cpu_to_le32(pktid);
 728                tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
 729                tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
 730                tx_msghdr->flags |= (skb->priority & 0x07) <<
 731                                    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
 732                tx_msghdr->seg_cnt = 1;
 733                memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
 734                tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
 735                address = (u64)physaddr;
 736                tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
 737                tx_msghdr->data_buf_addr.low_addr =
 738                        cpu_to_le32(address & 0xffffffff);
 739                tx_msghdr->metadata_buf_len = 0;
 740                tx_msghdr->metadata_buf_addr.high_addr = 0;
 741                tx_msghdr->metadata_buf_addr.low_addr = 0;
 742                atomic_inc(&commonring->outstanding_tx);
 743                if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
 744                        brcmf_commonring_write_complete(commonring);
 745                        count = 0;
 746                }
 747        }
 748        if (count)
 749                brcmf_commonring_write_complete(commonring);
 750        brcmf_commonring_unlock(commonring);
 751}
 752
 753
 754static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
 755{
 756        struct brcmf_msgbuf *msgbuf;
 757        u32 flowid;
 758
 759        msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
 760        for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) {
 761                clear_bit(flowid, msgbuf->flow_map);
 762                brcmf_msgbuf_txflow(msgbuf, flowid);
 763        }
 764}
 765
 766
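/* Mark a flowring as having pending tx and kick the txflow worker. When
 * force is false the worker is only scheduled while the number of
 * outstanding (not yet completed) tx packets on that ring is below
 * BRCMF_MSGBUF_DELAY_TXWORKER_THRS, which throttles submission under load.
 */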
 767static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
 768                                        bool force)
 769{
 770        struct brcmf_commonring *commonring;
 771
 772        set_bit(flowid, msgbuf->flow_map);
 773        commonring = msgbuf->flowrings[flowid];
 774        if ((force) || (atomic_read(&commonring->outstanding_tx) <
 775                        BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
 776                queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
 777
 778        return 0;
 779}
 780
 781
 782static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
 783                               u8 offset, struct sk_buff *skb)
 784{
 785        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 786        struct brcmf_flowring *flow = msgbuf->flow;
 787        struct ethhdr *eh = (struct ethhdr *)(skb->data);
 788        u32 flowid;
 789        u32 queue_count;
 790        bool force;
 791
 792        flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
 793        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
 794                flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
 795                if (flowid == BRCMF_FLOWRING_INVALID_ID)
 796                        return -ENOMEM;
 797        }
 798        queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
 799        force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
 800        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
 801
 802        return 0;
 803}
 804
 805
 806static void
 807brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
 808                                 enum proto_addr_mode addr_mode)
 809{
 810        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 811
 812        brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
 813}
 814
 815
 816static void
 817brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
 818{
 819        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 820
 821        brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
 822}
 823
 824
 825static void
 826brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
 827{
 828        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
 829
 830        brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
 831}
 832
 833
 834static void
 835brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 836{
 837        struct msgbuf_ioctl_resp_hdr *ioctl_resp;
 838
 839        ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;
 840
 841        msgbuf->ioctl_resp_status =
 842                        (s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
 843        msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
 844        msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);
 845
 846        brcmf_msgbuf_ioctl_resp_wake(msgbuf);
 847
 848        if (msgbuf->cur_ioctlrespbuf)
 849                msgbuf->cur_ioctlrespbuf--;
 850        brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
 851}
 852
 853
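/* Handle MSGBUF_TYPE_TX_STATUS: recover the skb via the pktid in request_id,
 * account the completion against the flowring and hand the skb to
 * brcmf_txfinalize(). The flow_ring_id reported by the dongle is offset by
 * BRCMF_NROF_H2D_COMMON_MSGRINGS, hence the subtraction.
 */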
 854static void
 855brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
 856{
 857        struct brcmf_commonring *commonring;
 858        struct msgbuf_tx_status *tx_status;
 859        u32 idx;
 860        struct sk_buff *skb;
 861        u16 flowid;
 862
 863        tx_status = (struct msgbuf_tx_status *)buf;
 864        idx = le32_to_cpu(tx_status->msg.request_id);
 865        flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
 866        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
 867        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
 868                                     msgbuf->tx_pktids, idx);
 869        if (!skb)
 870                return;
 871
 872        set_bit(flowid, msgbuf->txstatus_done_map);
 873        commonring = msgbuf->flowrings[flowid];
 874        atomic_dec(&commonring->outstanding_tx);
 875
 876        brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
 877}
 878
 879
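/* Post up to count fresh receive buffers on the rxpost submit ring. Each
 * buffer is a BRCMF_MSGBUF_MAX_PKT_SIZE skb, DMA-mapped and identified by a
 * pktid; when rx metadata is enabled the first rx_metadata_offset bytes of
 * the buffer are reserved for it. Returns the number of buffers actually
 * posted, which may be less than requested.
 */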
 880static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
 881{
 882        struct brcmf_commonring *commonring;
 883        void *ret_ptr;
 884        struct sk_buff *skb;
 885        u16 alloced;
 886        u32 pktlen;
 887        dma_addr_t physaddr;
 888        struct msgbuf_rx_bufpost *rx_bufpost;
 889        u64 address;
 890        u32 pktid;
 891        u32 i;
 892
 893        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
 894        ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
 895                                                              count,
 896                                                              &alloced);
 897        if (!ret_ptr) {
 898                brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
 899                return 0;
 900        }
 901
 902        for (i = 0; i < alloced; i++) {
 903                rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
 904                memset(rx_bufpost, 0, sizeof(*rx_bufpost));
 905
 906                skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
 907
 908                if (skb == NULL) {
 909                        brcmf_err("Failed to alloc SKB\n");
 910                        brcmf_commonring_write_cancel(commonring, alloced - i);
 911                        break;
 912                }
 913
 914                pktlen = skb->len;
 915                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
 916                                             msgbuf->rx_pktids, skb, 0,
 917                                             &physaddr, &pktid)) {
 918                        dev_kfree_skb_any(skb);
 919                        brcmf_err("No PKTID available !!\n");
 920                        brcmf_commonring_write_cancel(commonring, alloced - i);
 921                        break;
 922                }
 923
 924                if (msgbuf->rx_metadata_offset) {
 925                        address = (u64)physaddr;
 926                        rx_bufpost->metadata_buf_len =
 927                                cpu_to_le16(msgbuf->rx_metadata_offset);
 928                        rx_bufpost->metadata_buf_addr.high_addr =
 929                                cpu_to_le32(address >> 32);
 930                        rx_bufpost->metadata_buf_addr.low_addr =
 931                                cpu_to_le32(address & 0xffffffff);
 932
 933                        skb_pull(skb, msgbuf->rx_metadata_offset);
 934                        pktlen = skb->len;
 935                        physaddr += msgbuf->rx_metadata_offset;
 936                }
 937                rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
 938                rx_bufpost->msg.request_id = cpu_to_le32(pktid);
 939
 940                address = (u64)physaddr;
 941                rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
 942                rx_bufpost->data_buf_addr.high_addr =
 943                        cpu_to_le32(address >> 32);
 944                rx_bufpost->data_buf_addr.low_addr =
 945                        cpu_to_le32(address & 0xffffffff);
 946
 947                ret_ptr += brcmf_commonring_len_item(commonring);
 948        }
 949
 950        if (i)
 951                brcmf_commonring_write_complete(commonring);
 952
 953        return i;
 954}
 955
 956
 957static void
 958brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
 959{
 960        u32 fillbufs;
 961        u32 retcount;
 962
 963        fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;
 964
 965        while (fillbufs) {
 966                retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
 967                if (!retcount)
 968                        break;
 969                msgbuf->rxbufpost += retcount;
 970                fillbufs -= retcount;
 971        }
 972}
 973
 974
 975static void
 976brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
 977{
 978        msgbuf->rxbufpost -= rxcnt;
 979        if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
 980                                  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
 981                brcmf_msgbuf_rxbuf_data_fill(msgbuf);
 982}
 983
 984
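/* Post buffers on the control submit ring for either firmware events or
 * ioctl responses, depending on event_buf. Same pktid/DMA handling as the
 * rx data path, but done under the commonring lock because the control ring
 * is shared with ioctl requests and flowring management messages.
 */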
 985static u32
 986brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
 987                             u32 count)
 988{
 989        struct brcmf_commonring *commonring;
 990        void *ret_ptr;
 991        struct sk_buff *skb;
 992        u16 alloced;
 993        u32 pktlen;
 994        dma_addr_t physaddr;
 995        struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
 996        u64 address;
 997        u32 pktid;
 998        u32 i;
 999
1000        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1001        brcmf_commonring_lock(commonring);
1002        ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
1003                                                              count,
1004                                                              &alloced);
1005        if (!ret_ptr) {
1006                brcmf_err("Failed to reserve space in commonring\n");
1007                brcmf_commonring_unlock(commonring);
1008                return 0;
1009        }
1010
1011        for (i = 0; i < alloced; i++) {
1012                rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
1013                memset(rx_bufpost, 0, sizeof(*rx_bufpost));
1014
1015                skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
1016
1017                if (skb == NULL) {
1018                        brcmf_err("Failed to alloc SKB\n");
1019                        brcmf_commonring_write_cancel(commonring, alloced - i);
1020                        break;
1021                }
1022
1023                pktlen = skb->len;
1024                if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
1025                                             msgbuf->rx_pktids, skb, 0,
1026                                             &physaddr, &pktid)) {
1027                        dev_kfree_skb_any(skb);
1028                        brcmf_err("No PKTID available !!\n");
1029                        brcmf_commonring_write_cancel(commonring, alloced - i);
1030                        break;
1031                }
1032                if (event_buf)
1033                        rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
1034                else
1035                        rx_bufpost->msg.msgtype =
1036                                MSGBUF_TYPE_IOCTLRESP_BUF_POST;
1037                rx_bufpost->msg.request_id = cpu_to_le32(pktid);
1038
1039                address = (u64)physaddr;
1040                rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
1041                rx_bufpost->host_buf_addr.high_addr =
1042                        cpu_to_le32(address >> 32);
1043                rx_bufpost->host_buf_addr.low_addr =
1044                        cpu_to_le32(address & 0xffffffff);
1045
1046                ret_ptr += brcmf_commonring_len_item(commonring);
1047        }
1048
1049        if (i)
1050                brcmf_commonring_write_complete(commonring);
1051
1052        brcmf_commonring_unlock(commonring);
1053
1054        return i;
1055}
1056
1057
1058static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
1059{
1060        u32 count;
1061
1062        count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
1063        count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
1064        msgbuf->cur_ioctlrespbuf += count;
1065}
1066
1067
1068static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
1069{
1070        u32 count;
1071
1072        count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
1073        count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
1074        msgbuf->cur_eventbuf += count;
1075}
1076
1077
1078static void
1079brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
1080                    u8 ifidx)
1081{
1082        struct brcmf_if *ifp;
1083
 1084        /* The ifidx is the idx to map to the matching netdev/ifp. When
 1085         * receiving events this is easy because it contains the bssidx,
 1086         * which maps 1-on-1 to the netdev/ifp. For data frames only the
 1087         * ifidx is received. bssidx 1 is used for p2p0 and no data can be
 1088         * received or transmitted on it, so bssidx is ifidx + 1 if ifidx > 0.
 1089         */
1090        if (ifidx)
1091                (ifidx)++;
1092        ifp = msgbuf->drvr->iflist[ifidx];
1093        if (!ifp || !ifp->ndev) {
1094                brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
1095                brcmu_pkt_buf_free_skb(skb);
1096                return;
1097        }
1098        brcmf_netif_rx(ifp, skb);
1099}
1100
1101
1102static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
1103{
1104        struct msgbuf_rx_event *event;
1105        u32 idx;
1106        u16 buflen;
1107        struct sk_buff *skb;
1108
1109        event = (struct msgbuf_rx_event *)buf;
1110        idx = le32_to_cpu(event->msg.request_id);
1111        buflen = le16_to_cpu(event->event_data_len);
1112
1113        if (msgbuf->cur_eventbuf)
1114                msgbuf->cur_eventbuf--;
1115        brcmf_msgbuf_rxbuf_event_post(msgbuf);
1116
1117        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1118                                     msgbuf->rx_pktids, idx);
1119        if (!skb)
1120                return;
1121
1122        if (msgbuf->rx_dataoffset)
1123                skb_pull(skb, msgbuf->rx_dataoffset);
1124
1125        skb_trim(skb, buflen);
1126
1127        brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
1128}
1129
1130
1131static void
1132brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
1133{
1134        struct msgbuf_rx_complete *rx_complete;
1135        struct sk_buff *skb;
1136        u16 data_offset;
1137        u16 buflen;
1138        u32 idx;
1139
1140        brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
1141
1142        rx_complete = (struct msgbuf_rx_complete *)buf;
1143        data_offset = le16_to_cpu(rx_complete->data_offset);
1144        buflen = le16_to_cpu(rx_complete->data_len);
1145        idx = le32_to_cpu(rx_complete->msg.request_id);
1146
1147        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1148                                     msgbuf->rx_pktids, idx);
1149        if (!skb)
1150                return;
1151
1152        if (data_offset)
1153                skb_pull(skb, data_offset);
1154        else if (msgbuf->rx_dataoffset)
1155                skb_pull(skb, msgbuf->rx_dataoffset);
1156
1157        skb_trim(skb, buflen);
1158
1159        brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
1160}
1161
1162
1163static void
1164brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
1165                                               void *buf)
1166{
1167        struct msgbuf_flowring_create_resp *flowring_create_resp;
1168        u16 status;
1169        u16 flowid;
1170
1171        flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
1172
1173        flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
1174        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
1175        status =  le16_to_cpu(flowring_create_resp->compl_hdr.status);
1176
1177        if (status) {
1178                brcmf_err("Flowring creation failed, code %d\n", status);
1179                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1180                return;
1181        }
1182        brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
1183                  status);
1184
1185        brcmf_flowring_open(msgbuf->flow, flowid);
1186
1187        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1188}
1189
1190
1191static void
1192brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
1193                                               void *buf)
1194{
1195        struct msgbuf_flowring_delete_resp *flowring_delete_resp;
1196        u16 status;
1197        u16 flowid;
1198
1199        flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
1200
1201        flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
1202        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
1203        status =  le16_to_cpu(flowring_delete_resp->compl_hdr.status);
1204
1205        if (status) {
1206                brcmf_err("Flowring deletion failed, code %d\n", status);
1207                brcmf_flowring_delete(msgbuf->flow, flowid);
1208                return;
1209        }
1210        brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
1211                  status);
1212
1213        brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1214}
1215
1216
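/* Dispatch one d2h message based on its msgtype. IOCTLPTR_REQ_ACK is only
 * logged; unknown types are logged and skipped so processing of the ring
 * continues.
 */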
1217static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
1218{
1219        struct msgbuf_common_hdr *msg;
1220
1221        msg = (struct msgbuf_common_hdr *)buf;
1222        switch (msg->msgtype) {
1223        case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1224                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
1225                brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
1226                break;
1227        case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1228                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
1229                brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
1230                break;
1231        case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1232                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
1233                break;
1234        case MSGBUF_TYPE_IOCTL_CMPLT:
1235                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
1236                brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
1237                break;
1238        case MSGBUF_TYPE_WL_EVENT:
1239                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
1240                brcmf_msgbuf_process_event(msgbuf, buf);
1241                break;
1242        case MSGBUF_TYPE_TX_STATUS:
1243                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
1244                brcmf_msgbuf_process_txstatus(msgbuf, buf);
1245                break;
1246        case MSGBUF_TYPE_RX_CMPLT:
1247                brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
1248                brcmf_msgbuf_process_rx_complete(msgbuf, buf);
1249                break;
1250        default:
1251                brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
1252                break;
1253        }
1254}
1255
1256
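/* Walk all available items on a d2h completion ring. The read pointer is
 * returned to the dongle every BRCMF_MSGBUF_UPDATE_RX_PTR_THRS processed
 * items so it can keep refilling while the host is still reading, and the
 * ring is re-checked when the read pointer has wrapped to 0.
 */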
1257static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
1258                                    struct brcmf_commonring *commonring)
1259{
1260        void *buf;
1261        u16 count;
1262        u16 processed;
1263
1264again:
1265        buf = brcmf_commonring_get_read_ptr(commonring, &count);
1266        if (buf == NULL)
1267                return;
1268
1269        processed = 0;
1270        while (count) {
1271                brcmf_msgbuf_process_msgtype(msgbuf,
1272                                             buf + msgbuf->rx_dataoffset);
1273                buf += brcmf_commonring_len_item(commonring);
1274                processed++;
1275                if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
1276                        brcmf_commonring_read_complete(commonring, processed);
1277                        processed = 0;
1278                }
1279                count--;
1280        }
1281        if (processed)
1282                brcmf_commonring_read_complete(commonring, processed);
1283
1284        if (commonring->r_ptr == 0)
1285                goto again;
1286}
1287
1288
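/* Bus interrupt entry point: process the rx, tx and control completion
 * rings, then kick the txflow worker for any flowring that just completed a
 * tx status and either has a large backlog or has queued packets with few
 * outstanding transmissions.
 */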
1289int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
1290{
1291        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1292        struct brcmf_pub *drvr = bus_if->drvr;
1293        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1294        struct brcmf_commonring *commonring;
1295        void *buf;
1296        u32 flowid;
1297        int qlen;
1298
1299        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1300        brcmf_msgbuf_process_rx(msgbuf, buf);
1301        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
1302        brcmf_msgbuf_process_rx(msgbuf, buf);
1303        buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
1304        brcmf_msgbuf_process_rx(msgbuf, buf);
1305
1306        for_each_set_bit(flowid, msgbuf->txstatus_done_map,
1307                         msgbuf->nrof_flowrings) {
1308                clear_bit(flowid, msgbuf->txstatus_done_map);
1309                commonring = msgbuf->flowrings[flowid];
1310                qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
1311                if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
1312                    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
1313                                BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
1314                        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1315        }
1316
1317        return 0;
1318}
1319
1320
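/* Request deletion of a flowring in the dongle. If the delete request cannot
 * be submitted, the host-side ring resources are torn down immediately;
 * otherwise teardown happens when MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT arrives.
 */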
1321void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
1322{
1323        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1324        struct msgbuf_tx_flowring_delete_req *delete;
1325        struct brcmf_commonring *commonring;
1326        void *ret_ptr;
1327        u8 ifidx;
1328        int err;
1329
1330        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1331        brcmf_commonring_lock(commonring);
1332        ret_ptr = brcmf_commonring_reserve_for_write(commonring);
1333        if (!ret_ptr) {
1334                brcmf_err("FW unaware, flowring will be removed !!\n");
1335                brcmf_commonring_unlock(commonring);
1336                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1337                return;
1338        }
1339
1340        delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;
1341
1342        ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);
1343
1344        delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1345        delete->msg.ifidx = ifidx;
1346        delete->msg.request_id = 0;
1347
1348        delete->flow_ring_id = cpu_to_le16(flowid +
1349                                           BRCMF_NROF_H2D_COMMON_MSGRINGS);
1350        delete->reason = 0;
1351
1352        brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
1353                  flowid, ifidx);
1354
1355        err = brcmf_commonring_write_complete(commonring);
1356        brcmf_commonring_unlock(commonring);
1357        if (err) {
1358                brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
1359                brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1360        }
1361}
1362
1363#ifdef DEBUG
1364static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
1365{
1366        struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
1367        struct brcmf_pub *drvr = bus_if->drvr;
1368        struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1369        struct brcmf_commonring *commonring;
1370        u16 i;
1371        struct brcmf_flowring_ring *ring;
1372        struct brcmf_flowring_hash *hash;
1373
1374        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1375        seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
1376                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1377        commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
1378        seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
1379                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1380        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
1381        seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
1382                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1383        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
1384        seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
1385                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1386        commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1387        seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
1388                   commonring->r_ptr, commonring->w_ptr, commonring->depth);
1389
1390        seq_printf(seq, "\nh2d_flowrings: depth %u\n",
1391                   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
1392        seq_puts(seq, "Active flowrings:\n");
1393        hash = msgbuf->flow->hash;
1394        for (i = 0; i < msgbuf->flow->nrofrings; i++) {
1395                if (!msgbuf->flow->rings[i])
1396                        continue;
1397                ring = msgbuf->flow->rings[i];
1398                if (ring->status != RING_OPEN)
1399                        continue;
1400                commonring = msgbuf->flowrings[i];
1401                hash = &msgbuf->flow->hash[ring->hash_id];
1402                seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
1403                                "        ifidx %u, fifo %u, da %pM\n",
1404                                i, commonring->r_ptr, commonring->w_ptr,
1405                                skb_queue_len(&ring->skblist), ring->blocked,
1406                                hash->ifidx, hash->fifo, hash->mac);
1407        }
1408
1409        return 0;
1410}
1411#else
1412static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
1413{
1414        return 0;
1415}
1416#endif
1417
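/* Attach the msgbuf protocol layer: allocate the coherent ioctl buffer and
 * pktid tables, hook up the brcmf_proto callbacks, pre-post rx data, event
 * and ioctl response buffers, and set up the txflow and flowring work items.
 * A minimal sketch of the assumed call site in the common proto layer:
 *
 *	if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF)
 *		err = brcmf_proto_msgbuf_attach(drvr);
 *
 * On failure all partially allocated resources are released and -ENOMEM is
 * returned.
 */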
1418int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
1419{
1420        struct brcmf_bus_msgbuf *if_msgbuf;
1421        struct brcmf_msgbuf *msgbuf;
1422        u64 address;
1423        u32 count;
1424
1425        if_msgbuf = drvr->bus_if->msgbuf;
1426        msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
1427        if (!msgbuf)
1428                goto fail;
1429
1430        msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
1431        if (msgbuf->txflow_wq == NULL) {
1432                brcmf_err("workqueue creation failed\n");
1433                goto fail;
1434        }
1435        INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
1436        count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
1437        count = count * sizeof(unsigned long);
1438        msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
1439        if (!msgbuf->flow_map)
1440                goto fail;
1441
1442        msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
1443        if (!msgbuf->txstatus_done_map)
1444                goto fail;
1445
1446        msgbuf->drvr = drvr;
1447        msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
1448                                             BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1449                                             &msgbuf->ioctbuf_handle,
1450                                             GFP_KERNEL);
1451        if (!msgbuf->ioctbuf)
1452                goto fail;
1453        address = (u64)msgbuf->ioctbuf_handle;
1454        msgbuf->ioctbuf_phys_hi = address >> 32;
1455        msgbuf->ioctbuf_phys_lo = address & 0xffffffff;
1456
1457        drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
1458        drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
1459        drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
1460        drvr->proto->txdata = brcmf_msgbuf_txdata;
1461        drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
1462        drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
1463        drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
1464        drvr->proto->pd = msgbuf;
1465
1466        init_waitqueue_head(&msgbuf->ioctl_resp_wait);
1467
1468        msgbuf->commonrings =
1469                (struct brcmf_commonring **)if_msgbuf->commonrings;
1470        msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
1471        msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
1472        msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
1473                sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
1474        if (!msgbuf->flowring_dma_handle)
1475                goto fail;
1476
1477        msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
1478        msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;
1479
1480        msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
1481        msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;
1482
1483        msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
1484                                                     DMA_TO_DEVICE);
1485        if (!msgbuf->tx_pktids)
1486                goto fail;
1487        msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
1488                                                     DMA_FROM_DEVICE);
1489        if (!msgbuf->rx_pktids)
1490                goto fail;
1491
1492        msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
1493                                             if_msgbuf->nrof_flowrings);
1494        if (!msgbuf->flow)
1495                goto fail;
1496
1497
1498        brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
1499                  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
1500                  msgbuf->max_ioctlrespbuf);
1501        count = 0;
1502        do {
1503                brcmf_msgbuf_rxbuf_data_fill(msgbuf);
1504                if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
1505                        msleep(10);
1506                else
1507                        break;
1508                count++;
1509        } while (count < 10);
1510        brcmf_msgbuf_rxbuf_event_post(msgbuf);
1511        brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
1512
1513        INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
1514        spin_lock_init(&msgbuf->flowring_work_lock);
1515        INIT_LIST_HEAD(&msgbuf->work_queue);
1516
1517        brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
1518
1519        return 0;
1520
1521fail:
1522        if (msgbuf) {
1523                kfree(msgbuf->flow_map);
1524                kfree(msgbuf->txstatus_done_map);
1525                brcmf_msgbuf_release_pktids(msgbuf);
1526                kfree(msgbuf->flowring_dma_handle);
1527                if (msgbuf->ioctbuf)
1528                        dma_free_coherent(drvr->bus_if->dev,
1529                                          BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1530                                          msgbuf->ioctbuf,
1531                                          msgbuf->ioctbuf_handle);
1532                kfree(msgbuf);
1533        }
1534        return -ENOMEM;
1535}
1536
1537
1538void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
1539{
1540        struct brcmf_msgbuf *msgbuf;
1541        struct brcmf_msgbuf_work_item *work;
1542
1543        brcmf_dbg(TRACE, "Enter\n");
1544        if (drvr->proto->pd) {
1545                msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1546                cancel_work_sync(&msgbuf->flowring_work);
1547                while (!list_empty(&msgbuf->work_queue)) {
1548                        work = list_first_entry(&msgbuf->work_queue,
1549                                                struct brcmf_msgbuf_work_item,
1550                                                queue);
1551                        list_del(&work->queue);
1552                        kfree(work);
1553                }
1554                kfree(msgbuf->flow_map);
1555                kfree(msgbuf->txstatus_done_map);
1556                if (msgbuf->txflow_wq)
1557                        destroy_workqueue(msgbuf->txflow_wq);
1558
1559                brcmf_flowring_detach(msgbuf->flow);
1560                dma_free_coherent(drvr->bus_if->dev,
1561                                  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1562                                  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
1563                brcmf_msgbuf_release_pktids(msgbuf);
1564                kfree(msgbuf->flowring_dma_handle);
1565                kfree(msgbuf);
1566                drvr->proto->pd = NULL;
1567        }
1568}
1569