linux/drivers/net/wireless/ath/ath10k/htt_tx.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

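/* Encode a queue depth in bytes into the 8-bit HTT queue state entry
 * format: a 6-bit factor and a 2-bit exponent packed with SM(), so that
 * roughly count ~= factor << (7 + 3 * exp). Depths too large for the
 * encoding saturate to 0xff.
 */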
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
        int exp;
        int factor;

        exp = 0;
        factor = count >> 7;

        while (factor >= 64 && exp < 4) {
                factor >>= 3;
                exp++;
        }

        if (exp == 4)
                return 0xff;

        if (count > 0)
                factor = max(1, factor);

        return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
               SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

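/* Recompute the shared tx queue state entry for one txq: store the
 * encoded depth in the per-TID/per-peer count table and set or clear
 * the peer's bit in the per-TID bitmap. The caller must hold
 * htt.tx_lock; the update only becomes visible to the firmware once
 * __ath10k_htt_tx_txq_sync() commits it.
 */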
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                                       struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;
        struct ath10k_sta *arsta;
        struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
        unsigned long frame_cnt;
        unsigned long byte_cnt;
        int idx;
        u32 bit;
        u16 peer_id;
        u8 tid;
        u8 count;

        lockdep_assert_held(&ar->htt.tx_lock);

        if (!ar->htt.tx_q_state.enabled)
                return;

        if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
                return;

        if (txq->sta) {
                arsta = (void *)txq->sta->drv_priv;
                peer_id = arsta->peer_id;
        } else {
                peer_id = arvif->peer_id;
        }

        tid = txq->tid;
        bit = BIT(peer_id % 32);
        idx = peer_id / 32;

        ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
        count = ath10k_htt_tx_txq_calc_size(byte_cnt);

        if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
            unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
                ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
                            peer_id, tid);
                return;
        }

        ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
        ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
        ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
                   peer_id, tid, count);
}

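/* Commit pending queue state updates: bump the sequence number so the
 * firmware can notice the change and sync the buffer out to the device.
 */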
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
        u32 seq;
        size_t size;

        lockdep_assert_held(&ar->htt.tx_lock);

        if (!ar->htt.tx_q_state.enabled)
                return;

        if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
                return;

        seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
        seq++;
        ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
                   seq);

        size = sizeof(*ar->htt.tx_q_state.vaddr);
        dma_sync_single_for_device(ar->dev,
                                   ar->htt.tx_q_state.paddr,
                                   size,
                                   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;

        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_recalc(hw, txq);
        spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_sync(ar);
        spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;

        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_recalc(hw, txq);
        __ath10k_htt_tx_txq_sync(ar);
        spin_unlock_bh(&ar->htt.tx_lock);
}

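/* In-flight MSDU accounting: tx queues are paused via
 * ath10k_mac_tx_lock() when num_pending_tx reaches max_num_pending_tx
 * and unpaused on the first decrement below the limit.
 */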
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx)
                return -EBUSY;

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

        return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
                                   bool is_presp)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
                return 0;

        if (is_presp &&
            ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
                return -EBUSY;

        htt->num_pending_mgmt_tx++;

        return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        if (!htt->ar->hw_params.max_probe_resp_desc_thres)
                return;

        htt->num_pending_mgmt_tx--;
}

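/* MSDU ids are allocated from a compact [0, max_num_pending_tx) range
 * with the idr so they can double as indices into the contiguous
 * txbuf and frag desc arrays as well as firmware completion cookies.
 */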
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
        struct ath10k *ar = htt->ar;
        int ret;

        spin_lock_bh(&htt->tx_lock);
        ret = idr_alloc(&htt->pending_tx, skb, 0,
                        htt->max_num_pending_tx, GFP_ATOMIC);
        spin_unlock_bh(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

        return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

        idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!htt->txbuf.vaddr_txbuff_32)
                return;

        size = htt->txbuf.size;
        dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
                          htt->txbuf.paddr);
        htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        size = htt->max_num_pending_tx *
                        sizeof(struct ath10k_htt_txbuf_32);

        htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
                                                        &htt->txbuf.paddr,
                                                        GFP_KERNEL);
        if (!htt->txbuf.vaddr_txbuff_32)
                return -ENOMEM;

        htt->txbuf.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!htt->txbuf.vaddr_txbuff_64)
                return;

        size = htt->txbuf.size;
        dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
                          htt->txbuf.paddr);
        htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        size = htt->max_num_pending_tx *
                        sizeof(struct ath10k_htt_txbuf_64);

        htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
                                                        &htt->txbuf.paddr,
                                                        GFP_KERNEL);
        if (!htt->txbuf.vaddr_txbuff_64)
                return -ENOMEM;

        htt->txbuf.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
        size_t size;

        if (!htt->frag_desc.vaddr_desc_32)
                return;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc);

        dma_free_coherent(htt->ar->dev,
                          size,
                          htt->frag_desc.vaddr_desc_32,
                          htt->frag_desc.paddr);

        htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc);
        htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
                                                          &htt->frag_desc.paddr,
                                                          GFP_KERNEL);
        if (!htt->frag_desc.vaddr_desc_32) {
                ath10k_err(ar, "failed to alloc fragment desc memory\n");
                return -ENOMEM;
        }
        htt->frag_desc.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
        size_t size;

        if (!htt->frag_desc.vaddr_desc_64)
                return;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc_64);

        dma_free_coherent(htt->ar->dev,
                          size,
                          htt->frag_desc.vaddr_desc_64,
                          htt->frag_desc.paddr);

        htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc_64);

        htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
                                                          &htt->frag_desc.paddr,
                                                          GFP_KERNEL);
        if (!htt->frag_desc.vaddr_desc_64) {
                ath10k_err(ar, "failed to alloc fragment desc memory\n");
                return -ENOMEM;
        }
        htt->frag_desc.size = size;

        return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                      ar->running_fw->fw_file.fw_features))
                return;

        size = sizeof(*htt->tx_q_state.vaddr);

        dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
        kfree(htt->tx_q_state.vaddr);
}

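/* The tx queue state buffer uses streaming DMA (kzalloc +
 * dma_map_single) rather than coherent memory, presumably since the
 * host only writes it and can batch updates behind an explicit
 * dma_sync_single_for_device() in __ath10k_htt_tx_txq_sync().
 */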
static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;
        int ret;

        if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                      ar->running_fw->fw_file.fw_features))
                return 0;

        htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
        htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
        htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

        size = sizeof(*htt->tx_q_state.vaddr);
        htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
        if (!htt->tx_q_state.vaddr)
                return -ENOMEM;

        htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
                                               size, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
        if (ret) {
                ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
                kfree(htt->tx_q_state.vaddr);
                return -EIO;
        }

        return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
        WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
        kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
        int ret;
        size_t size;

        size = roundup_pow_of_two(htt->max_num_pending_tx);
        ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
        return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret;

        ret = ath10k_htt_alloc_txbuff(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
                return ret;
        }

        ret = ath10k_htt_alloc_frag_desc(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
                goto free_txbuf;
        }

        ret = ath10k_htt_tx_alloc_txq(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc txq: %d\n", ret);
                goto free_frag_desc;
        }

        ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
                goto free_txq;
        }

        return 0;

free_txq:
        ath10k_htt_tx_free_txq(htt);

free_frag_desc:
        ath10k_htt_free_frag_desc(htt);

free_txbuf:
        ath10k_htt_free_txbuff(htt);

        return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);

        if (htt->tx_mem_allocated)
                return 0;

        if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
                return 0;

        ret = ath10k_htt_tx_alloc_buf(htt);
        if (ret)
                goto free_idr_pending_tx;

        htt->tx_mem_allocated = true;

        return 0;

free_idr_pending_tx:
        idr_destroy(&htt->pending_tx);

        return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
        struct ath10k *ar = ctx;
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};

        ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

        tx_done.msdu_id = msdu_id;
        tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

        ath10k_txrx_tx_unref(htt, &tx_done);

        return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
        if (!htt->tx_mem_allocated)
                return;

        ath10k_htt_free_txbuff(htt);
        ath10k_htt_tx_free_txq(htt);
        ath10k_htt_free_frag_desc(htt);
        ath10k_htt_tx_free_txdone_fifo(htt);
        htt->tx_mem_allocated = false;
}

static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
        ath10k_htc_stop_hl(htt->ar);
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
        ath10k_htt_flush_tx_queue(htt);
        idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
        ath10k_htt_tx_stop(htt);
        ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
        queue_work(ar->workqueue, &ar->bundle_tx_work);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};
        struct htt_cmd_hdr *htt_hdr;
        struct htt_data_tx_desc *desc_hdr = NULL;
        u16 flags1 = 0;
        u16 msdu_id = 0;
        u8 msg_type = 0;

        if (htt->disable_tx_comp) {
                htt_hdr = (struct htt_cmd_hdr *)skb->data;
                msg_type = htt_hdr->msg_type;

                if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
                        desc_hdr = (struct htt_data_tx_desc *)
                                (skb->data + sizeof(*htt_hdr));
                        flags1 = __le16_to_cpu(desc_hdr->flags1);
                        /* Read the msdu id out of the descriptor before the
                         * skb is freed below to avoid a use-after-free.
                         */
                        msdu_id = __le16_to_cpu(desc_hdr->id);
                }
        }

        dev_kfree_skb_any(skb);

        if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
                return;

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx complete msdu id:%u, flags1:%x\n",
                   msdu_id, flags1);

        if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
                return;

        tx_done.status = HTT_TX_COMPL_STATE_ACK;
        tx_done.msdu_id = msdu_id;
        ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
                             u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 24 bit masks so no need to worry
         * about endianness
         */
        memcpy(req->upload_types, &mask, 3);
        memcpy(req->reset_types, &reset_mask, 3);
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

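/* Tell the firmware where the continuous fragment descriptor bank and
 * the tx queue state buffer live. A single bank covers every msdu id,
 * hence a bank_max_id of max_num_pending_tx - 1.
 */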
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_frag_desc_bank_cfg32 *cfg;
        int ret, size;
        u8 info;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

        info = 0;
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

        if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                     ar->running_fw->fw_file.fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

        cfg = &cmd->frag_desc_bank_cfg32;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
        cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
        cfg->bank_id[0].bank_min_id = 0;
        cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                                                    1);

        cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
        cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
        cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
        cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
        cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_frag_desc_bank_cfg64 *cfg;
        int ret, size;
        u8 info;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

        info = 0;
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

        if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                     ar->running_fw->fw_file.fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

        cfg = &cmd->frag_desc_bank_cfg64;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
        cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
        cfg->bank_id[0].bank_min_id = 0;
        cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                                                    1);

        cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
        cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
        cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
        cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
        cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

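/* The rx ring setup message carries the offsets of the sub-structures
 * of struct htt_rx_desc, expressed in 4-byte words, so the firmware
 * knows where to write each piece of rx metadata within a buffer.
 */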
static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
        struct htt_rx_ring_setup_ring32 *ring =
                        (struct htt_rx_ring_setup_ring32 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
        struct htt_rx_ring_setup_ring64 *ring =
                        (struct htt_rx_ring_setup_ring64 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring32 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_32.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_32.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

        ath10k_htt_fill_rx_desc_offset_32(ring);
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring64 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /* HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_64.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_64.hdr.num_rings = 1;

        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

        ath10k_htt_fill_rx_desc_offset_64(ring);
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring32 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_32.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_32.hdr.num_rings = 1;

        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

        memset(ring, 0, sizeof(*ring));
        ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
                                          u8 max_subfrms_ampdu,
                                          u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
                                          u8 max_subfrms_ampdu,
                                          u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf_v2 *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf_v2);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf_v2;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

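/* Answer a firmware tx fetch indication: the token and fetch sequence
 * number are echoed back along with the fetch records describing how
 * many frames and bytes the host is handing over per txq.
 */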
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
                             __le32 token,
                             __le16 fetch_seq_num,
                             struct htt_tx_fetch_record *records,
                             size_t num_records)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        const u16 resp_id = 0;
        int len = 0;
        int ret;

        /* Response IDs are echoed back only for host driver convenience
         * purposes. They aren't used for anything in the driver yet so use 0.
         */

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->tx_fetch_resp);
        len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
        cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
        cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
        cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
        cmd->tx_fetch_resp.token = token;

        memcpy(cmd->tx_fetch_resp.records, records,
               sizeof(records[0]) * num_records);

        ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
                goto err_free_skb;
        }

        return 0;

err_free_skb:
        dev_kfree_skb_any(skb);

        return ret;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
        struct ath10k_vif *arvif;

        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
                return ar->scan.vdev_id;
        } else if (cb->vif) {
                arvif = (void *)cb->vif->drv_priv;
                return arvif->vdev_id;
        } else if (ar->monitor_started) {
                return ar->monitor_vdev_id;
        } else {
                return 0;
        }
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
        struct ieee80211_hdr *hdr = (void *)skb->data;
        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

        if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
                return HTT_DATA_TX_EXT_TID_MGMT;
        else if (cb->flags & ATH10K_SKB_F_QOS)
                return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
        else
                return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

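/* Management frame tx via the HTT_H2T_MSG_TYPE_MGMT_TX command. For
 * protected robust management frames extra tailroom is reserved,
 * presumably for the MIC added during encryption: an MMIE for
 * multicast frames, a CCMP or GCMP MIC otherwise.
 */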
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        int len = 0;
        int msdu_id = -1;
        int res;
        const u8 *peer_addr;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        if (res < 0)
                goto err;

        msdu_id = res;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                peer_addr = hdr->addr1;
                if (is_multicast_ether_addr(peer_addr)) {
                        skb_put(msdu, sizeof(struct ieee80211_mmie_16));
                } else {
                        if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
                            skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
                                skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
                        else
                                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
                }
        }

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, len);

        cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
                dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err:
        return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
        (unsigned int)(sizeof(struct htt_cmd_hdr) + \
        sizeof(struct htt_data_tx_desc) + \
        sizeof(struct ath10k_htc_hdr))

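/* High latency (e.g. SDIO, USB) data tx path: the HTT command header
 * and tx descriptor are pushed in front of the frame and the whole skb
 * is handed to HTC, which is why HTT_TX_HL_NEEDED_HEADROOM bytes of
 * headroom are required.
 */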
static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
                            struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        int res, data_len;
        struct htt_cmd_hdr *cmd_hdr;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct htt_data_tx_desc *tx_desc;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct sk_buff *tmp_skb;
        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
        u8 flags0 = 0;
        u16 flags1 = 0;
        u16 msdu_id = 0;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        data_len = msdu->len;

        switch (txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                /* fall through */
        case ATH10K_HW_TXRX_ETHERNET:
                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

                if (htt->disable_tx_comp)
                        flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
                break;
        }

        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
        }

        /* Prepend the HTT header and TX desc struct to the data message
         * and realloc the skb if it does not have enough headroom.
         */
        if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
                tmp_skb = msdu;

                ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
                           "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
                           skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
                msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
                kfree_skb(tmp_skb);
                if (!msdu) {
                        ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
                        res = -ENOMEM;
                        goto out;
                }
        }

        if (ar->bus_param.hl_msdu_ids) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
                res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
                if (res < 0) {
                        ath10k_err(ar, "msdu_id allocation failed %d\n", res);
                        goto out;
                }
                msdu_id = res;
        }

        /* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
         * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
         * the reference count by one to avoid a use-after-free and a
         * double free.
         */
        skb_get(msdu);

        skb_push(msdu, sizeof(*cmd_hdr));
        skb_push(msdu, sizeof(*tx_desc));
        cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
        tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

        cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        tx_desc->flags0 = flags0;
        tx_desc->flags1 = __cpu_to_le16(flags1);
        tx_desc->len = __cpu_to_le16(data_len);
        tx_desc->id = __cpu_to_le16(msdu_id);
        tx_desc->frags_paddr = 0; /* always zero */
        /* Initialize peer_id to INVALID_PEER because this is NOT
         * Reinjection path
         */
        tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
        return res;
}

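/* Low latency data tx path for 32-bit targets. The transfer is built
 * from two scatter-gather items: the prebuilt per-msdu tx buffer (HTC
 * header, HTT command header and tx descriptor) and a prefetch of the
 * frame payload; the firmware picks up the full payload through the
 * fragment list referenced by frags_paddr.
 */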
1399static int ath10k_htt_tx_32(struct ath10k_htt *htt,
1400                            enum ath10k_hw_txrx_mode txmode,
1401                            struct sk_buff *msdu)
1402{
1403        struct ath10k *ar = htt->ar;
1404        struct device *dev = ar->dev;
1405        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1406        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
1407        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1408        struct ath10k_hif_sg_item sg_items[2];
1409        struct ath10k_htt_txbuf_32 *txbuf;
1410        struct htt_data_tx_desc_frag *frags;
1411        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1412        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1413        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1414        int prefetch_len;
1415        int res;
1416        u8 flags0 = 0;
1417        u16 msdu_id, flags1 = 0;
1418        u16 freq = 0;
1419        u32 frags_paddr = 0;
1420        u32 txbuf_paddr;
1421        struct htt_msdu_ext_desc *ext_desc = NULL;
1422        struct htt_msdu_ext_desc *ext_desc_t = NULL;
1423
1424        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1425        if (res < 0)
1426                goto err;
1427
1428        msdu_id = res;
1429
1430        prefetch_len = min(htt->prefetch_len, msdu->len);
1431        prefetch_len = roundup(prefetch_len, 4);
1432
1433        txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
1434        txbuf_paddr = htt->txbuf.paddr +
1435                      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
1436
1437        if ((ieee80211_is_action(hdr->frame_control) ||
1438             ieee80211_is_deauth(hdr->frame_control) ||
1439             ieee80211_is_disassoc(hdr->frame_control)) &&
1440             ieee80211_has_protected(hdr->frame_control)) {
1441                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1442        } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
1443                   txmode == ATH10K_HW_TXRX_RAW &&
1444                   ieee80211_has_protected(hdr->frame_control)) {
1445                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1446        }
1447
1448        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1449                                       DMA_TO_DEVICE);
1450        res = dma_mapping_error(dev, skb_cb->paddr);
1451        if (res) {
1452                res = -EIO;
1453                goto err_free_msdu_id;
1454        }
1455
1456        if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
1457                freq = ar->scan.roc_freq;
1458
1459        switch (txmode) {
1460        case ATH10K_HW_TXRX_RAW:
1461        case ATH10K_HW_TXRX_NATIVE_WIFI:
1462                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1463                /* fall through */
1464        case ATH10K_HW_TXRX_ETHERNET:
1465                if (ar->hw_params.continuous_frag_desc) {
1466                        ext_desc_t = htt->frag_desc.vaddr_desc_32;
1467                        memset(&ext_desc_t[msdu_id], 0,
1468                               sizeof(struct htt_msdu_ext_desc));
1469                        frags = (struct htt_data_tx_desc_frag *)
1470                                &ext_desc_t[msdu_id].frags;
1471                        ext_desc = &ext_desc_t[msdu_id];
1472                        frags[0].tword_addr.paddr_lo =
1473                                __cpu_to_le32(skb_cb->paddr);
1474                        frags[0].tword_addr.paddr_hi = 0;
1475                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1476
1477                        frags_paddr =  htt->frag_desc.paddr +
1478                                (sizeof(struct htt_msdu_ext_desc) * msdu_id);
1479                } else {
1480                        frags = txbuf->frags;
1481                        frags[0].dword_addr.paddr =
1482                                __cpu_to_le32(skb_cb->paddr);
1483                        frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
1484                        frags[1].dword_addr.paddr = 0;
1485                        frags[1].dword_addr.len = 0;
1486
1487                        frags_paddr = txbuf_paddr;
1488                }
1489                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1490                break;
1491        case ATH10K_HW_TXRX_MGMT:
1492                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1493                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1494                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1495
1496                frags_paddr = skb_cb->paddr;
1497                break;
1498        }
1499
1500        /* Normally all commands go through HTC which manages tx credits for
1501         * each endpoint and notifies when tx is completed.
1502         *
1503         * HTT endpoint is creditless so there's no need to care about HTC
1504         * flags. In that case it is trivial to fill the HTC header here.
1505         *
1506         * MSDU transmission is considered completed upon HTT event. This
1507         * implies no relevant resources can be freed until after the event is
1508         * received. That's why HTC tx completion handler itself is ignored by
1509         * setting NULL to transfer_context for all sg items.
1510         *
1511         * There is simply no point in pushing HTT TX_FRM through HTC tx path
1512         * as it's a waste of resources. By bypassing HTC it is possible to
1513         * avoid extra memory allocations, compress data structures and thus
1514         * improve performance.
1515         */
1516
1517        txbuf->htc_hdr.eid = htt->eid;
1518        txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
1519                                           sizeof(txbuf->cmd_tx) +
1520                                           prefetch_len);
1521        txbuf->htc_hdr.flags = 0;
1522
1523        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1524                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1525
1526        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1527        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
1528        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1529            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1530                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1531                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
1532                if (ar->hw_params.continuous_frag_desc)
1533                        ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
1534        }
1535
1536        /* Prevent firmware from sending up tx inspection requests. There's
1537         * nothing ath10k can do with frames requested for inspection, so force
1538         * it to simply rely on a regular tx completion with discard status.
1539         */
1540        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1541
1542        txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1543        txbuf->cmd_tx.flags0 = flags0;
1544        txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1545        txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1546        txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1547        txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
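            /* peerid and offchan_tx overlay each other (a union in struct
             * htt_data_tx_desc): firmware that accepts a tx frequency takes
             * the 16-bit peerid + freq form, everything else the full
             * 32-bit peerid.
             */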
1548        if (ath10k_mac_tx_frm_has_freq(ar)) {
1549                txbuf->cmd_tx.offchan_tx.peerid =
1550                                __cpu_to_le16(HTT_INVALID_PEERID);
1551                txbuf->cmd_tx.offchan_tx.freq =
1552                                __cpu_to_le16(freq);
1553        } else {
1554                txbuf->cmd_tx.peerid =
1555                                __cpu_to_le32(HTT_INVALID_PEERID);
1556        }
1557
1558        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1559        ath10k_dbg(ar, ATH10K_DBG_HTT,
1560                   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
1561                   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1562                   &skb_cb->paddr, vdev_id, tid, freq);
1563        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1564                        msdu->data, msdu->len);
1565        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1566        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1567
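            /* Two sg items go to HIF: item 0 carries the HTC/HTT headers
             * and tx descriptor, skipping the frag list at the head of the
             * txbuf (firmware reads that via frags_paddr instead); item 1
             * carries the first prefetch_len bytes of the frame so the
             * target can start on the headers before fetching the full
             * MSDU.
             */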
1568        sg_items[0].transfer_id = 0;
1569        sg_items[0].transfer_context = NULL;
1570        sg_items[0].vaddr = &txbuf->htc_hdr;
1571        sg_items[0].paddr = txbuf_paddr +
1572                            sizeof(txbuf->frags);
1573        sg_items[0].len = sizeof(txbuf->htc_hdr) +
1574                          sizeof(txbuf->cmd_hdr) +
1575                          sizeof(txbuf->cmd_tx);
1576
1577        sg_items[1].transfer_id = 0;
1578        sg_items[1].transfer_context = NULL;
1579        sg_items[1].vaddr = msdu->data;
1580        sg_items[1].paddr = skb_cb->paddr;
1581        sg_items[1].len = prefetch_len;
1582
1583        res = ath10k_hif_tx_sg(htt->ar,
1584                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1585                               sg_items, ARRAY_SIZE(sg_items));
1586        if (res)
1587                goto err_unmap_msdu;
1588
1589        return 0;
1590
1591err_unmap_msdu:
1592        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1593err_free_msdu_id:
1594        spin_lock_bh(&htt->tx_lock);
1595        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1596        spin_unlock_bh(&htt->tx_lock);
1597err:
1598        return res;
1599}
1600
1601static int ath10k_htt_tx_64(struct ath10k_htt *htt,
1602                            enum ath10k_hw_txrx_mode txmode,
1603                            struct sk_buff *msdu)
1604{
1605        struct ath10k *ar = htt->ar;
1606        struct device *dev = ar->dev;
1607        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1608        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
1609        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1610        struct ath10k_hif_sg_item sg_items[2];
1611        struct ath10k_htt_txbuf_64 *txbuf;
1612        struct htt_data_tx_desc_frag *frags;
1613        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1614        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1615        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1616        int prefetch_len;
1617        int res;
1618        u8 flags0 = 0;
1619        u16 msdu_id, flags1 = 0;
1620        u16 freq = 0;
1621        dma_addr_t frags_paddr = 0;
1622        dma_addr_t txbuf_paddr;
1623        struct htt_msdu_ext_desc_64 *ext_desc = NULL;
1624        struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
1625
1626        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1627        if (res < 0)
1628                goto err;
1629
1630        msdu_id = res;
1631
1632        prefetch_len = min(htt->prefetch_len, msdu->len);
1633        prefetch_len = roundup(prefetch_len, 4);
1634
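            /* msdu_id indexes both the fixed txbuf slot used below and the
             * matching entry in the fragment descriptor bank. prefetch_len
             * is capped to the frame length and kept 4-byte aligned.
             */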
1635        txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
1636        txbuf_paddr = htt->txbuf.paddr +
1637                      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
1638
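            /* For protected robust management frames and protected raw mode
             * frames, grow the skb by the CCMP MIC length so the DMA
             * mapping below also covers the MIC that hardware crypto will
             * append.
             */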
1639        if ((ieee80211_is_action(hdr->frame_control) ||
1640             ieee80211_is_deauth(hdr->frame_control) ||
1641             ieee80211_is_disassoc(hdr->frame_control)) &&
1642             ieee80211_has_protected(hdr->frame_control)) {
1643                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1644        } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
1645                   txmode == ATH10K_HW_TXRX_RAW &&
1646                   ieee80211_has_protected(hdr->frame_control)) {
1647                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1648        }
1649
1650        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1651                                       DMA_TO_DEVICE);
1652        res = dma_mapping_error(dev, skb_cb->paddr);
1653        if (res) {
1654                res = -EIO;
1655                goto err_free_msdu_id;
1656        }
1657
1658        if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
1659                freq = ar->scan.roc_freq;
1660
1661        switch (txmode) {
1662        case ATH10K_HW_TXRX_RAW:
1663        case ATH10K_HW_TXRX_NATIVE_WIFI:
1664                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1665                /* fall through */
1666        case ATH10K_HW_TXRX_ETHERNET:
1667                if (ar->hw_params.continuous_frag_desc) {
1668                        ext_desc_t = htt->frag_desc.vaddr_desc_64;
1669                        memset(&ext_desc_t[msdu_id], 0,
1670                               sizeof(struct htt_msdu_ext_desc_64));
1671                        frags = (struct htt_data_tx_desc_frag *)
1672                                &ext_desc_t[msdu_id].frags;
1673                        ext_desc = &ext_desc_t[msdu_id];
1674                        frags[0].tword_addr.paddr_lo =
1675                                __cpu_to_le32(skb_cb->paddr);
1676                        frags[0].tword_addr.paddr_hi =
1677                                __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1678                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1679
1680                        frags_paddr = htt->frag_desc.paddr +
1681                                (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
1682                } else {
1683                        frags = txbuf->frags;
1684                        frags[0].tword_addr.paddr_lo =
1685                                __cpu_to_le32(skb_cb->paddr);
1686                        frags[0].tword_addr.paddr_hi =
1687                                __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1688                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1689                        frags[1].tword_addr.paddr_lo = 0;
1690                        frags[1].tword_addr.paddr_hi = 0;
1691                        frags[1].tword_addr.len_16 = 0;

                            /* As in the 32-bit path, the inline frag list
                             * sits at the head of the txbuf; without this,
                             * frags_paddr would stay 0 on this branch.
                             */
                            frags_paddr = txbuf_paddr;
1692                }
1693                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1694                break;
1695        case ATH10K_HW_TXRX_MGMT:
1696                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1697                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1698                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1699
1700                frags_paddr = skb_cb->paddr;
1701                break;
1702        }
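            /* Unlike the 32-bit descriptor, every fragment entry here uses
             * the tword form: 32 low address bits, 16 high address bits and
             * a 16-bit length. frags_paddr itself is programmed as a full
             * 64-bit value further down.
             */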
1703
1704        /* Normally all commands go through HTC which manages tx credits for
1705         * each endpoint and notifies when tx is completed.
1706         *
1707         * HTT endpoint is creditless so there's no need to care about HTC
1708         * flags. In that case it is trivial to fill the HTC header here.
1709         *
1710         * MSDU transmission is considered completed upon HTT event. This
1711         * implies no relevant resources can be freed until after the event is
1712         * received. That's why the HTC tx completion handler itself is ignored:
1713         * transfer_context is set to NULL for all sg items.
1714         *
1715         * There is simply no point in pushing HTT TX_FRM through HTC tx path
1716         * as it's a waste of resources. By bypassing HTC it is possible to
1717         * avoid extra memory allocations, compress data structures and thus
1718         * improve performance.
1719         */
1720
1721        txbuf->htc_hdr.eid = htt->eid;
1722        txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
1723                                           sizeof(txbuf->cmd_tx) +
1724                                           prefetch_len);
1725        txbuf->htc_hdr.flags = 0;
1726
1727        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1728                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1729
1730        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1731        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
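            /* On 64-bit targets the per-MSDU checksum enable bit lives in
             * word 3 of the ext descriptor's TSO flags rather than in the
             * 8-bit flags field used by the 32-bit descriptor.
             */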
1732        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1733            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1734                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1735                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
1736                if (ar->hw_params.continuous_frag_desc) {
1737                        memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
1738                        ext_desc->tso_flag[3] |=
1739                                __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
1740                }
1741        }
1742
1743        /* Prevent firmware from sending up tx inspection requests. There's
1744         * nothing ath10k can do with frames requested for inspection, so force
1745         * it to simply rely on a regular tx completion with discard status.
1746         */
1747        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1748
1749        txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1750        txbuf->cmd_tx.flags0 = flags0;
1751        txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1752        txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1753        txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1754
1755        /* fill fragment descriptor */
1756        txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
1757        if (ath10k_mac_tx_frm_has_freq(ar)) {
1758                txbuf->cmd_tx.offchan_tx.peerid =
1759                                __cpu_to_le16(HTT_INVALID_PEERID);
1760                txbuf->cmd_tx.offchan_tx.freq =
1761                                __cpu_to_le16(freq);
1762        } else {
1763                txbuf->cmd_tx.peerid =
1764                                __cpu_to_le32(HTT_INVALID_PEERID);
1765        }
1766
1767        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1768        ath10k_dbg(ar, ATH10K_DBG_HTT,
1769                   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
1770                   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1771                   &skb_cb->paddr, vdev_id, tid, freq);
1772        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1773                        msdu->data, msdu->len);
1774        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1775        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1776
1777        sg_items[0].transfer_id = 0;
1778        sg_items[0].transfer_context = NULL;
1779        sg_items[0].vaddr = &txbuf->htc_hdr;
1780        sg_items[0].paddr = txbuf_paddr +
1781                            sizeof(txbuf->frags);
1782        sg_items[0].len = sizeof(txbuf->htc_hdr) +
1783                          sizeof(txbuf->cmd_hdr) +
1784                          sizeof(txbuf->cmd_tx);
1785
1786        sg_items[1].transfer_id = 0;
1787        sg_items[1].transfer_context = NULL;
1788        sg_items[1].vaddr = msdu->data;
1789        sg_items[1].paddr = skb_cb->paddr;
1790        sg_items[1].len = prefetch_len;
1791
1792        res = ath10k_hif_tx_sg(htt->ar,
1793                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1794                               sg_items, ARRAY_SIZE(sg_items));
1795        if (res)
1796                goto err_unmap_msdu;
1797
1798        return 0;
1799
1800err_unmap_msdu:
1801        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1802err_free_msdu_id:
1803        spin_lock_bh(&htt->tx_lock);
1804        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1805        spin_unlock_bh(&htt->tx_lock);
1806err:
1807        return res;
1808}
1809
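    /* Per-target op tables: the 32-bit and 64-bit low-latency variants
     * differ only in descriptor layout and DMA address width, while the
     * high-latency table has no frag/txbuf allocators to plug in since it
     * keeps no host-resident descriptor banks.
     */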
1810static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
1811        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
1812        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1813        .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
1814        .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
1815        .htt_tx = ath10k_htt_tx_32,
1816        .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
1817        .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
1818        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1819};
1820
1821static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
1822        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
1823        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
1824        .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
1825        .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
1826        .htt_tx = ath10k_htt_tx_64,
1827        .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
1828        .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
1829        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
1830};
1831
1832static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
1833        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
1834        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1835        .htt_tx = ath10k_htt_tx_hl,
1836        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1837        .htt_flush_tx = ath10k_htt_flush_tx_queue,
1838};
1839
1840void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
1841{
1842        struct ath10k *ar = htt->ar;
1843
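            /* High-latency (e.g. SDIO/USB) devices are matched on bus type
             * first; DMA-capable targets then choose by target address
             * width.
             */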
1844        if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
1845                htt->tx_ops = &htt_tx_ops_hl;
1846        else if (ar->hw_params.target_64bit)
1847                htt->tx_ops = &htt_tx_ops_64;
1848        else
1849                htt->tx_ops = &htt_tx_ops_32;
1850}
1851