linux/drivers/net/wireless/ath/ath10k/htt_tx.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

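/* Encode a queue depth given in bytes into the 8-bit HTT queue state entry
 * format: the count is scaled to 128-byte units and compressed as
 * factor * 8^exp, so the decoded depth is roughly factor * 128 * 8^exp
 * bytes. For example, count = 16384 yields factor = 128, which the loop
 * reduces to factor = 16 with exp = 1 (16 * 128 * 8 = 16384). Depths that
 * do not fit with exp < 4 saturate to 0xff.
 */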
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
        int exp;
        int factor;

        exp = 0;
        factor = count >> 7;

        while (factor >= 64 && exp < 4) {
                factor >>= 3;
                exp++;
        }

        if (exp == 4)
                return 0xff;

        if (count > 0)
                factor = max(1, factor);

        return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
               SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

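/* Refresh the queue state entry for a single txq in the DMA-mapped
 * tx_q_state area shared with the device: one encoded depth byte per
 * (tid, peer) pair plus a per-tid bitmap of peers with queued traffic.
 * Only relevant in push-pull mode; the caller must hold htt.tx_lock.
 */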
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                                       struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;
        struct ath10k_sta *arsta;
        struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
        unsigned long frame_cnt;
        unsigned long byte_cnt;
        int idx;
        u32 bit;
        u16 peer_id;
        u8 tid;
        u8 count;

        lockdep_assert_held(&ar->htt.tx_lock);

        if (!ar->htt.tx_q_state.enabled)
                return;

        if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
                return;

        if (txq->sta) {
                arsta = (void *)txq->sta->drv_priv;
                peer_id = arsta->peer_id;
        } else {
                peer_id = arvif->peer_id;
        }

        tid = txq->tid;
        bit = BIT(peer_id % 32);
        idx = peer_id / 32;

        ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
        count = ath10k_htt_tx_txq_calc_size(byte_cnt);

        if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
            unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
                ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
                            peer_id, tid);
                return;
        }

        ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
        ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
        ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
                   peer_id, tid, count);
}

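/* Commit the queue state updates made by __ath10k_htt_tx_txq_recalc():
 * bump the sequence number so the consumer can spot a new snapshot, then
 * sync the whole structure to the device for DMA.
 */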
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
        u32 seq;
        size_t size;

        lockdep_assert_held(&ar->htt.tx_lock);

        if (!ar->htt.tx_q_state.enabled)
                return;

        if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
                return;

        seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
        seq++;
        ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
                   seq);

        size = sizeof(*ar->htt.tx_q_state.vaddr);
        dma_sync_single_for_device(ar->dev,
                                   ar->htt.tx_q_state.paddr,
                                   size,
                                   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;

        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_recalc(hw, txq);
        spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_sync(ar);
        spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;

        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_recalc(hw, txq);
        __ath10k_htt_tx_txq_sync(ar);
        spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx)
                return -EBUSY;

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

        return 0;
}

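/* Account for an outgoing management frame. All management frames are
 * counted once a probe response descriptor threshold is configured, but
 * only probe responses are refused with -EBUSY when the threshold is
 * exceeded.
 */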
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
                                   bool is_presp)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
                return 0;

        if (is_presp &&
            ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
                return -EBUSY;

        htt->num_pending_mgmt_tx++;

        return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        if (!htt->ar->hw_params.max_probe_resp_desc_thres)
                return;

        htt->num_pending_mgmt_tx--;
}

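/* Allocate a host-side MSDU id for a frame and remember the skb in the
 * pending_tx IDR so the tx completion path can look it up again. Returns
 * the new id (>= 0) or a negative errno.
 */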
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
        struct ath10k *ar = htt->ar;
        int ret;

        spin_lock_bh(&htt->tx_lock);
        ret = idr_alloc(&htt->pending_tx, skb, 0,
                        htt->max_num_pending_tx, GFP_ATOMIC);
        spin_unlock_bh(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

        return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);

        idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!htt->txbuf.vaddr_txbuff_32)
                return;

        size = htt->txbuf.size;
        dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
                          htt->txbuf.paddr);
        htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        size = htt->max_num_pending_tx *
                        sizeof(struct ath10k_htt_txbuf_32);

        htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
                                                        &htt->txbuf.paddr,
                                                        GFP_KERNEL);
        if (!htt->txbuf.vaddr_txbuff_32)
                return -ENOMEM;

        htt->txbuf.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!htt->txbuf.vaddr_txbuff_64)
                return;

        size = htt->txbuf.size;
        dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
                          htt->txbuf.paddr);
        htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        size = htt->max_num_pending_tx *
                        sizeof(struct ath10k_htt_txbuf_64);

        htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
                                                        &htt->txbuf.paddr,
                                                        GFP_KERNEL);
        if (!htt->txbuf.vaddr_txbuff_64)
                return -ENOMEM;

        htt->txbuf.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
        size_t size;

        if (!htt->frag_desc.vaddr_desc_32)
                return;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc);

        dma_free_coherent(htt->ar->dev,
                          size,
                          htt->frag_desc.vaddr_desc_32,
                          htt->frag_desc.paddr);

        htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc);
        htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
                                                          &htt->frag_desc.paddr,
                                                          GFP_KERNEL);
        if (!htt->frag_desc.vaddr_desc_32) {
                ath10k_err(ar, "failed to alloc fragment desc memory\n");
                return -ENOMEM;
        }
        htt->frag_desc.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
        size_t size;

        if (!htt->frag_desc.vaddr_desc_64)
                return;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc_64);

        dma_free_coherent(htt->ar->dev,
                          size,
                          htt->frag_desc.vaddr_desc_64,
                          htt->frag_desc.paddr);

        htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc_64);

        htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
                                                          &htt->frag_desc.paddr,
                                                          GFP_KERNEL);
        if (!htt->frag_desc.vaddr_desc_64) {
                ath10k_err(ar, "failed to alloc fragment desc memory\n");
                return -ENOMEM;
        }
        htt->frag_desc.size = size;

        return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                      ar->running_fw->fw_file.fw_features))
                return;

        size = sizeof(*htt->tx_q_state.vaddr);

        dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
        kfree(htt->tx_q_state.vaddr);
}

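/* Allocate and DMA-map the peer/tid queue state structure used for peer
 * flow control. Firmware without ATH10K_FW_FEATURE_PEER_FLOW_CONTROL
 * never reads it, so this is a no-op in that case.
 */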
static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;
        int ret;

        if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                      ar->running_fw->fw_file.fw_features))
                return 0;

        htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
        htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
        htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

        size = sizeof(*htt->tx_q_state.vaddr);
        htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
        if (!htt->tx_q_state.vaddr)
                return -ENOMEM;

        htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
                                               size, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
        if (ret) {
                ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
                kfree(htt->tx_q_state.vaddr);
                return -EIO;
        }

        return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
        WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
        kfifo_free(&htt->txdone_fifo);
}

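/* The txdone fifo queues tx completion reports between the rx path and
 * completion processing. kfifo capacities are powers of two, so round
 * max_num_pending_tx up to guarantee room for every outstanding MSDU.
 */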
static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
        int ret;
        size_t size;

        size = roundup_pow_of_two(htt->max_num_pending_tx);
        ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
        return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret;

        ret = ath10k_htt_alloc_txbuff(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
                return ret;
        }

        ret = ath10k_htt_alloc_frag_desc(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
                goto free_txbuf;
        }

        ret = ath10k_htt_tx_alloc_txq(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc txq: %d\n", ret);
                goto free_frag_desc;
        }

        ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
                goto free_txq;
        }

        return 0;

free_txq:
        ath10k_htt_tx_free_txq(htt);

free_frag_desc:
        ath10k_htt_free_frag_desc(htt);

free_txbuf:
        ath10k_htt_free_txbuff(htt);

        return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);

        if (htt->tx_mem_allocated)
                return 0;

        if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
                return 0;

        ret = ath10k_htt_tx_alloc_buf(htt);
        if (ret)
                goto free_idr_pending_tx;

        htt->tx_mem_allocated = true;

        return 0;

free_idr_pending_tx:
        idr_destroy(&htt->pending_tx);

        return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
        struct ath10k *ar = ctx;
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};

        ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);

        tx_done.msdu_id = msdu_id;
        tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

        ath10k_txrx_tx_unref(htt, &tx_done);

        return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
        if (!htt->tx_mem_allocated)
                return;

        ath10k_htt_free_txbuff(htt);
        ath10k_htt_tx_free_txq(htt);
        ath10k_htt_free_frag_desc(htt);
        ath10k_htt_tx_free_txdone_fifo(htt);
        htt->tx_mem_allocated = false;
}

static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
        ath10k_htc_stop_hl(htt->ar);
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
        ath10k_htt_flush_tx_queue(htt);
        idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
        ath10k_htt_tx_stop(htt);
        ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
        queue_work(ar->workqueue, &ar->bundle_tx_work);
}

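/* HTC-level completion of an HTT message. Normally the skb is simply
 * freed; when tx completions are disabled (htt->disable_tx_comp) no HTT
 * completion event will follow for data frames, so an ACK status is
 * synthesized here from the echoed tx descriptor instead. Frames that
 * set HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE are skipped, as they still get
 * a real completion.
 */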
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};
        struct htt_cmd_hdr *htt_hdr;
        struct htt_data_tx_desc *desc_hdr = NULL;
        u16 flags1 = 0;
        u8 msg_type = 0;

        if (htt->disable_tx_comp) {
                htt_hdr = (struct htt_cmd_hdr *)skb->data;
                msg_type = htt_hdr->msg_type;

                if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
                        desc_hdr = (struct htt_data_tx_desc *)
                                (skb->data + sizeof(*htt_hdr));
                        flags1 = __le16_to_cpu(desc_hdr->flags1);
                        skb_pull(skb, sizeof(struct htt_cmd_hdr));
                        skb_pull(skb, sizeof(struct htt_data_tx_desc));
                }
        }

        dev_kfree_skb_any(skb);

        if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
                return;

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx complete msdu id:%u, flags1:%x\n",
                   __le16_to_cpu(desc_hdr->id), flags1);

        if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
                return;

        tx_done.status = HTT_TX_COMPL_STATE_ACK;
        tx_done.msdu_id = __le16_to_cpu(desc_hdr->id);
        ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
                             u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 24 bit masks so no need to worry
         * about endian support
         */
        memcpy(req->upload_types, &mask, 3);
        memcpy(req->reset_types, &reset_mask, 3);
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

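/* Tell the firmware where the fragment descriptor bank lives (32-bit
 * addressing variant): a single bank covering msdu ids
 * 0..max_num_pending_tx-1, plus the queue state area when peer flow
 * control is in use.
 */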
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_frag_desc_bank_cfg32 *cfg;
        int ret, size;
        u8 info;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

        info = 0;
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

        if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                     ar->running_fw->fw_file.fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

        cfg = &cmd->frag_desc_bank_cfg32;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
        cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
        cfg->bank_id[0].bank_min_id = 0;
        cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                                                    1);

        cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
        cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
        cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
        cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
        cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_frag_desc_bank_cfg64 *cfg;
        int ret, size;
        u8 info;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

        info = 0;
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

        if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                     ar->running_fw->fw_file.fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

        cfg = &cmd->frag_desc_bank_cfg64;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
        cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
        cfg->bank_id[0].bank_min_id = 0;
        cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                                                    1);

        cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
        cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
        cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
        cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
        cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

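/* The rx ring setup message carries the offsets of the individual fields
 * within struct htt_rx_desc, expressed in 4-byte words (hence the
 * division by 4 in desc_offset()).
 */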
static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
        struct htt_rx_ring_setup_ring32 *ring =
                        (struct htt_rx_ring_setup_ring32 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
        struct htt_rx_ring_setup_ring64 *ring =
                        (struct htt_rx_ring_setup_ring64 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring32 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_32.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_32.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

        ath10k_htt_fill_rx_desc_offset_32(ring);
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring64 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /* HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_64.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_64.hdr.num_rings = 1;

        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

        ath10k_htt_fill_rx_desc_offset_64(ring);
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring32 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_32.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_32.hdr.num_rings = 1;

        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

        memset(ring, 0, sizeof(*ring));
        ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
                                          u8 max_subfrms_ampdu,
                                          u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
                                          u8 max_subfrms_ampdu,
                                          u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf_v2 *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf_v2);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf_v2;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

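/* Reply to a tx fetch indication from firmware (push-pull flow control):
 * echo the token and fetch sequence number back together with the fetch
 * records being acted upon.
 */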
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
                             __le32 token,
                             __le16 fetch_seq_num,
                             struct htt_tx_fetch_record *records,
                             size_t num_records)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        const u16 resp_id = 0;
        int len = 0;
        int ret;

        /* Response IDs are echoed back only for host driver convenience
         * purposes. They aren't used for anything in the driver yet, so use 0.
         */

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->tx_fetch_resp);
        len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
        cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
        cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
        cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
        cmd->tx_fetch_resp.token = token;

        memcpy(cmd->tx_fetch_resp.records, records,
               sizeof(records[0]) * num_records);

        ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
                goto err_free_skb;
        }

        return 0;

err_free_skb:
        dev_kfree_skb_any(skb);

        return ret;
}

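/* Resolve the vdev a frame should be transmitted on: off-channel frames
 * go out on the scan vdev, frames with a vif use that vif's vdev, and as
 * a last resort the monitor vdev (if running) or vdev 0 is used.
 */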
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
        struct ath10k_vif *arvif;

        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
                return ar->scan.vdev_id;
        } else if (cb->vif) {
                arvif = (void *)cb->vif->drv_priv;
                return arvif->vdev_id;
        } else if (ar->monitor_started) {
                return ar->monitor_vdev_id;
        } else {
                return 0;
        }
}

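/* Pick the HTT extended TID for a frame: management frames get their own
 * TID, QoS data uses the TID from skb->priority, and everything else is
 * sent as non-QoS multicast/broadcast.
 */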
static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
        struct ieee80211_hdr *hdr = (void *)skb->data;
        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

        if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
                return HTT_DATA_TX_EXT_TID_MGMT;
        else if (cb->flags & ATH10K_SKB_F_QOS)
                return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
        else
                return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

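/* Transmit a management frame via HTT_H2T_MSG_TYPE_MGMT_TX. Protected
 * action/deauth/disassoc frames are grown to reserve room for the MIC
 * (MMIE for multicast), and up to HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes of
 * the frame header are copied into the command descriptor itself.
 */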
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        int len = 0;
        int msdu_id = -1;
        int res;
        const u8 *peer_addr;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        if (res < 0)
                goto err;

        msdu_id = res;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                peer_addr = hdr->addr1;
                if (is_multicast_ether_addr(peer_addr)) {
                        skb_put(msdu, sizeof(struct ieee80211_mmie_16));
                } else {
                        if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
                            skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
                                skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
                        else
                                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
                }
        }

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, len);

        cmd->hdr.msg_type       = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
                dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err:
        return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
        (unsigned int)(sizeof(struct htt_cmd_hdr) + \
        sizeof(struct htt_data_tx_desc) + \
        sizeof(struct ath10k_htc_hdr))

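/* High-latency tx path (e.g. SDIO/USB targets): instead of handing the
 * hardware a scatter-gather list, the HTT command header and tx
 * descriptor are pushed in front of the frame itself, which may require
 * reallocating the skb to gain HTT_TX_HL_NEEDED_HEADROOM bytes of
 * headroom.
 */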
static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
                            struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        int res, data_len;
        struct htt_cmd_hdr *cmd_hdr;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct htt_data_tx_desc *tx_desc;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct sk_buff *tmp_skb;
        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
        u8 flags0 = 0;
        u16 flags1 = 0;
        u16 msdu_id = 0;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        data_len = msdu->len;

        switch (txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                fallthrough;
        case ATH10K_HW_TXRX_ETHERNET:
                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

                if (htt->disable_tx_comp)
                        flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
                break;
        }

        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
        }

        /* Prepend the HTT header and TX desc struct to the data message
         * and realloc the skb if it does not have enough headroom.
         */
        if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
                tmp_skb = msdu;

                ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
                           "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
                           skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
                msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
                kfree_skb(tmp_skb);
                if (!msdu) {
                        ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
                        res = -ENOMEM;
                        goto out;
                }
        }

        if (ar->bus_param.hl_msdu_ids) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
                res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
                if (res < 0) {
                        ath10k_err(ar, "msdu_id allocation failed %d\n", res);
                        goto out;
                }
                msdu_id = res;
        }

        /* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
         * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
         * reference by one to avoid a use-after-free case and a double
         * free.
         */
        skb_get(msdu);

        skb_push(msdu, sizeof(*cmd_hdr));
        skb_push(msdu, sizeof(*tx_desc));
        cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
        tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

        cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        tx_desc->flags0 = flags0;
        tx_desc->flags1 = __cpu_to_le16(flags1);
        tx_desc->len = __cpu_to_le16(data_len);
        tx_desc->id = __cpu_to_le16(msdu_id);
        tx_desc->frags_paddr = 0; /* always zero */
        /* Initialize peer_id to INVALID_PEER because this is NOT
         * Reinjection path
         */
        tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
        return res;
}

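/* Low-latency tx path for targets with 32-bit DMA addressing. The frame
 * is handed to the hardware as two scatter-gather items: the prebuilt
 * per-msdu tx buffer (HTC header, HTT command and fragment list) and the
 * frame data itself, bypassing the regular HTC send path (see the
 * comment further down).
 */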
static int ath10k_htt_tx_32(struct ath10k_htt *htt,
                            enum ath10k_hw_txrx_mode txmode,
                            struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
        struct ath10k_htt_txbuf_32 *txbuf;
        struct htt_data_tx_desc_frag *frags;
        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
        int prefetch_len;
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
        u16 freq = 0;
        u32 frags_paddr = 0;
        u32 txbuf_paddr;
        struct htt_msdu_ext_desc *ext_desc = NULL;
        struct htt_msdu_ext_desc *ext_desc_t = NULL;

        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        if (res < 0)
                goto err;

        msdu_id = res;

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
        txbuf_paddr = htt->txbuf.paddr +
                      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
                   txmode == ATH10K_HW_TXRX_RAW &&
                   ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_msdu_id;
        }

        if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
                freq = ar->scan.roc_freq;

        switch (txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                fallthrough;
        case ATH10K_HW_TXRX_ETHERNET:
                if (ar->hw_params.continuous_frag_desc) {
                        ext_desc_t = htt->frag_desc.vaddr_desc_32;
                        memset(&ext_desc_t[msdu_id], 0,
                               sizeof(struct htt_msdu_ext_desc));
                        frags = (struct htt_data_tx_desc_frag *)
                                &ext_desc_t[msdu_id].frags;
                        ext_desc = &ext_desc_t[msdu_id];
                        frags[0].tword_addr.paddr_lo =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].tword_addr.paddr_hi = 0;
                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

                        frags_paddr = htt->frag_desc.paddr +
                                (sizeof(struct htt_msdu_ext_desc) * msdu_id);
                } else {
                        frags = txbuf->frags;
                        frags[0].dword_addr.paddr =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
                        frags[1].dword_addr.paddr = 0;
                        frags[1].dword_addr.len = 0;

                        frags_paddr = txbuf_paddr;
                }
                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

                frags_paddr = skb_cb->paddr;
                break;
        }

        /* Normally all commands go through HTC, which manages tx credits for
         * each endpoint and notifies us when tx is completed.
         *
         * The HTT endpoint is creditless, so there's no need to care about
         * HTC flags. It is therefore trivial to fill in the HTC header here.
         *
         * MSDU transmission is considered completed upon HTT event. This
         * implies no relevant resources can be freed until after the event is
         * received. That's why the HTC tx completion handler is ignored here
         * by setting transfer_context to NULL for all sg items.
         *
         * There is simply no point in pushing HTT TX_FRM through the HTC tx
         * path as it would waste resources. By bypassing HTC it is possible
         * to avoid extra memory allocations, compress data structures and
         * thus improve performance.
         */

        txbuf->htc_hdr.eid = htt->eid;
        txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
                                           sizeof(txbuf->cmd_tx) +
                                           prefetch_len);
        txbuf->htc_hdr.flags = 0;

        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
                if (ar->hw_params.continuous_frag_desc)
                        ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
        }

1538        /* Prevent firmware from sending up tx inspection requests. There's
1539         * nothing ath10k can do with frames requested for inspection so force
1540         * it to simply rely on a regular tx completion with discard status.
1541         */
1542        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1543
1544        txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1545        txbuf->cmd_tx.flags0 = flags0;
1546        txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1547        txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1548        txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1549        txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
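            /* peerid and offchan_tx share storage in the tx descriptor. freq is
             * non-zero only for off-channel (remain-on-channel) frames, where
             * it tells the firmware which channel to transmit on.
             */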
1550        if (ath10k_mac_tx_frm_has_freq(ar)) {
1551                txbuf->cmd_tx.offchan_tx.peerid =
1552                                __cpu_to_le16(HTT_INVALID_PEERID);
1553                txbuf->cmd_tx.offchan_tx.freq =
1554                                __cpu_to_le16(freq);
1555        } else {
1556                txbuf->cmd_tx.peerid =
1557                                __cpu_to_le32(HTT_INVALID_PEERID);
1558        }
1559
1560        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1561        ath10k_dbg(ar, ATH10K_DBG_HTT,
1562                   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
1563                   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1564                   &skb_cb->paddr, vdev_id, tid, freq);
1565        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1566                        msdu->data, msdu->len);
1567        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1568        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1569
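            /* Two gather items make up the transfer: item 0 is the HTC/HTT
             * headers inside the txbuf (its paddr skips past the frags array at
             * the start of the struct), item 1 is the first prefetch_len bytes
             * of the frame itself.
             */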
1570        sg_items[0].transfer_id = 0;
1571        sg_items[0].transfer_context = NULL;
1572        sg_items[0].vaddr = &txbuf->htc_hdr;
1573        sg_items[0].paddr = txbuf_paddr +
1574                            sizeof(txbuf->frags);
1575        sg_items[0].len = sizeof(txbuf->htc_hdr) +
1576                          sizeof(txbuf->cmd_hdr) +
1577                          sizeof(txbuf->cmd_tx);
1578
1579        sg_items[1].transfer_id = 0;
1580        sg_items[1].transfer_context = NULL;
1581        sg_items[1].vaddr = msdu->data;
1582        sg_items[1].paddr = skb_cb->paddr;
1583        sg_items[1].len = prefetch_len;
1584
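            /* Hand both items to HIF as one scatter-gather transfer. As noted
             * above, completion is reported via the HTT tx completion event
             * rather than through HTC.
             */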
1585        res = ath10k_hif_tx_sg(htt->ar,
1586                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1587                               sg_items, ARRAY_SIZE(sg_items));
1588        if (res)
1589                goto err_unmap_msdu;
1590
1591        return 0;
1592
1593err_unmap_msdu:
1594        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1595err_free_msdu_id:
1596        spin_lock_bh(&htt->tx_lock);
1597        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1598        spin_unlock_bh(&htt->tx_lock);
1599err:
1600        return res;
1601}
1602
1603static int ath10k_htt_tx_64(struct ath10k_htt *htt,
1604                            enum ath10k_hw_txrx_mode txmode,
1605                            struct sk_buff *msdu)
1606{
1607        struct ath10k *ar = htt->ar;
1608        struct device *dev = ar->dev;
1609        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1610        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
1611        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1612        struct ath10k_hif_sg_item sg_items[2];
1613        struct ath10k_htt_txbuf_64 *txbuf;
1614        struct htt_data_tx_desc_frag *frags;
1615        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1616        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1617        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1618        int prefetch_len;
1619        int res;
1620        u8 flags0 = 0;
1621        u16 msdu_id, flags1 = 0;
1622        u16 freq = 0;
1623        dma_addr_t frags_paddr = 0;
1624        dma_addr_t txbuf_paddr;
1625        struct htt_msdu_ext_desc_64 *ext_desc = NULL;
1626        struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
1627
1628        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1629        if (res < 0)
1630                goto err;
1631
1632        msdu_id = res;
1633
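            /* A prefetch_len chunk of the frame rides along with the command
             * (sg item 1 below), presumably so the firmware can parse the
             * headers before the fragment list DMA completes; round up to keep
             * the transfer 4-byte aligned.
             */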
1634        prefetch_len = min(htt->prefetch_len, msdu->len);
1635        prefetch_len = roundup(prefetch_len, 4);
1636
1637        txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
1638        txbuf_paddr = htt->txbuf.paddr +
1639                      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
1640
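            /* For protected robust management frames, and for protected frames
             * sent in raw mode with hardware crypto, extend the buffer so the
             * mapping below also covers the CCMP MIC appended during
             * encryption.
             */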
1641        if ((ieee80211_is_action(hdr->frame_control) ||
1642             ieee80211_is_deauth(hdr->frame_control) ||
1643             ieee80211_is_disassoc(hdr->frame_control)) &&
1644             ieee80211_has_protected(hdr->frame_control)) {
1645                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1646        } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
1647                   txmode == ATH10K_HW_TXRX_RAW &&
1648                   ieee80211_has_protected(hdr->frame_control)) {
1649                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1650        }
1651
1652        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1653                                       DMA_TO_DEVICE);
1654        res = dma_mapping_error(dev, skb_cb->paddr);
1655        if (res) {
1656                res = -EIO;
1657                goto err_free_msdu_id;
1658        }
1659
1660        if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
1661                freq = ar->scan.roc_freq;
1662
1663        switch (txmode) {
1664        case ATH10K_HW_TXRX_RAW:
1665        case ATH10K_HW_TXRX_NATIVE_WIFI:
1666                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1667                fallthrough;
1668        case ATH10K_HW_TXRX_ETHERNET:
1669                if (ar->hw_params.continuous_frag_desc) {
1670                        ext_desc_t = htt->frag_desc.vaddr_desc_64;
1671                        memset(&ext_desc_t[msdu_id], 0,
1672                               sizeof(struct htt_msdu_ext_desc_64));
1673                        frags = (struct htt_data_tx_desc_frag *)
1674                                &ext_desc_t[msdu_id].frags;
1675                        ext_desc = &ext_desc_t[msdu_id];
1676                        frags[0].tword_addr.paddr_lo =
1677                                __cpu_to_le32(skb_cb->paddr);
1678                        frags[0].tword_addr.paddr_hi =
1679                                __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1680                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1681
1682                        frags_paddr = htt->frag_desc.paddr +
1683                                (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
1684                } else {
1685                        frags = txbuf->frags;
1686                        frags[0].tword_addr.paddr_lo =
1687                                                __cpu_to_le32(skb_cb->paddr);
1688                        frags[0].tword_addr.paddr_hi =
1689                                __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1690                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1691                        frags[1].tword_addr.paddr_lo = 0;
1692                        frags[1].tword_addr.paddr_hi = 0;
1693                        frags[1].tword_addr.len_16 = 0;
1694                }
1695                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1696                break;
1697        case ATH10K_HW_TXRX_MGMT:
1698                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1699                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1700                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1701
1702                frags_paddr = skb_cb->paddr;
1703                break;
1704        }
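            /* Unlike the 32-bit variant, tword_addr splits each DMA address
             * into a 32-bit low word and a 16-bit high word, so fragment
             * pointers can reach up to 48-bit target addresses.
             */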
1705
1706        /* Normally all commands go through HTC, which manages tx credits for
1707         * each endpoint and notifies when tx is completed.
1708         *
1709         * The HTT endpoint is creditless, so there's no need to care about
1710         * HTC flags. In that case it is trivial to fill the HTC header here.
1711         *
1712         * MSDU transmission is considered completed upon the HTT event. This
1713         * implies no relevant resources can be freed until after the event is
1714         * received. That's why the HTC tx completion handler itself is
1715         * ignored by setting transfer_context to NULL for all sg items.
1716         *
1717         * There is simply no point in pushing HTT TX_FRM through the HTC tx
1718         * path as it's a waste of resources. By bypassing HTC it is possible
1719         * to avoid extra memory allocations, compress data structures and
1720         * thus improve performance.
1721         */
1722
1723        txbuf->htc_hdr.eid = htt->eid;
1724        txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
1725                                           sizeof(txbuf->cmd_tx) +
1726                                           prefetch_len);
1727        txbuf->htc_hdr.flags = 0;
1728
1729        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1730                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1731
1732        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1733        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
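            /* Checksum offload is never requested in raw mode. On 64-bit
             * targets the enable bit lives in word 3 of the TSO flags rather
             * than in ext_desc->flags as on 32-bit targets.
             */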
1734        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1735            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1736                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1737                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
1738                if (ar->hw_params.continuous_frag_desc) {
1739                        memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
1740                        ext_desc->tso_flag[3] |=
1741                                __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
1742                }
1743        }
1744
1745        /* Prevent firmware from sending up tx inspection requests. There's
1746         * nothing ath10k can do with frames requested for inspection so force
1747         * it to simply rely on a regular tx completion with discard status.
1748         */
1749        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1750
1751        txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1752        txbuf->cmd_tx.flags0 = flags0;
1753        txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1754        txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1755        txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1756
1757        /* fill fragment descriptor */
1758        txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
1759        if (ath10k_mac_tx_frm_has_freq(ar)) {
1760                txbuf->cmd_tx.offchan_tx.peerid =
1761                                __cpu_to_le16(HTT_INVALID_PEERID);
1762                txbuf->cmd_tx.offchan_tx.freq =
1763                                __cpu_to_le16(freq);
1764        } else {
1765                txbuf->cmd_tx.peerid =
1766                                __cpu_to_le32(HTT_INVALID_PEERID);
1767        }
1768
1769        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1770        ath10k_dbg(ar, ATH10K_DBG_HTT,
1771                   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
1772                   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1773                   &skb_cb->paddr, vdev_id, tid, freq);
1774        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1775                        msdu->data, msdu->len);
1776        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1777        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1778
1779        sg_items[0].transfer_id = 0;
1780        sg_items[0].transfer_context = NULL;
1781        sg_items[0].vaddr = &txbuf->htc_hdr;
1782        sg_items[0].paddr = txbuf_paddr +
1783                            sizeof(txbuf->frags);
1784        sg_items[0].len = sizeof(txbuf->htc_hdr) +
1785                          sizeof(txbuf->cmd_hdr) +
1786                          sizeof(txbuf->cmd_tx);
1787
1788        sg_items[1].transfer_id = 0;
1789        sg_items[1].transfer_context = NULL;
1790        sg_items[1].vaddr = msdu->data;
1791        sg_items[1].paddr = skb_cb->paddr;
1792        sg_items[1].len = prefetch_len;
1793
1794        res = ath10k_hif_tx_sg(htt->ar,
1795                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1796                               sg_items, ARRAY_SIZE(sg_items));
1797        if (res)
1798                goto err_unmap_msdu;
1799
1800        return 0;
1801
1802err_unmap_msdu:
1803        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1804err_free_msdu_id:
1805        spin_lock_bh(&htt->tx_lock);
1806        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1807        spin_unlock_bh(&htt->tx_lock);
1808err:
1809        return res;
1810}
1811
1812static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
1813        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
1814        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1815        .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
1816        .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
1817        .htt_tx = ath10k_htt_tx_32,
1818        .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
1819        .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
1820        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1821};
1822
1823static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
1824        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
1825        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
1826        .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
1827        .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
1828        .htt_tx = ath10k_htt_tx_64,
1829        .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
1830        .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
1831        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
1832};
1833
1834static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
1835        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
1836        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1837        .htt_tx = ath10k_htt_tx_hl,
1838        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1839        .htt_flush_tx = ath10k_htt_flush_tx_queue,
1840};
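    /* Three flavours of the tx ops: _32 for targets using 32-bit DMA
     * descriptors, _64 for targets with 64-bit descriptors/addressing, and
     * _hl for high-latency buses (e.g. SDIO/USB), which do not use a
     * fragment descriptor bank and transmit via ath10k_htt_tx_hl() instead.
     */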
1841
1842void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
1843{
1844        struct ath10k *ar = htt->ar;
1845
1846        if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
1847                htt->tx_ops = &htt_tx_ops_hl;
1848        else if (ar->hw_params.target_64bit)
1849                htt->tx_ops = &htt_tx_ops_64;
1850        else
1851                htt->tx_ops = &htt_tx_ops_32;
1852}
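    /* Callers never invoke a tx variant directly; once ath10k_htt_set_tx_ops()
     * has run they dispatch through the table. A minimal sketch (the in-tree
     * wrappers are static inlines in htt.h):
     *
     *     ath10k_htt_set_tx_ops(htt);
     *     if (htt->tx_ops->htt_tx)
     *             res = htt->tx_ops->htt_tx(htt, txmode, msdu);
     */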
1853