linux/drivers/net/wireless/ath/ath10k/htt_tx.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

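/* Encode a tx queue depth in bytes as factor * 128 * 8^exp. The byte count
 * is first expressed in 128-byte units; factors of 64 or more are
 * renormalized by bumping the exponent, and if the depth still does not fit
 * at exp == 3 the value saturates to 0xff. A nonzero count always encodes
 * as at least 1 so the peer bitmap stays consistent with the entries.
 */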
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
        int exp;
        int factor;

        exp = 0;
        factor = count >> 7;

        while (factor >= 64 && exp < 4) {
                factor >>= 3;
                exp++;
        }

        if (exp == 4)
                return 0xff;

        if (count > 0)
                factor = max(1, factor);

        return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
               SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

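/* Recompute the encoded depth of a single ieee80211_txq and update its
 * entry, and the corresponding bit in the per-tid peer bitmap, in the queue
 * state shared with the firmware. Only meaningful in push-pull mode; the
 * double-underscore variants assume the caller holds htt->tx_lock.
 */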
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                                       struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;
        struct ath10k_sta *arsta;
        struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
        unsigned long frame_cnt;
        unsigned long byte_cnt;
        int idx;
        u32 bit;
        u16 peer_id;
        u8 tid;
        u8 count;

        lockdep_assert_held(&ar->htt.tx_lock);

        if (!ar->htt.tx_q_state.enabled)
                return;

        if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
                return;

        if (txq->sta) {
                arsta = (void *)txq->sta->drv_priv;
                peer_id = arsta->peer_id;
        } else {
                peer_id = arvif->peer_id;
        }

        tid = txq->tid;
        bit = BIT(peer_id % 32);
        idx = peer_id / 32;

        ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
        count = ath10k_htt_tx_txq_calc_size(byte_cnt);

        if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
            unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
                ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
                            peer_id, tid);
                return;
        }

        ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
        ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
        ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
                   peer_id, tid, count);
}

static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
        u32 seq;
        size_t size;

        lockdep_assert_held(&ar->htt.tx_lock);

        if (!ar->htt.tx_q_state.enabled)
                return;

        if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
                return;

        seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
        seq++;
        ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
                   seq);

        size = sizeof(*ar->htt.tx_q_state.vaddr);
        dma_sync_single_for_device(ar->dev,
                                   ar->htt.tx_q_state.paddr,
                                   size,
                                   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;

        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_recalc(hw, txq);
        spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_sync(ar);
        spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq)
{
        struct ath10k *ar = hw->priv;

        spin_lock_bh(&ar->htt.tx_lock);
        __ath10k_htt_tx_txq_recalc(hw, txq);
        __ath10k_htt_tx_txq_sync(ar);
        spin_unlock_bh(&ar->htt.tx_lock);
}

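/* num_pending_tx counts in-flight frames. Reaching max_num_pending_tx
 * pauses the mac80211 queues (ATH10K_TX_PAUSE_Q_FULL); dropping back below
 * the limit resumes them, and draining to zero wakes anyone waiting on
 * empty_tx_wq.
 */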
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

        if (htt->num_pending_tx == 0)
                wake_up(&htt->empty_tx_wq);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx)
                return -EBUSY;

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

        return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
                                   bool is_presp)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
                return 0;

        if (is_presp &&
            ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
                return -EBUSY;

        htt->num_pending_mgmt_tx++;

        return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
        lockdep_assert_held(&htt->tx_lock);

        if (!htt->ar->hw_params.max_probe_resp_desc_thres)
                return;

        htt->num_pending_mgmt_tx--;
}

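/* MSDU ids double as IDR handles: each in-flight skb is stored in
 * htt->pending_tx keyed by its msdu_id so tx completions can look the
 * frame back up.
 */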
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
        struct ath10k *ar = htt->ar;
        int ret;

        spin_lock_bh(&htt->tx_lock);
        ret = idr_alloc(&htt->pending_tx, skb, 0,
                        htt->max_num_pending_tx, GFP_ATOMIC);
        spin_unlock_bh(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

        return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        struct ath10k *ar = htt->ar;

        lockdep_assert_held(&htt->tx_lock);

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);

        idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!htt->txbuf.vaddr_txbuff_32)
                return;

        size = htt->txbuf.size;
        dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
                          htt->txbuf.paddr);
        htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        size = htt->max_num_pending_tx *
                        sizeof(struct ath10k_htt_txbuf_32);

        htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
                                                        &htt->txbuf.paddr,
                                                        GFP_KERNEL);
        if (!htt->txbuf.vaddr_txbuff_32)
                return -ENOMEM;

        htt->txbuf.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!htt->txbuf.vaddr_txbuff_64)
                return;

        size = htt->txbuf.size;
        dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
                          htt->txbuf.paddr);
        htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        size = htt->max_num_pending_tx *
                        sizeof(struct ath10k_htt_txbuf_64);

        htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
                                                        &htt->txbuf.paddr,
                                                        GFP_KERNEL);
        if (!htt->txbuf.vaddr_txbuff_64)
                return -ENOMEM;

        htt->txbuf.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
        size_t size;

        if (!htt->frag_desc.vaddr_desc_32)
                return;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc);

        dma_free_coherent(htt->ar->dev,
                          size,
                          htt->frag_desc.vaddr_desc_32,
                          htt->frag_desc.paddr);

        htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc);
        htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
                                                          &htt->frag_desc.paddr,
                                                          GFP_KERNEL);
        if (!htt->frag_desc.vaddr_desc_32) {
                ath10k_err(ar, "failed to alloc fragment desc memory\n");
                return -ENOMEM;
        }
        htt->frag_desc.size = size;

        return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
        size_t size;

        if (!htt->frag_desc.vaddr_desc_64)
                return;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc_64);

        dma_free_coherent(htt->ar->dev,
                          size,
                          htt->frag_desc.vaddr_desc_64,
                          htt->frag_desc.paddr);

        htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        size = htt->max_num_pending_tx *
                        sizeof(struct htt_msdu_ext_desc_64);

        htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
                                                          &htt->frag_desc.paddr,
                                                          GFP_KERNEL);
        if (!htt->frag_desc.vaddr_desc_64) {
                ath10k_err(ar, "failed to alloc fragment desc memory\n");
                return -ENOMEM;
        }
        htt->frag_desc.size = size;

        return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;

        if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                      ar->running_fw->fw_file.fw_features))
                return;

        size = sizeof(*htt->tx_q_state.vaddr);

        dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
        kfree(htt->tx_q_state.vaddr);
}

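/* The queue state buffer is shared with the firmware via streaming DMA:
 * allocated with kzalloc(), mapped once here with dma_map_single() and
 * synced to the device in __ath10k_htt_tx_txq_sync() after each batch of
 * updates.
 */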
static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        size_t size;
        int ret;

        if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                      ar->running_fw->fw_file.fw_features))
                return 0;

        htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
        htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
        htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

        size = sizeof(*htt->tx_q_state.vaddr);
        htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
        if (!htt->tx_q_state.vaddr)
                return -ENOMEM;

        htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
                                               size, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
        if (ret) {
                ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
                kfree(htt->tx_q_state.vaddr);
                return -EIO;
        }

        return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
        WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
        kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
        int ret;
        size_t size;

        size = roundup_pow_of_two(htt->max_num_pending_tx);
        ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
        return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret;

        ret = ath10k_htt_alloc_txbuff(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
                return ret;
        }

        ret = ath10k_htt_alloc_frag_desc(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
                goto free_txbuf;
        }

        ret = ath10k_htt_tx_alloc_txq(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc txq: %d\n", ret);
                goto free_frag_desc;
        }

        ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
                goto free_txq;
        }

        return 0;

free_txq:
        ath10k_htt_tx_free_txq(htt);

free_frag_desc:
        ath10k_htt_free_frag_desc(htt);

free_txbuf:
        ath10k_htt_free_txbuff(htt);

        return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int ret;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        spin_lock_init(&htt->tx_lock);
        idr_init(&htt->pending_tx);

        if (htt->tx_mem_allocated)
                return 0;

        if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
                return 0;

        ret = ath10k_htt_tx_alloc_buf(htt);
        if (ret)
                goto free_idr_pending_tx;

        htt->tx_mem_allocated = true;

        return 0;

free_idr_pending_tx:
        idr_destroy(&htt->pending_tx);

        return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
        struct ath10k *ar = ctx;
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};

        ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);

        tx_done.msdu_id = msdu_id;
        tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

        ath10k_txrx_tx_unref(htt, &tx_done);

        return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
        if (!htt->tx_mem_allocated)
                return;

        ath10k_htt_free_txbuff(htt);
        ath10k_htt_tx_free_txq(htt);
        ath10k_htt_free_frag_desc(htt);
        ath10k_htt_tx_free_txdone_fifo(htt);
        htt->tx_mem_allocated = false;
}

static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
        ath10k_htc_stop_hl(htt->ar);
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
        ath10k_htt_flush_tx_queue(htt);
        idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
        ath10k_htt_tx_stop(htt);
        ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
        queue_work(ar->workqueue, &ar->bundle_tx_work);
}

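/* With tx completions disabled (htt->disable_tx_comp) the HTC transmit-done
 * callback is the only notification the host gets, so for TX_FRM commands
 * the HTT header is parsed back out of the skb and the msdu is completed
 * from here.
 */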
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {0};
        struct htt_cmd_hdr *htt_hdr;
        struct htt_data_tx_desc *desc_hdr = NULL;
        u16 flags1 = 0;
        u8 msg_type = 0;

        if (htt->disable_tx_comp) {
                htt_hdr = (struct htt_cmd_hdr *)skb->data;
                msg_type = htt_hdr->msg_type;

                if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
                        desc_hdr = (struct htt_data_tx_desc *)
                                (skb->data + sizeof(*htt_hdr));
                        flags1 = __le16_to_cpu(desc_hdr->flags1);
                        skb_pull(skb, sizeof(struct htt_cmd_hdr));
                        skb_pull(skb, sizeof(struct htt_data_tx_desc));
                }
        }

        dev_kfree_skb_any(skb);

        if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
                return;

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx complete msdu id:%u, flags1:%x\n",
                   __le16_to_cpu(desc_hdr->id), flags1);

        if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
                return;

        tx_done.status = HTT_TX_COMPL_STATE_ACK;
        tx_done.msdu_id = __le16_to_cpu(desc_hdr->id);
        ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
                             u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 24 bit masks so no need to worry
         * about endian support
         */
        memcpy(req->upload_types, &mask, 3);
        memcpy(req->reset_types, &reset_mask, 3);
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

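/* Tell the firmware where the host-allocated bank of per-msdu fragment
 * (scatter-gather) descriptors lives and, when peer flow control is in
 * use, where the shared tx queue state sits. The 32-bit and 64-bit
 * variants differ only in descriptor and address widths.
 */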
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_frag_desc_bank_cfg32 *cfg;
        int ret, size;
        u8 info;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

        info = 0;
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

        if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                     ar->running_fw->fw_file.fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

        cfg = &cmd->frag_desc_bank_cfg32;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
        cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
        cfg->bank_id[0].bank_min_id = 0;
        cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                                                    1);

        cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
        cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
        cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
        cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
        cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_frag_desc_bank_cfg64 *cfg;
        int ret, size;
        u8 info;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

        info = 0;
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

        if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
                     ar->running_fw->fw_file.fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

        cfg = &cmd->frag_desc_bank_cfg64;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
        cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
        cfg->bank_id[0].bank_min_id = 0;
        cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                                                    1);

        cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
        cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
        cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
        cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
        cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring)
{
        struct htt_rx_ring_setup_ring32 *ring =
                        (struct htt_rx_ring_setup_ring32 *)rx_ring;

        ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}

static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring)
{
        struct htt_rx_ring_setup_ring64 *ring =
                        (struct htt_rx_ring_setup_ring64 *)rx_ring;

        ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring32 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_32.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_32.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

        ath10k_htt_fill_rx_desc_offset_32(hw, ring);
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_hw_params *hw = &ar->hw_params;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring64 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /* HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_64.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_64.hdr.num_rings = 1;

        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

        ath10k_htt_fill_rx_desc_offset_64(hw, ring);
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring32 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup_32.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup_32.hdr.num_rings = 1;

        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

        memset(ring, 0, sizeof(*ring));
        ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

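/* Both aggregation config variants below enforce the same firmware limits:
 * 1-64 A-MPDU subframes and 1-31 A-MSDU subframes per aggregate.
 */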
static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
                                          u8 max_subfrms_ampdu,
                                          u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
                                          u8 max_subfrms_ampdu,
                                          u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf_v2 *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf_v2);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf_v2;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

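/* Reply to a firmware tx fetch indication (push-pull mode): echo the token
 * and fetch sequence number and hand back the list of records the host has
 * acted on.
 */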
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
                             __le32 token,
                             __le16 fetch_seq_num,
                             struct htt_tx_fetch_record *records,
                             size_t num_records)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        const u16 resp_id = 0;
        int len = 0;
        int ret;
        /* Response IDs are echoed back only for host driver convenience
         * purposes. They aren't used for anything in the driver yet so use 0.
         */

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->tx_fetch_resp);
        len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
        cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
        cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
        cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
        cmd->tx_fetch_resp.token = token;

        memcpy(cmd->tx_fetch_resp.records, records,
               sizeof(records[0]) * num_records);

        ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
                goto err_free_skb;
        }

        return 0;

err_free_skb:
        dev_kfree_skb_any(skb);

        return ret;
}

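/* Pick the vdev a frame belongs to: off-channel frames use the scan vdev,
 * frames with a vif use that vif's vdev, monitor mode falls back to the
 * monitor vdev, and 0 is used otherwise.
 */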
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
        struct ath10k_vif *arvif;

        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
                return ar->scan.vdev_id;
        } else if (cb->vif) {
                arvif = (void *)cb->vif->drv_priv;
                return arvif->vdev_id;
        } else if (ar->monitor_started) {
                return ar->monitor_vdev_id;
        } else {
                return 0;
        }
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
        struct ieee80211_hdr *hdr = (void *)skb->data;
        struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

        if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
                return HTT_DATA_TX_EXT_TID_MGMT;
        else if (cb->flags & ATH10K_SKB_F_QOS)
                return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
        else
                return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        int len = 0;
        int msdu_id = -1;
        int res;
        const u8 *peer_addr;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        if (res < 0)
                goto err;

        msdu_id = res;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                peer_addr = hdr->addr1;
                if (is_multicast_ether_addr(peer_addr)) {
                        skb_put(msdu, sizeof(struct ieee80211_mmie_16));
                } else {
                        if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
                            skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
                                skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
                        else
                                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
                }
        }

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, len);

        cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
                dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err:
        return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
        (unsigned int)(sizeof(struct htt_cmd_hdr) + \
        sizeof(struct htt_data_tx_desc) + \
        sizeof(struct ath10k_htc_hdr))

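/* High-latency (e.g. SDIO/USB) tx path: the HTT command header and tx
 * descriptor are pushed into the skb headroom and the frame is sent
 * through the regular HTC path instead of the scatter-gather HIF path.
 */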
static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
                            struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        int res, data_len;
        struct htt_cmd_hdr *cmd_hdr;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct htt_data_tx_desc *tx_desc;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct sk_buff *tmp_skb;
        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
        u8 flags0 = 0;
        u16 flags1 = 0;
        u16 msdu_id = 0;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        data_len = msdu->len;

        switch (txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                fallthrough;
        case ATH10K_HW_TXRX_ETHERNET:
                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

                if (htt->disable_tx_comp)
                        flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
                break;
        }

        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
        }

        /* Prepend the HTT header and TX desc struct to the data message
         * and realloc the skb if it does not have enough headroom.
         */
        if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
                tmp_skb = msdu;

                ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
                           "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
                           skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
                msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
                kfree_skb(tmp_skb);
                if (!msdu) {
                        ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
                        res = -ENOMEM;
                        goto out;
                }
        }

        if (ar->bus_param.hl_msdu_ids) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
                res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
                if (res < 0) {
                        ath10k_err(ar, "msdu_id allocation failed %d\n", res);
                        goto out;
                }
                msdu_id = res;
        }

        /* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
         * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
         * reference by one to avoid a use-after-free case and a double
         * free.
         */
        skb_get(msdu);

        skb_push(msdu, sizeof(*cmd_hdr));
        skb_push(msdu, sizeof(*tx_desc));
        cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
        tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

        cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        tx_desc->flags0 = flags0;
        tx_desc->flags1 = __cpu_to_le16(flags1);
        tx_desc->len = __cpu_to_le16(data_len);
        tx_desc->id = __cpu_to_le16(msdu_id);
        tx_desc->frags_paddr = 0; /* always zero */
        /* Initialize peerid to HTT_INVALID_PEERID since this is not the
         * reinjection path.
         */
        tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
        return res;
}

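/* Low-latency tx path for 32-bit targets: the prebuilt per-msdu txbuf
 * (HTC header + HTT command + fragment list) and the msdu payload are
 * handed to HIF as a two-element scatter list, bypassing HTC entirely.
 */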
static int ath10k_htt_tx_32(struct ath10k_htt *htt,
                            enum ath10k_hw_txrx_mode txmode,
                            struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
        struct ath10k_htt_txbuf_32 *txbuf;
        struct htt_data_tx_desc_frag *frags;
        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
        int prefetch_len;
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
        u16 freq = 0;
        u32 frags_paddr = 0;
        u32 txbuf_paddr;
        struct htt_msdu_ext_desc *ext_desc = NULL;
        struct htt_msdu_ext_desc *ext_desc_t = NULL;

        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        if (res < 0)
                goto err;

        msdu_id = res;

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
        txbuf_paddr = htt->txbuf.paddr +
                      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
                   txmode == ATH10K_HW_TXRX_RAW &&
                   ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_msdu_id;
        }

        if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
                freq = ar->scan.roc_freq;

        switch (txmode) {
        case ATH10K_HW_TXRX_RAW:
        case ATH10K_HW_TXRX_NATIVE_WIFI:
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                fallthrough;
        case ATH10K_HW_TXRX_ETHERNET:
                if (ar->hw_params.continuous_frag_desc) {
                        ext_desc_t = htt->frag_desc.vaddr_desc_32;
                        memset(&ext_desc_t[msdu_id], 0,
                               sizeof(struct htt_msdu_ext_desc));
                        frags = (struct htt_data_tx_desc_frag *)
                                &ext_desc_t[msdu_id].frags;
                        ext_desc = &ext_desc_t[msdu_id];
                        frags[0].tword_addr.paddr_lo =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].tword_addr.paddr_hi = 0;
                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

                        frags_paddr = htt->frag_desc.paddr +
                                (sizeof(struct htt_msdu_ext_desc) * msdu_id);
                } else {
                        frags = txbuf->frags;
                        frags[0].dword_addr.paddr =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
                        frags[1].dword_addr.paddr = 0;
                        frags[1].dword_addr.len = 0;

                        frags_paddr = txbuf_paddr;
                }
                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

                frags_paddr = skb_cb->paddr;
                break;
        }

        /* Normally all commands go through HTC which manages tx credits for
         * each endpoint and notifies when tx is completed.
         *
         * The HTT endpoint is creditless so there's no need to care about
         * HTC flags. In that case it is trivial to fill the HTC header here.
         *
         * MSDU transmission is considered completed upon HTT event. This
         * implies no relevant resources can be freed until after the event is
         * received. That's why the HTC tx completion handler is ignored, by
         * setting transfer_context to NULL for all sg items.
         *
         * There is simply no point in pushing HTT TX_FRM through the HTC tx
         * path as it's a waste of resources. By bypassing HTC it is possible
         * to avoid extra memory allocations, compress data structures and
         * thus improve performance.
         */

        txbuf->htc_hdr.eid = htt->eid;
        txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
                                           sizeof(txbuf->cmd_tx) +
                                           prefetch_len);
        txbuf->htc_hdr.flags = 0;

        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
                if (ar->hw_params.continuous_frag_desc)
                        ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
        }

        /* Prevent firmware from sending up tx inspection requests. There's
         * nothing ath10k can do with frames requested for inspection so force
         * it to simply rely on a regular tx completion with discard status.
         */
1525        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1526
1527        txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1528        txbuf->cmd_tx.flags0 = flags0;
1529        txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1530        txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1531        txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1532        txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
1533        if (ath10k_mac_tx_frm_has_freq(ar)) {
1534                txbuf->cmd_tx.offchan_tx.peerid =
1535                                __cpu_to_le16(HTT_INVALID_PEERID);
1536                txbuf->cmd_tx.offchan_tx.freq =
1537                                __cpu_to_le16(freq);
1538        } else {
1539                txbuf->cmd_tx.peerid =
1540                                __cpu_to_le32(HTT_INVALID_PEERID);
1541        }
1542
1543        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1544        ath10k_dbg(ar, ATH10K_DBG_HTT,
1545                   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad msdu_paddr %pad vdev %u tid %u freq %u\n",
1546                   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1547                   &skb_cb->paddr, vdev_id, tid, freq);
1548        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1549                        msdu->data, msdu->len);
1550        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1551        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1552
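            /* Two sg items are handed to HIF: the first covers the HTC and
             * HTT headers inside the tx buffer (the buffer begins with the
             * frags array, hence the sizeof(txbuf->frags) offset), the second
             * inlines the first prefetch_len bytes of the frame, presumably
             * so the firmware can parse the headers without an extra fetch.
             */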
1553        sg_items[0].transfer_id = 0;
1554        sg_items[0].transfer_context = NULL;
1555        sg_items[0].vaddr = &txbuf->htc_hdr;
1556        sg_items[0].paddr = txbuf_paddr +
1557                            sizeof(txbuf->frags);
1558        sg_items[0].len = sizeof(txbuf->htc_hdr) +
1559                          sizeof(txbuf->cmd_hdr) +
1560                          sizeof(txbuf->cmd_tx);
1561
1562        sg_items[1].transfer_id = 0;
1563        sg_items[1].transfer_context = NULL;
1564        sg_items[1].vaddr = msdu->data;
1565        sg_items[1].paddr = skb_cb->paddr;
1566        sg_items[1].len = prefetch_len;
1567
1568        res = ath10k_hif_tx_sg(htt->ar,
1569                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1570                               sg_items, ARRAY_SIZE(sg_items));
1571        if (res)
1572                goto err_unmap_msdu;
1573
1574        return 0;
1575
1576err_unmap_msdu:
1577        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1578err_free_msdu_id:
1579        spin_lock_bh(&htt->tx_lock);
1580        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1581        spin_unlock_bh(&htt->tx_lock);
1582err:
1583        return res;
1584}
1585
1586static int ath10k_htt_tx_64(struct ath10k_htt *htt,
1587                            enum ath10k_hw_txrx_mode txmode,
1588                            struct sk_buff *msdu)
1589{
1590        struct ath10k *ar = htt->ar;
1591        struct device *dev = ar->dev;
1592        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
1593        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
1594        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
1595        struct ath10k_hif_sg_item sg_items[2];
1596        struct ath10k_htt_txbuf_64 *txbuf;
1597        struct htt_data_tx_desc_frag *frags;
1598        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
1599        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
1600        u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
1601        int prefetch_len;
1602        int res;
1603        u8 flags0 = 0;
1604        u16 msdu_id, flags1 = 0;
1605        u16 freq = 0;
1606        dma_addr_t frags_paddr = 0;
1607        dma_addr_t txbuf_paddr;
1608        struct htt_msdu_ext_desc_64 *ext_desc = NULL;
1609        struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
1610
1611        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
1612        if (res < 0)
1613                goto err;
1614
1615        msdu_id = res;
1616
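            /* Only the beginning of the frame is inlined into the HTC
             * transfer; the firmware fetches the rest via frags_paddr.
             * Presumably the copy engine requires 4-byte alignment, hence
             * the roundup.
             */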
1617        prefetch_len = min(htt->prefetch_len, msdu->len);
1618        prefetch_len = roundup(prefetch_len, 4);
1619
1620        txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
1621        txbuf_paddr = htt->txbuf.paddr +
1622                      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
1623
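            /* Protected robust management frames and protected raw-mode
             * frames get their CCMP MIC appended by the hardware, so extend
             * the skb now to account for the MIC in the mapped length.
             */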
1624        if ((ieee80211_is_action(hdr->frame_control) ||
1625             ieee80211_is_deauth(hdr->frame_control) ||
1626             ieee80211_is_disassoc(hdr->frame_control)) &&
1627             ieee80211_has_protected(hdr->frame_control)) {
1628                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1629        } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
1630                   txmode == ATH10K_HW_TXRX_RAW &&
1631                   ieee80211_has_protected(hdr->frame_control)) {
1632                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
1633        }
1634
1635        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
1636                                       DMA_TO_DEVICE);
1637        res = dma_mapping_error(dev, skb_cb->paddr);
1638        if (res) {
1639                res = -EIO;
1640                goto err_free_msdu_id;
1641        }
1642
1643        if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
1644                freq = ar->scan.roc_freq;
1645
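            /* Pick where the fragment list lives: targets with continuous
             * frag descriptors keep a per-MSDU extension descriptor in a
             * separate DMA ring, otherwise a two-entry frag list embedded
             * in the tx buffer is used.
             */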
1646        switch (txmode) {
1647        case ATH10K_HW_TXRX_RAW:
1648        case ATH10K_HW_TXRX_NATIVE_WIFI:
1649                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1650                fallthrough;
1651        case ATH10K_HW_TXRX_ETHERNET:
1652                if (ar->hw_params.continuous_frag_desc) {
1653                        ext_desc_t = htt->frag_desc.vaddr_desc_64;
1654                        memset(&ext_desc_t[msdu_id], 0,
1655                               sizeof(struct htt_msdu_ext_desc_64));
1656                        frags = (struct htt_data_tx_desc_frag *)
1657                                &ext_desc_t[msdu_id].frags;
1658                        ext_desc = &ext_desc_t[msdu_id];
1659                        frags[0].tword_addr.paddr_lo =
1660                                __cpu_to_le32(skb_cb->paddr);
1661                        frags[0].tword_addr.paddr_hi =
1662                                __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1663                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1664
1665                        frags_paddr = htt->frag_desc.paddr +
1666                                (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
1667                } else {
1668                        frags = txbuf->frags;
1669                        frags[0].tword_addr.paddr_lo =
1670                                __cpu_to_le32(skb_cb->paddr);
1671                        frags[0].tword_addr.paddr_hi =
1672                                __cpu_to_le16(upper_32_bits(skb_cb->paddr));
1673                        frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
1674                        frags[1].tword_addr.paddr_lo = 0;
1675                        frags[1].tword_addr.paddr_hi = 0;
1676                        frags[1].tword_addr.len_16 = 0;
1677                }
1678                flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1679                break;
1680        case ATH10K_HW_TXRX_MGMT:
1681                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
1682                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
1683                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
1684
1685                frags_paddr = skb_cb->paddr;
1686                break;
1687        }
1688
1689        /* Normally all commands go through HTC, which manages tx credits
1690         * for each endpoint and notifies the host when tx is completed.
1691         *
1692         * The HTT endpoint is creditless, so there's no need to care about
1693         * HTC flags. In that case it is trivial to fill the HTC header here.
1694         *
1695         * MSDU transmission is considered completed upon HTT event. This
1696         * implies no relevant resources can be freed until after the event
1697         * is received. That's why the HTC tx completion handler itself is
1698         * ignored by setting transfer_context to NULL for all sg items.
1699         *
1700         * There is simply no point in pushing HTT TX_FRM through the HTC tx
1701         * path as it's a waste of resources. By bypassing HTC it is possible
1702         * to avoid extra memory allocations, compress data structures and
1703         * thus improve performance.
1704         */
1705
1706        txbuf->htc_hdr.eid = htt->eid;
1707        txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
1708                                           sizeof(txbuf->cmd_tx) +
1709                                           prefetch_len);
1710        txbuf->htc_hdr.flags = 0;
1711
1712        if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
1713                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
1714
1715        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
1716        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
1717        if (msdu->ip_summed == CHECKSUM_PARTIAL &&
1718            !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
1719                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
1720                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
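                    /* Unlike the 32-bit path, the 64-bit extension descriptor
                     * signals checksum offload via tso_flag word 3.
                     */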
1721                if (ar->hw_params.continuous_frag_desc) {
1722                        memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
1723                        ext_desc->tso_flag[3] |=
1724                                __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
1725                }
1726        }
1727
1728        /* Prevent firmware from sending up tx inspection requests. There's
1729         * nothing ath10k can do with frames requested for inspection, so
1730         * simply rely on a regular tx completion with discard status.
1731         */
1732        flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
1733
1734        txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
1735        txbuf->cmd_tx.flags0 = flags0;
1736        txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
1737        txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
1738        txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
1739
1740        /* fill the fragment descriptor address */
1741        txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
1742        if (ath10k_mac_tx_frm_has_freq(ar)) {
1743                txbuf->cmd_tx.offchan_tx.peerid =
1744                                __cpu_to_le16(HTT_INVALID_PEERID);
1745                txbuf->cmd_tx.offchan_tx.freq =
1746                                __cpu_to_le16(freq);
1747        } else {
1748                txbuf->cmd_tx.peerid =
1749                                __cpu_to_le32(HTT_INVALID_PEERID);
1750        }
1751
1752        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
1753        ath10k_dbg(ar, ATH10K_DBG_HTT,
1754                   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad msdu_paddr %pad vdev %u tid %u freq %u\n",
1755                   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
1756                   &skb_cb->paddr, vdev_id, tid, freq);
1757        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
1758                        msdu->data, msdu->len);
1759        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
1760        trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
1761
1762        sg_items[0].transfer_id = 0;
1763        sg_items[0].transfer_context = NULL;
1764        sg_items[0].vaddr = &txbuf->htc_hdr;
1765        sg_items[0].paddr = txbuf_paddr +
1766                            sizeof(txbuf->frags);
1767        sg_items[0].len = sizeof(txbuf->htc_hdr) +
1768                          sizeof(txbuf->cmd_hdr) +
1769                          sizeof(txbuf->cmd_tx);
1770
1771        sg_items[1].transfer_id = 0;
1772        sg_items[1].transfer_context = NULL;
1773        sg_items[1].vaddr = msdu->data;
1774        sg_items[1].paddr = skb_cb->paddr;
1775        sg_items[1].len = prefetch_len;
1776
1777        res = ath10k_hif_tx_sg(htt->ar,
1778                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
1779                               sg_items, ARRAY_SIZE(sg_items));
1780        if (res)
1781                goto err_unmap_msdu;
1782
1783        return 0;
1784
1785err_unmap_msdu:
1786        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
1787err_free_msdu_id:
1788        spin_lock_bh(&htt->tx_lock);
1789        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
1790        spin_unlock_bh(&htt->tx_lock);
1791err:
1792        return res;
1793}
1794
1795static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
1796        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
1797        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1798        .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
1799        .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
1800        .htt_tx = ath10k_htt_tx_32,
1801        .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
1802        .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
1803        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1804};
1805
1806static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
1807        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
1808        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
1809        .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
1810        .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
1811        .htt_tx = ath10k_htt_tx_64,
1812        .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
1813        .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
1814        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
1815};
1816
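    /* High latency targets (e.g. SDIO, USB) tunnel frames through HTC and
     * need no DMA-mapped tx buffers or fragment descriptors, hence no
     * alloc/free hooks here.
     */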
1817static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
1818        .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
1819        .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
1820        .htt_tx = ath10k_htt_tx_hl,
1821        .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
1822        .htt_flush_tx = ath10k_htt_flush_tx_queue,
1823};
1824
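    /* Choose between the high latency, 64-bit and 32-bit descriptor
     * implementations based on the bus type and target word size.
     */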
1825void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
1826{
1827        struct ath10k *ar = htt->ar;
1828
1829        if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
1830                htt->tx_ops = &htt_tx_ops_hl;
1831        else if (ar->hw_params.target_64bit)
1832                htt->tx_ops = &htt_tx_ops_64;
1833        else
1834                htt->tx_ops = &htt_tx_ops_32;
1835}
1836