linux/drivers/net/wireless/ath/ath10k/htt_tx.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

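/* Pending tx accounting. The host never hands more than max_num_pending_tx
 * MSDUs to the firmware at a time. The __ variant requires the caller to
 * hold htt->tx_lock; dropping to one slot below the limit wakes the mac80211
 * queues again.
 */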
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ieee80211_wake_queues(htt->ar->hw);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        spin_lock_bh(&htt->tx_lock);
        __ath10k_htt_tx_dec_pending(htt);
        spin_unlock_bh(&htt->tx_lock);
}

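/* Reserve one pending tx slot. Returns -EBUSY when the limit has been
 * reached; taking the last free slot stops the mac80211 queues so no further
 * frames are offered until a completion frees a slot.
 */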
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
        int ret = 0;

        spin_lock_bh(&htt->tx_lock);

        if (htt->num_pending_tx >= htt->max_num_pending_tx) {
                ret = -EBUSY;
                goto exit;
        }

        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
                ieee80211_stop_queues(htt->ar->hw);

exit:
        spin_unlock_bh(&htt->tx_lock);
        return ret;
}

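/* Allocate the lowest free MSDU id from the used_msdu_ids bitmap. The id is
 * carried in the tx descriptor and echoed back in the tx completion, where
 * it is used to look the frame up in htt->pending_tx. Caller must hold
 * htt->tx_lock.
 */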
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
        int msdu_id;

        lockdep_assert_held(&htt->tx_lock);

        msdu_id = find_first_zero_bit(htt->used_msdu_ids,
                                      htt->max_num_pending_tx);
        if (msdu_id == htt->max_num_pending_tx)
                return -ENOBUFS;

        ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
        __set_bit(msdu_id, htt->used_msdu_ids);
        return msdu_id;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        lockdep_assert_held(&htt->tx_lock);

        if (!test_bit(msdu_id, htt->used_msdu_ids))
                ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);

        ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
        __clear_bit(msdu_id, htt->used_msdu_ids);
}

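/* Set up HTT tx state: the pending_tx skb table, the used_msdu_ids bitmap
 * and a DMA pool for the per-packet HTT tx descriptors. The descriptor count
 * depends on the firmware flavour (10.x vs main firmware).
 */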
int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
        spin_lock_init(&htt->tx_lock);
        init_waitqueue_head(&htt->empty_tx_wq);

        if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
                htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
        else
                htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;

        ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);

        htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
                                  htt->max_num_pending_tx, GFP_KERNEL);
        if (!htt->pending_tx)
                return -ENOMEM;

        htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
                                     BITS_TO_LONGS(htt->max_num_pending_tx),
                                     GFP_KERNEL);
        if (!htt->used_msdu_ids) {
                kfree(htt->pending_tx);
                return -ENOMEM;
        }

        htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
                                       sizeof(struct ath10k_htt_txbuf), 4, 0);
        if (!htt->tx_pool) {
                kfree(htt->used_msdu_ids);
                kfree(htt->pending_tx);
                return -ENOMEM;
        }

        return 0;
}

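/* Force-complete every MSDU that is still outstanding, marking it as
 * discarded, so that skbs and DMA mappings held by in-flight frames are
 * released on teardown even if the firmware never reports them.
 */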
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
        struct htt_tx_done tx_done = {0};
        int msdu_id;

        spin_lock_bh(&htt->tx_lock);
        for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
                if (!test_bit(msdu_id, htt->used_msdu_ids))
                        continue;

                ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
                           msdu_id);

                tx_done.discard = 1;
                tx_done.msdu_id = msdu_id;

                ath10k_txrx_tx_unref(htt, &tx_done);
        }
        spin_unlock_bh(&htt->tx_lock);
}

void ath10k_htt_tx_detach(struct ath10k_htt *htt)
{
        ath10k_htt_tx_cleanup_pending(htt);
        kfree(htt->pending_tx);
        kfree(htt->used_msdu_ids);
        dma_pool_destroy(htt->tx_pool);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

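/* Ask the target which HTT interface version it implements. The reply is
 * processed in the HTT rx path and recorded in htt->target_version_major and
 * htt->target_version_minor.
 */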
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

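/* Request an upload (and reset) of the firmware statistics selected by mask.
 * The 64-bit cookie is echoed back in the stats confirmation so the caller
 * can match replies to requests.
 */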
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 8-bit masks so there is no need to
         * worry about endianness */
        req->upload_types[0] = mask;
        req->reset_types[0] = mask;
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn("failed to send htt type stats request: %d", ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

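/* Describe the host rx ring to the firmware: ring base address, length and
 * per-buffer size, which rx descriptor sections should be filled in and the
 * 4-byte word offset of each section within struct htt_rx_desc.
 */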
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

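/* Transmit a management frame with the dedicated HTT_H2T_MSG_TYPE_MGMT_TX
 * command used by pre-3.0 firmware interfaces. The frame is DMA mapped and
 * passed by address; only the first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes are
 * copied into the command itself for the firmware to inspect.
 */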
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);

        txdesc = ath10k_htc_alloc_skb(len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res)
                goto err_free_txdesc;

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.txbuf = NULL;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}

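/* Transmit a frame with HTT_H2T_MSG_TYPE_TX_FRM. The HTC/HTT headers and the
 * fragment list live in a descriptor allocated from htt->tx_pool and are
 * pushed to the device together with the DMA mapped frame as a two element
 * scatter-gather list, bypassing the regular HTC tx queue (see the comment
 * further down).
 */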
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
        struct htt_data_tx_desc_frag *frags;
        u8 vdev_id = skb_cb->vdev_id;
        u8 tid = skb_cb->htt.tid;
        int prefetch_len;
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
        dma_addr_t paddr;
        u32 frags_paddr;
        bool use_frags;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        /* Since HTT 3.0 there is no separate mgmt tx command. However, when a
         * mgmt frame is sent via TX_FRM there is no tx fragment list; the
         * host driver passes the frame pointer directly instead. */
        use_frags = htt->target_version_major < 3 ||
                    !ieee80211_is_mgmt(hdr->frame_control);

        skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
                                           &paddr);
        if (!skb_cb->htt.txbuf) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }
        skb_cb->htt.txbuf_paddr = paddr;

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res)
                goto err_free_txbuf;

        if (likely(use_frags)) {
                frags = skb_cb->htt.txbuf->frags;

                frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
                frags[0].len = __cpu_to_le32(msdu->len);
                frags[1].paddr = 0;
                frags[1].len = 0;

                flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

                frags_paddr = skb_cb->htt.txbuf_paddr;
        } else {
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

                frags_paddr = skb_cb->paddr;
        }

        /* Normally all commands go through HTC which manages tx credits for
         * each endpoint and notifies when tx is completed.
         *
         * HTT endpoint is creditless so there's no need to care about HTC
         * flags. In that case it is trivial to fill the HTC header here.
         *
         * MSDU transmission is considered completed upon HTT event. This
         * implies no relevant resources can be freed until after the event is
         * received. That's why HTC tx completion handler itself is ignored by
         * setting NULL to transfer_context for all sg items.
         *
         * There is simply no point in pushing HTT TX_FRM through HTC tx path
         * as it's a waste of resources. By bypassing HTC it is possible to
         * avoid extra memory allocations, compress data structures and thus
         * improve performance. */

        skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
        skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
                        sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                        sizeof(skb_cb->htt.txbuf->cmd_tx) +
                        prefetch_len);
        skb_cb->htt.txbuf->htc_hdr.flags = 0;

        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

        skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
        skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
        skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
        skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
        skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        ath10k_dbg(ATH10K_DBG_HTT,
                   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
                   flags0, flags1, msdu->len, msdu_id, frags_paddr,
                   (u32)skb_cb->paddr, vdev_id, tid);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
                        msdu->data, msdu->len);

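        /* Item 0 carries the HTC/HTT headers and the tx descriptor from the
         * DMA pool; the fragment list that precedes them in the txbuf is
         * skipped via the paddr offset. Item 1 carries the first prefetch_len
         * bytes of the frame itself.
         */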
        sg_items[0].transfer_id = 0;
        sg_items[0].transfer_context = NULL;
        sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
        sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
                            sizeof(skb_cb->htt.txbuf->frags);
        sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_hdr) +
                          sizeof(skb_cb->htt.txbuf->cmd_tx);

        sg_items[1].transfer_id = 0;
        sg_items[1].transfer_context = NULL;
        sg_items[1].vaddr = msdu->data;
        sg_items[1].paddr = skb_cb->paddr;
        sg_items[1].len = prefetch_len;

        res = ath10k_hif_tx_sg(htt->ar,
                               htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
                               sg_items, ARRAY_SIZE(sg_items));
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
        dma_pool_free(htt->tx_pool,
                      skb_cb->htt.txbuf,
                      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}