linux/drivers/net/wireless/ath/ath10k/txrx.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"

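/* Complete the pending off-channel TX wait once the TX completion for the
 * matching skb arrives.
 */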
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
                return;

        if (ath10k_mac_tx_frm_has_freq(ar))
                return;

        /* If the original wait_for_completion() timed out before
         * {data,mgmt}_tx_completed() was called then we could complete
         * offchan_tx_completed for a different skb. Prevent this by using
         * offchan_tx_skb.
         */
        spin_lock_bh(&ar->data_lock);
        if (ar->offchan_tx_skb != skb) {
                ath10k_warn(ar, "completed old offchannel frame\n");
                goto out;
        }

        complete(&ar->offchan_tx_completed);
        ar->offchan_tx_skb = NULL; /* just for sanity */

        ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
        spin_unlock_bh(&ar->data_lock);
}

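/* Handle an HTT TX completion: look up the msdu by its id, release the
 * msdu id and DMA mapping, translate the completion status into mac80211
 * TX status and hand the frame back via ieee80211_tx_status().
 */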
int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                         const struct htt_tx_done *tx_done)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct ieee80211_tx_info *info;
        struct ieee80211_txq *txq;
        struct ath10k_skb_cb *skb_cb;
        struct ath10k_txq *artxq;
        struct sk_buff *msdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx completion msdu_id %u status %d\n",
                   tx_done->msdu_id, tx_done->status);

        if (tx_done->msdu_id >= htt->max_num_pending_tx) {
                ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
                            tx_done->msdu_id);
                return -EINVAL;
        }

        spin_lock_bh(&htt->tx_lock);
        msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
        if (!msdu) {
                ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
                            tx_done->msdu_id);
                spin_unlock_bh(&htt->tx_lock);
                return -ENOENT;
        }

        skb_cb = ATH10K_SKB_CB(msdu);
        txq = skb_cb->txq;

        if (txq) {
                artxq = (void *)txq->drv_priv;
                artxq->num_fw_queued--;
        }

        ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
        ath10k_htt_tx_dec_pending(htt);
        if (htt->num_pending_tx == 0)
                wake_up(&htt->empty_tx_wq);
        spin_unlock_bh(&htt->tx_lock);

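        /* Credit the estimated TX airtime of this frame to the station's
         * airtime accounting in mac80211.
         */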
        if (txq && txq->sta && skb_cb->airtime_est)
                ieee80211_sta_register_airtime(txq->sta, txq->tid,
                                               skb_cb->airtime_est, 0);

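        /* The msdu is DMA-mapped only on non-high-latency devices; unmap it
         * there now that the transfer is done.
         */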
        if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
                dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

        ath10k_report_offchan_tx(htt->ar, msdu);

        info = IEEE80211_SKB_CB(msdu);
        memset(&info->status, 0, sizeof(info->status));
        trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);

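        /* Translate the HTT completion status into mac80211 TX status flags
         * and the ACK signal strength.
         */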
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                info->flags |= IEEE80211_TX_STAT_ACK;

        if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
                info->flags &= ~IEEE80211_TX_STAT_ACK;

        if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
            (info->flags & IEEE80211_TX_CTL_NO_ACK))
                info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

        if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                        info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                else
                        info->flags &= ~IEEE80211_TX_STAT_ACK;
        }

        if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
            tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
                info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
                                                tx_done->ack_rssi;
                info->status.is_valid_ack_signal = true;
        }

        ieee80211_tx_status(htt->ar->hw, msdu);
        /* we do not own the msdu anymore */

        return 0;
}

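/* Find a peer by (vdev_id, MAC address). Caller must hold ar->data_lock. */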
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
                                     const u8 *addr)
{
        struct ath10k_peer *peer;

        lockdep_assert_held(&ar->data_lock);

        list_for_each_entry(peer, &ar->peers, list) {
                if (peer->vdev_id != vdev_id)
                        continue;
                if (!ether_addr_equal(peer->addr, addr))
                        continue;

                return peer;
        }

        return NULL;
}

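/* Find the peer that has @peer_id set in its peer_ids bitmap. Caller must
 * hold ar->data_lock.
 */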
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
        struct ath10k_peer *peer;

        lockdep_assert_held(&ar->data_lock);

        list_for_each_entry(peer, &ar->peers, list)
                if (test_bit(peer_id, peer->peer_ids))
                        return peer;

        return NULL;
}

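/* Wait up to 3 seconds for the peer to appear in (or disappear from)
 * ar->peers, returning early if a firmware crash flush is pending.
 */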
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
                                       const u8 *addr, bool expect_mapped)
{
        long time_left;

        time_left = wait_event_timeout(ar->peer_mapping_wq, ({
                        bool mapped;

                        spin_lock_bh(&ar->data_lock);
                        mapped = !!ath10k_peer_find(ar, vdev_id, addr);
                        spin_unlock_bh(&ar->data_lock);

                        (mapped == expect_mapped ||
                         test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
                }), 3 * HZ);

        if (time_left == 0)
                return -ETIMEDOUT;

        return 0;
}

int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
        return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}

int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
        return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}

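/* HTT peer map event: allocate a peer entry on its first mapping and record
 * the new peer_id in both ar->peer_map[] and the peer's peer_ids bitmap.
 */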
void ath10k_peer_map_event(struct ath10k_htt *htt,
                           struct htt_peer_map_event *ev)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_peer *peer;

        if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
                ath10k_warn(ar,
                            "received htt peer map event with idx out of bounds: %hu\n",
                            ev->peer_id);
                return;
        }

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
        if (!peer) {
                peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
                if (!peer)
                        goto exit;

                peer->vdev_id = ev->vdev_id;
                ether_addr_copy(peer->addr, ev->addr);
                list_add(&peer->list, &ar->peers);
                wake_up(&ar->peer_mapping_wq);
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
                   ev->vdev_id, ev->addr, ev->peer_id);

        WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
        ar->peer_map[ev->peer_id] = peer;
        set_bit(ev->peer_id, peer->peer_ids);
exit:
        spin_unlock_bh(&ar->data_lock);
}

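/* HTT peer unmap event: drop the peer_id mapping and free the peer once all
 * of its peer ids have been unmapped.
 */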
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
                             struct htt_peer_unmap_event *ev)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_peer *peer;

        if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
                ath10k_warn(ar,
                            "received htt peer unmap event with idx out of bounds: %hu\n",
                            ev->peer_id);
                return;
        }

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, ev->peer_id);
        if (!peer) {
                ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
                            ev->peer_id);
                goto exit;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
                   peer->vdev_id, peer->addr, ev->peer_id);

        ar->peer_map[ev->peer_id] = NULL;
        clear_bit(ev->peer_id, peer->peer_ids);

        if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
                list_del(&peer->list);
                kfree(peer);
                wake_up(&ar->peer_mapping_wq);
        }

exit:
        spin_unlock_bh(&ar->data_lock);
}