/* linux/drivers/net/wireless/ath/ath10k/txrx.c */
   1/*
   2 * Copyright (c) 2005-2011 Atheros Communications Inc.
   3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
   4 *
   5 * Permission to use, copy, modify, and/or distribute this software for any
   6 * purpose with or without fee is hereby granted, provided that the above
   7 * copyright notice and this permission notice appear in all copies.
   8 *
   9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16 */
  17
  18#include "core.h"
  19#include "txrx.h"
  20#include "htt.h"
  21#include "mac.h"
  22#include "debug.h"
  23
  24static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
  25{
  26        if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
  27                return;
  28
  29        /* If the original wait_for_completion() timed out before
  30         * {data,mgmt}_tx_completed() was called then we could complete
  31         * offchan_tx_completed for a different skb. Prevent this by using
  32         * offchan_tx_skb. */
  33        spin_lock_bh(&ar->data_lock);
  34        if (ar->offchan_tx_skb != skb) {
  35                ath10k_warn(ar, "completed old offchannel frame\n");
  36                goto out;
  37        }
  38
  39        complete(&ar->offchan_tx_completed);
  40        ar->offchan_tx_skb = NULL; /* just for sanity */
  41
  42        ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
  43out:
  44        spin_unlock_bh(&ar->data_lock);
  45}
  46
/* Process one HTT tx-completion indication: unmap the MSDU's DMA buffer,
 * free its HTT tx descriptor, report tx status to mac80211 and return the
 * msdu id to the pending-tx pool.
 *
 * Must be called with htt->tx_lock held (asserted below).
 */
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
			  const struct htt_tx_done *tx_done)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info;
	struct ath10k_skb_cb *skb_cb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);

	/* msdu_id comes from the device; reject ids outside the pending-tx
	 * range before using them for the idr lookup. */
	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
			    tx_done->msdu_id);
		return;
	}

	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
	if (!msdu) {
		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
			    tx_done->msdu_id);
		return;
	}

	skb_cb = ATH10K_SKB_CB(msdu);

	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	/* Not every tx path allocates an HTT tx buffer; free only if set. */
	if (skb_cb->htt.txbuf)
		dma_pool_free(htt->tx_pool,
			      skb_cb->htt.txbuf,
			      skb_cb->htt.txbuf_paddr);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));
	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);

	if (tx_done->discard) {
		/* Frame discarded by the target: free the skb without
		 * reporting a tx status to mac80211. */
		ieee80211_free_txskb(htt->ar->hw, msdu);
		goto exit;
	}

	/* Mark the frame acked unless it was sent with no-ack policy, then
	 * clear the flag again if the target reported no ack was received. */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (tx_done->no_ack)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */

exit:
	/* Release the msdu id, drop the pending-tx count and wake anyone
	 * waiting on empty_tx_wq once the queue has drained. */
	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
	__ath10k_htt_tx_dec_pending(htt);
	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
}
 109
 110struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
 111                                     const u8 *addr)
 112{
 113        struct ath10k_peer *peer;
 114
 115        lockdep_assert_held(&ar->data_lock);
 116
 117        list_for_each_entry(peer, &ar->peers, list) {
 118                if (peer->vdev_id != vdev_id)
 119                        continue;
 120                if (memcmp(peer->addr, addr, ETH_ALEN))
 121                        continue;
 122
 123                return peer;
 124        }
 125
 126        return NULL;
 127}
 128
 129struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
 130{
 131        struct ath10k_peer *peer;
 132
 133        lockdep_assert_held(&ar->data_lock);
 134
 135        list_for_each_entry(peer, &ar->peers, list)
 136                if (test_bit(peer_id, peer->peer_ids))
 137                        return peer;
 138
 139        return NULL;
 140}
 141
/* Block until the peer for (vdev_id, addr) is mapped (expect_mapped=true)
 * or unmapped (expect_mapped=false), with a 3 second timeout. The condition
 * is re-evaluated each time peer_mapping_wq is woken by the map/unmap event
 * handlers below.
 *
 * Returns 0 on success, -ETIMEDOUT if the state was not reached in time.
 * Note: a set ATH10K_FLAG_CRASH_FLUSH also ends the wait and is reported as
 * success, so crash recovery does not stall here.
 */
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);

			/* GCC statement expression: the value of this last
			 * expression is the wait condition. */
			(mapped == expect_mapped ||
			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
		}), 3*HZ);

	/* wait_event_timeout() returns 0 on timeout, >0 on success. */
	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}
 163
/* Wait (up to 3s) for the firmware to report a peer map for (vdev_id, addr).
 * Returns 0 on success, -ETIMEDOUT otherwise. */
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}
 168
/* Wait (up to 3s) for the peer (vdev_id, addr) to be fully unmapped.
 * Returns 0 on success, -ETIMEDOUT otherwise. */
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}
 173
 174void ath10k_peer_map_event(struct ath10k_htt *htt,
 175                           struct htt_peer_map_event *ev)
 176{
 177        struct ath10k *ar = htt->ar;
 178        struct ath10k_peer *peer;
 179
 180        spin_lock_bh(&ar->data_lock);
 181        peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
 182        if (!peer) {
 183                peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
 184                if (!peer)
 185                        goto exit;
 186
 187                peer->vdev_id = ev->vdev_id;
 188                ether_addr_copy(peer->addr, ev->addr);
 189                list_add(&peer->list, &ar->peers);
 190                wake_up(&ar->peer_mapping_wq);
 191        }
 192
 193        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
 194                   ev->vdev_id, ev->addr, ev->peer_id);
 195
 196        set_bit(ev->peer_id, peer->peer_ids);
 197exit:
 198        spin_unlock_bh(&ar->data_lock);
 199}
 200
 201void ath10k_peer_unmap_event(struct ath10k_htt *htt,
 202                             struct htt_peer_unmap_event *ev)
 203{
 204        struct ath10k *ar = htt->ar;
 205        struct ath10k_peer *peer;
 206
 207        spin_lock_bh(&ar->data_lock);
 208        peer = ath10k_peer_find_by_id(ar, ev->peer_id);
 209        if (!peer) {
 210                ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
 211                            ev->peer_id);
 212                goto exit;
 213        }
 214
 215        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
 216                   peer->vdev_id, peer->addr, ev->peer_id);
 217
 218        clear_bit(ev->peer_id, peer->peer_ids);
 219
 220        if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
 221                list_del(&peer->list);
 222                kfree(peer);
 223                wake_up(&ar->peer_mapping_wq);
 224        }
 225
 226exit:
 227        spin_unlock_bh(&ar->data_lock);
 228}
 229