linux/net/mac80211/tx.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright 2002-2005, Instant802 Networks, Inc.
   4 * Copyright 2005-2006, Devicescape Software, Inc.
   5 * Copyright 2006-2007  Jiri Benc <jbenc@suse.cz>
   6 * Copyright 2007       Johannes Berg <johannes@sipsolutions.net>
   7 * Copyright 2013-2014  Intel Mobile Communications GmbH
   8 * Copyright (C) 2018-2020 Intel Corporation
   9 *
  10 * Transmit and frame generation functions.
  11 */
  12
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/skbuff.h>
  16#include <linux/if_vlan.h>
  17#include <linux/etherdevice.h>
  18#include <linux/bitmap.h>
  19#include <linux/rcupdate.h>
  20#include <linux/export.h>
  21#include <net/net_namespace.h>
  22#include <net/ieee80211_radiotap.h>
  23#include <net/cfg80211.h>
  24#include <net/mac80211.h>
  25#include <net/codel.h>
  26#include <net/codel_impl.h>
  27#include <asm/unaligned.h>
  28#include <net/fq_impl.h>
  29
  30#include "ieee80211_i.h"
  31#include "driver-ops.h"
  32#include "led.h"
  33#include "mesh.h"
  34#include "wep.h"
  35#include "wpa.h"
  36#include "wme.h"
  37#include "rate.h"
  38
  39/* misc utils */
  40
  41static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
  42{
  43        struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
  44
  45        u64_stats_update_begin(&tstats->syncp);
  46        tstats->tx_packets++;
  47        tstats->tx_bytes += len;
  48        u64_stats_update_end(&tstats->syncp);
  49}
  50
  51static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
  52                                 struct sk_buff *skb, int group_addr,
  53                                 int next_frag_len)
  54{
  55        int rate, mrate, erp, dur, i, shift = 0;
  56        struct ieee80211_rate *txrate;
  57        struct ieee80211_local *local = tx->local;
  58        struct ieee80211_supported_band *sband;
  59        struct ieee80211_hdr *hdr;
  60        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  61        struct ieee80211_chanctx_conf *chanctx_conf;
  62        u32 rate_flags = 0;
  63
  64        /* assume HW handles this */
  65        if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
  66                return 0;
  67
  68        rcu_read_lock();
  69        chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
  70        if (chanctx_conf) {
  71                shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
  72                rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
  73        }
  74        rcu_read_unlock();
  75
  76        /* shouldn't happen - rate control must have set a valid rate index */
  77        if (WARN_ON_ONCE(tx->rate.idx < 0))
  78                return 0;
  79
  80        sband = local->hw.wiphy->bands[info->band];
  81        txrate = &sband->bitrates[tx->rate.idx];
  82
  83        erp = txrate->flags & IEEE80211_RATE_ERP_G;
  84
  85        /*
  86         * data and mgmt (except PS Poll):
  87         * - during CFP: 32768
  88         * - during contention period:
  89         *   if addr1 is group address: 0
  90         *   if more fragments = 0 and addr1 is individual address: time to
  91         *      transmit one ACK plus SIFS
  92         *   if more fragments = 1 and addr1 is individual address: time to
  93         *      transmit next fragment plus 2 x ACK plus 3 x SIFS
  94         *
  95         * IEEE 802.11, 9.6:
  96         * - control response frame (CTS or ACK) shall be transmitted using the
  97         *   same rate as the immediately previous frame in the frame exchange
  98         *   sequence, if this rate belongs to the PHY mandatory rates, or else
  99         *   at the highest possible rate belonging to the PHY rates in the
 100         *   BSSBasicRateSet
 101         */
 102        hdr = (struct ieee80211_hdr *)skb->data;
 103        if (ieee80211_is_ctl(hdr->frame_control)) {
 104                /* TODO: These control frames are not currently sent by
 105                 * mac80211, but should they be implemented, this function
 106                 * needs to be updated to support duration field calculation.
 107                 *
 108                 * RTS: time needed to transmit pending data/mgmt frame plus
 109                 *    one CTS frame plus one ACK frame plus 3 x SIFS
 110                 * CTS: duration of immediately previous RTS minus time
 111                 *    required to transmit CTS and its SIFS
 112                 * ACK: 0 if immediately previous directed data/mgmt had
 113                 *    more=0, with more=1 duration in ACK frame is duration
 114                 *    from previous frame minus time needed to transmit ACK
 115                 *    and its SIFS
 116                 * PS Poll: BIT(15) | BIT(14) | aid
 117                 */
 118                return 0;
 119        }
 120
 121        /* data/mgmt */
 122        if (0 /* FIX: data/mgmt during CFP */)
 123                return cpu_to_le16(32768);
 124
 125        if (group_addr) /* Group address as the destination - no ACK */
 126                return 0;
 127
 128        /* Individual destination address:
 129         * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
 130         * CTS and ACK frames shall be transmitted using the highest rate in
 131         * basic rate set that is less than or equal to the rate of the
 132         * immediately previous frame and that is using the same modulation
 133         * (CCK or OFDM). If no basic rate set matches with these requirements,
 134         * the highest mandatory rate of the PHY that is less than or equal to
 135         * the rate of the previous frame is used.
 136         * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
 137         */
 138        rate = -1;
 139        /* use lowest available if everything fails */
 140        mrate = sband->bitrates[0].bitrate;
 141        for (i = 0; i < sband->n_bitrates; i++) {
 142                struct ieee80211_rate *r = &sband->bitrates[i];
 143
 144                if (r->bitrate > txrate->bitrate)
 145                        break;
 146
 147                if ((rate_flags & r->flags) != rate_flags)
 148                        continue;
 149
 150                if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
 151                        rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
 152
 153                switch (sband->band) {
 154                case NL80211_BAND_2GHZ: {
 155                        u32 flag;
 156                        if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
 157                                flag = IEEE80211_RATE_MANDATORY_G;
 158                        else
 159                                flag = IEEE80211_RATE_MANDATORY_B;
 160                        if (r->flags & flag)
 161                                mrate = r->bitrate;
 162                        break;
 163                }
 164                case NL80211_BAND_5GHZ:
 165                case NL80211_BAND_6GHZ:
 166                        if (r->flags & IEEE80211_RATE_MANDATORY_A)
 167                                mrate = r->bitrate;
 168                        break;
 169                case NL80211_BAND_S1GHZ:
 170                case NL80211_BAND_60GHZ:
 171                        /* TODO, for now fall through */
 172                case NUM_NL80211_BANDS:
 173                        WARN_ON(1);
 174                        break;
 175                }
 176        }
 177        if (rate == -1) {
 178                /* No matching basic rate found; use highest suitable mandatory
 179                 * PHY rate */
 180                rate = DIV_ROUND_UP(mrate, 1 << shift);
 181        }
 182
 183        /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
 184        if (ieee80211_is_data_qos(hdr->frame_control) &&
 185            *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
 186                dur = 0;
 187        else
 188                /* Time needed to transmit ACK
 189                 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
 190                 * to closest integer */
 191                dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
 192                                tx->sdata->vif.bss_conf.use_short_preamble,
 193                                shift);
 194
 195        if (next_frag_len) {
 196                /* Frame is fragmented: duration increases with time needed to
 197                 * transmit next fragment plus ACK and 2 x SIFS. */
 198                dur *= 2; /* ACK + SIFS */
 199                /* next fragment */
 200                dur += ieee80211_frame_duration(sband->band, next_frag_len,
 201                                txrate->bitrate, erp,
 202                                tx->sdata->vif.bss_conf.use_short_preamble,
 203                                shift);
 204        }
 205
 206        return cpu_to_le16(dur);
 207}
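/*
 * Illustrative sketch, not mac80211 code: the rate-selection rule used by
 * ieee80211_duration() above, reduced to its core.  Walk the (ascending)
 * bitrate table, remember the highest basic rate that does not exceed the
 * TX rate, and fall back to the highest mandatory rate otherwise.  The
 * struct and function names below are made up for illustration; bitrates
 * are in units of 100 kbps like the bitrate tables above.
 */
struct example_rate {
	int bitrate;	/* units of 100 kbps, table sorted ascending */
	int mandatory;	/* non-zero if the PHY declares this rate mandatory */
};

static int example_pick_response_rate(const struct example_rate *rates,
				      int n_rates, unsigned int basic_mask,
				      int tx_bitrate)
{
	int i, rate = -1, mrate = rates[0].bitrate;

	for (i = 0; i < n_rates; i++) {
		if (rates[i].bitrate > tx_bitrate)
			break;				/* table is ascending */
		if (basic_mask & (1U << i))
			rate = rates[i].bitrate;	/* highest basic rate <= TX rate */
		if (rates[i].mandatory)
			mrate = rates[i].bitrate;	/* highest mandatory rate <= TX rate */
	}

	return rate != -1 ? rate : mrate;
}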
 208
 209/* tx handlers */
 210static ieee80211_tx_result debug_noinline
 211ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
 212{
 213        struct ieee80211_local *local = tx->local;
 214        struct ieee80211_if_managed *ifmgd;
 215        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 216
 217        /* driver doesn't support power save */
 218        if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
 219                return TX_CONTINUE;
 220
 221        /* hardware does dynamic power save */
 222        if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
 223                return TX_CONTINUE;
 224
 225        /* dynamic power save disabled */
 226        if (local->hw.conf.dynamic_ps_timeout <= 0)
 227                return TX_CONTINUE;
 228
 229        /* we are scanning, don't enable power save */
 230        if (local->scanning)
 231                return TX_CONTINUE;
 232
 233        if (!local->ps_sdata)
 234                return TX_CONTINUE;
 235
 236        /* No point if we're going to suspend */
 237        if (local->quiescing)
 238                return TX_CONTINUE;
 239
 240        /* dynamic ps is supported only in managed mode */
 241        if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
 242                return TX_CONTINUE;
 243
 244        if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
 245                return TX_CONTINUE;
 246
 247        ifmgd = &tx->sdata->u.mgd;
 248
 249        /*
 250         * Don't wake up from power save if u-apsd is enabled, voip ac has
 251         * u-apsd enabled and the frame is in voip class. This effectively
 252         * means that even if all access categories have u-apsd enabled, in
 253         * practice u-apsd is only used with the voip ac. This is a
 254         * workaround for the case when received voip class packets do not
 255         * have a correct qos tag for some reason, due to the network or the
 256         * peer application.
 257         *
 258         * Note: ifmgd->uapsd_queues access is racy here. If the value is
 259         * changed via debugfs, user needs to reassociate manually to have
 260         * everything in sync.
 261         */
 262        if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
 263            (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
 264            skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
 265                return TX_CONTINUE;
 266
 267        if (local->hw.conf.flags & IEEE80211_CONF_PS) {
 268                ieee80211_stop_queues_by_reason(&local->hw,
 269                                                IEEE80211_MAX_QUEUE_MAP,
 270                                                IEEE80211_QUEUE_STOP_REASON_PS,
 271                                                false);
 272                ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
 273                ieee80211_queue_work(&local->hw,
 274                                     &local->dynamic_ps_disable_work);
 275        }
 276
 277        /* Don't restart the timer if we're not associated */
 278        if (!ifmgd->associated)
 279                return TX_CONTINUE;
 280
 281        mod_timer(&local->dynamic_ps_timer, jiffies +
 282                  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
 283
 284        return TX_CONTINUE;
 285}
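/*
 * Illustrative sketch, not mac80211 code: the dynamic-PS pattern used above.
 * Every transmitted frame pushes an inactivity deadline further into the
 * future (the mod_timer() call above), and power save is only re-entered
 * once the deadline expires with no new traffic.  Names and time units
 * below are made up for illustration.
 */
struct example_dyn_ps {
	unsigned long deadline;	/* absolute time, arbitrary tick units */
	unsigned long timeout;	/* inactivity timeout, same units */
};

static void example_dyn_ps_on_tx(struct example_dyn_ps *ps, unsigned long now)
{
	ps->deadline = now + ps->timeout;	/* re-arm on every TX */
}

static int example_dyn_ps_may_doze(const struct example_dyn_ps *ps,
				   unsigned long now)
{
	/* signed comparison tolerates tick wrap-around, like time_after() */
	return (long)(now - ps->deadline) >= 0;
}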
 286
 287static ieee80211_tx_result debug_noinline
 288ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
 289{
 290
 291        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 292        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 293        bool assoc = false;
 294
 295        if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
 296                return TX_CONTINUE;
 297
 298        if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
 299            test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
 300            !ieee80211_is_probe_req(hdr->frame_control) &&
 301            !ieee80211_is_any_nullfunc(hdr->frame_control))
 302                /*
 303                 * When software scanning, only nullfunc frames (to notify
 304                 * the sleep state to the AP) and probe requests (for the
 305                 * active scan) are allowed, all other frames should not be
 306                 * sent and we should not get here, but if we do
 307                 * nonetheless, drop them to avoid sending them
 308                 * off-channel. See the link below and
 309                 * ieee80211_start_scan() for more.
 310                 *
 311                 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
 312                 */
 313                return TX_DROP;
 314
 315        if (tx->sdata->vif.type == NL80211_IFTYPE_OCB)
 316                return TX_CONTINUE;
 317
 318        if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
 319                return TX_CONTINUE;
 320
 321        if (tx->flags & IEEE80211_TX_PS_BUFFERED)
 322                return TX_CONTINUE;
 323
 324        if (tx->sta)
 325                assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
 326
 327        if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
 328                if (unlikely(!assoc &&
 329                             ieee80211_is_data(hdr->frame_control))) {
 330#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 331                        sdata_info(tx->sdata,
 332                                   "dropped data frame to not associated station %pM\n",
 333                                   hdr->addr1);
 334#endif
 335                        I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
 336                        return TX_DROP;
 337                }
 338        } else if (unlikely(ieee80211_is_data(hdr->frame_control) &&
 339                            ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) {
 340                /*
 341                 * No associated STAs - no need to send multicast
 342                 * frames.
 343                 */
 344                return TX_DROP;
 345        }
 346
 347        return TX_CONTINUE;
 348}
 349
 350/* This function is called whenever the AP is about to exceed the maximum limit
 351 * of buffered frames for power saving STAs. This situation should not really
 352 * happen often during normal operation, so dropping the oldest buffered packet
 353 * from each queue should be OK to make some room for new frames. */
 354static void purge_old_ps_buffers(struct ieee80211_local *local)
 355{
 356        int total = 0, purged = 0;
 357        struct sk_buff *skb;
 358        struct ieee80211_sub_if_data *sdata;
 359        struct sta_info *sta;
 360
 361        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 362                struct ps_data *ps;
 363
 364                if (sdata->vif.type == NL80211_IFTYPE_AP)
 365                        ps = &sdata->u.ap.ps;
 366                else if (ieee80211_vif_is_mesh(&sdata->vif))
 367                        ps = &sdata->u.mesh.ps;
 368                else
 369                        continue;
 370
 371                skb = skb_dequeue(&ps->bc_buf);
 372                if (skb) {
 373                        purged++;
 374                        ieee80211_free_txskb(&local->hw, skb);
 375                }
 376                total += skb_queue_len(&ps->bc_buf);
 377        }
 378
 379        /*
 380         * Drop one frame from each station from the lowest-priority
 381         * AC that has frames at all.
 382         */
 383        list_for_each_entry_rcu(sta, &local->sta_list, list) {
 384                int ac;
 385
 386                for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
 387                        skb = skb_dequeue(&sta->ps_tx_buf[ac]);
 388                        total += skb_queue_len(&sta->ps_tx_buf[ac]);
 389                        if (skb) {
 390                                purged++;
 391                                ieee80211_free_txskb(&local->hw, skb);
 392                                break;
 393                        }
 394                }
 395        }
 396
 397        local->total_ps_buffered = total;
 398        ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
 399}
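/*
 * Illustrative sketch, not mac80211 code: the purge order used above.
 * mac80211 numbers the ACs VO=0, VI=1, BE=2, BK=3, so counting down from
 * BK to VO visits the lowest-priority category first and a frame is
 * dropped from the first non-empty queue.  Names are made up.
 */
static int example_pick_purge_ac(const unsigned int qlen[4])
{
	int ac;

	/* 3 == BK (lowest priority) ... 0 == VO (highest priority) */
	for (ac = 3; ac >= 0; ac--)
		if (qlen[ac])
			return ac;	/* drop one frame from this AC */

	return -1;			/* nothing buffered for this STA */
}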
 400
 401static ieee80211_tx_result
 402ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
 403{
 404        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 405        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 406        struct ps_data *ps;
 407
 408        /*
 409         * broadcast/multicast frame
 410         *
 411         * If any of the associated/peer stations is in power save mode,
 412         * the frame is buffered to be sent after DTIM beacon frame.
 413         * This is done either by the hardware or us.
 414         */
 415
 416        /* powersaving STAs currently only in AP/VLAN/mesh mode */
 417        if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
 418            tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
 419                if (!tx->sdata->bss)
 420                        return TX_CONTINUE;
 421
 422                ps = &tx->sdata->bss->ps;
 423        } else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
 424                ps = &tx->sdata->u.mesh.ps;
 425        } else {
 426                return TX_CONTINUE;
 427        }
 428
 429
 430        /* no buffering for ordered frames */
 431        if (ieee80211_has_order(hdr->frame_control))
 432                return TX_CONTINUE;
 433
 434        if (ieee80211_is_probe_req(hdr->frame_control))
 435                return TX_CONTINUE;
 436
 437        if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
 438                info->hw_queue = tx->sdata->vif.cab_queue;
 439
 440        /* no stations in PS mode and no buffered packets */
 441        if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
 442                return TX_CONTINUE;
 443
 444        info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
 445
 446        /* device releases frame after DTIM beacon */
 447        if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
 448                return TX_CONTINUE;
 449
 450        /* buffered in mac80211 */
 451        if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
 452                purge_old_ps_buffers(tx->local);
 453
 454        if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
 455                ps_dbg(tx->sdata,
 456                       "BC TX buffer full - dropping the oldest frame\n");
 457                ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
 458        } else
 459                tx->local->total_ps_buffered++;
 460
 461        skb_queue_tail(&ps->bc_buf, tx->skb);
 462
 463        return TX_QUEUED;
 464}
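/*
 * Illustrative sketch, not mac80211 code: the bounded-buffer policy applied
 * to ps->bc_buf above.  When the buffer has reached its limit the oldest
 * entry is dropped so the newest frame can always be queued; mac80211 does
 * this with sk_buff queues and AP_MAX_BC_BUFFER, the ring below is only a
 * stand-in.
 */
struct example_bounded_fifo {
	int buf[8];			/* stand-in for AP_MAX_BC_BUFFER slots */
	unsigned int head, len;
};

/* returns 1 if an old entry had to be dropped to make room */
static int example_fifo_push_drop_oldest(struct example_bounded_fifo *q, int v)
{
	unsigned int cap = sizeof(q->buf) / sizeof(q->buf[0]);
	int dropped = 0;

	if (q->len == cap) {
		q->head = (q->head + 1) % cap;	/* drop the oldest entry */
		q->len--;
		dropped = 1;
	}
	q->buf[(q->head + q->len) % cap] = v;	/* append the newest entry */
	q->len++;

	return dropped;
}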
 465
 466static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
 467                             struct sk_buff *skb)
 468{
 469        if (!ieee80211_is_mgmt(fc))
 470                return 0;
 471
 472        if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
 473                return 0;
 474
 475        if (!ieee80211_is_robust_mgmt_frame(skb))
 476                return 0;
 477
 478        return 1;
 479}
 480
 481static ieee80211_tx_result
 482ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 483{
 484        struct sta_info *sta = tx->sta;
 485        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 486        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 487        struct ieee80211_local *local = tx->local;
 488
 489        if (unlikely(!sta))
 490                return TX_CONTINUE;
 491
 492        if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
 493                      test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
 494                      test_sta_flag(sta, WLAN_STA_PS_DELIVER)) &&
 495                     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
 496                int ac = skb_get_queue_mapping(tx->skb);
 497
 498                if (ieee80211_is_mgmt(hdr->frame_control) &&
 499                    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
 500                        info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
 501                        return TX_CONTINUE;
 502                }
 503
 504                ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
 505                       sta->sta.addr, sta->sta.aid, ac);
 506                if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
 507                        purge_old_ps_buffers(tx->local);
 508
 509                /* sync with ieee80211_sta_ps_deliver_wakeup */
 510                spin_lock(&sta->ps_lock);
 511                /*
 512         * STA woke up in the meantime and all the frames on ps_tx_buf have
 513                 * been queued to pending queue. No reordering can happen, go
 514                 * ahead and Tx the packet.
 515                 */
 516                if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
 517                    !test_sta_flag(sta, WLAN_STA_PS_DRIVER) &&
 518                    !test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
 519                        spin_unlock(&sta->ps_lock);
 520                        return TX_CONTINUE;
 521                }
 522
 523                if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
 524                        struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
 525                        ps_dbg(tx->sdata,
 526                               "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
 527                               sta->sta.addr, ac);
 528                        ieee80211_free_txskb(&local->hw, old);
 529                } else
 530                        tx->local->total_ps_buffered++;
 531
 532                info->control.jiffies = jiffies;
 533                info->control.vif = &tx->sdata->vif;
 534                info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
 535                info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
 536                skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
 537                spin_unlock(&sta->ps_lock);
 538
 539                if (!timer_pending(&local->sta_cleanup))
 540                        mod_timer(&local->sta_cleanup,
 541                                  round_jiffies(jiffies +
 542                                                STA_INFO_CLEANUP_INTERVAL));
 543
 544                /*
 545                 * We queued up some frames, so the TIM bit might
 546                 * need to be set; recalculate it.
 547                 */
 548                sta_info_recalc_tim(sta);
 549
 550                return TX_QUEUED;
 551        } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
 552                ps_dbg(tx->sdata,
 553                       "STA %pM in PS mode, but polling/in SP -> send frame\n",
 554                       sta->sta.addr);
 555        }
 556
 557        return TX_CONTINUE;
 558}
 559
 560static ieee80211_tx_result debug_noinline
 561ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 562{
 563        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
 564                return TX_CONTINUE;
 565
 566        if (tx->flags & IEEE80211_TX_UNICAST)
 567                return ieee80211_tx_h_unicast_ps_buf(tx);
 568        else
 569                return ieee80211_tx_h_multicast_ps_buf(tx);
 570}
 571
 572static ieee80211_tx_result debug_noinline
 573ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
 574{
 575        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 576
 577        if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
 578                if (tx->sdata->control_port_no_encrypt)
 579                        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
 580                info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
 581                info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
 582        }
 583
 584        return TX_CONTINUE;
 585}
 586
 587static ieee80211_tx_result debug_noinline
 588ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 589{
 590        struct ieee80211_key *key;
 591        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 592        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 593
 594        if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
 595                tx->key = NULL;
 596                return TX_CONTINUE;
 597        }
 598
 599        if (tx->sta &&
 600            (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
 601                tx->key = key;
 602        else if (ieee80211_is_group_privacy_action(tx->skb) &&
 603                (key = rcu_dereference(tx->sdata->default_multicast_key)))
 604                tx->key = key;
 605        else if (ieee80211_is_mgmt(hdr->frame_control) &&
 606                 is_multicast_ether_addr(hdr->addr1) &&
 607                 ieee80211_is_robust_mgmt_frame(tx->skb) &&
 608                 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
 609                tx->key = key;
 610        else if (is_multicast_ether_addr(hdr->addr1) &&
 611                 (key = rcu_dereference(tx->sdata->default_multicast_key)))
 612                tx->key = key;
 613        else if (!is_multicast_ether_addr(hdr->addr1) &&
 614                 (key = rcu_dereference(tx->sdata->default_unicast_key)))
 615                tx->key = key;
 616        else
 617                tx->key = NULL;
 618
 619        if (tx->key) {
 620                bool skip_hw = false;
 621
 622                /* TODO: add threshold stuff again */
 623
 624                switch (tx->key->conf.cipher) {
 625                case WLAN_CIPHER_SUITE_WEP40:
 626                case WLAN_CIPHER_SUITE_WEP104:
 627                case WLAN_CIPHER_SUITE_TKIP:
 628                        if (!ieee80211_is_data_present(hdr->frame_control))
 629                                tx->key = NULL;
 630                        break;
 631                case WLAN_CIPHER_SUITE_CCMP:
 632                case WLAN_CIPHER_SUITE_CCMP_256:
 633                case WLAN_CIPHER_SUITE_GCMP:
 634                case WLAN_CIPHER_SUITE_GCMP_256:
 635                        if (!ieee80211_is_data_present(hdr->frame_control) &&
 636                            !ieee80211_use_mfp(hdr->frame_control, tx->sta,
 637                                               tx->skb) &&
 638                            !ieee80211_is_group_privacy_action(tx->skb))
 639                                tx->key = NULL;
 640                        else
 641                                skip_hw = (tx->key->conf.flags &
 642                                           IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
 643                                        ieee80211_is_mgmt(hdr->frame_control);
 644                        break;
 645                case WLAN_CIPHER_SUITE_AES_CMAC:
 646                case WLAN_CIPHER_SUITE_BIP_CMAC_256:
 647                case WLAN_CIPHER_SUITE_BIP_GMAC_128:
 648                case WLAN_CIPHER_SUITE_BIP_GMAC_256:
 649                        if (!ieee80211_is_mgmt(hdr->frame_control))
 650                                tx->key = NULL;
 651                        break;
 652                }
 653
 654                if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
 655                             !ieee80211_is_deauth(hdr->frame_control)))
 656                        return TX_DROP;
 657
 658                if (!skip_hw && tx->key &&
 659                    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
 660                        info->control.hw_key = &tx->key->conf;
 661        } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
 662                   test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
 663                return TX_DROP;
 664        }
 665
 666        return TX_CONTINUE;
 667}
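/*
 * Illustrative sketch, not mac80211 code: a simplified version of the key
 * fallback order coded above (the group-privacy-action case is omitted).
 * A pairwise key always wins; protected multicast management frames use
 * the management group key; other group-addressed frames use the default
 * multicast key; everything else uses the default unicast key.  Names are
 * made up for illustration.
 */
static const void *example_select_key(const void *pairwise_key,
				      const void *mgmt_group_key,
				      const void *multicast_key,
				      const void *unicast_key,
				      int is_multicast, int is_robust_mgmt)
{
	if (pairwise_key)
		return pairwise_key;
	if (is_multicast && is_robust_mgmt && mgmt_group_key)
		return mgmt_group_key;
	if (is_multicast && multicast_key)
		return multicast_key;
	if (!is_multicast && unicast_key)
		return unicast_key;

	return 0;	/* no key: frame goes out unprotected or is dropped */
}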
 668
 669static ieee80211_tx_result debug_noinline
 670ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 671{
 672        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 673        struct ieee80211_hdr *hdr = (void *)tx->skb->data;
 674        struct ieee80211_supported_band *sband;
 675        u32 len;
 676        struct ieee80211_tx_rate_control txrc;
 677        struct ieee80211_sta_rates *ratetbl = NULL;
 678        bool assoc = false;
 679
 680        memset(&txrc, 0, sizeof(txrc));
 681
 682        sband = tx->local->hw.wiphy->bands[info->band];
 683
 684        len = min_t(u32, tx->skb->len + FCS_LEN,
 685                         tx->local->hw.wiphy->frag_threshold);
 686
 687        /* set up the tx rate control struct we give the RC algo */
 688        txrc.hw = &tx->local->hw;
 689        txrc.sband = sband;
 690        txrc.bss_conf = &tx->sdata->vif.bss_conf;
 691        txrc.skb = tx->skb;
 692        txrc.reported_rate.idx = -1;
 693        txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
 694
 695        if (tx->sdata->rc_has_mcs_mask[info->band])
 696                txrc.rate_idx_mcs_mask =
 697                        tx->sdata->rc_rateidx_mcs_mask[info->band];
 698
 699        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
 700                    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
 701                    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
 702                    tx->sdata->vif.type == NL80211_IFTYPE_OCB);
 703
 704        /* set up RTS protection if desired */
 705        if (len > tx->local->hw.wiphy->rts_threshold) {
 706                txrc.rts = true;
 707        }
 708
 709        info->control.use_rts = txrc.rts;
 710        info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot;
 711
 712        /*
 713         * Use short preamble if the BSS can handle it, but not for
 714         * management frames unless we know the receiver can handle
 715         * that -- the management frame might be to a station that
 716         * just wants a probe response.
 717         */
 718        if (tx->sdata->vif.bss_conf.use_short_preamble &&
 719            (ieee80211_is_data(hdr->frame_control) ||
 720             (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
 721                txrc.short_preamble = true;
 722
 723        info->control.short_preamble = txrc.short_preamble;
 724
 725        /* don't ask rate control when rate already injected via radiotap */
 726        if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
 727                return TX_CONTINUE;
 728
 729        if (tx->sta)
 730                assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
 731
 732        /*
 733         * Let's not bother rate control if we're associated and cannot
 734         * talk to the sta. This should not happen.
 735         */
 736        if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
 737                 !rate_usable_index_exists(sband, &tx->sta->sta),
 738                 "%s: Dropped data frame as no usable bitrate found while "
 739                 "scanning and associated. Target station: "
 740                 "%pM on %d GHz band\n",
 741                 tx->sdata->name, hdr->addr1,
 742                 info->band ? 5 : 2))
 743                return TX_DROP;
 744
 745        /*
 746         * If we're associated with the sta at this point we know we can at
 747         * least send the frame at the lowest bit rate.
 748         */
 749        rate_control_get_rate(tx->sdata, tx->sta, &txrc);
 750
 751        if (tx->sta && !info->control.skip_table)
 752                ratetbl = rcu_dereference(tx->sta->sta.rates);
 753
 754        if (unlikely(info->control.rates[0].idx < 0)) {
 755                if (ratetbl) {
 756                        struct ieee80211_tx_rate rate = {
 757                                .idx = ratetbl->rate[0].idx,
 758                                .flags = ratetbl->rate[0].flags,
 759                                .count = ratetbl->rate[0].count
 760                        };
 761
 762                        if (ratetbl->rate[0].idx < 0)
 763                                return TX_DROP;
 764
 765                        tx->rate = rate;
 766                } else {
 767                        return TX_DROP;
 768                }
 769        } else {
 770                tx->rate = info->control.rates[0];
 771        }
 772
 773        if (txrc.reported_rate.idx < 0) {
 774                txrc.reported_rate = tx->rate;
 775                if (tx->sta && ieee80211_is_data(hdr->frame_control))
 776                        tx->sta->tx_stats.last_rate = txrc.reported_rate;
 777        } else if (tx->sta)
 778                tx->sta->tx_stats.last_rate = txrc.reported_rate;
 779
 780        if (ratetbl)
 781                return TX_CONTINUE;
 782
 783        if (unlikely(!info->control.rates[0].count))
 784                info->control.rates[0].count = 1;
 785
 786        if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
 787                         (info->flags & IEEE80211_TX_CTL_NO_ACK)))
 788                info->control.rates[0].count = 1;
 789
 790        return TX_CONTINUE;
 791}
 792
 793static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
 794{
 795        u16 *seq = &sta->tid_seq[tid];
 796        __le16 ret = cpu_to_le16(*seq);
 797
 798        /* Increase the sequence number. */
 799        *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
 800
 801        return ret;
 802}
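/*
 * Illustrative sketch, not mac80211 code: the sequence-control arithmetic
 * used above.  The 12-bit sequence number occupies bits 4..15 of seq_ctrl
 * (the low four bits are the fragment number), so the counter advances by
 * 0x10 and the 0xfff0 mask (IEEE80211_SCTL_SEQ) makes it wrap after 4095
 * without touching the fragment bits.
 */
static unsigned short example_next_seq(unsigned short *counter)
{
	unsigned short ret = *counter;

	*counter = (*counter + 0x10) & 0xfff0;
	return ret;
}

/*
 * Starting from 0x0ff0 (sequence number 255), successive calls return
 * 0x0ff0, 0x1000, 0x1010, ...; after 0xfff0 (sequence number 4095) the
 * counter wraps back to 0x0000.
 */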
 803
 804static ieee80211_tx_result debug_noinline
 805ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
 806{
 807        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 808        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 809        int tid;
 810
 811        /*
 812         * Packet injection may want to control the sequence
 813         * number, if we have no matching interface then we
 814         * neither assign one ourselves nor ask the driver to.
 815         */
 816        if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
 817                return TX_CONTINUE;
 818
 819        if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
 820                return TX_CONTINUE;
 821
 822        if (ieee80211_hdrlen(hdr->frame_control) < 24)
 823                return TX_CONTINUE;
 824
 825        if (ieee80211_is_qos_nullfunc(hdr->frame_control))
 826                return TX_CONTINUE;
 827
 828        if (info->control.flags & IEEE80211_TX_CTRL_NO_SEQNO)
 829                return TX_CONTINUE;
 830
 831        /*
 832         * Anything but QoS data that has a sequence number field
 833         * (is long enough) gets a sequence number from the global
 834         * counter.  QoS data frames with a multicast destination
 835         * also use the global counter (802.11-2012 9.3.2.10).
 836         */
 837        if (!ieee80211_is_data_qos(hdr->frame_control) ||
 838            is_multicast_ether_addr(hdr->addr1)) {
 839                /* driver should assign sequence number */
 840                info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
 841                /* for pure STA mode without beacons, we can do it */
 842                hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
 843                tx->sdata->sequence_number += 0x10;
 844                if (tx->sta)
 845                        tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++;
 846                return TX_CONTINUE;
 847        }
 848
 849        /*
 850         * This should be true for injected/management frames only; for
 851         * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
 852         * above since they are not QoS-data frames.
 853         */
 854        if (!tx->sta)
 855                return TX_CONTINUE;
 856
 857        /* include per-STA, per-TID sequence counter */
 858        tid = ieee80211_get_tid(hdr);
 859        tx->sta->tx_stats.msdu[tid]++;
 860
 861        hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
 862
 863        return TX_CONTINUE;
 864}
 865
 866static int ieee80211_fragment(struct ieee80211_tx_data *tx,
 867                              struct sk_buff *skb, int hdrlen,
 868                              int frag_threshold)
 869{
 870        struct ieee80211_local *local = tx->local;
 871        struct ieee80211_tx_info *info;
 872        struct sk_buff *tmp;
 873        int per_fragm = frag_threshold - hdrlen - FCS_LEN;
 874        int pos = hdrlen + per_fragm;
 875        int rem = skb->len - hdrlen - per_fragm;
 876
 877        if (WARN_ON(rem < 0))
 878                return -EINVAL;
 879
 880        /* first fragment was already added to queue by caller */
 881
 882        while (rem) {
 883                int fraglen = per_fragm;
 884
 885                if (fraglen > rem)
 886                        fraglen = rem;
 887                rem -= fraglen;
 888                tmp = dev_alloc_skb(local->tx_headroom +
 889                                    frag_threshold +
 890                                    tx->sdata->encrypt_headroom +
 891                                    IEEE80211_ENCRYPT_TAILROOM);
 892                if (!tmp)
 893                        return -ENOMEM;
 894
 895                __skb_queue_tail(&tx->skbs, tmp);
 896
 897                skb_reserve(tmp,
 898                            local->tx_headroom + tx->sdata->encrypt_headroom);
 899
 900                /* copy control information */
 901                memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
 902
 903                info = IEEE80211_SKB_CB(tmp);
 904                info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
 905                                 IEEE80211_TX_CTL_FIRST_FRAGMENT);
 906
 907                if (rem)
 908                        info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
 909
 910                skb_copy_queue_mapping(tmp, skb);
 911                tmp->priority = skb->priority;
 912                tmp->dev = skb->dev;
 913
 914                /* copy header and data */
 915                skb_put_data(tmp, skb->data, hdrlen);
 916                skb_put_data(tmp, skb->data + pos, fraglen);
 917
 918                pos += fraglen;
 919        }
 920
 921        /* adjust first fragment's length */
 922        skb_trim(skb, hdrlen + per_fragm);
 923        return 0;
 924}
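/*
 * Illustrative sketch, not mac80211 code: the arithmetic behind
 * ieee80211_fragment() above.  Each fragment carries at most
 * frag_threshold - hdrlen - FCS_LEN bytes of payload, so the fragment
 * count is a ceiling division.  For example, a 24-byte header, 4-byte FCS,
 * 256-byte threshold and 1000 bytes of payload give 228 bytes per fragment
 * and therefore 5 fragments.  The helper name is made up.
 */
static int example_fragment_count(int payload_len, int hdrlen,
				  int frag_threshold, int fcs_len)
{
	int per_fragm = frag_threshold - hdrlen - fcs_len;

	if (per_fragm <= 0 || payload_len <= per_fragm)
		return 1;	/* fits in a single MPDU (or cannot fragment) */

	/* every fragment carries per_fragm bytes except possibly the last */
	return (payload_len + per_fragm - 1) / per_fragm;
}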
 925
 926static ieee80211_tx_result debug_noinline
 927ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 928{
 929        struct sk_buff *skb = tx->skb;
 930        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 931        struct ieee80211_hdr *hdr = (void *)skb->data;
 932        int frag_threshold = tx->local->hw.wiphy->frag_threshold;
 933        int hdrlen;
 934        int fragnum;
 935
 936        /* no matter what happens, tx->skb moves to tx->skbs */
 937        __skb_queue_tail(&tx->skbs, skb);
 938        tx->skb = NULL;
 939
 940        if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
 941                return TX_CONTINUE;
 942
 943        if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG))
 944                return TX_CONTINUE;
 945
 946        /*
 947         * Warn when submitting a fragmented A-MPDU frame and drop it.
 948         * This scenario is handled in ieee80211_tx_prepare, but extra
 949         * caution is taken here as a fragmented ampdu may cause a Tx stop.
 950         */
 951        if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
 952                return TX_DROP;
 953
 954        hdrlen = ieee80211_hdrlen(hdr->frame_control);
 955
 956        /* internal error, why isn't DONTFRAG set? */
 957        if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
 958                return TX_DROP;
 959
 960        /*
 961         * Now fragment the frame. This will allocate all the fragments and
 962         * chain them (using skb as the first fragment) to skb->next.
 963         * During transmission, we will remove the successfully transmitted
 964         * fragments from this list. When the low-level driver rejects one
 965         * of the fragments then we will simply pretend to accept the skb
 966         * but store it away as pending.
 967         */
 968        if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
 969                return TX_DROP;
 970
 971        /* update duration/seq/flags of fragments */
 972        fragnum = 0;
 973
 974        skb_queue_walk(&tx->skbs, skb) {
 975                const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
 976
 977                hdr = (void *)skb->data;
 978                info = IEEE80211_SKB_CB(skb);
 979
 980                if (!skb_queue_is_last(&tx->skbs, skb)) {
 981                        hdr->frame_control |= morefrags;
 982                        /*
 983                         * No multi-rate retries for fragmented frames, that
 984                         * would completely throw off the NAV at other STAs.
 985                         */
 986                        info->control.rates[1].idx = -1;
 987                        info->control.rates[2].idx = -1;
 988                        info->control.rates[3].idx = -1;
 989                        BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
 990                        info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
 991                } else {
 992                        hdr->frame_control &= ~morefrags;
 993                }
 994                hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
 995                fragnum++;
 996        }
 997
 998        return TX_CONTINUE;
 999}
1000
1001static ieee80211_tx_result debug_noinline
1002ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
1003{
1004        struct sk_buff *skb;
1005        int ac = -1;
1006
1007        if (!tx->sta)
1008                return TX_CONTINUE;
1009
1010        skb_queue_walk(&tx->skbs, skb) {
1011                ac = skb_get_queue_mapping(skb);
1012                tx->sta->tx_stats.bytes[ac] += skb->len;
1013        }
1014        if (ac >= 0)
1015                tx->sta->tx_stats.packets[ac]++;
1016
1017        return TX_CONTINUE;
1018}
1019
1020static ieee80211_tx_result debug_noinline
1021ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
1022{
1023        if (!tx->key)
1024                return TX_CONTINUE;
1025
1026        switch (tx->key->conf.cipher) {
1027        case WLAN_CIPHER_SUITE_WEP40:
1028        case WLAN_CIPHER_SUITE_WEP104:
1029                return ieee80211_crypto_wep_encrypt(tx);
1030        case WLAN_CIPHER_SUITE_TKIP:
1031                return ieee80211_crypto_tkip_encrypt(tx);
1032        case WLAN_CIPHER_SUITE_CCMP:
1033                return ieee80211_crypto_ccmp_encrypt(
1034                        tx, IEEE80211_CCMP_MIC_LEN);
1035        case WLAN_CIPHER_SUITE_CCMP_256:
1036                return ieee80211_crypto_ccmp_encrypt(
1037                        tx, IEEE80211_CCMP_256_MIC_LEN);
1038        case WLAN_CIPHER_SUITE_AES_CMAC:
1039                return ieee80211_crypto_aes_cmac_encrypt(tx);
1040        case WLAN_CIPHER_SUITE_BIP_CMAC_256:
1041                return ieee80211_crypto_aes_cmac_256_encrypt(tx);
1042        case WLAN_CIPHER_SUITE_BIP_GMAC_128:
1043        case WLAN_CIPHER_SUITE_BIP_GMAC_256:
1044                return ieee80211_crypto_aes_gmac_encrypt(tx);
1045        case WLAN_CIPHER_SUITE_GCMP:
1046        case WLAN_CIPHER_SUITE_GCMP_256:
1047                return ieee80211_crypto_gcmp_encrypt(tx);
1048        default:
1049                return ieee80211_crypto_hw_encrypt(tx);
1050        }
1051
1052        return TX_DROP;
1053}
1054
1055static ieee80211_tx_result debug_noinline
1056ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
1057{
1058        struct sk_buff *skb;
1059        struct ieee80211_hdr *hdr;
1060        int next_len;
1061        bool group_addr;
1062
1063        skb_queue_walk(&tx->skbs, skb) {
1064                hdr = (void *) skb->data;
1065                if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
1066                        break; /* must not overwrite AID */
1067                if (!skb_queue_is_last(&tx->skbs, skb)) {
1068                        struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
1069                        next_len = next->len;
1070                } else
1071                        next_len = 0;
1072                group_addr = is_multicast_ether_addr(hdr->addr1);
1073
1074                hdr->duration_id =
1075                        ieee80211_duration(tx, skb, group_addr, next_len);
1076        }
1077
1078        return TX_CONTINUE;
1079}
1080
1081/* actual transmit path */
1082
1083static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1084                                  struct sk_buff *skb,
1085                                  struct ieee80211_tx_info *info,
1086                                  struct tid_ampdu_tx *tid_tx,
1087                                  int tid)
1088{
1089        bool queued = false;
1090        bool reset_agg_timer = false;
1091        struct sk_buff *purge_skb = NULL;
1092
1093        if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1094                info->flags |= IEEE80211_TX_CTL_AMPDU;
1095                reset_agg_timer = true;
1096        } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
1097                /*
1098                 * nothing -- this aggregation session is being started
1099                 * but that might still fail with the driver
1100                 */
1101        } else if (!tx->sta->sta.txq[tid]) {
1102                spin_lock(&tx->sta->lock);
1103                /*
1104                 * Need to re-check now, because we may get here
1105                 *
1106                 *  1) in the window during which the setup is actually
1107                 *     already done, but not marked yet because not all
1108                 *     packets are spliced over to the driver pending
1109                 *     queue yet -- if this happened we acquire the lock
1110                 *     either before or after the splice happens, but
1111                 *     need to recheck which of these cases happened.
1112                 *
1113                 *  2) during session teardown, if the OPERATIONAL bit
1114                 *     was cleared due to the teardown but the pointer
1115                 *     hasn't been assigned NULL yet (or we loaded it
1116                 *     before it was assigned) -- in this case it may
1117                 *     now be NULL which means we should just let the
1118                 *     packet pass through because splicing the frames
1119                 *     back is already done.
1120                 */
1121                tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);
1122
1123                if (!tid_tx) {
1124                        /* do nothing, let packet pass through */
1125                } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1126                        info->flags |= IEEE80211_TX_CTL_AMPDU;
1127                        reset_agg_timer = true;
1128                } else {
1129                        queued = true;
1130                        if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
1131                                clear_sta_flag(tx->sta, WLAN_STA_SP);
1132                                ps_dbg(tx->sta->sdata,
1133                                       "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
1134                                       tx->sta->sta.addr, tx->sta->sta.aid);
1135                        }
1136                        info->control.vif = &tx->sdata->vif;
1137                        info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1138                        info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
1139                        __skb_queue_tail(&tid_tx->pending, skb);
1140                        if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1141                                purge_skb = __skb_dequeue(&tid_tx->pending);
1142                }
1143                spin_unlock(&tx->sta->lock);
1144
1145                if (purge_skb)
1146                        ieee80211_free_txskb(&tx->local->hw, purge_skb);
1147        }
1148
1149        /* reset session timer */
1150        if (reset_agg_timer)
1151                tid_tx->last_tx = jiffies;
1152
1153        return queued;
1154}
1155
1156/*
1157 * initialises @tx
1158 * pass %NULL for the station if unknown, a valid pointer if known
1159 * or an ERR_PTR() if the station is known not to exist
1160 */
1161static ieee80211_tx_result
1162ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1163                     struct ieee80211_tx_data *tx,
1164                     struct sta_info *sta, struct sk_buff *skb)
1165{
1166        struct ieee80211_local *local = sdata->local;
1167        struct ieee80211_hdr *hdr;
1168        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1169        int tid;
1170
1171        memset(tx, 0, sizeof(*tx));
1172        tx->skb = skb;
1173        tx->local = local;
1174        tx->sdata = sdata;
1175        __skb_queue_head_init(&tx->skbs);
1176
1177        /*
1178         * If this flag is set to true anywhere, and we get here,
1179         * we are doing the needed processing, so remove the flag
1180         * now.
1181         */
1182        info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1183
1184        hdr = (struct ieee80211_hdr *) skb->data;
1185
1186        if (likely(sta)) {
1187                if (!IS_ERR(sta))
1188                        tx->sta = sta;
1189        } else {
1190                if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
1191                        tx->sta = rcu_dereference(sdata->u.vlan.sta);
1192                        if (!tx->sta && sdata->wdev.use_4addr)
1193                                return TX_DROP;
1194                } else if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
1195                                          IEEE80211_TX_CTL_INJECTED) ||
1196                           tx->sdata->control_port_protocol == tx->skb->protocol) {
1197                        tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1198                }
1199                if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
1200                        tx->sta = sta_info_get(sdata, hdr->addr1);
1201        }
1202
1203        if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1204            !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
1205            ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
1206            !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
1207                struct tid_ampdu_tx *tid_tx;
1208
1209                tid = ieee80211_get_tid(hdr);
1210
1211                tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
1212                if (tid_tx) {
1213                        bool queued;
1214
1215                        queued = ieee80211_tx_prep_agg(tx, skb, info,
1216                                                       tid_tx, tid);
1217
1218                        if (unlikely(queued))
1219                                return TX_QUEUED;
1220                }
1221        }
1222
1223        if (is_multicast_ether_addr(hdr->addr1)) {
1224                tx->flags &= ~IEEE80211_TX_UNICAST;
1225                info->flags |= IEEE80211_TX_CTL_NO_ACK;
1226        } else
1227                tx->flags |= IEEE80211_TX_UNICAST;
1228
1229        if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
1230                if (!(tx->flags & IEEE80211_TX_UNICAST) ||
1231                    skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
1232                    info->flags & IEEE80211_TX_CTL_AMPDU)
1233                        info->flags |= IEEE80211_TX_CTL_DONTFRAG;
1234        }
1235
1236        if (!tx->sta)
1237                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1238        else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
1239                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1240                ieee80211_check_fast_xmit(tx->sta);
1241        }
1242
1243        info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1244
1245        return TX_CONTINUE;
1246}
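/*
 * Illustrative sketch, not mac80211 code: the DONTFRAG decision made near
 * the end of ieee80211_tx_prepare() above, written out as a predicate.
 * Group-addressed frames, frames that already fit under the fragmentation
 * threshold (including the FCS) and A-MPDU frames are never fragmented.
 * The helper name is made up.
 */
static int example_should_skip_fragmentation(int is_unicast,
					     unsigned int frame_len,
					     unsigned int fcs_len,
					     unsigned int frag_threshold,
					     int is_ampdu)
{
	return !is_unicast ||
	       frame_len + fcs_len <= frag_threshold ||
	       is_ampdu;
}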
1247
1248static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
1249                                          struct ieee80211_vif *vif,
1250                                          struct sta_info *sta,
1251                                          struct sk_buff *skb)
1252{
1253        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1254        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1255        struct ieee80211_txq *txq = NULL;
1256
1257        if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
1258            (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
1259                return NULL;
1260
1261        if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
1262            unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
1263                if ((!ieee80211_is_mgmt(hdr->frame_control) ||
1264                     ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
1265                     vif->type == NL80211_IFTYPE_STATION) &&
1266                    sta && sta->uploaded) {
1267                        /*
1268                         * This will be NULL if the driver didn't set the
1269                         * opt-in hardware flag.
1270                         */
1271                        txq = sta->sta.txq[IEEE80211_NUM_TIDS];
1272                }
1273        } else if (sta) {
1274                u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
1275
1276                if (!sta->uploaded)
1277                        return NULL;
1278
1279                txq = sta->sta.txq[tid];
1280        } else if (vif) {
1281                txq = vif->txq;
1282        }
1283
1284        if (!txq)
1285                return NULL;
1286
1287        return to_txq_info(txq);
1288}
1289
1290static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
1291{
1292        IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
1293}
1294
1295static u32 codel_skb_len_func(const struct sk_buff *skb)
1296{
1297        return skb->len;
1298}
1299
1300static codel_time_t codel_skb_time_func(const struct sk_buff *skb)
1301{
1302        const struct ieee80211_tx_info *info;
1303
1304        info = (const struct ieee80211_tx_info *)skb->cb;
1305        return info->control.enqueue_time;
1306}
1307
1308static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars,
1309                                          void *ctx)
1310{
1311        struct ieee80211_local *local;
1312        struct txq_info *txqi;
1313        struct fq *fq;
1314        struct fq_flow *flow;
1315
1316        txqi = ctx;
1317        local = vif_to_sdata(txqi->txq.vif)->local;
1318        fq = &local->fq;
1319
1320        if (cvars == &txqi->def_cvars)
1321                flow = &txqi->def_flow;
1322        else
1323                flow = &fq->flows[cvars - local->cvars];
1324
1325        return fq_flow_dequeue(fq, flow);
1326}
1327
1328static void codel_drop_func(struct sk_buff *skb,
1329                            void *ctx)
1330{
1331        struct ieee80211_local *local;
1332        struct ieee80211_hw *hw;
1333        struct txq_info *txqi;
1334
1335        txqi = ctx;
1336        local = vif_to_sdata(txqi->txq.vif)->local;
1337        hw = &local->hw;
1338
1339        ieee80211_free_txskb(hw, skb);
1340}
1341
1342static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
1343                                           struct fq_tin *tin,
1344                                           struct fq_flow *flow)
1345{
1346        struct ieee80211_local *local;
1347        struct txq_info *txqi;
1348        struct codel_vars *cvars;
1349        struct codel_params *cparams;
1350        struct codel_stats *cstats;
1351
1352        local = container_of(fq, struct ieee80211_local, fq);
1353        txqi = container_of(tin, struct txq_info, tin);
1354        cstats = &txqi->cstats;
1355
1356        if (txqi->txq.sta) {
1357                struct sta_info *sta = container_of(txqi->txq.sta,
1358                                                    struct sta_info, sta);
1359                cparams = &sta->cparams;
1360        } else {
1361                cparams = &local->cparams;
1362        }
1363
1364        if (flow == &txqi->def_flow)
1365                cvars = &txqi->def_cvars;
1366        else
1367                cvars = &local->cvars[flow - fq->flows];
1368
1369        return codel_dequeue(txqi,
1370                             &flow->backlog,
1371                             cparams,
1372                             cvars,
1373                             cstats,
1374                             codel_skb_len_func,
1375                             codel_skb_time_func,
1376                             codel_drop_func,
1377                             codel_dequeue_func);
1378}
1379
1380static void fq_skb_free_func(struct fq *fq,
1381                             struct fq_tin *tin,
1382                             struct fq_flow *flow,
1383                             struct sk_buff *skb)
1384{
1385        struct ieee80211_local *local;
1386
1387        local = container_of(fq, struct ieee80211_local, fq);
1388        ieee80211_free_txskb(&local->hw, skb);
1389}
1390
1391static struct fq_flow *fq_flow_get_default_func(struct fq *fq,
1392                                                struct fq_tin *tin,
1393                                                int idx,
1394                                                struct sk_buff *skb)
1395{
1396        struct txq_info *txqi;
1397
1398        txqi = container_of(tin, struct txq_info, tin);
1399        return &txqi->def_flow;
1400}
1401
1402static void ieee80211_txq_enqueue(struct ieee80211_local *local,
1403                                  struct txq_info *txqi,
1404                                  struct sk_buff *skb)
1405{
1406        struct fq *fq = &local->fq;
1407        struct fq_tin *tin = &txqi->tin;
1408        u32 flow_idx = fq_flow_idx(fq, skb);
1409
1410        ieee80211_set_skb_enqueue_time(skb);
1411
1412        spin_lock_bh(&fq->lock);
1413        fq_tin_enqueue(fq, tin, flow_idx, skb,
1414                       fq_skb_free_func,
1415                       fq_flow_get_default_func);
1416        spin_unlock_bh(&fq->lock);
1417}
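/*
 * Frames enqueued here are pulled back out by the driver from its
 * wake_tx_queue() callback via ieee80211_tx_dequeue().  A minimal driver-side
 * sketch (the function names below are illustrative, not part of this file):
 *
 *    static void example_wake_tx_queue(struct ieee80211_hw *hw,
 *                                      struct ieee80211_txq *txq)
 *    {
 *            struct sk_buff *skb;
 *
 *            while ((skb = ieee80211_tx_dequeue(hw, txq)))
 *                    example_hw_queue_frame(hw, txq, skb);
 *    }
 *
 * Drivers doing airtime-fair scheduling typically wrap this in
 * ieee80211_txq_schedule_start()/ieee80211_next_txq()/ieee80211_return_txq()
 * rather than servicing a single TXQ directly.
 */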
1418
1419static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin,
1420                                struct fq_flow *flow, struct sk_buff *skb,
1421                                void *data)
1422{
1423        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1424
1425        return info->control.vif == data;
1426}
1427
1428void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
1429                               struct ieee80211_sub_if_data *sdata)
1430{
1431        struct fq *fq = &local->fq;
1432        struct txq_info *txqi;
1433        struct fq_tin *tin;
1434        struct ieee80211_sub_if_data *ap;
1435
1436        if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1437                return;
1438
1439        ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
1440
1441        if (!ap->vif.txq)
1442                return;
1443
1444        txqi = to_txq_info(ap->vif.txq);
1445        tin = &txqi->tin;
1446
1447        spin_lock_bh(&fq->lock);
1448        fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif,
1449                      fq_skb_free_func);
1450        spin_unlock_bh(&fq->lock);
1451}
1452
1453void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
1454                        struct sta_info *sta,
1455                        struct txq_info *txqi, int tid)
1456{
1457        fq_tin_init(&txqi->tin);
1458        fq_flow_init(&txqi->def_flow);
1459        codel_vars_init(&txqi->def_cvars);
1460        codel_stats_init(&txqi->cstats);
1461        __skb_queue_head_init(&txqi->frags);
1462        INIT_LIST_HEAD(&txqi->schedule_order);
1463
1464        txqi->txq.vif = &sdata->vif;
1465
1466        if (!sta) {
1467                sdata->vif.txq = &txqi->txq;
1468                txqi->txq.tid = 0;
1469                txqi->txq.ac = IEEE80211_AC_BE;
1470
1471                return;
1472        }
1473
1474        if (tid == IEEE80211_NUM_TIDS) {
1475                if (sdata->vif.type == NL80211_IFTYPE_STATION) {
1476                        /* Drivers need to opt in to the management MPDU TXQ */
1477                        if (!ieee80211_hw_check(&sdata->local->hw,
1478                                                STA_MMPDU_TXQ))
1479                                return;
1480                } else if (!ieee80211_hw_check(&sdata->local->hw,
1481                                               BUFF_MMPDU_TXQ)) {
1482                        /* Drivers need to opt in to the bufferable MMPDU TXQ */
1483                        return;
1484                }
1485                txqi->txq.ac = IEEE80211_AC_VO;
1486        } else {
1487                txqi->txq.ac = ieee80211_ac_from_tid(tid);
1488        }
1489
1490        txqi->txq.sta = &sta->sta;
1491        txqi->txq.tid = tid;
1492        sta->sta.txq[tid] = &txqi->txq;
1493}
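/*
 * Resulting TXQ layout: the vif-level queue (sdata->vif.txq) uses TID 0 /
 * AC_BE and carries frames not tied to a station; each station gets one queue
 * per TID (0..IEEE80211_NUM_TIDS - 1) plus, when the driver opted in via the
 * STA_MMPDU_TXQ or BUFF_MMPDU_TXQ hardware flags, an extra queue at index
 * IEEE80211_NUM_TIDS for (bufferable) management frames, which uses AC_VO.
 */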
1494
1495void ieee80211_txq_purge(struct ieee80211_local *local,
1496                         struct txq_info *txqi)
1497{
1498        struct fq *fq = &local->fq;
1499        struct fq_tin *tin = &txqi->tin;
1500
1501        spin_lock_bh(&fq->lock);
1502        fq_tin_reset(fq, tin, fq_skb_free_func);
1503        ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
1504        spin_unlock_bh(&fq->lock);
1505
1506        spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
1507        list_del_init(&txqi->schedule_order);
1508        spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
1509}
1510
1511void ieee80211_txq_set_params(struct ieee80211_local *local)
1512{
1513        if (local->hw.wiphy->txq_limit)
1514                local->fq.limit = local->hw.wiphy->txq_limit;
1515        else
1516                local->hw.wiphy->txq_limit = local->fq.limit;
1517
1518        if (local->hw.wiphy->txq_memory_limit)
1519                local->fq.memory_limit = local->hw.wiphy->txq_memory_limit;
1520        else
1521                local->hw.wiphy->txq_memory_limit = local->fq.memory_limit;
1522
1523        if (local->hw.wiphy->txq_quantum)
1524                local->fq.quantum = local->hw.wiphy->txq_quantum;
1525        else
1526                local->hw.wiphy->txq_quantum = local->fq.quantum;
1527}
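/*
 * The wiphy fields act as a two-way channel here: a driver may set them
 * before registration to override the fq defaults, otherwise they are filled
 * in with the defaults so they can later be inspected and tuned through
 * nl80211.  A rough driver-side sketch (the values are only an example):
 *
 *    hw->wiphy->txq_limit = 8192;            (packets, total across TXQs)
 *    hw->wiphy->txq_memory_limit = 8 << 20;  (bytes, total across TXQs)
 *    hw->wiphy->txq_quantum = 1500;          (DRR quantum per flow, bytes)
 *    ret = ieee80211_register_hw(hw);
 */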
1528
1529int ieee80211_txq_setup_flows(struct ieee80211_local *local)
1530{
1531        struct fq *fq = &local->fq;
1532        int ret;
1533        int i;
1534        bool supp_vht = false;
1535        enum nl80211_band band;
1536
1537        if (!local->ops->wake_tx_queue)
1538                return 0;
1539
1540        ret = fq_init(fq, 4096);
1541        if (ret)
1542                return ret;
1543
1544        /*
1545         * If the hardware doesn't support VHT, it is safe to limit the maximum
1546         * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
1547         */
1548        for (band = 0; band < NUM_NL80211_BANDS; band++) {
1549                struct ieee80211_supported_band *sband;
1550
1551                sband = local->hw.wiphy->bands[band];
1552                if (!sband)
1553                        continue;
1554
1555                supp_vht = supp_vht || sband->vht_cap.vht_supported;
1556        }
1557
1558        if (!supp_vht)
1559                fq->memory_limit = 4 << 20; /* 4 Mbytes */
1560
1561        codel_params_init(&local->cparams);
1562        local->cparams.interval = MS2TIME(100);
1563        local->cparams.target = MS2TIME(20);
1564        local->cparams.ecn = true;
1565
1566        local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
1567                               GFP_KERNEL);
1568        if (!local->cvars) {
1569                spin_lock_bh(&fq->lock);
1570                fq_reset(fq, fq_skb_free_func);
1571                spin_unlock_bh(&fq->lock);
1572                return -ENOMEM;
1573        }
1574
1575        for (i = 0; i < fq->flows_cnt; i++)
1576                codel_vars_init(&local->cvars[i]);
1577
1578        ieee80211_txq_set_params(local);
1579
1580        return 0;
1581}
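/*
 * Defaults chosen above: the fq structure is limited to 4096 packets
 * (fq_init), memory is capped at 4 MB when no band supports VHT, and CoDel
 * runs with a 100 ms interval, a 20 ms target and ECN marking enabled; one
 * set of codel_vars is allocated per flow so each flow keeps its own drop
 * state.
 */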
1582
1583void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
1584{
1585        struct fq *fq = &local->fq;
1586
1587        if (!local->ops->wake_tx_queue)
1588                return;
1589
1590        kfree(local->cvars);
1591        local->cvars = NULL;
1592
1593        spin_lock_bh(&fq->lock);
1594        fq_reset(fq, fq_skb_free_func);
1595        spin_unlock_bh(&fq->lock);
1596}
1597
1598static bool ieee80211_queue_skb(struct ieee80211_local *local,
1599                                struct ieee80211_sub_if_data *sdata,
1600                                struct sta_info *sta,
1601                                struct sk_buff *skb)
1602{
1603        struct ieee80211_vif *vif;
1604        struct txq_info *txqi;
1605
1606        if (!local->ops->wake_tx_queue ||
1607            sdata->vif.type == NL80211_IFTYPE_MONITOR)
1608                return false;
1609
1610        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1611                sdata = container_of(sdata->bss,
1612                                     struct ieee80211_sub_if_data, u.ap);
1613
1614        vif = &sdata->vif;
1615        txqi = ieee80211_get_txq(local, vif, sta, skb);
1616
1617        if (!txqi)
1618                return false;
1619
1620        ieee80211_txq_enqueue(local, txqi, skb);
1621
1622        schedule_and_wake_txq(local, txqi);
1623
1624        return true;
1625}
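/*
 * Return value convention: true means the skb now sits on an intermediate TXQ
 * and will be delivered when the driver dequeues it (the caller must not
 * touch it again); false means no TXQ applies (no wake_tx_queue op, monitor
 * interface, or ieee80211_get_txq() returned NULL) and the caller has to
 * transmit the frame through the direct path instead.
 */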
1626
1627static bool ieee80211_tx_frags(struct ieee80211_local *local,
1628                               struct ieee80211_vif *vif,
1629                               struct sta_info *sta,
1630                               struct sk_buff_head *skbs,
1631                               bool txpending)
1632{
1633        struct ieee80211_tx_control control = {};
1634        struct sk_buff *skb, *tmp;
1635        unsigned long flags;
1636
1637        skb_queue_walk_safe(skbs, skb, tmp) {
1638                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1639                int q = info->hw_queue;
1640
1641#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1642                if (WARN_ON_ONCE(q >= local->hw.queues)) {
1643                        __skb_unlink(skb, skbs);
1644                        ieee80211_free_txskb(&local->hw, skb);
1645                        continue;
1646                }
1647#endif
1648
1649                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1650                if (local->queue_stop_reasons[q] ||
1651                    (!txpending && !skb_queue_empty(&local->pending[q]))) {
1652                        if (unlikely(info->flags &
1653                                     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
1654                                if (local->queue_stop_reasons[q] &
1655                                    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
1656                                        /*
1657                                         * Drop off-channel frames if queues
1658                                         * are stopped for any reason other
1659                                         * than off-channel operation. Never
1660                                         * queue them.
1661                                         */
1662                                        spin_unlock_irqrestore(
1663                                                &local->queue_stop_reason_lock,
1664                                                flags);
1665                                        ieee80211_purge_tx_queue(&local->hw,
1666                                                                 skbs);
1667                                        return true;
1668                                }
1669                        } else {
1670
1671                                /*
1672                                 * Since the queue is stopped, queue up frames for
1673                                 * later transmission from the tx-pending
1674                                 * tasklet when the queue is woken again.
1675                                 */
1676                                if (txpending)
1677                                        skb_queue_splice_init(skbs,
1678                                                              &local->pending[q]);
1679                                else
1680                                        skb_queue_splice_tail_init(skbs,
1681                                                                   &local->pending[q]);
1682
1683                                spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1684                                                       flags);
1685                                return false;
1686                        }
1687                }
1688                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1689
1690                info->control.vif = vif;
1691                control.sta = sta ? &sta->sta : NULL;
1692
1693                __skb_unlink(skb, skbs);
1694                drv_tx(local, &control, skb);
1695        }
1696
1697        return true;
1698}
1699
1700/*
1701 * Returns false if the frame couldn't be transmitted but was queued instead.
1702 */
1703static bool __ieee80211_tx(struct ieee80211_local *local,
1704                           struct sk_buff_head *skbs, int led_len,
1705                           struct sta_info *sta, bool txpending)
1706{
1707        struct ieee80211_tx_info *info;
1708        struct ieee80211_sub_if_data *sdata;
1709        struct ieee80211_vif *vif;
1710        struct sk_buff *skb;
1711        bool result = true;
1712        __le16 fc;
1713
1714        if (WARN_ON(skb_queue_empty(skbs)))
1715                return true;
1716
1717        skb = skb_peek(skbs);
1718        fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
1719        info = IEEE80211_SKB_CB(skb);
1720        sdata = vif_to_sdata(info->control.vif);
1721        if (sta && !sta->uploaded)
1722                sta = NULL;
1723
1724        switch (sdata->vif.type) {
1725        case NL80211_IFTYPE_MONITOR:
1726                if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
1727                        vif = &sdata->vif;
1728                        break;
1729                }
1730                sdata = rcu_dereference(local->monitor_sdata);
1731                if (sdata) {
1732                        vif = &sdata->vif;
1733                        info->hw_queue =
1734                                vif->hw_queue[skb_get_queue_mapping(skb)];
1735                } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
1736                        ieee80211_purge_tx_queue(&local->hw, skbs);
1737                        return true;
1738                } else
1739                        vif = NULL;
1740                break;
1741        case NL80211_IFTYPE_AP_VLAN:
1742                sdata = container_of(sdata->bss,
1743                                     struct ieee80211_sub_if_data, u.ap);
1744                fallthrough;
1745        default:
1746                vif = &sdata->vif;
1747                break;
1748        }
1749
1750        result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);
1751
1752        ieee80211_tpt_led_trig_tx(local, fc, led_len);
1753
1754        WARN_ON_ONCE(!skb_queue_empty(skbs));
1755
1756        return result;
1757}
1758
1759/*
1760 * Invoke TX handlers, return 0 on success and non-zero if the
1761 * frame was dropped or queued.
1762 *
1763 * The handlers are split into an early and late part. The latter is everything
1764 * that can be sensitive to reordering, and will be deferred to after packets
1765 * are dequeued from the intermediate queues (when they are enabled).
1766 */
1767static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
1768{
1769        ieee80211_tx_result res = TX_DROP;
1770
1771#define CALL_TXH(txh) \
1772        do {                            \
1773                res = txh(tx);          \
1774                if (res != TX_CONTINUE) \
1775                        goto txh_done;  \
1776        } while (0)
1777
1778        CALL_TXH(ieee80211_tx_h_dynamic_ps);
1779        CALL_TXH(ieee80211_tx_h_check_assoc);
1780        CALL_TXH(ieee80211_tx_h_ps_buf);
1781        CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
1782        CALL_TXH(ieee80211_tx_h_select_key);
1783        if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
1784                CALL_TXH(ieee80211_tx_h_rate_ctrl);
1785
1786 txh_done:
1787        if (unlikely(res == TX_DROP)) {
1788                I802_DEBUG_INC(tx->local->tx_handlers_drop);
1789                if (tx->skb)
1790                        ieee80211_free_txskb(&tx->local->hw, tx->skb);
1791                else
1792                        ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
1793                return -1;
1794        } else if (unlikely(res == TX_QUEUED)) {
1795                I802_DEBUG_INC(tx->local->tx_handlers_queued);
1796                return -1;
1797        }
1798
1799        return 0;
1800}
1801
1802/*
1803 * Late handlers can be called while the sta lock is held. Handlers that can
1804 * cause packets to be generated will cause deadlock!
1805 */
1806static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
1807{
1808        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1809        ieee80211_tx_result res = TX_CONTINUE;
1810
1811        if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
1812                __skb_queue_tail(&tx->skbs, tx->skb);
1813                tx->skb = NULL;
1814                goto txh_done;
1815        }
1816
1817        CALL_TXH(ieee80211_tx_h_michael_mic_add);
1818        CALL_TXH(ieee80211_tx_h_sequence);
1819        CALL_TXH(ieee80211_tx_h_fragment);
1820        /* handlers after fragment must be aware of tx info fragmentation! */
1821        CALL_TXH(ieee80211_tx_h_stats);
1822        CALL_TXH(ieee80211_tx_h_encrypt);
1823        if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
1824                CALL_TXH(ieee80211_tx_h_calculate_duration);
1825#undef CALL_TXH
1826
1827 txh_done:
1828        if (unlikely(res == TX_DROP)) {
1829                I802_DEBUG_INC(tx->local->tx_handlers_drop);
1830                if (tx->skb)
1831                        ieee80211_free_txskb(&tx->local->hw, tx->skb);
1832                else
1833                        ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
1834                return -1;
1835        } else if (unlikely(res == TX_QUEUED)) {
1836                I802_DEBUG_INC(tx->local->tx_handlers_queued);
1837                return -1;
1838        }
1839
1840        return 0;
1841}
1842
1843static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1844{
1845        int r = invoke_tx_handlers_early(tx);
1846
1847        if (r)
1848                return r;
1849        return invoke_tx_handlers_late(tx);
1850}
1851
1852bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
1853                              struct ieee80211_vif *vif, struct sk_buff *skb,
1854                              int band, struct ieee80211_sta **sta)
1855{
1856        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1857        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1858        struct ieee80211_tx_data tx;
1859        struct sk_buff *skb2;
1860
1861        if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
1862                return false;
1863
1864        info->band = band;
1865        info->control.vif = vif;
1866        info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];
1867
1868        if (invoke_tx_handlers(&tx))
1869                return false;
1870
1871        if (sta) {
1872                if (tx.sta)
1873                        *sta = &tx.sta->sta;
1874                else
1875                        *sta = NULL;
1876        }
1877
1878        /* this function isn't suitable for fragmented data frames */
1879        skb2 = __skb_dequeue(&tx.skbs);
1880        if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
1881                ieee80211_free_txskb(hw, skb2);
1882                ieee80211_purge_tx_queue(hw, &tx.skbs);
1883                return false;
1884        }
1885
1886        return true;
1887}
1888EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
1889
1890/*
1891 * Returns false if the frame couldn't be transmitted but was queued instead.
1892 */
1893static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1894                         struct sta_info *sta, struct sk_buff *skb,
1895                         bool txpending)
1896{
1897        struct ieee80211_local *local = sdata->local;
1898        struct ieee80211_tx_data tx;
1899        ieee80211_tx_result res_prepare;
1900        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1901        bool result = true;
1902        int led_len;
1903
1904        if (unlikely(skb->len < 10)) {
1905                dev_kfree_skb(skb);
1906                return true;
1907        }
1908
1909        /* initialises tx */
1910        led_len = skb->len;
1911        res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
1912
1913        if (unlikely(res_prepare == TX_DROP)) {
1914                ieee80211_free_txskb(&local->hw, skb);
1915                return true;
1916        } else if (unlikely(res_prepare == TX_QUEUED)) {
1917                return true;
1918        }
1919
1920        /* set up hw_queue value early */
1921        if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
1922            !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
1923                info->hw_queue =
1924                        sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
1925
1926        if (invoke_tx_handlers_early(&tx))
1927                return true;
1928
1929        if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
1930                return true;
1931
1932        if (!invoke_tx_handlers_late(&tx))
1933                result = __ieee80211_tx(local, &tx.skbs, led_len,
1934                                        tx.sta, txpending);
1935
1936        return result;
1937}
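/*
 * Overall flow of ieee80211_tx(): prepare the tx data, run the early
 * handlers, then try to hand the frame to an intermediate TXQ.  Only if no
 * TXQ is used are the late (ordering-sensitive) handlers run here and the
 * frame pushed to the driver via __ieee80211_tx(); otherwise the late
 * handlers run later, at dequeue time.
 */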
1938
1939/* device xmit handlers */
1940
1941static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1942                                struct sk_buff *skb,
1943                                int head_need, bool may_encrypt)
1944{
1945        struct ieee80211_local *local = sdata->local;
1946        struct ieee80211_hdr *hdr;
1947        bool enc_tailroom;
1948        int tail_need = 0;
1949
1950        hdr = (struct ieee80211_hdr *) skb->data;
1951        enc_tailroom = may_encrypt &&
1952                       (sdata->crypto_tx_tailroom_needed_cnt ||
1953                        ieee80211_is_mgmt(hdr->frame_control));
1954
1955        if (enc_tailroom) {
1956                tail_need = IEEE80211_ENCRYPT_TAILROOM;
1957                tail_need -= skb_tailroom(skb);
1958                tail_need = max_t(int, tail_need, 0);
1959        }
1960
1961        if (skb_cloned(skb) &&
1962            (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
1963             !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
1964                I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1965        else if (head_need || tail_need)
1966                I802_DEBUG_INC(local->tx_expand_skb_head);
1967        else
1968                return 0;
1969
1970        if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
1971                wiphy_debug(local->hw.wiphy,
1972                            "failed to reallocate TX buffer\n");
1973                return -ENOMEM;
1974        }
1975
1976        return 0;
1977}
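/*
 * In short: the skb head is reallocated only when the frame is cloned and not
 * safely writable, or when extra headroom/tailroom is actually needed;
 * tailroom is reserved only when software encryption (or a management frame)
 * may need to append an MIC/ICV behind the payload.
 */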
1978
1979void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1980                    struct sta_info *sta, struct sk_buff *skb)
1981{
1982        struct ieee80211_local *local = sdata->local;
1983        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1984        struct ieee80211_hdr *hdr;
1985        int headroom;
1986        bool may_encrypt;
1987
1988        may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
1989
1990        headroom = local->tx_headroom;
1991        if (may_encrypt)
1992                headroom += sdata->encrypt_headroom;
1993        headroom -= skb_headroom(skb);
1994        headroom = max_t(int, 0, headroom);
1995
1996        if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
1997                ieee80211_free_txskb(&local->hw, skb);
1998                return;
1999        }
2000
2001        hdr = (struct ieee80211_hdr *) skb->data;
2002        info->control.vif = &sdata->vif;
2003
2004        if (ieee80211_vif_is_mesh(&sdata->vif)) {
2005                if (ieee80211_is_data(hdr->frame_control) &&
2006                    is_unicast_ether_addr(hdr->addr1)) {
2007                        if (mesh_nexthop_resolve(sdata, skb))
2008                                return; /* skb queued: don't free */
2009                } else {
2010                        ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
2011                }
2012        }
2013
2014        ieee80211_set_qos_hdr(sdata, skb);
2015        ieee80211_tx(sdata, sta, skb, false);
2016}
2017
2018bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
2019                                 struct net_device *dev)
2020{
2021        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2022        struct ieee80211_radiotap_iterator iterator;
2023        struct ieee80211_radiotap_header *rthdr =
2024                (struct ieee80211_radiotap_header *) skb->data;
2025        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2026        struct ieee80211_supported_band *sband =
2027                local->hw.wiphy->bands[info->band];
2028        int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
2029                                                   NULL);
2030        u16 txflags;
2031        u16 rate = 0;
2032        bool rate_found = false;
2033        u8 rate_retries = 0;
2034        u16 rate_flags = 0;
2035        u8 mcs_known, mcs_flags, mcs_bw;
2036        u16 vht_known;
2037        u8 vht_mcs = 0, vht_nss = 0;
2038        int i;
2039
2040        /* check for not even having the fixed radiotap header part */
2041        if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
2042                return false; /* too short to be possibly valid */
2043
2044        /* is it a header version we can trust to find length from? */
2045        if (unlikely(rthdr->it_version))
2046                return false; /* only version 0 is supported */
2047
2048        /* does the skb contain enough to deliver on the alleged length? */
2049        if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
2050                return false; /* skb too short for claimed rt header extent */
2051
2052        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
2053                       IEEE80211_TX_CTL_DONTFRAG;
2054
2055        /*
2056         * for every radiotap entry that is present
2057         * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
2058         * entries present, or -EINVAL on error)
2059         */
2060
2061        while (!ret) {
2062                ret = ieee80211_radiotap_iterator_next(&iterator);
2063
2064                if (ret)
2065                        continue;
2066
2067                /* see if this argument is something we can use */
2068                switch (iterator.this_arg_index) {
2069                /*
2070                 * You must take care when dereferencing iterator.this_arg
2071                 * for multibyte types... the pointer is not aligned.  Use
2072                 * get_unaligned((type *)iterator.this_arg) to dereference
2073                 * iterator.this_arg for type "type" safely on all arches.
2074                 */
2075                case IEEE80211_RADIOTAP_FLAGS:
2076                        if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
2077                                /*
2078                                 * this indicates that the skb we have been
2079                                 * handed has the 32-bit FCS CRC at the end...
2080                                 * we should react to that by snipping it off
2081                                 * because it will be recomputed and added
2082                                 * on transmission
2083                                 */
2084                                if (skb->len < (iterator._max_length + FCS_LEN))
2085                                        return false;
2086
2087                                skb_trim(skb, skb->len - FCS_LEN);
2088                        }
2089                        if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
2090                                info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
2091                        if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
2092                                info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
2093                        break;
2094
2095                case IEEE80211_RADIOTAP_TX_FLAGS:
2096                        txflags = get_unaligned_le16(iterator.this_arg);
2097                        if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
2098                                info->flags |= IEEE80211_TX_CTL_NO_ACK;
2099                        if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO)
2100                                info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
2101                        break;
2102
2103                case IEEE80211_RADIOTAP_RATE:
2104                        rate = *iterator.this_arg;
2105                        rate_flags = 0;
2106                        rate_found = true;
2107                        break;
2108
2109                case IEEE80211_RADIOTAP_DATA_RETRIES:
2110                        rate_retries = *iterator.this_arg;
2111                        break;
2112
2113                case IEEE80211_RADIOTAP_MCS:
2114                        mcs_known = iterator.this_arg[0];
2115                        mcs_flags = iterator.this_arg[1];
2116                        if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
2117                                break;
2118
2119                        rate_found = true;
2120                        rate = iterator.this_arg[2];
2121                        rate_flags = IEEE80211_TX_RC_MCS;
2122
2123                        if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
2124                            mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
2125                                rate_flags |= IEEE80211_TX_RC_SHORT_GI;
2126
2127                        mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK;
2128                        if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
2129                            mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40)
2130                                rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2131                        break;
2132
2133                case IEEE80211_RADIOTAP_VHT:
2134                        vht_known = get_unaligned_le16(iterator.this_arg);
2135                        rate_found = true;
2136
2137                        rate_flags = IEEE80211_TX_RC_VHT_MCS;
2138                        if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) &&
2139                            (iterator.this_arg[2] &
2140                             IEEE80211_RADIOTAP_VHT_FLAG_SGI))
2141                                rate_flags |= IEEE80211_TX_RC_SHORT_GI;
2142                        if (vht_known &
2143                            IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
2144                                if (iterator.this_arg[3] == 1)
2145                                        rate_flags |=
2146                                                IEEE80211_TX_RC_40_MHZ_WIDTH;
2147                                else if (iterator.this_arg[3] == 4)
2148                                        rate_flags |=
2149                                                IEEE80211_TX_RC_80_MHZ_WIDTH;
2150                                else if (iterator.this_arg[3] == 11)
2151                                        rate_flags |=
2152                                                IEEE80211_TX_RC_160_MHZ_WIDTH;
2153                        }
2154
2155                        vht_mcs = iterator.this_arg[4] >> 4;
2156                        vht_nss = iterator.this_arg[4] & 0xF;
2157                        break;
2158
2159                /*
2160                 * Please update the file
2161                 * Documentation/networking/mac80211-injection.rst
2162                 * when parsing new fields here.
2163                 */
2164
2165                default:
2166                        break;
2167                }
2168        }
2169
2170        if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
2171                return false;
2172
2173        if (rate_found) {
2174                info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
2175
2176                for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
2177                        info->control.rates[i].idx = -1;
2178                        info->control.rates[i].flags = 0;
2179                        info->control.rates[i].count = 0;
2180                }
2181
2182                if (rate_flags & IEEE80211_TX_RC_MCS) {
2183                        info->control.rates[0].idx = rate;
2184                } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
2185                        ieee80211_rate_set_vht(info->control.rates, vht_mcs,
2186                                               vht_nss);
2187                } else {
2188                        for (i = 0; i < sband->n_bitrates; i++) {
2189                                if (rate * 5 != sband->bitrates[i].bitrate)
2190                                        continue;
2191
2192                                info->control.rates[0].idx = i;
2193                                break;
2194                        }
2195                }
2196
2197                if (info->control.rates[0].idx < 0)
2198                        info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT;
2199
2200                info->control.rates[0].flags = rate_flags;
2201                info->control.rates[0].count = min_t(u8, rate_retries + 1,
2202                                                     local->hw.max_rate_tries);
2203        }
2204
2205        return true;
2206}
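/*
 * For reference, the smallest header this parser accepts is the bare 8-byte
 * radiotap header with no fields present; an injector could prepend
 * (illustrative only):
 *
 *    u8 rthdr[8] = { 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00 };
 *
 * i.e. it_version 0, it_pad 0, it_len 8 (little endian), it_present 0,
 * followed by a complete 802.11 header and payload.  Any of the fields
 * handled in the switch above (FLAGS, TX_FLAGS, RATE, MCS, VHT, ...) may then
 * be added to influence how the frame is sent.
 */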
2207
2208netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
2209                                         struct net_device *dev)
2210{
2211        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2212        struct ieee80211_chanctx_conf *chanctx_conf;
2213        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2214        struct ieee80211_hdr *hdr;
2215        struct ieee80211_sub_if_data *tmp_sdata, *sdata;
2216        struct cfg80211_chan_def *chandef;
2217        u16 len_rthdr;
2218        int hdrlen;
2219
2220        memset(info, 0, sizeof(*info));
2221        info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
2222                      IEEE80211_TX_CTL_INJECTED;
2223
2224        /* Sanity-check and process the injection radiotap header */
2225        if (!ieee80211_parse_tx_radiotap(skb, dev))
2226                goto fail;
2227
2228        /* we now know there is a radiotap header with a length we can use */
2229        len_rthdr = ieee80211_get_radiotap_len(skb->data);
2230
2231        /*
2232         * fix up the pointers accounting for the radiotap
2233         * header still being in there.  We are being given
2234         * a precooked IEEE80211 header so no need for
2235         * normal processing
2236         */
2237        skb_set_mac_header(skb, len_rthdr);
2238        /*
2239         * these are just fixed to the end of the rt area since we
2240         * don't have any better information and at this point, nobody cares
2241         */
2242        skb_set_network_header(skb, len_rthdr);
2243        skb_set_transport_header(skb, len_rthdr);
2244
2245        if (skb->len < len_rthdr + 2)
2246                goto fail;
2247
2248        hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
2249        hdrlen = ieee80211_hdrlen(hdr->frame_control);
2250
2251        if (skb->len < len_rthdr + hdrlen)
2252                goto fail;
2253
2254        /*
2255         * Initialize skb->protocol if the injected frame is a data frame
2256         * carrying a rfc1042 header
2257         */
2258        if (ieee80211_is_data(hdr->frame_control) &&
2259            skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
2260                u8 *payload = (u8 *)hdr + hdrlen;
2261
2262                if (ether_addr_equal(payload, rfc1042_header))
2263                        skb->protocol = cpu_to_be16((payload[6] << 8) |
2264                                                    payload[7]);
2265        }
2266
2267        /*
2268         * Initialize skb->priority for QoS frames. This is put in the TID field
2269         * of the frame before passing it to the driver.
2270         */
2271        if (ieee80211_is_data_qos(hdr->frame_control)) {
2272                u8 *p = ieee80211_get_qos_ctl(hdr);
2273                skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
2274        }
2275
2276        rcu_read_lock();
2277
2278        /*
2279         * We process outgoing injected frames that have a local address
2280         * as though they were non-injected frames.
2281         * This code isn't entirely correct: the local MAC address
2282         * isn't always enough to find the interface to use; for proper
2283         * VLAN/WDS support we will need a different mechanism (which
2284         * likely isn't going to be monitor interfaces).
2285         *
2286         * This is necessary, for example, for old hostapd versions that
2287         * don't use nl80211-based management TX/RX.
2288         */
2289        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2290
2291        list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
2292                if (!ieee80211_sdata_running(tmp_sdata))
2293                        continue;
2294                if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2295                    tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
2296                    tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
2297                        continue;
2298                if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
2299                        sdata = tmp_sdata;
2300                        break;
2301                }
2302        }
2303
2304        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2305        if (!chanctx_conf) {
2306                tmp_sdata = rcu_dereference(local->monitor_sdata);
2307                if (tmp_sdata)
2308                        chanctx_conf =
2309                                rcu_dereference(tmp_sdata->vif.chanctx_conf);
2310        }
2311
2312        if (chanctx_conf)
2313                chandef = &chanctx_conf->def;
2314        else if (!local->use_chanctx)
2315                chandef = &local->_oper_chandef;
2316        else
2317                goto fail_rcu;
2318
2319        /*
2320         * Frame injection is not allowed if beaconing is not allowed
2321         * or if we need radar detection. Beaconing is usually not allowed when
2322         * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
2323         * Passive scan is also used in world regulatory domains where
2324         * your country is not known; as such it should be treated as
2325         * NO TX unless the channel is explicitly allowed, in which case
2326         * your current regulatory domain would not have the passive scan
2327         * flag.
2328         *
2329         * Since AP mode uses monitor interfaces to inject/TX management
2330         * frames we can make AP mode the exception to this rule once it
2331         * supports radar detection as its implementation can deal with
2332         * radar detection by itself. We can do that later by adding a
2333         * monitor flag to interfaces used for AP support.
2334         */
2335        if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef,
2336                                     sdata->vif.type))
2337                goto fail_rcu;
2338
2339        info->band = chandef->chan->band;
2340
2341        /* remove the injection radiotap header */
2342        skb_pull(skb, len_rthdr);
2343
2344        ieee80211_xmit(sdata, NULL, skb);
2345        rcu_read_unlock();
2346
2347        return NETDEV_TX_OK;
2348
2349fail_rcu:
2350        rcu_read_unlock();
2351fail:
2352        dev_kfree_skb(skb);
2353        return NETDEV_TX_OK; /* meaning, we dealt with the skb */
2354}
2355
2356static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
2357{
2358        u16 ethertype = (skb->data[12] << 8) | skb->data[13];
2359
2360        return ethertype == ETH_P_TDLS &&
2361               skb->len > 14 &&
2362               skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
2363}
2364
2365int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
2366                            struct sk_buff *skb,
2367                            struct sta_info **sta_out)
2368{
2369        struct sta_info *sta;
2370
2371        switch (sdata->vif.type) {
2372        case NL80211_IFTYPE_AP_VLAN:
2373                sta = rcu_dereference(sdata->u.vlan.sta);
2374                if (sta) {
2375                        *sta_out = sta;
2376                        return 0;
2377                } else if (sdata->wdev.use_4addr) {
2378                        return -ENOLINK;
2379                }
2380                fallthrough;
2381        case NL80211_IFTYPE_AP:
2382        case NL80211_IFTYPE_OCB:
2383        case NL80211_IFTYPE_ADHOC:
2384                if (is_multicast_ether_addr(skb->data)) {
2385                        *sta_out = ERR_PTR(-ENOENT);
2386                        return 0;
2387                }
2388                sta = sta_info_get_bss(sdata, skb->data);
2389                break;
2390        case NL80211_IFTYPE_WDS:
2391                sta = sta_info_get(sdata, sdata->u.wds.remote_addr);
2392                break;
2393#ifdef CONFIG_MAC80211_MESH
2394        case NL80211_IFTYPE_MESH_POINT:
2395                /* determined much later */
2396                *sta_out = NULL;
2397                return 0;
2398#endif
2399        case NL80211_IFTYPE_STATION:
2400                if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
2401                        sta = sta_info_get(sdata, skb->data);
2402                        if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
2403                                if (test_sta_flag(sta,
2404                                                  WLAN_STA_TDLS_PEER_AUTH)) {
2405                                        *sta_out = sta;
2406                                        return 0;
2407                                }
2408
2409                                /*
2410                                 * TDLS link during setup - throw out frames to
2411                                 * peer. Allow TDLS-setup frames to unauthorized
2412                                 * peers for the special case of a link teardown
2413                                 * after a TDLS sta is removed due to being
2414                                 * unreachable.
2415                                 */
2416                                if (!ieee80211_is_tdls_setup(skb))
2417                                        return -EINVAL;
2418                        }
2419
2420                }
2421
2422                sta = sta_info_get(sdata, sdata->u.mgd.bssid);
2423                if (!sta)
2424                        return -ENOLINK;
2425                break;
2426        default:
2427                return -EINVAL;
2428        }
2429
2430        *sta_out = sta ?: ERR_PTR(-ENOENT);
2431        return 0;
2432}
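/*
 * On success *sta_out is either a valid station, NULL for mesh interfaces
 * (where the receiver is resolved much later during path lookup), or
 * ERR_PTR(-ENOENT) when no station entry applies (e.g. multicast receivers);
 * callers such as ieee80211_build_hdr() treat the ERR_PTR case as "no
 * station".
 */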
2433
2434static u16 ieee80211_store_ack_skb(struct ieee80211_local *local,
2435                                   struct sk_buff *skb,
2436                                   u32 *info_flags,
2437                                   u64 *cookie)
2438{
2439        struct sk_buff *ack_skb;
2440        u16 info_id = 0;
2441
2442        if (skb->sk)
2443                ack_skb = skb_clone_sk(skb);
2444        else
2445                ack_skb = skb_clone(skb, GFP_ATOMIC);
2446
2447        if (ack_skb) {
2448                unsigned long flags;
2449                int id;
2450
2451                spin_lock_irqsave(&local->ack_status_lock, flags);
2452                id = idr_alloc(&local->ack_status_frames, ack_skb,
2453                               1, 0x2000, GFP_ATOMIC);
2454                spin_unlock_irqrestore(&local->ack_status_lock, flags);
2455
2456                if (id >= 0) {
2457                        info_id = id;
2458                        *info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
2459                        if (cookie) {
2460                                *cookie = ieee80211_mgmt_tx_cookie(local);
2461                                IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
2462                        }
2463                } else {
2464                        kfree_skb(ack_skb);
2465                }
2466        }
2467
2468        return info_id;
2469}
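/*
 * The returned id (0 means "none") is stored by the caller in the frame's tx
 * info so that, when the driver reports TX status, the cloned skb can be
 * looked up again in local->ack_status_frames and the status (and cookie, if
 * any) delivered back to the requesting socket.
 */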
2470
2471/**
2472 * ieee80211_build_hdr - build 802.11 header in the given frame
2473 * @sdata: virtual interface to build the header for
2474 * @skb: the skb to build the header in
2475 * @info_flags: skb flags to set
2476 * @ctrl_flags: info control flags to set
 * @sta: station to send the frame to (may be %NULL or an ERR_PTR())
 * @cookie: if not %NULL, filled with a cookie for TX status reporting
2477 *
2478 * This function takes the skb with 802.3 header and reformats the header to
2479 * the appropriate IEEE 802.11 header based on which interface the packet is
2480 * being transmitted on.
2481 *
2482 * Note that this function also takes care of the TX status request and
2483 * potential unsharing of the SKB - this needs to be interleaved with the
2484 * header building.
2485 *
2486 * This function requires the RCU read lock to be held.
2487 *
2488 * Returns: the (possibly reallocated) skb or an ERR_PTR() code
2489 */
2490static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
2491                                           struct sk_buff *skb, u32 info_flags,
2492                                           struct sta_info *sta, u32 ctrl_flags,
2493                                           u64 *cookie)
2494{
2495        struct ieee80211_local *local = sdata->local;
2496        struct ieee80211_tx_info *info;
2497        int head_need;
2498        u16 ethertype, hdrlen,  meshhdrlen = 0;
2499        __le16 fc;
2500        struct ieee80211_hdr hdr;
2501        struct ieee80211s_hdr mesh_hdr __maybe_unused;
2502        struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
2503        const u8 *encaps_data;
2504        int encaps_len, skip_header_bytes;
2505        bool wme_sta = false, authorized = false;
2506        bool tdls_peer;
2507        bool multicast;
2508        u16 info_id = 0;
2509        struct ieee80211_chanctx_conf *chanctx_conf;
2510        struct ieee80211_sub_if_data *ap_sdata;
2511        enum nl80211_band band;
2512        int ret;
2513
2514        if (IS_ERR(sta))
2515                sta = NULL;
2516
2517#ifdef CONFIG_MAC80211_DEBUGFS
2518        if (local->force_tx_status)
2519                info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
2520#endif
2521
2522        /* convert Ethernet header to proper 802.11 header (based on
2523         * operation mode) */
2524        ethertype = (skb->data[12] << 8) | skb->data[13];
2525        fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
2526
2527        switch (sdata->vif.type) {
2528        case NL80211_IFTYPE_AP_VLAN:
2529                if (sdata->wdev.use_4addr) {
2530                        fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
2531                        /* RA TA DA SA */
2532                        memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
2533                        memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
2534                        memcpy(hdr.addr3, skb->data, ETH_ALEN);
2535                        memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
2536                        hdrlen = 30;
2537                        authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
2538                        wme_sta = sta->sta.wme;
2539                }
2540                ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2541                                        u.ap);
2542                chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
2543                if (!chanctx_conf) {
2544                        ret = -ENOTCONN;
2545                        goto free;
2546                }
2547                band = chanctx_conf->def.chan->band;
2548                if (sdata->wdev.use_4addr)
2549                        break;
2550                fallthrough;
2551        case NL80211_IFTYPE_AP:
2552                if (sdata->vif.type == NL80211_IFTYPE_AP)
2553                        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2554                if (!chanctx_conf) {
2555                        ret = -ENOTCONN;
2556                        goto free;
2557                }
2558                fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
2559                /* DA BSSID SA */
2560                memcpy(hdr.addr1, skb->data, ETH_ALEN);
2561                memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
2562                memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
2563                hdrlen = 24;
2564                band = chanctx_conf->def.chan->band;
2565                break;
2566        case NL80211_IFTYPE_WDS:
2567                fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
2568                /* RA TA DA SA */
2569                memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
2570                memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
2571                memcpy(hdr.addr3, skb->data, ETH_ALEN);
2572                memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
2573                hdrlen = 30;
2574                /*
2575                 * This is the exception! WDS style interfaces are prohibited
2576                 * when channel contexts are in use, so this must be valid
2577                 */
2578                band = local->hw.conf.chandef.chan->band;
2579                break;
2580#ifdef CONFIG_MAC80211_MESH
2581        case NL80211_IFTYPE_MESH_POINT:
2582                if (!is_multicast_ether_addr(skb->data)) {
2583                        struct sta_info *next_hop;
2584                        bool mpp_lookup = true;
2585
2586                        mpath = mesh_path_lookup(sdata, skb->data);
2587                        if (mpath) {
2588                                mpp_lookup = false;
2589                                next_hop = rcu_dereference(mpath->next_hop);
2590                                if (!next_hop ||
2591                                    !(mpath->flags & (MESH_PATH_ACTIVE |
2592                                                      MESH_PATH_RESOLVING)))
2593                                        mpp_lookup = true;
2594                        }
2595
2596                        if (mpp_lookup) {
2597                                mppath = mpp_path_lookup(sdata, skb->data);
2598                                if (mppath)
2599                                        mppath->exp_time = jiffies;
2600                        }
2601
2602                        if (mppath && mpath)
2603                                mesh_path_del(sdata, mpath->dst);
2604                }
2605
2606                /*
2607                 * Use address extension if it is a packet from
2608                 * another interface or if we know the destination
2609                 * is being proxied by a portal (i.e. portal address
2610                 * differs from proxied address)
2611                 */
2612                if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
2613                    !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
2614                        hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
2615                                        skb->data, skb->data + ETH_ALEN);
2616                        meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr,
2617                                                               NULL, NULL);
2618                } else {
2619                        /* DS -> MBSS (802.11-2012 13.11.3.3).
2620                         * For unicast with unknown forwarding information,
2621                         * destination might be in the MBSS or if that fails
2622                         * forwarded to another mesh gate. In either case
2623                         * resolution will be handled in ieee80211_xmit(), so
2624                         * leave the original DA. This also works for mcast */
2625                        const u8 *mesh_da = skb->data;
2626
2627                        if (mppath)
2628                                mesh_da = mppath->mpp;
2629                        else if (mpath)
2630                                mesh_da = mpath->dst;
2631
2632                        hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
2633                                        mesh_da, sdata->vif.addr);
2634                        if (is_multicast_ether_addr(mesh_da))
2635                                /* DA TA mSA AE:SA */
2636                                meshhdrlen = ieee80211_new_mesh_header(
2637                                                sdata, &mesh_hdr,
2638                                                skb->data + ETH_ALEN, NULL);
2639                        else
2640                                /* RA TA mDA mSA AE:DA SA */
2641                                meshhdrlen = ieee80211_new_mesh_header(
2642                                                sdata, &mesh_hdr, skb->data,
2643                                                skb->data + ETH_ALEN);
2644
2645                }
2646                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2647                if (!chanctx_conf) {
2648                        ret = -ENOTCONN;
2649                        goto free;
2650                }
2651                band = chanctx_conf->def.chan->band;
2652
2653                /* For injected frames, fill RA right away as nexthop lookup
2654                 * will be skipped.
2655                 */
2656                if ((ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) &&
2657                    is_zero_ether_addr(hdr.addr1))
2658                        memcpy(hdr.addr1, skb->data, ETH_ALEN);
2659                break;
2660#endif
2661        case NL80211_IFTYPE_STATION:
2662                /* we already did checks when looking up the RA STA */
2663                tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER);
2664
2665                if (tdls_peer) {
2666                        /* DA SA BSSID */
2667                        memcpy(hdr.addr1, skb->data, ETH_ALEN);
2668                        memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
2669                        memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
2670                        hdrlen = 24;
2671                }  else if (sdata->u.mgd.use_4addr &&
2672                            cpu_to_be16(ethertype) != sdata->control_port_protocol) {
2673                        fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
2674                                          IEEE80211_FCTL_TODS);
2675                        /* RA TA DA SA */
2676                        memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
2677                        memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
2678                        memcpy(hdr.addr3, skb->data, ETH_ALEN);
2679                        memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
2680                        hdrlen = 30;
2681                } else {
2682                        fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
2683                        /* BSSID SA DA */
2684                        memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
2685                        memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
2686                        memcpy(hdr.addr3, skb->data, ETH_ALEN);
2687                        hdrlen = 24;
2688                }
2689                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2690                if (!chanctx_conf) {
2691                        ret = -ENOTCONN;
2692                        goto free;
2693                }
2694                band = chanctx_conf->def.chan->band;
2695                break;
2696        case NL80211_IFTYPE_OCB:
2697                /* DA SA BSSID */
2698                memcpy(hdr.addr1, skb->data, ETH_ALEN);
2699                memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
2700                eth_broadcast_addr(hdr.addr3);
2701                hdrlen = 24;
2702                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2703                if (!chanctx_conf) {
2704                        ret = -ENOTCONN;
2705                        goto free;
2706                }
2707                band = chanctx_conf->def.chan->band;
2708                break;
2709        case NL80211_IFTYPE_ADHOC:
2710                /* DA SA BSSID */
2711                memcpy(hdr.addr1, skb->data, ETH_ALEN);
2712                memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
2713                memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
2714                hdrlen = 24;
2715                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2716                if (!chanctx_conf) {
2717                        ret = -ENOTCONN;
2718                        goto free;
2719                }
2720                band = chanctx_conf->def.chan->band;
2721                break;
2722        default:
2723                ret = -EINVAL;
2724                goto free;
2725        }
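        /*
         * The cases above follow the standard IEEE 802.11 To/FromDS address
         * mapping for (non-mesh) data frames:
         *
         *   ToDS FromDS  addr1  addr2  addr3  addr4
         *    0     0      DA     SA    BSSID   -      (IBSS/OCB)
         *    1     0     BSSID   SA     DA     -      (STA -> AP)
         *    0     1      DA    BSSID   SA     -      (AP -> STA)
         *    1     1      RA     TA     DA     SA     (4-address/WDS)
         */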
2726
2727        multicast = is_multicast_ether_addr(hdr.addr1);
2728
2729        /* sta is always NULL for mesh */
2730        if (sta) {
2731                authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
2732                wme_sta = sta->sta.wme;
2733        } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2734                /* For mesh, the use of the QoS header is mandatory */
2735                wme_sta = true;
2736        }
2737
2738        /* if the receiver does QoS (which also means we do), use it */
2739        if (wme_sta) {
2740                fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2741                hdrlen += 2;
2742        }
2743
2744        /*
2745         * Drop unicast frames to unauthorised stations unless they are
2746         * EAPOL frames from the local station.
2747         */
2748        if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
2749                     (sdata->vif.type != NL80211_IFTYPE_OCB) &&
2750                     !multicast && !authorized &&
2751                     (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
2752                      !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
2753#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
2754                net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
2755                                    sdata->name, hdr.addr1);
2756#endif
2757
2758                I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
2759
2760                ret = -EPERM;
2761                goto free;
2762        }
2763
2764        if (unlikely(!multicast && ((skb->sk &&
2765                     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
2766                     ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS)))
2767                info_id = ieee80211_store_ack_skb(local, skb, &info_flags,
2768                                                  cookie);
2769
2770        /*
2771         * If the skb is shared we need to obtain our own copy.
2772         */
2773        if (skb_shared(skb)) {
2774                struct sk_buff *tmp_skb = skb;
2775
2776                /* can't happen -- skb is a clone if info_id != 0 */
2777                WARN_ON(info_id);
2778
2779                skb = skb_clone(skb, GFP_ATOMIC);
2780                kfree_skb(tmp_skb);
2781
2782                if (!skb) {
2783                        ret = -ENOMEM;
2784                        goto free;
2785                }
2786        }
2787
2788        hdr.frame_control = fc;
2789        hdr.duration_id = 0;
2790        hdr.seq_ctrl = 0;
2791
2792        skip_header_bytes = ETH_HLEN;
2793        if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
2794                encaps_data = bridge_tunnel_header;
2795                encaps_len = sizeof(bridge_tunnel_header);
2796                skip_header_bytes -= 2;
2797        } else if (ethertype >= ETH_P_802_3_MIN) {
2798                encaps_data = rfc1042_header;
2799                encaps_len = sizeof(rfc1042_header);
2800                skip_header_bytes -= 2;
2801        } else {
2802                encaps_data = NULL;
2803                encaps_len = 0;
2804        }
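        /*
         * With the LLC/SNAP header pushed further below, e.g. an IPv4 frame
         * ends up encapsulated as
         *
         *   [ AA AA 03 00 00 00 | 08 00 | IP payload ]   (RFC 1042 SNAP)
         *
         * Only 12 of the 14 Ethernet header bytes are pulled, so the original
         * EtherType stays in place right behind the prepended header;
         * AppleTalk/IPX use the bridge-tunnel OUI (ending in 00 00 F8)
         * instead.
         */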
2805
2806        skb_pull(skb, skip_header_bytes);
2807        head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
2808
2809        /*
2810         * We need to modify the skb header and hence need our own copy
2811         * of it. The head_need value computed above doesn't yet include
2812         * the header space that is only needed later on. If we can, we
2813         * don't reallocate right now but only once the frame arrives at
2814         * the master device (if it does...)
2815         *
2816         * If we cannot, however, we reallocate now and include all the
2817         * space we may ever need: since we have to reallocate anyway, we
2818         * make the buffer big enough for everything up front.
2819         */
2820
2821        if (head_need > 0 || skb_cloned(skb)) {
2822                head_need += sdata->encrypt_headroom;
2823                head_need += local->tx_headroom;
2824                head_need = max_t(int, 0, head_need);
2825                if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
2826                        ieee80211_free_txskb(&local->hw, skb);
2827                        skb = NULL;
2828                        return ERR_PTR(-ENOMEM);
2829                }
2830        }
2831
2832        if (encaps_data)
2833                memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
2834
2835#ifdef CONFIG_MAC80211_MESH
2836        if (meshhdrlen > 0)
2837                memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
2838#endif
2839
2840        if (ieee80211_is_data_qos(fc)) {
2841                __le16 *qos_control;
2842
2843                qos_control = skb_push(skb, 2);
2844                memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
2845                /*
2846                 * Maybe we could actually set some fields here, for now just
2847                 * initialise to zero to indicate no special operation.
2848                 */
2849                *qos_control = 0;
2850        } else
2851                memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
2852
2853        skb_reset_mac_header(skb);
2854
2855        info = IEEE80211_SKB_CB(skb);
2856        memset(info, 0, sizeof(*info));
2857
2858        info->flags = info_flags;
2859        info->ack_frame_id = info_id;
2860        info->band = band;
2861        info->control.flags = ctrl_flags;
2862
2863        return skb;
2864 free:
2865        kfree_skb(skb);
2866        return ERR_PTR(ret);
2867}
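
/*
 * Simplified sketch of how the header builder above is driven by
 * __ieee80211_subif_start_xmit() further down in this file:
 *
 *	skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, ctrl_flags,
 *				  cookie);
 *	if (IS_ERR(skb))
 *		...		(the builder already freed the skb on error)
 *	ieee80211_tx_stats(dev, skb->len);
 *	ieee80211_xmit(sdata, sta, skb);
 */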
2868
2869/*
2870 * fast-xmit overview
2871 *
2872 * The core idea of fast-xmit is to remove per-packet checks by performing
2873 * them out of band. ieee80211_check_fast_xmit() implements the out-of-band
2874 * checks that are needed to get the sta->fast_tx pointer assigned, after which
2875 * much less work needs to be done per packet. For example, fragmentation must
2876 * be disabled or the fast_tx pointer will not be set. All the conditions are
2877 * seen in the code here.
2878 *
2879 * Once assigned, the fast_tx data structure also caches the per-packet 802.11
2880 * header and other data to aid packet processing in ieee80211_xmit_fast().
2881 *
2882 * The most difficult part of this is that when any of these assumptions
2883 * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
2884 * ieee80211_check_fast_xmit() or friends) is required to reset the data,
2885 * since the per-packet code no longer checks the conditions. This is reflected
2886 * by the calls to these functions throughout the rest of the code, and must be
2887 * maintained if any of the TX path checks change.
2888 */
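
/*
 * Illustrative sketch (not a literal excerpt) of such an external trigger,
 * e.g. when a new pairwise key has been installed:
 *
 *	rcu_assign_pointer(sta->ptk[idx], new_key);
 *	...
 *	ieee80211_check_fast_xmit(sta);	   (re-validates and rebuilds the cache)
 */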
2889
2890void ieee80211_check_fast_xmit(struct sta_info *sta)
2891{
2892        struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old;
2893        struct ieee80211_local *local = sta->local;
2894        struct ieee80211_sub_if_data *sdata = sta->sdata;
2895        struct ieee80211_hdr *hdr = (void *)build.hdr;
2896        struct ieee80211_chanctx_conf *chanctx_conf;
2897        __le16 fc;
2898
2899        if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT))
2900                return;
2901
2902        /* Locking here protects both the pointer itself, and against concurrent
2903         * invocations winning data access races to, e.g., the key pointer that
2904         * is used.
2905         * Without it, the invocation of this function right after the key
2906         * pointer changes wouldn't be sufficient, as another CPU could access
2907         * the pointer, then stall, and then do the cache update after the CPU
2908         * that invalidated the key.
2909         * With the locking, such scenarios cannot happen as the check for the
2910         * key and the fast-tx assignment are done atomically, so the CPU that
2911         * modifies the key will either wait, or the other one will see the key
2912         * cleared/changed already.
2913         */
2914        spin_lock_bh(&sta->lock);
2915        if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
2916            !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2917            sdata->vif.type == NL80211_IFTYPE_STATION)
2918                goto out;
2919
2920        if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
2921                goto out;
2922
2923        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
2924            test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
2925            test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
2926            test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
2927                goto out;
2928
2929        if (sdata->noack_map)
2930                goto out;
2931
2932        /* fast-xmit doesn't handle fragmentation at all */
2933        if (local->hw.wiphy->frag_threshold != (u32)-1 &&
2934            !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG))
2935                goto out;
2936
2937        rcu_read_lock();
2938        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2939        if (!chanctx_conf) {
2940                rcu_read_unlock();
2941                goto out;
2942        }
2943        build.band = chanctx_conf->def.chan->band;
2944        rcu_read_unlock();
2945
2946        fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
2947
2948        switch (sdata->vif.type) {
2949        case NL80211_IFTYPE_ADHOC:
2950                /* DA SA BSSID */
2951                build.da_offs = offsetof(struct ieee80211_hdr, addr1);
2952                build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
2953                memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN);
2954                build.hdr_len = 24;
2955                break;
2956        case NL80211_IFTYPE_STATION:
2957                if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
2958                        /* DA SA BSSID */
2959                        build.da_offs = offsetof(struct ieee80211_hdr, addr1);
2960                        build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
2961                        memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN);
2962                        build.hdr_len = 24;
2963                        break;
2964                }
2965
2966                if (sdata->u.mgd.use_4addr) {
2967                        /* non-regular ethertype cannot use the fastpath */
2968                        fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
2969                                          IEEE80211_FCTL_TODS);
2970                        /* RA TA DA SA */
2971                        memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
2972                        memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
2973                        build.da_offs = offsetof(struct ieee80211_hdr, addr3);
2974                        build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
2975                        build.hdr_len = 30;
2976                        break;
2977                }
2978                fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
2979                /* BSSID SA DA */
2980                memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
2981                build.da_offs = offsetof(struct ieee80211_hdr, addr3);
2982                build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
2983                build.hdr_len = 24;
2984                break;
2985        case NL80211_IFTYPE_AP_VLAN:
2986                if (sdata->wdev.use_4addr) {
2987                        fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
2988                                          IEEE80211_FCTL_TODS);
2989                        /* RA TA DA SA */
2990                        memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
2991                        memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
2992                        build.da_offs = offsetof(struct ieee80211_hdr, addr3);
2993                        build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
2994                        build.hdr_len = 30;
2995                        break;
2996                }
2997                fallthrough;
2998        case NL80211_IFTYPE_AP:
2999                fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
3000                /* DA BSSID SA */
3001                build.da_offs = offsetof(struct ieee80211_hdr, addr1);
3002                memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
3003                build.sa_offs = offsetof(struct ieee80211_hdr, addr3);
3004                build.hdr_len = 24;
3005                break;
3006        default:
3007                /* not handled on fast-xmit */
3008                goto out;
3009        }
3010
3011        if (sta->sta.wme) {
3012                build.hdr_len += 2;
3013                fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3014        }
3015
3016        /* We store the key here, so there's no point in using rcu_dereference();
3017         * that's fine because the code that changes the pointers will call this
3018         * function again after doing so. For a single CPU that would be enough,
3019         * for multiple CPUs see the comment above.
3020         */
3021        build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
3022        if (!build.key)
3023                build.key = rcu_access_pointer(sdata->default_unicast_key);
3024        if (build.key) {
3025                bool gen_iv, iv_spc, mmic;
3026
3027                gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
3028                iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3029                mmic = build.key->conf.flags &
3030                        (IEEE80211_KEY_FLAG_GENERATE_MMIC |
3031                         IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
3032
3033                /* don't handle software crypto */
3034                if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
3035                        goto out;
3036
3037                /* Key is being removed */
3038                if (build.key->flags & KEY_FLAG_TAINTED)
3039                        goto out;
3040
3041                switch (build.key->conf.cipher) {
3042                case WLAN_CIPHER_SUITE_CCMP:
3043                case WLAN_CIPHER_SUITE_CCMP_256:
3044                        if (gen_iv)
3045                                build.pn_offs = build.hdr_len;
3046                        if (gen_iv || iv_spc)
3047                                build.hdr_len += IEEE80211_CCMP_HDR_LEN;
3048                        break;
3049                case WLAN_CIPHER_SUITE_GCMP:
3050                case WLAN_CIPHER_SUITE_GCMP_256:
3051                        if (gen_iv)
3052                                build.pn_offs = build.hdr_len;
3053                        if (gen_iv || iv_spc)
3054                                build.hdr_len += IEEE80211_GCMP_HDR_LEN;
3055                        break;
3056                case WLAN_CIPHER_SUITE_TKIP:
3057                        /* cannot handle MMIC or IV generation in xmit-fast */
3058                        if (mmic || gen_iv)
3059                                goto out;
3060                        if (iv_spc)
3061                                build.hdr_len += IEEE80211_TKIP_IV_LEN;
3062                        break;
3063                case WLAN_CIPHER_SUITE_WEP40:
3064                case WLAN_CIPHER_SUITE_WEP104:
3065                        /* cannot handle IV generation in fast-xmit */
3066                        if (gen_iv)
3067                                goto out;
3068                        if (iv_spc)
3069                                build.hdr_len += IEEE80211_WEP_IV_LEN;
3070                        break;
3071                case WLAN_CIPHER_SUITE_AES_CMAC:
3072                case WLAN_CIPHER_SUITE_BIP_CMAC_256:
3073                case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3074                case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3075                        WARN(1,
3076                             "management cipher suite 0x%x enabled for data\n",
3077                             build.key->conf.cipher);
3078                        goto out;
3079                default:
3080                        /* we don't know how to generate IVs for this at all */
3081                        if (WARN_ON(gen_iv))
3082                                goto out;
3083                        /* pure hardware keys are OK, of course */
3084                        if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME))
3085                                break;
3086                        /* cipher scheme might require space allocation */
3087                        if (iv_spc &&
3088                            build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
3089                                goto out;
3090                        if (iv_spc)
3091                                build.hdr_len += build.key->conf.iv_len;
3092                }
3093
3094                fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
3095        }
3096
3097        hdr->frame_control = fc;
3098
3099        memcpy(build.hdr + build.hdr_len,
3100               rfc1042_header, sizeof(rfc1042_header));
3101        build.hdr_len += sizeof(rfc1042_header);
3102
3103        fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
3104        /* if the kmemdup fails, continue w/o fast_tx */
3105        if (!fast_tx)
3106                goto out;
3107
3108 out:
3109        /* we might have raced against another call to this function */
3110        old = rcu_dereference_protected(sta->fast_tx,
3111                                        lockdep_is_held(&sta->lock));
3112        rcu_assign_pointer(sta->fast_tx, fast_tx);
3113        if (old)
3114                kfree_rcu(old, rcu_head);
3115        spin_unlock_bh(&sta->lock);
3116}
3117
3118void ieee80211_check_fast_xmit_all(struct ieee80211_local *local)
3119{
3120        struct sta_info *sta;
3121
3122        rcu_read_lock();
3123        list_for_each_entry_rcu(sta, &local->sta_list, list)
3124                ieee80211_check_fast_xmit(sta);
3125        rcu_read_unlock();
3126}
3127
3128void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata)
3129{
3130        struct ieee80211_local *local = sdata->local;
3131        struct sta_info *sta;
3132
3133        rcu_read_lock();
3134
3135        list_for_each_entry_rcu(sta, &local->sta_list, list) {
3136                if (sdata != sta->sdata &&
3137                    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
3138                        continue;
3139                ieee80211_check_fast_xmit(sta);
3140        }
3141
3142        rcu_read_unlock();
3143}
3144
3145void ieee80211_clear_fast_xmit(struct sta_info *sta)
3146{
3147        struct ieee80211_fast_tx *fast_tx;
3148
3149        spin_lock_bh(&sta->lock);
3150        fast_tx = rcu_dereference_protected(sta->fast_tx,
3151                                            lockdep_is_held(&sta->lock));
3152        RCU_INIT_POINTER(sta->fast_tx, NULL);
3153        spin_unlock_bh(&sta->lock);
3154
3155        if (fast_tx)
3156                kfree_rcu(fast_tx, rcu_head);
3157}
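
/*
 * Note that the cache is detached under the sta lock and released with
 * kfree_rcu(), so TX paths that dereference sta->fast_tx under
 * rcu_read_lock() may keep using the old entry until their read-side
 * critical section ends.
 */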
3158
3159static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
3160                                        struct sk_buff *skb, int headroom)
3161{
3162        if (skb_headroom(skb) < headroom) {
3163                I802_DEBUG_INC(local->tx_expand_skb_head);
3164
3165                if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
3166                        wiphy_debug(local->hw.wiphy,
3167                                    "failed to reallocate TX buffer\n");
3168                        return false;
3169                }
3170        }
3171
3172        return true;
3173}
3174
3175static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
3176                                         struct ieee80211_fast_tx *fast_tx,
3177                                         struct sk_buff *skb)
3178{
3179        struct ieee80211_local *local = sdata->local;
3180        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3181        struct ieee80211_hdr *hdr;
3182        struct ethhdr *amsdu_hdr;
3183        int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header);
3184        int subframe_len = skb->len - hdr_len;
3185        void *data;
3186        u8 *qc, *h_80211_src, *h_80211_dst;
3187        const u8 *bssid;
3188
3189        if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
3190                return false;
3191
3192        if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
3193                return true;
3194
3195        if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
3196                return false;
3197
3198        data = skb_push(skb, sizeof(*amsdu_hdr));
3199        memmove(data, data + sizeof(*amsdu_hdr), hdr_len);
3200        hdr = data;
3201        amsdu_hdr = data + hdr_len;
3202        /* h_80211_src/dst is addr* field within hdr */
3203        h_80211_src = data + fast_tx->sa_offs;
3204        h_80211_dst = data + fast_tx->da_offs;
3205
3206        amsdu_hdr->h_proto = cpu_to_be16(subframe_len);
3207        ether_addr_copy(amsdu_hdr->h_source, h_80211_src);
3208        ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst);
3209
3210        /* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA
3211         * fields need to be changed to the BSSID for A-MSDU frames depending
3212         * on FromDS/ToDS values.
3213         */
3214        switch (sdata->vif.type) {
3215        case NL80211_IFTYPE_STATION:
3216                bssid = sdata->u.mgd.bssid;
3217                break;
3218        case NL80211_IFTYPE_AP:
3219        case NL80211_IFTYPE_AP_VLAN:
3220                bssid = sdata->vif.addr;
3221                break;
3222        default:
3223                bssid = NULL;
3224        }
3225
3226        if (bssid && ieee80211_has_fromds(hdr->frame_control))
3227                ether_addr_copy(h_80211_src, bssid);
3228
3229        if (bssid && ieee80211_has_tods(hdr->frame_control))
3230                ether_addr_copy(h_80211_dst, bssid);
3231
3232        qc = ieee80211_get_qos_ctl(hdr);
3233        *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
3234
3235        info->control.flags |= IEEE80211_TX_CTRL_AMSDU;
3236
3237        return true;
3238}
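
/*
 * After ieee80211_amsdu_prepare_head() the head frame carries the first
 * A-MSDU subframe:
 *
 *   [ 802.11 QoS hdr ][ DA | SA | len ][ LLC/SNAP + payload ]
 *
 * ieee80211_amsdu_aggregate() below then appends further subframes (padded
 * to a 4-byte boundary as needed) via the head skb's frag_list.
 */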
3239
3240static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3241                                      struct sta_info *sta,
3242                                      struct ieee80211_fast_tx *fast_tx,
3243                                      struct sk_buff *skb)
3244{
3245        struct ieee80211_local *local = sdata->local;
3246        struct fq *fq = &local->fq;
3247        struct fq_tin *tin;
3248        struct fq_flow *flow;
3249        u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
3250        struct ieee80211_txq *txq = sta->sta.txq[tid];
3251        struct txq_info *txqi;
3252        struct sk_buff **frag_tail, *head;
3253        int subframe_len = skb->len - ETH_ALEN;
3254        u8 max_subframes = sta->sta.max_amsdu_subframes;
3255        int max_frags = local->hw.max_tx_fragments;
3256        int max_amsdu_len = sta->sta.max_amsdu_len;
3257        int orig_truesize;
3258        u32 flow_idx;
3259        __be16 len;
3260        void *data;
3261        bool ret = false;
3262        unsigned int orig_len;
3263        int n = 2, nfrags, pad = 0;
3264        u16 hdrlen;
3265
3266        if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
3267                return false;
3268
3269        if (skb_is_gso(skb))
3270                return false;
3271
3272        if (!txq)
3273                return false;
3274
3275        txqi = to_txq_info(txq);
3276        if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags))
3277                return false;
3278
3279        if (sta->sta.max_rc_amsdu_len)
3280                max_amsdu_len = min_t(int, max_amsdu_len,
3281                                      sta->sta.max_rc_amsdu_len);
3282
3283        if (sta->sta.max_tid_amsdu_len[tid])
3284                max_amsdu_len = min_t(int, max_amsdu_len,
3285                                      sta->sta.max_tid_amsdu_len[tid]);
3286
3287        flow_idx = fq_flow_idx(fq, skb);
3288
3289        spin_lock_bh(&fq->lock);
3290
3291        /* TODO: Ideally aggregation should be done on dequeue to remain
3292         * responsive to environment changes.
3293         */
3294
3295        tin = &txqi->tin;
3296        flow = fq_flow_classify(fq, tin, flow_idx, skb,
3297                                fq_flow_get_default_func);
3298        head = skb_peek_tail(&flow->queue);
3299        if (!head || skb_is_gso(head))
3300                goto out;
3301
3302        orig_truesize = head->truesize;
3303        orig_len = head->len;
3304
3305        if (skb->len + head->len > max_amsdu_len)
3306                goto out;
3307
3308        nfrags = 1 + skb_shinfo(skb)->nr_frags;
3309        nfrags += 1 + skb_shinfo(head)->nr_frags;
3310        frag_tail = &skb_shinfo(head)->frag_list;
3311        while (*frag_tail) {
3312                nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags;
3313                frag_tail = &(*frag_tail)->next;
3314                n++;
3315        }
3316
3317        if (max_subframes && n > max_subframes)
3318                goto out;
3319
3320        if (max_frags && nfrags > max_frags)
3321                goto out;
3322
3323        if (!drv_can_aggregate_in_amsdu(local, head, skb))
3324                goto out;
3325
3326        if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
3327                goto out;
3328
3329        /*
3330         * Pad out the previous subframe to a multiple of 4 by adding the
3331         * padding to the next one, that's being added. Note that head->len
3332         * is the length of the full A-MSDU, but that works since each time
3333         * we add a new subframe we pad out the previous one to a multiple
3334         * of 4 and thus it no longer matters in the next round.
3335         */
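        /* e.g. if the A-MSDU built so far extends 61 bytes past the 802.11
         * header, (61 & 3) == 1, so pad == 3 and three zero bytes end up in
         * front of the new subframe header pushed below
         */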
3336        hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
3337        if ((head->len - hdrlen) & 3)
3338                pad = 4 - ((head->len - hdrlen) & 3);
3339
3340        if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
3341                                                     2 + pad))
3342                goto out_recalc;
3343
3344        ret = true;
3345        data = skb_push(skb, ETH_ALEN + 2);
3346        memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
3347
3348        data += 2 * ETH_ALEN;
3349        len = cpu_to_be16(subframe_len);
3350        memcpy(data, &len, 2);
3351        memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
3352
3353        memset(skb_push(skb, pad), 0, pad);
3354
3355        head->len += skb->len;
3356        head->data_len += skb->len;
3357        *frag_tail = skb;
3358
3359out_recalc:
3360        fq->memory_usage += head->truesize - orig_truesize;
3361        if (head->len != orig_len) {
3362                flow->backlog += head->len - orig_len;
3363                tin->backlog_bytes += head->len - orig_len;
3364
3365                fq_recalc_backlog(fq, tin, flow);
3366        }
3367out:
3368        spin_unlock_bh(&fq->lock);
3369
3370        return ret;
3371}
3372
3373/*
3374 * Can be called while the sta lock is held. Anything that can cause packets to
3375 * be generated will cause deadlock!
3376 */
3377static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
3378                                       struct sta_info *sta, u8 pn_offs,
3379                                       struct ieee80211_key *key,
3380                                       struct sk_buff *skb)
3381{
3382        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3383        struct ieee80211_hdr *hdr = (void *)skb->data;
3384        u8 tid = IEEE80211_NUM_TIDS;
3385
3386        if (key)
3387                info->control.hw_key = &key->conf;
3388
3389        ieee80211_tx_stats(skb->dev, skb->len);
3390
3391        if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
3392                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
3393                hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
3394        } else {
3395                info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
3396                hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
3397                sdata->sequence_number += 0x10;
3398        }
3399
3400        if (skb_shinfo(skb)->gso_size)
3401                sta->tx_stats.msdu[tid] +=
3402                        DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
3403        else
3404                sta->tx_stats.msdu[tid]++;
3405
3406        info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
3407
3408        /* statistics normally done by ieee80211_tx_h_stats (but that
3409         * has to consider fragmentation, so is more complex)
3410         */
3411        sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
3412        sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
3413
3414        if (pn_offs) {
3415                u64 pn;
3416                u8 *crypto_hdr = skb->data + pn_offs;
3417
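                /* For CCMP/GCMP the 8-byte PN header written below is laid
                 * out as PN0, PN1, <reserved>, (ExtIV | key ID << 6), PN2,
                 * PN3, PN4, PN5; the reserved byte 2 is not touched here.
                 */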
3418                switch (key->conf.cipher) {
3419                case WLAN_CIPHER_SUITE_CCMP:
3420                case WLAN_CIPHER_SUITE_CCMP_256:
3421                case WLAN_CIPHER_SUITE_GCMP:
3422                case WLAN_CIPHER_SUITE_GCMP_256:
3423                        pn = atomic64_inc_return(&key->conf.tx_pn);
3424                        crypto_hdr[0] = pn;
3425                        crypto_hdr[1] = pn >> 8;
3426                        crypto_hdr[3] = 0x20 | (key->conf.keyidx << 6);
3427                        crypto_hdr[4] = pn >> 16;
3428                        crypto_hdr[5] = pn >> 24;
3429                        crypto_hdr[6] = pn >> 32;
3430                        crypto_hdr[7] = pn >> 40;
3431                        break;
3432                }
3433        }
3434}
3435
3436static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
3437                                struct sta_info *sta,
3438                                struct ieee80211_fast_tx *fast_tx,
3439                                struct sk_buff *skb)
3440{
3441        struct ieee80211_local *local = sdata->local;
3442        u16 ethertype = (skb->data[12] << 8) | skb->data[13];
3443        int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
3444        int hw_headroom = sdata->local->hw.extra_tx_headroom;
3445        struct ethhdr eth;
3446        struct ieee80211_tx_info *info;
3447        struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
3448        struct ieee80211_tx_data tx;
3449        ieee80211_tx_result r;
3450        struct tid_ampdu_tx *tid_tx = NULL;
3451        u8 tid = IEEE80211_NUM_TIDS;
3452
3453        /* control port protocol needs a lot of special handling */
3454        if (cpu_to_be16(ethertype) == sdata->control_port_protocol)
3455                return false;
3456
3457        /* only RFC 1042 SNAP */
3458        if (ethertype < ETH_P_802_3_MIN)
3459                return false;
3460
3461        /* don't handle TX status request here either */
3462        if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
3463                return false;
3464
3465        if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
3466                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
3467                tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
3468                if (tid_tx) {
3469                        if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
3470                                return false;
3471                        if (tid_tx->timeout)
3472                                tid_tx->last_tx = jiffies;
3473                }
3474        }
3475
3476        /* after this point (skb is modified) we cannot return false */
3477
3478        if (skb_shared(skb)) {
3479                struct sk_buff *tmp_skb = skb;
3480
3481                skb = skb_clone(skb, GFP_ATOMIC);
3482                kfree_skb(tmp_skb);
3483
3484                if (!skb)
3485                        return true;
3486        }
3487
3488        if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
3489            ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
3490                return true;
3491
3492        /* will not be crypto-handled beyond what we do here, so use false
3493         * as the may-encrypt argument for the resize to not account for
3494         * more room than we already have in 'extra_head'
3495         */
3496        if (unlikely(ieee80211_skb_resize(sdata, skb,
3497                                          max_t(int, extra_head + hw_headroom -
3498                                                     skb_headroom(skb), 0),
3499                                          false))) {
3500                kfree_skb(skb);
3501                return true;
3502        }
3503
3504        memcpy(&eth, skb->data, ETH_HLEN - 2);
3505        hdr = skb_push(skb, extra_head);
3506        memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
3507        memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
3508        memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
3509
3510        info = IEEE80211_SKB_CB(skb);
3511        memset(info, 0, sizeof(*info));
3512        info->band = fast_tx->band;
3513        info->control.vif = &sdata->vif;
3514        info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
3515                      IEEE80211_TX_CTL_DONTFRAG |
3516                      (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
3517        info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
3518
3519#ifdef CONFIG_MAC80211_DEBUGFS
3520        if (local->force_tx_status)
3521                info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
3522#endif
3523
3524        if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
3525                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
3526                *ieee80211_get_qos_ctl(hdr) = tid;
3527        }
3528
3529        __skb_queue_head_init(&tx.skbs);
3530
3531        tx.flags = IEEE80211_TX_UNICAST;
3532        tx.local = local;
3533        tx.sdata = sdata;
3534        tx.sta = sta;
3535        tx.key = fast_tx->key;
3536
3537        if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
3538                tx.skb = skb;
3539                r = ieee80211_tx_h_rate_ctrl(&tx);
3540                skb = tx.skb;
3541                tx.skb = NULL;
3542
3543                if (r != TX_CONTINUE) {
3544                        if (r != TX_QUEUED)
3545                                kfree_skb(skb);
3546                        return true;
3547                }
3548        }
3549
3550        if (ieee80211_queue_skb(local, sdata, sta, skb))
3551                return true;
3552
3553        ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
3554                                   fast_tx->key, skb);
3555
3556        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
3557                sdata = container_of(sdata->bss,
3558                                     struct ieee80211_sub_if_data, u.ap);
3559
3560        __skb_queue_tail(&tx.skbs, skb);
3561        ieee80211_tx_frags(local, &sdata->vif, sta, &tx.skbs, false);
3562        return true;
3563}
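
/*
 * If ieee80211_xmit_fast() returns false the frame was not consumed and the
 * caller (__ieee80211_subif_start_xmit() below) falls back to the full
 * ieee80211_build_hdr() slow path.
 */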
3564
3565struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
3566                                     struct ieee80211_txq *txq)
3567{
3568        struct ieee80211_local *local = hw_to_local(hw);
3569        struct txq_info *txqi = container_of(txq, struct txq_info, txq);
3570        struct ieee80211_hdr *hdr;
3571        struct sk_buff *skb = NULL;
3572        struct fq *fq = &local->fq;
3573        struct fq_tin *tin = &txqi->tin;
3574        struct ieee80211_tx_info *info;
3575        struct ieee80211_tx_data tx;
3576        ieee80211_tx_result r;
3577        struct ieee80211_vif *vif = txq->vif;
3578
3579        WARN_ON_ONCE(softirq_count() == 0);
3580
3581        if (!ieee80211_txq_airtime_check(hw, txq))
3582                return NULL;
3583
3584begin:
3585        spin_lock_bh(&fq->lock);
3586
3587        if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
3588            test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
3589                goto out;
3590
3591        if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
3592                set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
3593                goto out;
3594        }
3595
3596        /* Make sure fragments stay together. */
3597        skb = __skb_dequeue(&txqi->frags);
3598        if (skb)
3599                goto out;
3600
3601        skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
3602        if (!skb)
3603                goto out;
3604
3605        spin_unlock_bh(&fq->lock);
3606
3607        hdr = (struct ieee80211_hdr *)skb->data;
3608        info = IEEE80211_SKB_CB(skb);
3609
3610        memset(&tx, 0, sizeof(tx));
3611        __skb_queue_head_init(&tx.skbs);
3612        tx.local = local;
3613        tx.skb = skb;
3614        tx.sdata = vif_to_sdata(info->control.vif);
3615
3616        if (txq->sta && !(info->flags & IEEE80211_TX_CTL_INJECTED)) {
3617                tx.sta = container_of(txq->sta, struct sta_info, sta);
3618                /*
3619                 * Drop unicast frames to unauthorised stations unless they are
3620                 * EAPOL frames from the local station.
3621                 */
3622                if (unlikely(ieee80211_is_data(hdr->frame_control) &&
3623                             !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
3624                             tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
3625                             !is_multicast_ether_addr(hdr->addr1) &&
3626                             !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
3627                             (!(info->control.flags &
3628                                IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
3629                              !ether_addr_equal(tx.sdata->vif.addr,
3630                                                hdr->addr2)))) {
3631                        I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
3632                        ieee80211_free_txskb(&local->hw, skb);
3633                        goto begin;
3634                }
3635        }
3636
3637        /*
3638         * The key can be removed while the packet was queued, so need to call
3639         * this here to get the current key.
3640         */
3641        r = ieee80211_tx_h_select_key(&tx);
3642        if (r != TX_CONTINUE) {
3643                ieee80211_free_txskb(&local->hw, skb);
3644                goto begin;
3645        }
3646
3647        if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
3648                info->flags |= IEEE80211_TX_CTL_AMPDU;
3649        else
3650                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
3651
3652        if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
3653                goto encap_out;
3654
3655        if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
3656                struct sta_info *sta = container_of(txq->sta, struct sta_info,
3657                                                    sta);
3658                u8 pn_offs = 0;
3659
3660                if (tx.key &&
3661                    (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
3662                        pn_offs = ieee80211_hdrlen(hdr->frame_control);
3663
3664                ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
3665                                           tx.key, skb);
3666        } else {
3667                if (invoke_tx_handlers_late(&tx))
3668                        goto begin;
3669
3670                skb = __skb_dequeue(&tx.skbs);
3671
3672                if (!skb_queue_empty(&tx.skbs)) {
3673                        spin_lock_bh(&fq->lock);
3674                        skb_queue_splice_tail(&tx.skbs, &txqi->frags);
3675                        spin_unlock_bh(&fq->lock);
3676                }
3677        }
3678
3679        if (skb_has_frag_list(skb) &&
3680            !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
3681                if (skb_linearize(skb)) {
3682                        ieee80211_free_txskb(&local->hw, skb);
3683                        goto begin;
3684                }
3685        }
3686
3687        switch (tx.sdata->vif.type) {
3688        case NL80211_IFTYPE_MONITOR:
3689                if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
3690                        vif = &tx.sdata->vif;
3691                        break;
3692                }
3693                tx.sdata = rcu_dereference(local->monitor_sdata);
3694                if (tx.sdata) {
3695                        vif = &tx.sdata->vif;
3696                        info->hw_queue =
3697                                vif->hw_queue[skb_get_queue_mapping(skb)];
3698                } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
3699                        ieee80211_free_txskb(&local->hw, skb);
3700                        goto begin;
3701                } else {
3702                        vif = NULL;
3703                }
3704                break;
3705        case NL80211_IFTYPE_AP_VLAN:
3706                tx.sdata = container_of(tx.sdata->bss,
3707                                        struct ieee80211_sub_if_data, u.ap);
3708                fallthrough;
3709        default:
3710                vif = &tx.sdata->vif;
3711                break;
3712        }
3713
3714encap_out:
3715        IEEE80211_SKB_CB(skb)->control.vif = vif;
3716
3717        if (vif &&
3718            wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
3719                bool ampdu = txq->ac != IEEE80211_AC_VO;
3720                u32 airtime;
3721
3722                airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
3723                                                             skb->len, ampdu);
3724                if (airtime) {
3725                        airtime = ieee80211_info_set_tx_time_est(info, airtime);
3726                        ieee80211_sta_update_pending_airtime(local, tx.sta,
3727                                                             txq->ac,
3728                                                             airtime,
3729                                                             false);
3730                }
3731        }
3732
3733        return skb;
3734
3735out:
3736        spin_unlock_bh(&fq->lock);
3737
3738        return skb;
3739}
3740EXPORT_SYMBOL(ieee80211_tx_dequeue);
3741
3742struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
3743{
3744        struct ieee80211_local *local = hw_to_local(hw);
3745        struct ieee80211_txq *ret = NULL;
3746        struct txq_info *txqi = NULL, *head = NULL;
3747        bool found_eligible_txq = false;
3748
3749        spin_lock_bh(&local->active_txq_lock[ac]);
3750
3751 begin:
3752        txqi = list_first_entry_or_null(&local->active_txqs[ac],
3753                                        struct txq_info,
3754                                        schedule_order);
3755        if (!txqi)
3756                goto out;
3757
3758        if (txqi == head) {
3759                if (!found_eligible_txq)
3760                        goto out;
3761                else
3762                        found_eligible_txq = false;
3763        }
3764
3765        if (!head)
3766                head = txqi;
3767
3768        if (txqi->txq.sta) {
3769                struct sta_info *sta = container_of(txqi->txq.sta,
3770                                                    struct sta_info, sta);
3771                bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
3772                s64 deficit = sta->airtime[txqi->txq.ac].deficit;
3773
3774                if (aql_check)
3775                        found_eligible_txq = true;
3776
3777                if (deficit < 0)
3778                        sta->airtime[txqi->txq.ac].deficit +=
3779                                sta->airtime_weight;
3780
3781                if (deficit < 0 || !aql_check) {
3782                        list_move_tail(&txqi->schedule_order,
3783                                       &local->active_txqs[txqi->txq.ac]);
3784                        goto begin;
3785                }
3786        }
3787
3788
3789        if (txqi->schedule_round == local->schedule_round[ac])
3790                goto out;
3791
3792        list_del_init(&txqi->schedule_order);
3793        txqi->schedule_round = local->schedule_round[ac];
3794        ret = &txqi->txq;
3795
3796out:
3797        spin_unlock_bh(&local->active_txq_lock[ac]);
3798        return ret;
3799}
3800EXPORT_SYMBOL(ieee80211_next_txq);
3801
3802void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
3803                              struct ieee80211_txq *txq,
3804                              bool force)
3805{
3806        struct ieee80211_local *local = hw_to_local(hw);
3807        struct txq_info *txqi = to_txq_info(txq);
3808
3809        spin_lock_bh(&local->active_txq_lock[txq->ac]);
3810
3811        if (list_empty(&txqi->schedule_order) &&
3812            (force || !skb_queue_empty(&txqi->frags) ||
3813             txqi->tin.backlog_packets)) {
3814                /* If airtime accounting is active, always enqueue STAs at the
3815                 * head of the list to ensure that they only get moved to the
3816                 * back by the airtime DRR scheduler once they have a negative
3817                 * deficit. A station that already has a negative deficit will
3818                 * get immediately moved to the back of the list on the next
3819                 * call to ieee80211_next_txq().
3820                 */
3821                if (txqi->txq.sta &&
3822                    wiphy_ext_feature_isset(local->hw.wiphy,
3823                                            NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
3824                        list_add(&txqi->schedule_order,
3825                                 &local->active_txqs[txq->ac]);
3826                else
3827                        list_add_tail(&txqi->schedule_order,
3828                                      &local->active_txqs[txq->ac]);
3829        }
3830
3831        spin_unlock_bh(&local->active_txq_lock[txq->ac]);
3832}
3833EXPORT_SYMBOL(__ieee80211_schedule_txq);
3834
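/* AQL admission check: a txq may be given more data when the station's
 * pending airtime on this AC is below its low limit, or below its high limit
 * while the device-wide pending airtime is still under the global threshold.
 * Without the AQL extended feature this always returns true.
 */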
3835bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
3836                                 struct ieee80211_txq *txq)
3837{
3838        struct sta_info *sta;
3839        struct ieee80211_local *local = hw_to_local(hw);
3840
3841        if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
3842                return true;
3843
3844        if (!txq->sta)
3845                return true;
3846
3847        sta = container_of(txq->sta, struct sta_info, sta);
3848        if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
3849            sta->airtime[txq->ac].aql_limit_low)
3850                return true;
3851
3852        if (atomic_read(&local->aql_total_pending_airtime) <
3853            local->aql_threshold &&
3854            atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
3855            sta->airtime[txq->ac].aql_limit_high)
3856                return true;
3857
3858        return false;
3859}
3860EXPORT_SYMBOL(ieee80211_txq_airtime_check);
3861
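/* DRR check for drivers that pick txqs themselves: queues scheduled ahead of
 * this one are rotated to the tail (replenishing negative deficits on the
 * way); transmission is allowed only while this station's deficit is still
 * non-negative, otherwise the txq is requeued and false is returned.
 */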
3862bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
3863                                struct ieee80211_txq *txq)
3864{
3865        struct ieee80211_local *local = hw_to_local(hw);
3866        struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
3867        struct sta_info *sta;
3868        u8 ac = txq->ac;
3869
3870        spin_lock_bh(&local->active_txq_lock[ac]);
3871
3872        if (!txqi->txq.sta)
3873                goto out;
3874
3875        if (list_empty(&txqi->schedule_order))
3876                goto out;
3877
3878        list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
3879                                 schedule_order) {
3880                if (iter == txqi)
3881                        break;
3882
3883                if (!iter->txq.sta) {
3884                        list_move_tail(&iter->schedule_order,
3885                                       &local->active_txqs[ac]);
3886                        continue;
3887                }
3888                sta = container_of(iter->txq.sta, struct sta_info, sta);
3889                if (sta->airtime[ac].deficit < 0)
3890                        sta->airtime[ac].deficit += sta->airtime_weight;
3891                list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
3892        }
3893
3894        sta = container_of(txqi->txq.sta, struct sta_info, sta);
3895        if (sta->airtime[ac].deficit >= 0)
3896                goto out;
3897
3898        sta->airtime[ac].deficit += sta->airtime_weight;
3899        list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
3900        spin_unlock_bh(&local->active_txq_lock[ac]);
3901
3902        return false;
3903out:
3904        if (!list_empty(&txqi->schedule_order))
3905                list_del_init(&txqi->schedule_order);
3906        spin_unlock_bh(&local->active_txq_lock[ac]);
3907
3908        return true;
3909}
3910EXPORT_SYMBOL(ieee80211_txq_may_transmit);
3911
3912void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
3913{
3914        struct ieee80211_local *local = hw_to_local(hw);
3915
3916        spin_lock_bh(&local->active_txq_lock[ac]);
3917        local->schedule_round[ac]++;
3918        spin_unlock_bh(&local->active_txq_lock[ac]);
3919}
3920EXPORT_SYMBOL(ieee80211_txq_schedule_start);
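
/*
 * Rough sketch of how a driver TX scheduling loop is expected to drive the
 * API above; hw_has_room() and hw_enqueue() are illustrative placeholders,
 * not real mac80211 or driver functions:
 *
 *	ieee80211_txq_schedule_start(hw, ac);
 *	while ((txq = ieee80211_next_txq(hw, ac))) {
 *		while (hw_has_room(hw) &&
 *		       (skb = ieee80211_tx_dequeue(hw, txq)))
 *			hw_enqueue(hw, txq, skb);
 *		ieee80211_return_txq(hw, txq, false);
 *	}
 *	ieee80211_txq_schedule_end(hw, ac);
 */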
3921
3922void __ieee80211_subif_start_xmit(struct sk_buff *skb,
3923                                  struct net_device *dev,
3924                                  u32 info_flags,
3925                                  u32 ctrl_flags,
3926                                  u64 *cookie)
3927{
3928        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3929        struct ieee80211_local *local = sdata->local;
3930        struct sta_info *sta;
3931        struct sk_buff *next;
3932
3933        if (unlikely(skb->len < ETH_HLEN)) {
3934                kfree_skb(skb);
3935                return;
3936        }
3937
3938        rcu_read_lock();
3939
3940        if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
3941                goto out_free;
3942
3943        if (IS_ERR(sta))
3944                sta = NULL;
3945
3946        if (local->ops->wake_tx_queue) {
3947                u16 queue = __ieee80211_select_queue(sdata, sta, skb);
3948                skb_set_queue_mapping(skb, queue);
3949                skb_get_hash(skb);
3950        }
3951
3952        if (sta) {
3953                struct ieee80211_fast_tx *fast_tx;
3954
3955                sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
3956
3957                fast_tx = rcu_dereference(sta->fast_tx);
3958
3959                if (fast_tx &&
3960                    ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
3961                        goto out;
3962        }
3963
3964        if (skb_is_gso(skb)) {
3965                struct sk_buff *segs;
3966
3967                segs = skb_gso_segment(skb, 0);
3968                if (IS_ERR(segs)) {
3969                        goto out_free;
3970                } else if (segs) {
3971                        consume_skb(skb);
3972                        skb = segs;
3973                }
3974        } else {
3975                /* we cannot process non-linear frames on this path */
3976                if (skb_linearize(skb)) {
3977                        kfree_skb(skb);
3978                        goto out;
3979                }
3980
3981                /* the frame could be fragmented, software-encrypted, or otherwise
3982                 * modified, so we cannot really handle checksum offload with it -
3983                 * fix it up in software before we handle anything else.
3984                 */
3985                if (skb->ip_summed == CHECKSUM_PARTIAL) {
3986                        skb_set_transport_header(skb,
3987                                                 skb_checksum_start_offset(skb));
3988                        if (skb_checksum_help(skb))
3989                                goto out_free;
3990                }
3991        }
3992
3993        skb_list_walk_safe(skb, skb, next) {
3994                skb_mark_not_on_list(skb);
3995
3996                if (skb->protocol == sdata->control_port_protocol)
3997                        ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
3998
3999                skb = ieee80211_build_hdr(sdata, skb, info_flags,
4000                                          sta, ctrl_flags, cookie);
4001                if (IS_ERR(skb)) {
4002                        kfree_skb_list(next);
4003                        goto out;
4004                }
4005
4006                ieee80211_tx_stats(dev, skb->len);
4007
4008                ieee80211_xmit(sdata, sta, skb);
4009        }
4010        goto out;
4011 out_free:
4012        kfree_skb(skb);
4013 out:
4014        rcu_read_unlock();
4015}
4016
4017static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
4018{
4019        struct ethhdr *eth;
4020        int err;
4021
4022        err = skb_ensure_writable(skb, ETH_HLEN);
4023        if (unlikely(err))
4024                return err;
4025
4026        eth = (void *)skb->data;
4027        ether_addr_copy(eth->h_dest, sta->sta.addr);
4028
4029        return 0;
4030}
4031
4032static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
4033                                           struct net_device *dev)
4034{
4035        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4036        const struct ethhdr *eth = (void *)skb->data;
4037        const struct vlan_ethhdr *ethvlan = (void *)skb->data;
4038        __be16 ethertype;
4039
4040        if (likely(!is_multicast_ether_addr(eth->h_dest)))
4041                return false;
4042
4043        switch (sdata->vif.type) {
4044        case NL80211_IFTYPE_AP_VLAN:
4045                if (sdata->u.vlan.sta)
4046                        return false;
4047                if (sdata->wdev.use_4addr)
4048                        return false;
4049                fallthrough;
4050        case NL80211_IFTYPE_AP:
4051                /* check runtime toggle for this bss */
4052                if (!sdata->bss->multicast_to_unicast)
4053                        return false;
4054                break;
4055        default:
4056                return false;
4057        }
4058
4059        /* multicast to unicast conversion only for some payload */
4060        ethertype = eth->h_proto;
4061        if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
4062                ethertype = ethvlan->h_vlan_encapsulated_proto;
4063        switch (ethertype) {
4064        case htons(ETH_P_ARP):
4065        case htons(ETH_P_IP):
4066        case htons(ETH_P_IPV6):
4067                break;
4068        default:
4069                return false;
4070        }
4071
4072        return true;
4073}
4074
4075static void
4076ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
4077                             struct sk_buff_head *queue)
4078{
4079        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4080        struct ieee80211_local *local = sdata->local;
4081        const struct ethhdr *eth = (struct ethhdr *)skb->data;
4082        struct sta_info *sta, *first = NULL;
4083        struct sk_buff *cloned_skb;
4084
4085        rcu_read_lock();
4086
4087        list_for_each_entry_rcu(sta, &local->sta_list, list) {
4088                if (sdata != sta->sdata)
4089                        /* AP-VLAN mismatch */
4090                        continue;
4091                if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr)))
4092                        /* do not send back to source */
4093                        continue;
4094                if (!first) {
4095                        first = sta;
4096                        continue;
4097                }
4098                cloned_skb = skb_clone(skb, GFP_ATOMIC);
4099                if (!cloned_skb)
4100                        goto multicast;
4101                if (unlikely(ieee80211_change_da(cloned_skb, sta))) {
4102                        dev_kfree_skb(cloned_skb);
4103                        goto multicast;
4104                }
4105                __skb_queue_tail(queue, cloned_skb);
4106        }
4107
4108        if (likely(first)) {
4109                if (unlikely(ieee80211_change_da(skb, first)))
4110                        goto multicast;
4111                __skb_queue_tail(queue, skb);
4112        } else {
4113                /* no STA connected, drop */
4114                kfree_skb(skb);
4115                skb = NULL;
4116        }
4117
4118        goto out;
4119multicast:
4120        __skb_queue_purge(queue);
4121        __skb_queue_tail(queue, skb);
4122out:
4123        rcu_read_unlock();
4124}
4125
4126/**
4127 * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
4128 * @skb: packet to be sent
4129 * @dev: incoming interface
4130 *
4131 * On failure skb will be freed.
4132 */
4133netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
4134                                       struct net_device *dev)
4135{
4136        if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
4137                struct sk_buff_head queue;
4138
4139                __skb_queue_head_init(&queue);
4140                ieee80211_convert_to_unicast(skb, dev, &queue);
4141                while ((skb = __skb_dequeue(&queue)))
4142                        __ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
4143        } else {
4144                __ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
4145        }
4146
4147        return NETDEV_TX_OK;
4148}
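
/*
 * Usage sketch (illustrative, not part of this file's logic): mac80211
 * installs this handler as the data-path .ndo_start_xmit for 802.3
 * interfaces; the real net_device_ops definition lives in iface.c,
 * roughly:
 *
 *	static const struct net_device_ops ieee80211_dataif_ops = {
 *		...
 *		.ndo_start_xmit = ieee80211_subif_start_xmit,
 *		...
 *	};
 */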
4149
4150static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
4151                              struct sk_buff *skb, int led_len,
4152                              struct sta_info *sta,
4153                              bool txpending)
4154{
4155        struct ieee80211_local *local = sdata->local;
4156        struct ieee80211_tx_control control = {};
4157        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4158        struct ieee80211_sta *pubsta = NULL;
4159        unsigned long flags;
4160        int q = info->hw_queue;
4161
4162        if (ieee80211_queue_skb(local, sdata, sta, skb))
4163                return true;
4164
4165        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
4166
4167        if (local->queue_stop_reasons[q] ||
4168            (!txpending && !skb_queue_empty(&local->pending[q]))) {
4169                if (txpending)
4170                        skb_queue_head(&local->pending[q], skb);
4171                else
4172                        skb_queue_tail(&local->pending[q], skb);
4173
4174                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
4175
4176                return false;
4177        }
4178
4179        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
4180
4181        if (sta && sta->uploaded)
4182                pubsta = &sta->sta;
4183
4184        control.sta = pubsta;
4185
4186        drv_tx(local, &control, skb);
4187
4188        return true;
4189}
4190
4191static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
4192                                struct net_device *dev, struct sta_info *sta,
4193                                struct sk_buff *skb)
4194{
4195        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4196        struct ethhdr *ehdr = (struct ethhdr *)skb->data;
4197        struct ieee80211_local *local = sdata->local;
4198        bool authorized = false;
4199        bool multicast;
4200        unsigned char *ra = ehdr->h_dest;
4201
4202        if (IS_ERR(sta) || (sta && !sta->uploaded))
4203                sta = NULL;
4204
4205        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
4206            (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
4207                ra = sdata->u.mgd.bssid;
4208
4209        if (is_zero_ether_addr(ra))
4210                goto out_free;
4211
4212        multicast = is_multicast_ether_addr(ra);
4213
4214        if (sta)
4215                authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
4216
4217        if (!multicast && !authorized &&
4218            (ehdr->h_proto != sdata->control_port_protocol ||
4219             !ether_addr_equal(sdata->vif.addr, ehdr->h_source)))
4220                goto out_free;
4221
4222        if (multicast && sdata->vif.type == NL80211_IFTYPE_AP &&
4223            !atomic_read(&sdata->u.ap.num_mcast_sta))
4224                goto out_free;
4225
4226        if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
4227            test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
4228                goto out_free;
4229
4230        memset(info, 0, sizeof(*info));
4231
4232        if (unlikely(!multicast && skb->sk &&
4233                     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
4234                info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
4235                                                             &info->flags, NULL);
4236
4237        if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) {
4238                if (sdata->control_port_no_encrypt)
4239                        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
4240                info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
4241        }
4242
4243        if (multicast)
4244                info->flags |= IEEE80211_TX_CTL_NO_ACK;
4245
4246        info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
4247
4248        ieee80211_tx_stats(dev, skb->len);
4249
4250        if (sta) {
4251                sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
4252                sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
4253        }
4254
4255        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4256                sdata = container_of(sdata->bss,
4257                                     struct ieee80211_sub_if_data, u.ap);
4258
4259        info->control.flags |= IEEE80211_TX_CTRL_HW_80211_ENCAP;
4260        info->control.vif = &sdata->vif;
4261
4262        ieee80211_tx_8023(sdata, skb, skb->len, sta, false);
4263
4264        return;
4265
4266out_free:
4267        kfree_skb(skb);
4268}
4269
4270netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
4271                                            struct net_device *dev)
4272{
4273        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4274        struct sta_info *sta;
4275
4276        if (WARN_ON(!sdata->hw_80211_encap)) {
4277                kfree_skb(skb);
4278                return NETDEV_TX_OK;
4279        }
4280
4281        if (unlikely(skb->len < ETH_HLEN)) {
4282                kfree_skb(skb);
4283                return NETDEV_TX_OK;
4284        }
4285
4286        rcu_read_lock();
4287
4288        if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
4289                kfree_skb(skb);
4290        else
4291                ieee80211_8023_xmit(sdata, dev, sta, skb);
4292
4293        rcu_read_unlock();
4294
4295        return NETDEV_TX_OK;
4296}
4297
4298struct sk_buff *
4299ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
4300                              struct sk_buff *skb, u32 info_flags)
4301{
4302        struct ieee80211_hdr *hdr;
4303        struct ieee80211_tx_data tx = {
4304                .local = sdata->local,
4305                .sdata = sdata,
4306        };
4307        struct sta_info *sta;
4308
4309        rcu_read_lock();
4310
4311        if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
4312                kfree_skb(skb);
4313                skb = ERR_PTR(-EINVAL);
4314                goto out;
4315        }
4316
4317        skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, 0, NULL);
4318        if (IS_ERR(skb))
4319                goto out;
4320
4321        hdr = (void *)skb->data;
4322        tx.sta = sta_info_get(sdata, hdr->addr1);
4323        tx.skb = skb;
4324
4325        if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) {
4326                rcu_read_unlock();
4327                kfree_skb(skb);
4328                return ERR_PTR(-EINVAL);
4329        }
4330
4331out:
4332        rcu_read_unlock();
4333        return skb;
4334}
4335
4336/*
4337 * ieee80211_clear_tx_pending may not be called in a context where
4338 * it is possible that packets could come in again.
4339 */
4340void ieee80211_clear_tx_pending(struct ieee80211_local *local)
4341{
4342        struct sk_buff *skb;
4343        int i;
4344
4345        for (i = 0; i < local->hw.queues; i++) {
4346                while ((skb = skb_dequeue(&local->pending[i])) != NULL)
4347                        ieee80211_free_txskb(&local->hw, skb);
4348        }
4349}
4350
4351/*
4352 * Returns false if the frame couldn't be transmitted but was queued instead,
4353 * which in this case means re-queued -- take that as an indication to stop
4354 * sending more pending frames.
4354 * more pending frames.
4355 */
4356static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
4357                                     struct sk_buff *skb)
4358{
4359        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4360        struct ieee80211_sub_if_data *sdata;
4361        struct sta_info *sta;
4362        struct ieee80211_hdr *hdr;
4363        bool result;
4364        struct ieee80211_chanctx_conf *chanctx_conf;
4365
4366        sdata = vif_to_sdata(info->control.vif);
4367
4368        if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
4369                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
4370                if (unlikely(!chanctx_conf)) {
4371                        dev_kfree_skb(skb);
4372                        return true;
4373                }
4374                info->band = chanctx_conf->def.chan->band;
4375                result = ieee80211_tx(sdata, NULL, skb, true);
4376        } else if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
4377                if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
4378                        dev_kfree_skb(skb);
4379                        return true;
4380                }
4381
4382                if (IS_ERR(sta) || (sta && !sta->uploaded))
4383                        sta = NULL;
4384
4385                result = ieee80211_tx_8023(sdata, skb, skb->len, sta, true);
4386        } else {
4387                struct sk_buff_head skbs;
4388
4389                __skb_queue_head_init(&skbs);
4390                __skb_queue_tail(&skbs, skb);
4391
4392                hdr = (struct ieee80211_hdr *)skb->data;
4393                sta = sta_info_get(sdata, hdr->addr1);
4394
4395                result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
4396        }
4397
4398        return result;
4399}
4400
4401/*
4402 * Transmit all pending packets. Called from tasklet.
4403 */
4404void ieee80211_tx_pending(unsigned long data)
4405{
4406        struct ieee80211_local *local = (struct ieee80211_local *)data;
4407        unsigned long flags;
4408        int i;
4409        bool txok;
4410
4411        rcu_read_lock();
4412
4413        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
4414        for (i = 0; i < local->hw.queues; i++) {
4415                /*
4416                 * If the queue is stopped for a reason other than pending
4417                 * frames, or we have no pending frames, proceed to the next queue.
4418                 */
4419                if (local->queue_stop_reasons[i] ||
4420                    skb_queue_empty(&local->pending[i]))
4421                        continue;
4422
4423                while (!skb_queue_empty(&local->pending[i])) {
4424                        struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
4425                        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4426
4427                        if (WARN_ON(!info->control.vif)) {
4428                                ieee80211_free_txskb(&local->hw, skb);
4429                                continue;
4430                        }
4431
4432                        spin_unlock_irqrestore(&local->queue_stop_reason_lock,
4433                                                flags);
4434
4435                        txok = ieee80211_tx_pending_skb(local, skb);
4436                        spin_lock_irqsave(&local->queue_stop_reason_lock,
4437                                          flags);
4438                        if (!txok)
4439                                break;
4440                }
4441
4442                if (skb_queue_empty(&local->pending[i]))
4443                        ieee80211_propagate_queue_wake(local, i);
4444        }
4445        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
4446
4447        rcu_read_unlock();
4448}
4449
4450/* functions for drivers to get certain frames */
4451
4452static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
4453                                       struct ps_data *ps, struct sk_buff *skb,
4454                                       bool is_template)
4455{
4456        u8 *pos, *tim;
4457        int aid0 = 0;
4458        int i, have_bits = 0, n1, n2;
4459
4460        /* Generate bitmap for TIM only if there are any STAs in power save
4461         * mode. */
4462        if (atomic_read(&ps->num_sta_ps) > 0)
4463                /* in the hope that this is faster than
4464                 * checking byte-for-byte */
4465                have_bits = !bitmap_empty((unsigned long *)ps->tim,
4466                                          IEEE80211_MAX_AID+1);
4467        if (!is_template) {
4468                if (ps->dtim_count == 0)
4469                        ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
4470                else
4471                        ps->dtim_count--;
4472        }
4473
4474        tim = pos = skb_put(skb, 6);
4475        *pos++ = WLAN_EID_TIM;
4476        *pos++ = 4;
4477        *pos++ = ps->dtim_count;
4478        *pos++ = sdata->vif.bss_conf.dtim_period;
4479
4480        if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
4481                aid0 = 1;
4482
4483        ps->dtim_bc_mc = aid0 == 1;
4484
4485        if (have_bits) {
4486                /* Find the largest even number N1 so that bits numbered 1 through
4487                 * (N1 x 8) - 1 in the bitmap are 0 and the smallest number N2 so
4488                 * that bits (N2 + 1) x 8 through 2007 are 0. */
4489                n1 = 0;
4490                for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
4491                        if (ps->tim[i]) {
4492                                n1 = i & 0xfe;
4493                                break;
4494                        }
4495                }
4496                n2 = n1;
4497                for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
4498                        if (ps->tim[i]) {
4499                                n2 = i;
4500                                break;
4501                        }
4502                }
4503
4504                /* Bitmap control */
4505                *pos++ = n1 | aid0;
4506                /* Part Virt Bitmap */
4507                skb_put(skb, n2 - n1);
4508                memcpy(pos, ps->tim + n1, n2 - n1 + 1);
4509
4510                tim[1] = n2 - n1 + 4;
4511        } else {
4512                *pos++ = aid0; /* Bitmap control */
4513                *pos++ = 0; /* Part Virt Bitmap */
4514        }
4515}
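
/*
 * Worked example of the encoding above (illustrative only, values made up):
 * if the only buffered station has AID 13, then ps->tim[1] == 0x20 and all
 * other bytes are zero, so n1 = 1 & 0xfe = 0 and n2 = 1.  The resulting
 * element is
 *
 *	WLAN_EID_TIM, 5, dtim_count, dtim_period, n1 | aid0, 0x00, 0x20
 *
 * i.e. a 2-byte partial virtual bitmap and an element length of
 * n2 - n1 + 4 = 5.
 */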
4516
4517static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
4518                                    struct ps_data *ps, struct sk_buff *skb,
4519                                    bool is_template)
4520{
4521        struct ieee80211_local *local = sdata->local;
4522
4523        /*
4524         * Not very nice, but we want to allow the driver to call
4525         * ieee80211_beacon_get() as a response to the set_tim()
4526         * callback. That, however, is already invoked under the
4527         * sta_lock to guarantee consistent and race-free update
4528         * of the tim bitmap in mac80211 and the driver.
4529         */
4530        if (local->tim_in_locked_section) {
4531                __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
4532        } else {
4533                spin_lock_bh(&local->tim_lock);
4534                __ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
4535                spin_unlock_bh(&local->tim_lock);
4536        }
4537
4538        return 0;
4539}
4540
4541static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
4542                              struct beacon_data *beacon)
4543{
4544        struct probe_resp *resp;
4545        u8 *beacon_data;
4546        size_t beacon_data_len;
4547        int i;
4548        u8 count = beacon->csa_current_counter;
4549
4550        switch (sdata->vif.type) {
4551        case NL80211_IFTYPE_AP:
4552                beacon_data = beacon->tail;
4553                beacon_data_len = beacon->tail_len;
4554                break;
4555        case NL80211_IFTYPE_ADHOC:
4556                beacon_data = beacon->head;
4557                beacon_data_len = beacon->head_len;
4558                break;
4559        case NL80211_IFTYPE_MESH_POINT:
4560                beacon_data = beacon->head;
4561                beacon_data_len = beacon->head_len;
4562                break;
4563        default:
4564                return;
4565        }
4566
4567        rcu_read_lock();
4568        for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; ++i) {
4569                resp = rcu_dereference(sdata->u.ap.probe_resp);
4570
4571                if (beacon->csa_counter_offsets[i]) {
4572                        if (WARN_ON_ONCE(beacon->csa_counter_offsets[i] >=
4573                                         beacon_data_len)) {
4574                                rcu_read_unlock();
4575                                return;
4576                        }
4577
4578                        beacon_data[beacon->csa_counter_offsets[i]] = count;
4579                }
4580
4581                if (sdata->vif.type == NL80211_IFTYPE_AP && resp)
4582                        resp->data[resp->csa_counter_offsets[i]] = count;
4583        }
4584        rcu_read_unlock();
4585}
4586
4587static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
4588{
4589        beacon->csa_current_counter--;
4590
4591        /* the counter should never reach 0 */
4592        WARN_ON_ONCE(!beacon->csa_current_counter);
4593
4594        return beacon->csa_current_counter;
4595}
4596
4597u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
4598{
4599        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
4600        struct beacon_data *beacon = NULL;
4601        u8 count = 0;
4602
4603        rcu_read_lock();
4604
4605        if (sdata->vif.type == NL80211_IFTYPE_AP)
4606                beacon = rcu_dereference(sdata->u.ap.beacon);
4607        else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
4608                beacon = rcu_dereference(sdata->u.ibss.presp);
4609        else if (ieee80211_vif_is_mesh(&sdata->vif))
4610                beacon = rcu_dereference(sdata->u.mesh.beacon);
4611
4612        if (!beacon)
4613                goto unlock;
4614
4615        count = __ieee80211_csa_update_counter(beacon);
4616
4617unlock:
4618        rcu_read_unlock();
4619        return count;
4620}
4621EXPORT_SYMBOL(ieee80211_csa_update_counter);
4622
4623void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter)
4624{
4625        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
4626        struct beacon_data *beacon = NULL;
4627
4628        rcu_read_lock();
4629
4630        if (sdata->vif.type == NL80211_IFTYPE_AP)
4631                beacon = rcu_dereference(sdata->u.ap.beacon);
4632        else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
4633                beacon = rcu_dereference(sdata->u.ibss.presp);
4634        else if (ieee80211_vif_is_mesh(&sdata->vif))
4635                beacon = rcu_dereference(sdata->u.mesh.beacon);
4636
4637        if (!beacon)
4638                goto unlock;
4639
4640        if (counter < beacon->csa_current_counter)
4641                beacon->csa_current_counter = counter;
4642
4643unlock:
4644        rcu_read_unlock();
4645}
4646EXPORT_SYMBOL(ieee80211_csa_set_counter);
4647
4648bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
4649{
4650        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
4651        struct beacon_data *beacon = NULL;
4652        u8 *beacon_data;
4653        size_t beacon_data_len;
4654        int ret = false;
4655
4656        if (!ieee80211_sdata_running(sdata))
4657                return false;
4658
4659        rcu_read_lock();
4660        if (vif->type == NL80211_IFTYPE_AP) {
4661                struct ieee80211_if_ap *ap = &sdata->u.ap;
4662
4663                beacon = rcu_dereference(ap->beacon);
4664                if (WARN_ON(!beacon || !beacon->tail))
4665                        goto out;
4666                beacon_data = beacon->tail;
4667                beacon_data_len = beacon->tail_len;
4668        } else if (vif->type == NL80211_IFTYPE_ADHOC) {
4669                struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
4670
4671                beacon = rcu_dereference(ifibss->presp);
4672                if (!beacon)
4673                        goto out;
4674
4675                beacon_data = beacon->head;
4676                beacon_data_len = beacon->head_len;
4677        } else if (vif->type == NL80211_IFTYPE_MESH_POINT) {
4678                struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
4679
4680                beacon = rcu_dereference(ifmsh->beacon);
4681                if (!beacon)
4682                        goto out;
4683
4684                beacon_data = beacon->head;
4685                beacon_data_len = beacon->head_len;
4686        } else {
4687                WARN_ON(1);
4688                goto out;
4689        }
4690
4691        if (!beacon->csa_counter_offsets[0])
4692                goto out;
4693
4694        if (WARN_ON_ONCE(beacon->csa_counter_offsets[0] > beacon_data_len))
4695                goto out;
4696
4697        if (beacon_data[beacon->csa_counter_offsets[0]] == 1)
4698                ret = true;
4699 out:
4700        rcu_read_unlock();
4701
4702        return ret;
4703}
4704EXPORT_SYMBOL(ieee80211_csa_is_complete);
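
/*
 * Driver-side sketch (assumed, not taken from this file): a driver that
 * transmits beacons itself typically checks the counter after each beacon
 * and lets mac80211 finalize the switch via ieee80211_csa_finish() once
 * the countdown is done, e.g.:
 *
 *	if (ieee80211_csa_is_complete(vif))
 *		ieee80211_csa_finish(vif);
 *	else
 *		ieee80211_csa_update_counter(vif);
 */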
4705
4706static int ieee80211_beacon_protect(struct sk_buff *skb,
4707                                    struct ieee80211_local *local,
4708                                    struct ieee80211_sub_if_data *sdata)
4709{
4710        ieee80211_tx_result res;
4711        struct ieee80211_tx_data tx;
4712        struct sk_buff *check_skb;
4713
4714        memset(&tx, 0, sizeof(tx));
4715        tx.key = rcu_dereference(sdata->default_beacon_key);
4716        if (!tx.key)
4717                return 0;
4718        tx.local = local;
4719        tx.sdata = sdata;
4720        __skb_queue_head_init(&tx.skbs);
4721        __skb_queue_tail(&tx.skbs, skb);
4722        res = ieee80211_tx_h_encrypt(&tx);
4723        check_skb = __skb_dequeue(&tx.skbs);
4724        /* we may crash after this, but it'd be a bug in crypto */
4725        WARN_ON(check_skb != skb);
4726        if (WARN_ON_ONCE(res != TX_CONTINUE))
4727                return -EINVAL;
4728
4729        return 0;
4730}
4731
4732static struct sk_buff *
4733__ieee80211_beacon_get(struct ieee80211_hw *hw,
4734                       struct ieee80211_vif *vif,
4735                       struct ieee80211_mutable_offsets *offs,
4736                       bool is_template)
4737{
4738        struct ieee80211_local *local = hw_to_local(hw);
4739        struct beacon_data *beacon = NULL;
4740        struct sk_buff *skb = NULL;
4741        struct ieee80211_tx_info *info;
4742        struct ieee80211_sub_if_data *sdata = NULL;
4743        enum nl80211_band band;
4744        struct ieee80211_tx_rate_control txrc;
4745        struct ieee80211_chanctx_conf *chanctx_conf;
4746        int csa_off_base = 0;
4747
4748        rcu_read_lock();
4749
4750        sdata = vif_to_sdata(vif);
4751        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
4752
4753        if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
4754                goto out;
4755
4756        if (offs)
4757                memset(offs, 0, sizeof(*offs));
4758
4759        if (sdata->vif.type == NL80211_IFTYPE_AP) {
4760                struct ieee80211_if_ap *ap = &sdata->u.ap;
4761
4762                beacon = rcu_dereference(ap->beacon);
4763                if (beacon) {
4764                        if (beacon->csa_counter_offsets[0]) {
4765                                if (!is_template)
4766                                        __ieee80211_csa_update_counter(beacon);
4767
4768                                ieee80211_set_csa(sdata, beacon);
4769                        }
4770
4771                        /*
4772                         * headroom, head length,
4773                         * tail length and maximum TIM length
4774                         */
4775                        skb = dev_alloc_skb(local->tx_headroom +
4776                                            beacon->head_len +
4777                                            beacon->tail_len + 256 +
4778                                            local->hw.extra_beacon_tailroom);
4779                        if (!skb)
4780                                goto out;
4781
4782                        skb_reserve(skb, local->tx_headroom);
4783                        skb_put_data(skb, beacon->head, beacon->head_len);
4784
4785                        ieee80211_beacon_add_tim(sdata, &ap->ps, skb,
4786                                                 is_template);
4787
4788                        if (offs) {
4789                                offs->tim_offset = beacon->head_len;
4790                                offs->tim_length = skb->len - beacon->head_len;
4791
4792                                /* for AP the csa offsets are from tail */
4793                                csa_off_base = skb->len;
4794                        }
4795
4796                        if (beacon->tail)
4797                                skb_put_data(skb, beacon->tail,
4798                                             beacon->tail_len);
4799
4800                        if (ieee80211_beacon_protect(skb, local, sdata) < 0)
4801                                goto out;
4802                } else
4803                        goto out;
4804        } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
4805                struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
4806                struct ieee80211_hdr *hdr;
4807
4808                beacon = rcu_dereference(ifibss->presp);
4809                if (!beacon)
4810                        goto out;
4811
4812                if (beacon->csa_counter_offsets[0]) {
4813                        if (!is_template)
4814                                __ieee80211_csa_update_counter(beacon);
4815
4816                        ieee80211_set_csa(sdata, beacon);
4817                }
4818
4819                skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
4820                                    local->hw.extra_beacon_tailroom);
4821                if (!skb)
4822                        goto out;
4823                skb_reserve(skb, local->tx_headroom);
4824                skb_put_data(skb, beacon->head, beacon->head_len);
4825
4826                hdr = (struct ieee80211_hdr *) skb->data;
4827                hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4828                                                 IEEE80211_STYPE_BEACON);
4829        } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
4830                struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
4831
4832                beacon = rcu_dereference(ifmsh->beacon);
4833                if (!beacon)
4834                        goto out;
4835
4836                if (beacon->csa_counter_offsets[0]) {
4837                        if (!is_template)
4838                                /* TODO: For mesh the csa_counter is in TUs, so
4839                                 * decrementing it by one isn't correct, but
4840                                 * for now we leave it consistent with
4841                                 * mac80211's overall behavior.
4842                                 */
4843                                __ieee80211_csa_update_counter(beacon);
4844
4845                        ieee80211_set_csa(sdata, beacon);
4846                }
4847
4848                if (ifmsh->sync_ops)
4849                        ifmsh->sync_ops->adjust_tsf(sdata, beacon);
4850
4851                skb = dev_alloc_skb(local->tx_headroom +
4852                                    beacon->head_len +
4853                                    256 + /* TIM IE */
4854                                    beacon->tail_len +
4855                                    local->hw.extra_beacon_tailroom);
4856                if (!skb)
4857                        goto out;
4858                skb_reserve(skb, local->tx_headroom);
4859                skb_put_data(skb, beacon->head, beacon->head_len);
4860                ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
4861
4862                if (offs) {
4863                        offs->tim_offset = beacon->head_len;
4864                        offs->tim_length = skb->len - beacon->head_len;
4865                }
4866
4867                skb_put_data(skb, beacon->tail, beacon->tail_len);
4868        } else {
4869                WARN_ON(1);
4870                goto out;
4871        }
4872
4873        /* CSA offsets */
4874        if (offs && beacon) {
4875                int i;
4876
4877                for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; i++) {
4878                        u16 csa_off = beacon->csa_counter_offsets[i];
4879
4880                        if (!csa_off)
4881                                continue;
4882
4883                        offs->csa_counter_offs[i] = csa_off_base + csa_off;
4884                }
4885        }
4886
4887        band = chanctx_conf->def.chan->band;
4888
4889        info = IEEE80211_SKB_CB(skb);
4890
4891        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
4892        info->flags |= IEEE80211_TX_CTL_NO_ACK;
4893        info->band = band;
4894
4895        memset(&txrc, 0, sizeof(txrc));
4896        txrc.hw = hw;
4897        txrc.sband = local->hw.wiphy->bands[band];
4898        txrc.bss_conf = &sdata->vif.bss_conf;
4899        txrc.skb = skb;
4900        txrc.reported_rate.idx = -1;
4901        if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band])
4902                txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band];
4903        else
4904                txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
4905        txrc.bss = true;
4906        rate_control_get_rate(sdata, NULL, &txrc);
4907
4908        info->control.vif = vif;
4909
4910        info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
4911                        IEEE80211_TX_CTL_ASSIGN_SEQ |
4912                        IEEE80211_TX_CTL_FIRST_FRAGMENT;
4913 out:
4914        rcu_read_unlock();
4915        return skb;
4916
4917}
4918
4919struct sk_buff *
4920ieee80211_beacon_get_template(struct ieee80211_hw *hw,
4921                              struct ieee80211_vif *vif,
4922                              struct ieee80211_mutable_offsets *offs)
4923{
4924        return __ieee80211_beacon_get(hw, vif, offs, true);
4925}
4926EXPORT_SYMBOL(ieee80211_beacon_get_template);
4927
4928struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
4929                                         struct ieee80211_vif *vif,
4930                                         u16 *tim_offset, u16 *tim_length)
4931{
4932        struct ieee80211_mutable_offsets offs = {};
4933        struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
4934        struct sk_buff *copy;
4935        struct ieee80211_supported_band *sband;
4936        int shift;
4937
4938        if (!bcn)
4939                return bcn;
4940
4941        if (tim_offset)
4942                *tim_offset = offs.tim_offset;
4943
4944        if (tim_length)
4945                *tim_length = offs.tim_length;
4946
4947        if (ieee80211_hw_check(hw, BEACON_TX_STATUS) ||
4948            !hw_to_local(hw)->monitors)
4949                return bcn;
4950
4951        /* send a copy to monitor interfaces */
4952        copy = skb_copy(bcn, GFP_ATOMIC);
4953        if (!copy)
4954                return bcn;
4955
4956        shift = ieee80211_vif_get_shift(vif);
4957        sband = ieee80211_get_sband(vif_to_sdata(vif));
4958        if (!sband)
4959                return bcn;
4960
4961        ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false,
4962                             NULL);
4963
4964        return bcn;
4965}
4966EXPORT_SYMBOL(ieee80211_beacon_get_tim);
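
/*
 * Driver-side sketch (assumed, simplified): software-beaconing drivers call
 * this from their beacon tasklet/interrupt and hand the resulting frame to
 * the hardware, e.g.:
 *
 *	struct sk_buff *beacon;
 *	u16 tim_offset, tim_len;
 *
 *	beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, &tim_len);
 *	if (beacon)
 *		... queue beacon to the beacon hardware queue ...
 */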
4967
4968struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
4969                                        struct ieee80211_vif *vif)
4970{
4971        struct ieee80211_if_ap *ap = NULL;
4972        struct sk_buff *skb = NULL;
4973        struct probe_resp *presp = NULL;
4974        struct ieee80211_hdr *hdr;
4975        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
4976
4977        if (sdata->vif.type != NL80211_IFTYPE_AP)
4978                return NULL;
4979
4980        rcu_read_lock();
4981
4982        ap = &sdata->u.ap;
4983        presp = rcu_dereference(ap->probe_resp);
4984        if (!presp)
4985                goto out;
4986
4987        skb = dev_alloc_skb(presp->len);
4988        if (!skb)
4989                goto out;
4990
4991        skb_put_data(skb, presp->data, presp->len);
4992
4993        hdr = (struct ieee80211_hdr *) skb->data;
4994        memset(hdr->addr1, 0, sizeof(hdr->addr1));
4995
4996out:
4997        rcu_read_unlock();
4998        return skb;
4999}
5000EXPORT_SYMBOL(ieee80211_proberesp_get);
5001
5002struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
5003                                     struct ieee80211_vif *vif)
5004{
5005        struct ieee80211_sub_if_data *sdata;
5006        struct ieee80211_if_managed *ifmgd;
5007        struct ieee80211_pspoll *pspoll;
5008        struct ieee80211_local *local;
5009        struct sk_buff *skb;
5010
5011        if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
5012                return NULL;
5013
5014        sdata = vif_to_sdata(vif);
5015        ifmgd = &sdata->u.mgd;
5016        local = sdata->local;
5017
5018        skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
5019        if (!skb)
5020                return NULL;
5021
5022        skb_reserve(skb, local->hw.extra_tx_headroom);
5023
5024        pspoll = skb_put_zero(skb, sizeof(*pspoll));
5025        pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
5026                                            IEEE80211_STYPE_PSPOLL);
5027        pspoll->aid = cpu_to_le16(sdata->vif.bss_conf.aid);
5028
5029        /* aid in PS-Poll has its two MSBs each set to 1 */
5030        pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
5031
5032        memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
5033        memcpy(pspoll->ta, vif->addr, ETH_ALEN);
5034
5035        return skb;
5036}
5037EXPORT_SYMBOL(ieee80211_pspoll_get);
5038
5039struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
5040                                       struct ieee80211_vif *vif,
5041                                       bool qos_ok)
5042{
5043        struct ieee80211_hdr_3addr *nullfunc;
5044        struct ieee80211_sub_if_data *sdata;
5045        struct ieee80211_if_managed *ifmgd;
5046        struct ieee80211_local *local;
5047        struct sk_buff *skb;
5048        bool qos = false;
5049
5050        if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
5051                return NULL;
5052
5053        sdata = vif_to_sdata(vif);
5054        ifmgd = &sdata->u.mgd;
5055        local = sdata->local;
5056
5057        if (qos_ok) {
5058                struct sta_info *sta;
5059
5060                rcu_read_lock();
5061                sta = sta_info_get(sdata, ifmgd->bssid);
5062                qos = sta && sta->sta.wme;
5063                rcu_read_unlock();
5064        }
5065
5066        skb = dev_alloc_skb(local->hw.extra_tx_headroom +
5067                            sizeof(*nullfunc) + 2);
5068        if (!skb)
5069                return NULL;
5070
5071        skb_reserve(skb, local->hw.extra_tx_headroom);
5072
5073        nullfunc = skb_put_zero(skb, sizeof(*nullfunc));
5074        nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
5075                                              IEEE80211_STYPE_NULLFUNC |
5076                                              IEEE80211_FCTL_TODS);
5077        if (qos) {
5078                __le16 qoshdr = cpu_to_le16(7);
5079
5080                BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
5081                              IEEE80211_STYPE_NULLFUNC) !=
5082                             IEEE80211_STYPE_QOS_NULLFUNC);
5083                nullfunc->frame_control |=
5084                        cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
5085                skb->priority = 7;
5086                skb_set_queue_mapping(skb, IEEE80211_AC_VO);
5087                skb_put_data(skb, &qoshdr, sizeof(qoshdr));
5088        }
5089
5090        memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
5091        memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
5092        memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
5093
5094        return skb;
5095}
5096EXPORT_SYMBOL(ieee80211_nullfunc_get);
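
/*
 * Driver-side sketch (assumed): drivers use this to build a (QoS-)Null data
 * frame for things like keep-alive or hardware connection monitoring, e.g.:
 *
 *	struct sk_buff *skb = ieee80211_nullfunc_get(hw, vif, false);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	... pass skb to the device's TX path ...
 */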
5097
5098struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
5099                                       const u8 *src_addr,
5100                                       const u8 *ssid, size_t ssid_len,
5101                                       size_t tailroom)
5102{
5103        struct ieee80211_local *local = hw_to_local(hw);
5104        struct ieee80211_hdr_3addr *hdr;
5105        struct sk_buff *skb;
5106        size_t ie_ssid_len;
5107        u8 *pos;
5108
5109        ie_ssid_len = 2 + ssid_len;
5110
5111        skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
5112                            ie_ssid_len + tailroom);
5113        if (!skb)
5114                return NULL;
5115
5116        skb_reserve(skb, local->hw.extra_tx_headroom);
5117
5118        hdr = skb_put_zero(skb, sizeof(*hdr));
5119        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
5120                                         IEEE80211_STYPE_PROBE_REQ);
5121        eth_broadcast_addr(hdr->addr1);
5122        memcpy(hdr->addr2, src_addr, ETH_ALEN);
5123        eth_broadcast_addr(hdr->addr3);
5124
5125        pos = skb_put(skb, ie_ssid_len);
5126        *pos++ = WLAN_EID_SSID;
5127        *pos++ = ssid_len;
5128        if (ssid_len)
5129                memcpy(pos, ssid, ssid_len);
5130        pos += ssid_len;
5131
5132        return skb;
5133}
5134EXPORT_SYMBOL(ieee80211_probereq_get);
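
/*
 * Driver-side sketch (assumed): drivers implementing hardware scan or
 * scheduled scan build a probe request template and append their own IEs
 * into the reserved tailroom, e.g.:
 *
 *	skb = ieee80211_probereq_get(hw, vif->addr, ssid, ssid_len, ie_len);
 *	if (skb && ie_len)
 *		skb_put_data(skb, ies, ie_len);
 */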
5135
5136void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5137                       const void *frame, size_t frame_len,
5138                       const struct ieee80211_tx_info *frame_txctl,
5139                       struct ieee80211_rts *rts)
5140{
5141        const struct ieee80211_hdr *hdr = frame;
5142
5143        rts->frame_control =
5144            cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
5145        rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
5146                                               frame_txctl);
5147        memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
5148        memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
5149}
5150EXPORT_SYMBOL(ieee80211_rts_get);
5151
5152void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5153                             const void *frame, size_t frame_len,
5154                             const struct ieee80211_tx_info *frame_txctl,
5155                             struct ieee80211_cts *cts)
5156{
5157        const struct ieee80211_hdr *hdr = frame;
5158
5159        cts->frame_control =
5160            cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
5161        cts->duration = ieee80211_ctstoself_duration(hw, vif,
5162                                                     frame_len, frame_txctl);
5163        memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
5164}
5165EXPORT_SYMBOL(ieee80211_ctstoself_get);
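
/*
 * Driver-side sketch (assumed): drivers that build protection frames in the
 * host fill a struct ieee80211_rts or struct ieee80211_cts for the frame
 * they are about to transmit, e.g.:
 *
 *	struct ieee80211_rts rts;
 *
 *	ieee80211_rts_get(hw, vif, skb->data, skb->len,
 *			  IEEE80211_SKB_CB(skb), &rts);
 *	... hand &rts to the device together with the data frame ...
 */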
5166
5167struct sk_buff *
5168ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
5169                          struct ieee80211_vif *vif)
5170{
5171        struct ieee80211_local *local = hw_to_local(hw);
5172        struct sk_buff *skb = NULL;
5173        struct ieee80211_tx_data tx;
5174        struct ieee80211_sub_if_data *sdata;
5175        struct ps_data *ps;
5176        struct ieee80211_tx_info *info;
5177        struct ieee80211_chanctx_conf *chanctx_conf;
5178
5179        sdata = vif_to_sdata(vif);
5180
5181        rcu_read_lock();
5182        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
5183
5184        if (!chanctx_conf)
5185                goto out;
5186
5187        if (sdata->vif.type == NL80211_IFTYPE_AP) {
5188                struct beacon_data *beacon =
5189                                rcu_dereference(sdata->u.ap.beacon);
5190
5191                if (!beacon || !beacon->head)
5192                        goto out;
5193
5194                ps = &sdata->u.ap.ps;
5195        } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
5196                ps = &sdata->u.mesh.ps;
5197        } else {
5198                goto out;
5199        }
5200
5201        if (ps->dtim_count != 0 || !ps->dtim_bc_mc)
5202                goto out; /* send buffered bc/mc only after DTIM beacon */
5203
5204        while (1) {
5205                skb = skb_dequeue(&ps->bc_buf);
5206                if (!skb)
5207                        goto out;
5208                local->total_ps_buffered--;
5209
5210                if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
5211                        struct ieee80211_hdr *hdr =
5212                                (struct ieee80211_hdr *) skb->data;
5213                        /* more buffered multicast/broadcast frames ==> set
5214                         * MoreData flag in IEEE 802.11 header to inform PS
5215                         * STAs */
5216                        hdr->frame_control |=
5217                                cpu_to_le16(IEEE80211_FCTL_MOREDATA);
5218                }
5219
5220                if (sdata->vif.type == NL80211_IFTYPE_AP)
5221                        sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
5222                if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
5223                        break;
5224                ieee80211_free_txskb(hw, skb);
5225        }
5226
5227        info = IEEE80211_SKB_CB(skb);
5228
5229        tx.flags |= IEEE80211_TX_PS_BUFFERED;
5230        info->band = chanctx_conf->def.chan->band;
5231
5232        if (invoke_tx_handlers(&tx))
5233                skb = NULL;
5234 out:
5235        rcu_read_unlock();
5236
5237        return skb;
5238}
5239EXPORT_SYMBOL(ieee80211_get_buffered_bc);
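
/*
 * Driver-side sketch (assumed): after transmitting a DTIM beacon,
 * software-beaconing drivers drain the buffered broadcast/multicast queue,
 * e.g.:
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = ieee80211_get_buffered_bc(hw, vif)))
 *		... transmit skb on the CAB/beacon queue ...
 */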
5240
5241int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
5242{
5243        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
5244        struct ieee80211_sub_if_data *sdata = sta->sdata;
5245        struct ieee80211_local *local = sdata->local;
5246        int ret;
5247        u32 queues;
5248
5249        lockdep_assert_held(&local->sta_mtx);
5250
5251        /* only some cases are supported right now */
5252        switch (sdata->vif.type) {
5253        case NL80211_IFTYPE_STATION:
5254        case NL80211_IFTYPE_AP:
5255        case NL80211_IFTYPE_AP_VLAN:
5256                break;
5257        default:
5258                WARN_ON(1);
5259                return -EINVAL;
5260        }
5261
5262        if (WARN_ON(tid >= IEEE80211_NUM_UPS))
5263                return -EINVAL;
5264
5265        if (sta->reserved_tid == tid) {
5266                ret = 0;
5267                goto out;
5268        }
5269
5270        if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) {
5271                sdata_err(sdata, "TID reservation already active\n");
5272                ret = -EALREADY;
5273                goto out;
5274        }
5275
5276        ieee80211_stop_vif_queues(sdata->local, sdata,
5277                                  IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
5278
5279        synchronize_net();
5280
5281        /* Tear down BA sessions so we stop aggregating on this TID */
5282        if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
5283                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
5284                __ieee80211_stop_tx_ba_session(sta, tid,
5285                                               AGG_STOP_LOCAL_REQUEST);
5286        }
5287
5288        queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]);
5289        __ieee80211_flush_queues(local, sdata, queues, false);
5290
5291        sta->reserved_tid = tid;
5292
5293        ieee80211_wake_vif_queues(local, sdata,
5294                                  IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
5295
5296        if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION))
5297                clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
5298
5299        ret = 0;
5300 out:
5301        return ret;
5302}
5303EXPORT_SYMBOL(ieee80211_reserve_tid);
5304
5305void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid)
5306{
5307        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
5308        struct ieee80211_sub_if_data *sdata = sta->sdata;
5309
5310        lockdep_assert_held(&sdata->local->sta_mtx);
5311
5312        /* only some cases are supported right now */
5313        switch (sdata->vif.type) {
5314        case NL80211_IFTYPE_STATION:
5315        case NL80211_IFTYPE_AP:
5316        case NL80211_IFTYPE_AP_VLAN:
5317                break;
5318        default:
5319                WARN_ON(1);
5320                return;
5321        }
5322
5323        if (tid != sta->reserved_tid) {
5324                sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid);
5325                return;
5326        }
5327
5328        sta->reserved_tid = IEEE80211_TID_UNRESERVED;
5329}
5330EXPORT_SYMBOL(ieee80211_unreserve_tid);
5331
5332void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
5333                                 struct sk_buff *skb, int tid,
5334                                 enum nl80211_band band)
5335{
5336        int ac = ieee80211_ac_from_tid(tid);
5337
5338        skb_reset_mac_header(skb);
5339        skb_set_queue_mapping(skb, ac);
5340        skb->priority = tid;
5341
5342        skb->dev = sdata->dev;
5343
5344        /*
5345         * The other path calling ieee80211_xmit is from the tasklet,
5346         * and while we can handle concurrent transmissions, the locking
5347         * requirement is that we do not enter tx with BHs enabled.
5348         */
5349        local_bh_disable();
5350        IEEE80211_SKB_CB(skb)->band = band;
5351        ieee80211_xmit(sdata, NULL, skb);
5352        local_bh_enable();
5353}
5354
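/*
 * Note (assumed context, not stated in this file): this is mac80211's
 * implementation of the cfg80211 tx_control_port op (hooked up in cfg.c),
 * used e.g. when userspace sends EAPOL frames over nl80211 instead of a
 * normal socket.
 */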
5355int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
5356                              const u8 *buf, size_t len,
5357                              const u8 *dest, __be16 proto, bool unencrypted,
5358                              u64 *cookie)
5359{
5360        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
5361        struct ieee80211_local *local = sdata->local;
5362        struct sk_buff *skb;
5363        struct ethhdr *ehdr;
5364        u32 ctrl_flags = 0;
5365        u32 flags = 0;
5366
5367        /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
5368         * or Pre-Authentication
5369         */
5370        if (proto != sdata->control_port_protocol &&
5371            proto != cpu_to_be16(ETH_P_PREAUTH))
5372                return -EINVAL;
5373
5374        if (proto == sdata->control_port_protocol)
5375                ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
5376                              IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
5377
5378        if (unencrypted)
5379                flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
5380
5381        if (cookie)
5382                ctrl_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
5383
5384        flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
5385                 IEEE80211_TX_CTL_INJECTED;
5386
5387        skb = dev_alloc_skb(local->hw.extra_tx_headroom +
5388                            sizeof(struct ethhdr) + len);
5389        if (!skb)
5390                return -ENOMEM;
5391
5392        skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr));
5393
5394        skb_put_data(skb, buf, len);
5395
5396        ehdr = skb_push(skb, sizeof(struct ethhdr));
5397        memcpy(ehdr->h_dest, dest, ETH_ALEN);
5398        memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
5399        ehdr->h_proto = proto;
5400
5401        skb->dev = dev;
5402        skb->protocol = htons(ETH_P_802_3);
5403        skb_reset_network_header(skb);
5404        skb_reset_mac_header(skb);
5405
5406        /* mutex lock is only needed for incrementing the cookie counter */
5407        mutex_lock(&local->mtx);
5408
5409        local_bh_disable();
5410        __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie);
5411        local_bh_enable();
5412
5413        mutex_unlock(&local->mtx);
5414
5415        return 0;
5416}
5417
5418int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
5419                              const u8 *buf, size_t len)
5420{
5421        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
5422        struct ieee80211_local *local = sdata->local;
5423        struct sk_buff *skb;
5424
5425        skb = dev_alloc_skb(local->hw.extra_tx_headroom + len +
5426                            30 + /* header size */
5427                            18); /* 11s header size */
5428        if (!skb)
5429                return -ENOMEM;
5430
5431        skb_reserve(skb, local->hw.extra_tx_headroom);
5432        skb_put_data(skb, buf, len);
5433
5434        skb->dev = dev;
5435        skb->protocol = htons(ETH_P_802_3);
5436        skb_reset_network_header(skb);
5437        skb_reset_mac_header(skb);
5438
5439        local_bh_disable();
5440        __ieee80211_subif_start_xmit(skb, skb->dev, 0,
5441                                     IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP,
5442                                     NULL);
5443        local_bh_enable();
5444
5445        return 0;
5446}
5447