linux/drivers/net/wireless/ti/wl1251/tx.c
/*
 * This file is part of wl1251
 *
 * Copyright (c) 1998-2007 Texas Instruments Incorporated
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "wl1251.h"
#include "reg.h"
#include "tx.h"
#include "ps.h"
#include "io.h"
#include "event.h"

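/*
 * Compare the number of frames the driver has pushed in (data_in_count)
 * with the number the firmware has consumed (data_out_count), taking
 * counter wrap-around into account. Returns true when both halves of the
 * firmware TX double buffer are in use.
 */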
static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
{
        int used, data_in_count;

        data_in_count = wl->data_in_count;

        if (data_in_count < data_out_count)
                /* data_in_count has wrapped */
                data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;

        used = data_in_count - data_out_count;

        WARN_ON(used < 0);
        WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);

        if (used >= DP_TX_PACKET_RING_CHUNK_NUM)
                return true;
        else
                return false;
}

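/*
 * Read the TX path status word from the chip and check whether the
 * firmware double buffer can accept another frame.
 */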
static int wl1251_tx_path_status(struct wl1251 *wl)
{
        u32 status, addr, data_out_count;
        bool busy;

        addr = wl->data_path->tx_control_addr;
        status = wl1251_mem_read32(wl, addr);
        data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
        busy = wl1251_tx_double_buffer_busy(wl, data_out_count);

        if (busy)
                return -EBUSY;

        return 0;
}

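/* Reserve a free slot in the tx_frames table and use its index as frame id */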
static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
{
        int i;

        for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                if (wl->tx_frames[i] == NULL) {
                        wl->tx_frames[i] = skb;
                        return i;
                }

        return -EBUSY;
}

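/*
 * Fill in the control bits of the TX descriptor based on the mac80211
 * TX info flags and the 802.11 frame control field.
 */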
static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
                              struct ieee80211_tx_info *control, u16 fc)
{
        *(u16 *)&tx_hdr->control = 0;

        tx_hdr->control.rate_policy = 0;

        /* 802.11 packets */
        tx_hdr->control.packet_type = 0;

        /* Also disable retry and ACK policy for injected packets */
        if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
            (control->flags & IEEE80211_TX_CTL_INJECTED)) {
                tx_hdr->control.rate_policy = 1;
                tx_hdr->control.ack_policy = 1;
        }

        tx_hdr->control.tx_complete = 1;

        if ((fc & IEEE80211_FTYPE_DATA) &&
            ((fc & IEEE80211_STYPE_QOS_DATA) ||
             (fc & IEEE80211_STYPE_QOS_NULLFUNC)))
                tx_hdr->control.qos = 1;
}

/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
#define MAX_MSDU_SECURITY_LENGTH      16
#define MAX_MPDU_SECURITY_LENGTH      16
#define WLAN_QOS_HDR_LEN              26
#define MAX_MPDU_HEADER_AND_SECURITY  (MAX_MPDU_SECURITY_LENGTH + \
                                       WLAN_QOS_HDR_LEN)
#define HW_BLOCK_SIZE                 252
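/*
 * Estimate how many firmware memory blocks the frame will need, taking
 * possible fragmentation at the fragmentation threshold into account,
 * and store the result in the TX descriptor.
 */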
static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
{
        u16 payload_len, frag_threshold, mem_blocks;
        u16 num_mpdus, mem_blocks_per_frag;

        frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
        tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);

        payload_len = le16_to_cpu(tx_hdr->length) + MAX_MSDU_SECURITY_LENGTH;

        if (payload_len > frag_threshold) {
                mem_blocks_per_frag =
                        ((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
                         HW_BLOCK_SIZE) + 1;
                num_mpdus = payload_len / frag_threshold;
                mem_blocks = num_mpdus * mem_blocks_per_frag;
                payload_len -= num_mpdus * frag_threshold;
                num_mpdus++;

        } else {
                mem_blocks_per_frag = 0;
                mem_blocks = 0;
                num_mpdus = 1;
        }

        mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;

        if (num_mpdus > 1)
                mem_blocks += min(num_mpdus, mem_blocks_per_frag);

        tx_hdr->num_mem_blocks = mem_blocks;
}

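/*
 * Reserve a frame id, push the double buffer TX descriptor in front of
 * the frame and fill it in (length, rate, queue, control bits and the
 * number of memory blocks the firmware needs).
 */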
static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
                              struct ieee80211_tx_info *control)
{
        struct tx_double_buffer_desc *tx_hdr;
        struct ieee80211_rate *rate;
        int id;
        u16 fc;

        if (!skb)
                return -EINVAL;

        id = wl1251_tx_id(wl, skb);
        if (id < 0)
                return id;

        fc = *(u16 *)skb->data;
        tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb,
                                                           sizeof(*tx_hdr));

        tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
        rate = ieee80211_get_tx_rate(wl->hw, control);
        tx_hdr->rate = cpu_to_le16(rate->hw_value);
        tx_hdr->expiry_time = cpu_to_le32(1 << 16);
        tx_hdr->id = id;

        tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));

        wl1251_tx_control(tx_hdr, control, fc);
        wl1251_tx_frag_block_num(tx_hdr);

        return 0;
}

/* We copy the packet to the target */
static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
                                 struct ieee80211_tx_info *control)
{
        struct tx_double_buffer_desc *tx_hdr;
        int len;
        u32 addr;

        if (!skb)
                return -EINVAL;

        tx_hdr = (struct tx_double_buffer_desc *) skb->data;

        if (control->control.hw_key &&
            control->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                int hdrlen;
                __le16 fc;
                u16 length;
                u8 *pos;

                fc = *(__le16 *)(skb->data + sizeof(*tx_hdr));
                length = le16_to_cpu(tx_hdr->length) + WL1251_TKIP_IV_SPACE;
                tx_hdr->length = cpu_to_le16(length);

                hdrlen = ieee80211_hdrlen(fc);

                pos = skb_push(skb, WL1251_TKIP_IV_SPACE);
                memmove(pos, pos + WL1251_TKIP_IV_SPACE,
                        sizeof(*tx_hdr) + hdrlen);
        }

        /* Revisit. This is a workaround for getting non-aligned packets.
           This happens at least with EAPOL packets from the user space.
           Our DMA requires packets to be aligned on a 4-byte boundary.
        */
        if (unlikely((long)skb->data & 0x03)) {
                int offset = (4 - (long)skb->data) & 0x03;
                wl1251_debug(DEBUG_TX, "skb offset %d", offset);

                /* check whether the current skb can be used */
                if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
                        struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
                                                                 GFP_KERNEL);

                        if (unlikely(newskb == NULL)) {
                                wl1251_error("Can't allocate skb!");
                                return -EINVAL;
                        }

                        tx_hdr = (struct tx_double_buffer_desc *) newskb->data;

                        dev_kfree_skb_any(skb);
                        wl->tx_frames[tx_hdr->id] = skb = newskb;

                        offset = (4 - (long)skb->data) & 0x03;
                        wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
                }

                /* align the buffer on a 4-byte boundary */
                if (offset) {
                        unsigned char *src = skb->data;
                        skb_reserve(skb, offset);
                        memmove(skb->data, src, skb->len);
                        tx_hdr = (struct tx_double_buffer_desc *) skb->data;
                }
        }

        /* Our skb->data at this point includes the HW header */
        len = WL1251_TX_ALIGN(skb->len);

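        /*
         * The firmware TX packet ring is a double buffer; the parity of
         * data_in_count selects which half this frame is written into.
         */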
        if (wl->data_in_count & 0x1)
                addr = wl->data_path->tx_packet_ring_addr +
                        wl->data_path->tx_packet_ring_chunk_size;
        else
                addr = wl->data_path->tx_packet_ring_addr;

        wl1251_mem_write(wl, addr, skb->data, len);

        wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
                     "queue %d", tx_hdr->id, skb, tx_hdr->length,
                     tx_hdr->rate, tx_hdr->xmit_queue);

        return 0;
}

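/*
 * Tell the firmware that a new frame has been written by raising the
 * interrupt trigger for the double buffer half that was just filled,
 * then advance data_in_count.
 */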
static void wl1251_tx_trigger(struct wl1251 *wl)
{
        u32 data, addr;

        if (wl->data_in_count & 0x1) {
                addr = ACX_REG_INTERRUPT_TRIG_H;
                data = INTR_TRIG_TX_PROC1;
        } else {
                addr = ACX_REG_INTERRUPT_TRIG;
                data = INTR_TRIG_TX_PROC0;
        }

        wl1251_reg_write32(wl, addr, data);

        /* Bumping data in */
        wl->data_in_count = (wl->data_in_count + 1) &
                TX_STATUS_DATA_OUT_COUNT_MASK;
}

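/*
 * With no interface present (monitor mode), issue a join so that the
 * firmware TX path is enabled for injected frames.
 */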
static void enable_tx_for_packet_injection(struct wl1251 *wl)
{
        int ret;

        ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
                              wl->beacon_int, wl->dtim_period);
        if (ret < 0) {
                wl1251_warning("join failed");
                return;
        }

        ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
        if (ret < 0) {
                wl1251_warning("join timeout");
                return;
        }

        wl->joined = true;
}

/* caller must hold wl->mutex */
static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info;
        int ret = 0;
        u8 idx;

        info = IEEE80211_SKB_CB(skb);

        if (info->control.hw_key) {
                if (unlikely(wl->monitor_present))
                        return -EINVAL;

                idx = info->control.hw_key->hw_key_idx;
                if (unlikely(wl->default_key != idx)) {
                        ret = wl1251_acx_default_key(wl, idx);
                        if (ret < 0)
                                return ret;
                }
        }

        /* Enable tx path in monitor mode for packet injection */
        if ((wl->vif == NULL) && !wl->joined)
                enable_tx_for_packet_injection(wl);

        ret = wl1251_tx_path_status(wl);
        if (ret < 0)
                return ret;

        ret = wl1251_tx_fill_hdr(wl, skb, info);
        if (ret < 0)
                return ret;

        ret = wl1251_tx_send_packet(wl, skb, info);
        if (ret < 0)
                return ret;

        wl1251_tx_trigger(wl);

        return ret;
}

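/*
 * TX work item: wake the chip from ELP if needed and push queued frames
 * to the firmware until the queue is empty or the firmware buffer fills up.
 */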
void wl1251_tx_work(struct work_struct *work)
{
        struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
        struct sk_buff *skb;
        bool woken_up = false;
        int ret;

        mutex_lock(&wl->mutex);

        if (unlikely(wl->state == WL1251_STATE_OFF))
                goto out;

        while ((skb = skb_dequeue(&wl->tx_queue))) {
                if (!woken_up) {
                        ret = wl1251_ps_elp_wakeup(wl);
                        if (ret < 0)
                                goto out;
                        woken_up = true;
                }

                ret = wl1251_tx_frame(wl, skb);
                if (ret == -EBUSY) {
                        skb_queue_head(&wl->tx_queue, skb);
                        goto out;
                } else if (ret < 0) {
                        dev_kfree_skb(skb);
                        goto out;
                }
        }

out:
        if (woken_up)
                wl1251_ps_elp_sleep(wl);

        mutex_unlock(&wl->mutex);
}

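/* Build a short human-readable summary of the TX result status bits */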
static const char *wl1251_tx_parse_status(u8 status)
{
        /* 8 bit status field, one character per bit plus null */
        static char buf[9];
        int i = 0;

        memset(buf, 0, sizeof(buf));

        if (status & TX_DMA_ERROR)
                buf[i++] = 'm';
        if (status & TX_DISABLED)
                buf[i++] = 'd';
        if (status & TX_RETRY_EXCEEDED)
                buf[i++] = 'r';
        if (status & TX_TIMEOUT)
                buf[i++] = 't';
        if (status & TX_KEY_NOT_FOUND)
                buf[i++] = 'k';
        if (status & TX_ENCRYPT_FAIL)
                buf[i++] = 'e';
        if (status & TX_UNAVAILABLE_PRIORITY)
                buf[i++] = 'p';

        /* bit 0 is unused apparently */

        return buf;
}

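/*
 * Handle a single TX completion result: update the mac80211 TX status,
 * strip the private TX header (and TKIP IV space, if any) and hand the
 * skb back to mac80211.
 */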
static void wl1251_tx_packet_cb(struct wl1251 *wl,
                                struct tx_result *result)
{
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int hdrlen;
        u8 *frame;

        skb = wl->tx_frames[result->id];
        if (skb == NULL) {
                wl1251_error("SKB for packet %d is NULL", result->id);
                return;
        }

        info = IEEE80211_SKB_CB(skb);

        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
            !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
            (result->status == TX_SUCCESS))
                info->flags |= IEEE80211_TX_STAT_ACK;

        info->status.rates[0].count = result->ack_failures + 1;
        wl->stats.retry_count += result->ack_failures;

        /*
         * We have to remove our private TX header before pushing
         * the skb back to mac80211.
         */
        frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
        if (info->control.hw_key &&
            info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                hdrlen = ieee80211_get_hdrlen_from_skb(skb);
                memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen);
                skb_pull(skb, WL1251_TKIP_IV_SPACE);
        }

        wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
                     " status 0x%x (%s)",
                     result->id, skb, result->ack_failures, result->rate,
                     result->status, wl1251_tx_parse_status(result->status));

        ieee80211_tx_status(wl->hw, skb);

        wl->tx_frames[result->id] = NULL;
}

/* Called upon reception of a TX complete interrupt */
void wl1251_tx_complete(struct wl1251 *wl)
{
        int i, result_index, num_complete = 0, queue_len;
        struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
        unsigned long flags;

        if (unlikely(wl->state != WL1251_STATE_ON))
                return;

        /* First we read the result */
        wl1251_mem_read(wl, wl->data_path->tx_complete_addr,
                        result, sizeof(result));

        result_index = wl->next_tx_complete;

        for (i = 0; i < ARRAY_SIZE(result); i++) {
                result_ptr = &result[result_index];

                if (result_ptr->done_1 == 1 &&
                    result_ptr->done_2 == 1) {
                        wl1251_tx_packet_cb(wl, result_ptr);

                        result_ptr->done_1 = 0;
                        result_ptr->done_2 = 0;

                        result_index = (result_index + 1) &
                                (FW_TX_CMPLT_BLOCK_SIZE - 1);
                        num_complete++;
                } else {
                        break;
                }
        }

        queue_len = skb_queue_len(&wl->tx_queue);

        if ((num_complete > 0) && (queue_len > 0)) {
                /* firmware buffer has space, reschedule tx_work */
                wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
                ieee80211_queue_work(wl->hw, &wl->tx_work);
        }

        if (wl->tx_queue_stopped &&
            queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
                /* tx_queue has space, restart queues */
                wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
                spin_lock_irqsave(&wl->wl_lock, flags);
                ieee80211_wake_queues(wl->hw);
                wl->tx_queue_stopped = false;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
        }

        /* Every completed frame needs to be acknowledged */
        if (num_complete) {
                /*
                 * If we've wrapped, we have to clear
                 * the results in 2 steps.
                 */
                if (result_index > wl->next_tx_complete) {
                        /* Only 1 write is needed */
                        wl1251_mem_write(wl,
                                         wl->data_path->tx_complete_addr +
                                         (wl->next_tx_complete *
                                          sizeof(struct tx_result)),
                                         &result[wl->next_tx_complete],
                                         num_complete *
                                         sizeof(struct tx_result));

                } else if (result_index < wl->next_tx_complete) {
                        /* 2 writes are needed */
                        wl1251_mem_write(wl,
                                         wl->data_path->tx_complete_addr +
                                         (wl->next_tx_complete *
                                          sizeof(struct tx_result)),
                                         &result[wl->next_tx_complete],
                                         (FW_TX_CMPLT_BLOCK_SIZE -
                                          wl->next_tx_complete) *
                                         sizeof(struct tx_result));

                        wl1251_mem_write(wl,
                                         wl->data_path->tx_complete_addr,
                                         result,
                                         (num_complete -
                                          FW_TX_CMPLT_BLOCK_SIZE +
                                          wl->next_tx_complete) *
                                         sizeof(struct tx_result));

                } else {
                        /* We have to write the whole array */
                        wl1251_mem_write(wl,
                                         wl->data_path->tx_complete_addr,
                                         result,
                                         FW_TX_CMPLT_BLOCK_SIZE *
                                         sizeof(struct tx_result));
                }
        }

        wl->next_tx_complete = result_index;
}

/* caller must hold wl->mutex */
void wl1251_tx_flush(struct wl1251 *wl)
{
        int i;
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;

        /* TX failure */
/*      control->flags = 0; FIXME */

        while ((skb = skb_dequeue(&wl->tx_queue))) {
                info = IEEE80211_SKB_CB(skb);

                wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb);

                if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
                        continue;

                ieee80211_tx_status(wl->hw, skb);
        }

        for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                if (wl->tx_frames[i] != NULL) {
                        skb = wl->tx_frames[i];
                        info = IEEE80211_SKB_CB(skb);

                        if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
                                continue;

                        ieee80211_tx_status(wl->hw, skb);
                        wl->tx_frames[i] = NULL;
                }
}