linux/drivers/staging/octeon/ethernet-tx.c
/*********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
*********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <linux/atomic.h>
#include <net/sch_generic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

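/*
 * skb->cb is reused as scratch space to build the PKO gather list: one
 * u64 buffer pointer for the linear head plus one per fragment. Packets
 * with more than 5 fragments are linearized first, so at most six u64
 * entries (48 bytes) are needed, which fits in skb->cb.
 */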
#define CVM_OCT_SKB_CB(skb)     ((u64 *)((skb)->cb))

/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif
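
/*
 * Illustrative sketch only (not enabled in this driver): overriding
 * GET_SKBUFF_QOS as suggested above. A build would normally supply the
 * define before the fallback above, e.g. from a local header or CFLAGS.
 * cvm_oct_xmit() clamps whatever this returns against
 * cvmx_pko_get_num_queues(), so returning skb->priority directly is safe.
 */
#if 0
#define GET_SKBUFF_QOS(skb) ((skb)->priority)
#endif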

static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);

/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)

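/*
 * FAU accounting used by the transmit path: when a packet is sent with
 * pko_command.s.dontfree set, the command's reg0/subone0 fields tell
 * the PKO hardware to subtract one from the counter at
 * priv->fau + qos * 4 as the packet completes, so the counter goes
 * negative while the corresponding skbs wait on tx_free_list. A caller
 * claims work with cvmx_fau_fetch_and_add32(fau, MAX_SKB_TO_FREE) and
 * then uses cvm_oct_adjust_skb_to_free() to turn the fetched value into
 * the number of skbs it may actually free (0..MAX_SKB_TO_FREE),
 * returning any unused part of the claim to the counter. For example,
 * a fetched value of -3 means three skbs may be freed and
 * MAX_SKB_TO_FREE - 3 is subtracted back.
 */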
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
        int32_t undo;

        undo = skb_to_free > 0 ? MAX_SKB_TO_FREE :
                                 skb_to_free + MAX_SKB_TO_FREE;
        if (undo > 0)
                cvmx_fau_atomic_add32(fau, -undo);
        skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
                                                       -skb_to_free;
        return skb_to_free;
}

static void cvm_oct_kick_tx_poll_watchdog(void)
{
        union cvmx_ciu_timx ciu_timx;

        ciu_timx.u64 = 0;
        ciu_timx.s.one_shot = 1;
        ciu_timx.s.len = cvm_oct_tx_poll_interval;
        cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}

void cvm_oct_free_tx_skbs(struct net_device *dev)
{
        int32_t skb_to_free;
        int qos, queues_per_port;
        int total_freed = 0;
        int total_remaining = 0;
        unsigned long flags;
        struct octeon_ethernet *priv = netdev_priv(dev);

        queues_per_port = cvmx_pko_get_num_queues(priv->port);
        /* Drain any pending packets in the free list */
        for (qos = 0; qos < queues_per_port; qos++) {
                if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
                        continue;
                skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
                                                       MAX_SKB_TO_FREE);
                skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
                                                         priv->fau + qos * 4);

                total_freed += skb_to_free;
                if (skb_to_free > 0) {
                        struct sk_buff *to_free_list = NULL;

                        spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
                        while (skb_to_free > 0) {
                                struct sk_buff *t =
                                        __skb_dequeue(&priv->tx_free_list[qos]);

                                t->next = to_free_list;
                                to_free_list = t;
                                skb_to_free--;
                        }
                        spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
                                               flags);
                        /* Do the actual freeing outside of the lock. */
                        while (to_free_list) {
                                struct sk_buff *t = to_free_list;

                                to_free_list = to_free_list->next;
                                dev_kfree_skb_any(t);
                        }
                }
                total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
        }
        if (total_freed >= 0 && netif_queue_stopped(dev))
                netif_wake_queue(dev);
        if (total_remaining)
                cvm_oct_kick_tx_poll_watchdog();
}

/**
 * cvm_oct_xmit - transmit a packet
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns: NETDEV_TX_OK always.
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
        cvmx_pko_command_word0_t pko_command;
        union cvmx_buf_ptr hw_buffer;
        uint64_t old_scratch;
        uint64_t old_scratch2;
        int qos;
        int i;
        enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
        struct octeon_ethernet *priv = netdev_priv(dev);
        struct sk_buff *to_free_list;
        int32_t skb_to_free;
        int32_t buffers_to_free;
        u32 total_to_clean;
        unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
        unsigned char *fpa_head;
#endif

        /*
         * Prefetch the private data structure.  It is larger than one
         * cache line.
         */
        prefetch(priv);

        /*
         * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
         * completely remove "qos" in the event neither interface
         * supports multiple queues per port.
         */
        if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
            (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
                qos = GET_SKBUFF_QOS(skb);
                if (qos <= 0)
                        qos = 0;
                else if (qos >= cvmx_pko_get_num_queues(priv->port))
                        qos = 0;
        } else {
                qos = 0;
        }

        if (USE_ASYNC_IOBDMA) {
                /* Save scratch in case userspace is using it */
                CVMX_SYNCIOBDMA;
                old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

                /*
                 * Fetch and increment the number of packets to be
                 * freed.
                 */
                cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
                                               FAU_NUM_PACKET_BUFFERS_TO_FREE,
                                               0);
                cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
                                               priv->fau + qos * 4,
                                               MAX_SKB_TO_FREE);
        }

        /*
         * We have space for 6 segment pointers. If there will be more
         * than that, we must linearize.
         */
        if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
                if (unlikely(__skb_linearize(skb))) {
                        queue_type = QUEUE_DROP;
                        if (USE_ASYNC_IOBDMA) {
                                /* Get the number of skbuffs in use by the hardware */
                                CVMX_SYNCIOBDMA;
                                skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                        } else {
                                /* Get the number of skbuffs in use by the hardware */
                                skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
                                                                       MAX_SKB_TO_FREE);
                        }
                        skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
                                                                 priv->fau + qos * 4);
                        spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
                        goto skip_xmit;
                }
        }

        /*
         * The CN3XXX series of parts has an erratum (GMX-401) which
         * causes the GMX block to hang if a collision occurs towards
         * the end of a <68 byte packet. As a workaround for this, we
         * pad packets to be 68 bytes whenever we are in half duplex
         * mode. We don't handle the case of having a small packet but
         * no room to add the padding.  The kernel should always give
         * us at least a cache line.
         */
        if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
                union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
                int interface = INTERFACE(priv->port);
                int index = INDEX(priv->port);

                if (interface < 2) {
                        /* We only need to pad packet in half duplex mode */
                        gmx_prt_cfg.u64 =
                            cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                        if (gmx_prt_cfg.s.duplex == 0) {
                                int add_bytes = 64 - skb->len;

                                if ((skb_tail_pointer(skb) + add_bytes) <=
                                    skb_end_pointer(skb))
                                        memset(__skb_put(skb, add_bytes), 0,
                                               add_bytes);
                        }
                }
        }

        /* Build the PKO command */
        pko_command.u64 = 0;
        pko_command.s.n2 = 1;   /* Don't pollute L2 with the outgoing packet */
        pko_command.s.segs = 1;
        pko_command.s.total_bytes = skb->len;
        pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
        pko_command.s.subone0 = 1;

        pko_command.s.dontfree = 1;

        /* Build the PKO buffer pointer */
        hw_buffer.u64 = 0;
        if (skb_shinfo(skb)->nr_frags == 0) {
                hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
                hw_buffer.s.pool = 0;
                hw_buffer.s.size = skb->len;
        } else {
                hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
                hw_buffer.s.pool = 0;
                hw_buffer.s.size = skb_headlen(skb);
                CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;

                        hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
                        hw_buffer.s.size = fs->size;
                        CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
                }
                hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
                hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
                pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
                pko_command.s.gather = 1;
                goto dont_put_skbuff_in_hw;
        }

        /*
         * See if we can put this skb in the FPA pool. Any strange
         * behavior from the Linux networking stack will most likely
         * be caused by a bug in the following code. If some field is
         * in use by the network stack and gets carried over when a
         * buffer is reused, bad things may happen.  If in doubt and
         * you don't need the absolute best performance, disable the
         * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
         * shown a 25% increase in performance under some loads.
         */
#if REUSE_SKBUFFS_WITHOUT_FREE
        fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
        if (unlikely(skb->data < fpa_head)) {
                /*
                 * printk("TX buffer beginning can't meet FPA
                 * alignment constraints\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely
            ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
                /*
                   printk("TX buffer isn't large enough for the FPA\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_shared(skb))) {
                /*
                   printk("TX buffer sharing data with someone else\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_cloned(skb))) {
                /*
                   printk("TX buffer has been cloned\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_header_cloned(skb))) {
                /*
                   printk("TX buffer header has been cloned\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb->destructor)) {
                /*
                   printk("TX buffer has a destructor\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_shinfo(skb)->nr_frags)) {
                /*
                   printk("TX buffer has fragments\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely
            (skb->truesize !=
             sizeof(*skb) + skb_end_offset(skb))) {
                /*
                   printk("TX buffer truesize has been changed\n");
                 */
                goto dont_put_skbuff_in_hw;
        }

        /*
         * We can use this buffer in the FPA.  We don't need the FAU
         * update anymore.
         */
        pko_command.s.dontfree = 0;

        hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
                           ((unsigned long)fpa_head >> 7);
        *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

        /*
         * The skbuff will be reused without ever being freed. We must
         * clean up a bunch of core things.
         */
        dst_release(skb_dst(skb));
        skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
        skb->sp = NULL;
#endif
        nf_reset(skb);

#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
        skb_reset_tc(skb);
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

        /* Check if we can use the hardware checksumming */
        if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
            (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
            ((ip_hdr(skb)->frag_off == 0) ||
             (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
            ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
             (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
                /* Use hardware checksum calc */
                pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
        }

        if (USE_ASYNC_IOBDMA) {
                /* Get the number of skbuffs in use by the hardware */
                CVMX_SYNCIOBDMA;
                skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
        } else {
                /* Get the number of skbuffs in use by the hardware */
                skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
                                                       MAX_SKB_TO_FREE);
                buffers_to_free =
                    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
        }

        skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
                                                 priv->fau + qos * 4);

        /*
         * If we're sending faster than the receive side can free them,
         * then don't do the HW free.
         */
        if ((buffers_to_free < -100) && !pko_command.s.dontfree)
                pko_command.s.dontfree = 1;

        if (pko_command.s.dontfree) {
                queue_type = QUEUE_CORE;
                pko_command.s.reg0 = priv->fau + qos * 4;
        } else {
                queue_type = QUEUE_HW;
        }
        if (USE_ASYNC_IOBDMA)
                cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
                                               FAU_TOTAL_TX_TO_CLEAN, 1);

        spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

        /* Drop this packet if we have too many already queued to the HW */
        if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
                     MAX_OUT_QUEUE_DEPTH)) {
                if (dev->tx_queue_len != 0) {
                        /* Drop the lock when notifying the core.  */
                        spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
                                               flags);
                        netif_stop_queue(dev);
                        spin_lock_irqsave(&priv->tx_free_list[qos].lock,
                                          flags);
                } else {
                        /* If not using normal queueing.  */
                        queue_type = QUEUE_DROP;
                        goto skip_xmit;
                }
        }

        cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
                                     CVMX_PKO_LOCK_NONE);

        /* Send the packet to the output queue */
        if (unlikely(cvmx_pko_send_packet_finish(priv->port,
                                                 priv->queue + qos,
                                                 pko_command, hw_buffer,
                                                 CVMX_PKO_LOCK_NONE))) {
                printk_ratelimited("%s: Failed to send the packet\n",
                                   dev->name);
                queue_type = QUEUE_DROP;
        }
skip_xmit:
        to_free_list = NULL;

        switch (queue_type) {
        case QUEUE_DROP:
                skb->next = to_free_list;
                to_free_list = skb;
                priv->stats.tx_dropped++;
                break;
        case QUEUE_HW:
                cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
                break;
        case QUEUE_CORE:
                __skb_queue_tail(&priv->tx_free_list[qos], skb);
                break;
        default:
                BUG();
        }

        while (skb_to_free > 0) {
                struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

                t->next = to_free_list;
                to_free_list = t;
                skb_to_free--;
        }

        spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

        /* Do the actual freeing outside of the lock. */
        while (to_free_list) {
                struct sk_buff *t = to_free_list;

                to_free_list = to_free_list->next;
                dev_kfree_skb_any(t);
        }

        if (USE_ASYNC_IOBDMA) {
                CVMX_SYNCIOBDMA;
                total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                /* Restore the scratch area */
                cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
                cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
        } else {
                total_to_clean =
                        cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
        }

        if (total_to_clean & 0x3ff) {
                /*
                 * Schedule the cleanup tasklet every 1024 packets for
                 * the pathological case of high traffic on one port
                 * delaying clean up of packets on a different port
                 * that is blocked waiting for the cleanup.
                 */
                tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
        }

        cvm_oct_kick_tx_poll_watchdog();

        return NETDEV_TX_OK;
}

/**
 * cvm_oct_xmit_pow - transmit a packet to the POW
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns: zero always.
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        void *packet_buffer;
        void *copy_location;

        /* Get a work queue entry */
        cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

        if (unlikely(work == NULL)) {
                printk_ratelimited("%s: Failed to allocate a work queue entry\n",
                                   dev->name);
                priv->stats.tx_dropped++;
                dev_kfree_skb(skb);
                return 0;
        }

        /* Get a packet buffer */
        packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
        if (unlikely(packet_buffer == NULL)) {
                printk_ratelimited("%s: Failed to allocate a packet buffer\n",
                                   dev->name);
                cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
                priv->stats.tx_dropped++;
                dev_kfree_skb(skb);
                return 0;
        }

        /*
         * Calculate where we need to copy the data to. We need to
         * leave 8 bytes for a next pointer (unused). We also need to
         * include any configured skip. Then we need to align the IP
         * packet src and dest into the same 64bit word. The below
         * calculation may add a little extra, but that doesn't
         * hurt.
         */
        copy_location = packet_buffer + sizeof(uint64_t);
        copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

        /*
         * We have to copy the packet since whoever processes this
         * packet will free it to a hardware pool. We can't use the
         * trick of counting outstanding packets like in
         * cvm_oct_xmit.
         */
        memcpy(copy_location, skb->data, skb->len);

        /*
         * Fill in some of the work queue fields. We may need to add
         * more if the software at the other end needs them.
         */
        work->hw_chksum = skb->csum;
        work->len = skb->len;
        work->ipprt = priv->port;
        work->qos = priv->port & 0x7;
        work->grp = pow_send_group;
        work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
        work->tag = pow_send_group;     /* FIXME */
        /* Default to zero; assignments of zero below are commented out. */
        work->word2.u64 = 0;
        work->word2.s.bufs = 1;
        work->packet_ptr.u64 = 0;
        work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
        work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
        work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
        work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

        if (skb->protocol == htons(ETH_P_IP)) {
                work->word2.s.ip_offset = 14;
#if 0
                work->word2.s.vlan_valid = 0;   /* FIXME */
                work->word2.s.vlan_cfi = 0;     /* FIXME */
                work->word2.s.vlan_id = 0;      /* FIXME */
                work->word2.s.dec_ipcomp = 0;   /* FIXME */
#endif
                work->word2.s.tcp_or_udp =
                    (ip_hdr(skb)->protocol == IPPROTO_TCP)
                    || (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
                /* FIXME */
                work->word2.s.dec_ipsec = 0;
                /* We only support IPv4 right now */
                work->word2.s.is_v6 = 0;
                /* Hardware would set to zero */
                work->word2.s.software = 0;
                /* No error, packet is internal */
                work->word2.s.L4_error = 0;
#endif
                work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
                                          || (ip_hdr(skb)->frag_off ==
                                              htons(1 << 14)));
#if 0
                /* Assume Linux is sending a good packet */
                work->word2.s.IP_exc = 0;
#endif
                work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
                work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
                /* This is an IP packet */
                work->word2.s.not_IP = 0;
                /* No error, packet is internal */
                work->word2.s.rcv_error = 0;
                /* No error, packet is internal */
                work->word2.s.err_code = 0;
#endif

                /*
                 * When copying the data, include 4 bytes of the
                 * ethernet header to align the same way hardware
                 * does.
                 */
                memcpy(work->packet_data, skb->data + 10,
                       sizeof(work->packet_data));
        } else {
#if 0
                work->word2.snoip.vlan_valid = 0;       /* FIXME */
                work->word2.snoip.vlan_cfi = 0; /* FIXME */
                work->word2.snoip.vlan_id = 0;  /* FIXME */
                work->word2.snoip.software = 0; /* Hardware would set to zero */
#endif
                work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
                work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
                work->word2.snoip.is_bcast =
                    (skb->pkt_type == PACKET_BROADCAST);
                work->word2.snoip.is_mcast =
                    (skb->pkt_type == PACKET_MULTICAST);
                work->word2.snoip.not_IP = 1;   /* IP was done up above */
#if 0
                /* No error, packet is internal */
                work->word2.snoip.rcv_error = 0;
                /* No error, packet is internal */
                work->word2.snoip.err_code = 0;
#endif
                memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
        }

        /* Submit the packet to the POW */
        cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
                             work->grp);
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += skb->len;
        dev_kfree_skb(skb);
        return 0;
}

/**
 * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX.
 * @dev:    Device being shutdown
 */
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        unsigned long flags;
        int qos;

        for (qos = 0; qos < 16; qos++) {
                spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
                while (skb_queue_len(&priv->tx_free_list[qos]))
                        dev_kfree_skb_any(__skb_dequeue
                                          (&priv->tx_free_list[qos]));
                spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
        }
}

static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
        int port;

        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];

                        cvm_oct_free_tx_skbs(dev);
                }
        }
}

static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
        /* Disable the interrupt.  */
        cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
        /* Do the work in the tasklet.  */
        tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
        return IRQ_HANDLED;
}

void cvm_oct_tx_initialize(void)
{
        int i;

        /* Disable the interrupt.  */
        cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
        /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
        i = request_irq(OCTEON_IRQ_TIMER1,
                        cvm_oct_tx_cleanup_watchdog, 0,
                        "Ethernet", cvm_oct_device);

        if (i)
                panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}

void cvm_oct_tx_shutdown(void)
{
        /* Free the interrupt handler */
        free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}