linux/drivers/staging/octeon/ethernet-tx.c
/*********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 *********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"

#include "cvmx-gmxx-defs.h"

/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

/**
 * Packet transmit
 *
 * @skb:    Packet to send
 * @dev:    Device info structure
 * Returns Always returns zero
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
        cvmx_pko_command_word0_t pko_command;
        union cvmx_buf_ptr hw_buffer;
        uint64_t old_scratch;
        uint64_t old_scratch2;
        int dropped;
        int qos;
        int queue_it_up;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int32_t skb_to_free;
        int32_t undo;
        int32_t buffers_to_free;
#if REUSE_SKBUFFS_WITHOUT_FREE
        unsigned char *fpa_head;
#endif

        /*
         * Prefetch the private data structure.  It is larger than one
         * cache line.
         */
        prefetch(priv);

        /* Start off assuming no drop */
        dropped = 0;

        /*
         * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
         * completely remove "qos" in the event neither interface
         * supports multiple queues per port.
         */
        if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
            (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
                qos = GET_SKBUFF_QOS(skb);
                if (qos <= 0)
                        qos = 0;
                else if (qos >= cvmx_pko_get_num_queues(priv->port))
                        qos = 0;
        } else
                qos = 0;

        if (USE_ASYNC_IOBDMA) {
                /* Save scratch in case userspace is using it */
                CVMX_SYNCIOBDMA;
                old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

                /*
                 * Fetch and increment the number of packets to be
                 * freed.
                 */
                cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
                                               FAU_NUM_PACKET_BUFFERS_TO_FREE,
                                               0);
                cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
                                               priv->fau + qos * 4,
                                               MAX_SKB_TO_FREE);
        }

        /*
         * The CN3XXX series of parts has an errata (GMX-401) which
         * causes the GMX block to hang if a collision occurs towards
         * the end of a <68 byte packet. As a workaround for this, we
         * pad packets to be 68 bytes whenever we are in half duplex
         * mode. We don't handle the case of having a small packet but
         * no room to add the padding.  The kernel should always give
         * us at least a cache line.
         */
        if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
                union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
                int interface = INTERFACE(priv->port);
                int index = INDEX(priv->port);

                if (interface < 2) {
                        /* We only need to pad packet in half duplex mode */
                        gmx_prt_cfg.u64 =
                            cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                        if (gmx_prt_cfg.s.duplex == 0) {
                                int add_bytes = 64 - skb->len;
                                if ((skb_tail_pointer(skb) + add_bytes) <=
                                    skb_end_pointer(skb))
                                        memset(__skb_put(skb, add_bytes), 0,
                                               add_bytes);
                        }
                }
        }

        /* Build the PKO buffer pointer */
        hw_buffer.u64 = 0;
        hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
        hw_buffer.s.pool = 0;
        hw_buffer.s.size =
            (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;

        /* Build the PKO command */
        pko_command.u64 = 0;
        pko_command.s.n2 = 1;   /* Don't pollute L2 with the outgoing packet */
        pko_command.s.segs = 1;
        pko_command.s.total_bytes = skb->len;
        pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
        pko_command.s.subone0 = 1;

        pko_command.s.dontfree = 1;
        pko_command.s.reg0 = priv->fau + qos * 4;
        /*
         * See if we can put this skb in the FPA pool. Any strange
         * behavior from the Linux networking stack will most likely
         * be caused by a bug in the following code. If some field is
         * in use by the network stack and gets carried over when a
         * buffer is reused, bad things may happen.  If in doubt and
         * you don't need the absolute best performance, disable the
         * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
         * shown a 25% increase in performance under some loads.
         */
#if REUSE_SKBUFFS_WITHOUT_FREE
        fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f);
        if (unlikely(skb->data < fpa_head)) {
                /*
                 * printk("TX buffer beginning can't meet FPA
                 * alignment constraints\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely
            ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
                /*
                   printk("TX buffer isn't large enough for the FPA\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_shared(skb))) {
                /*
                   printk("TX buffer sharing data with someone else\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_cloned(skb))) {
                /*
                   printk("TX buffer has been cloned\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_header_cloned(skb))) {
                /*
                   printk("TX buffer header has been cloned\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb->destructor)) {
                /*
                   printk("TX buffer has a destructor\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely(skb_shinfo(skb)->nr_frags)) {
                /*
                   printk("TX buffer has fragments\n");
                 */
                goto dont_put_skbuff_in_hw;
        }
        if (unlikely
            (skb->truesize !=
             sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
                /*
                   printk("TX buffer truesize has been changed\n");
                 */
                goto dont_put_skbuff_in_hw;
        }

        /*
         * We can use this buffer in the FPA.  We don't need the FAU
         * update anymore.
         */
        pko_command.s.reg0 = 0;
        pko_command.s.dontfree = 0;

        hw_buffer.s.back = (skb->data - fpa_head) >> 7;
        *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
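        /*
         * Illustrative note (not part of the original source): fpa_head
         * is skb->head rounded up to the next 128-byte boundary, i.e.
         * the first address in the skb data area that satisfies the
         * FPA's 128-byte alignment requirement.  hw_buffer.s.back
         * records how many 128-byte cache blocks separate that buffer
         * start from skb->data, which is what the hardware uses to find
         * the start of the buffer when it frees it back to the pool.
         * The sk_buff pointer stashed just below fpa_head is presumably
         * how the driver later recovers the skb from a bare hardware
         * buffer.
         */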

        /*
         * The skbuff will be reused without ever being freed. We must
         * clean up a bunch of core things.
         */
        dst_release(skb_dst(skb));
        skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
        skb->sp = NULL;
#endif
        nf_reset(skb);

#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */

dont_put_skbuff_in_hw:
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

        /* Check if we can use the hardware checksumming */
        if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
            (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
            ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
            && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
                || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) {
                /* Use hardware checksum calc */
                pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
        }
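        /*
         * Illustrative note (not part of the original source): ipoffp1
         * is the IP header offset plus one, and leaving it at zero tells
         * the PKO to skip checksum offload.  For an untagged Ethernet
         * frame the IP header follows the 14-byte Ethernet header, so
         * sizeof(struct ethhdr) + 1 == 15 here.  The checks above limit
         * offload to plain IPv4 TCP/UDP packets with a 5-word header
         * that are not fragmented (at most the DF flag set).
         */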

        if (USE_ASYNC_IOBDMA) {
                /* Get the number of skbuffs in use by the hardware */
                CVMX_SYNCIOBDMA;
                skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
        } else {
                /* Get the number of skbuffs in use by the hardware */
                skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
                                                       MAX_SKB_TO_FREE);
                buffers_to_free =
                    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
        }

        /*
         * We try to claim MAX_SKB_TO_FREE buffers.  If there were not
         * that many available, we have to un-claim (undo) any that
         * were in excess.  If skb_to_free is positive we will free
         * that many buffers.
         */
        undo = skb_to_free > 0 ?
                MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
        if (undo > 0)
                cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
        skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
                MAX_SKB_TO_FREE : -skb_to_free;
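        /*
         * Worked example (illustrative numbers only; MAX_SKB_TO_FREE is
         * assumed to be 10 here):
         *  - fetch returned -40: undo is -30, so nothing is given back,
         *    and the clamp limits skb_to_free to 10 for this pass.
         *  - fetch returned -4:  undo is 6, so the 6 we over-claimed are
         *    added back and skb_to_free becomes 4.
         *  - fetch returned 3:   undo is 10, the whole claim is returned
         *    and skb_to_free ends up negative, so nothing is freed.
         */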

        /*
         * If we're sending faster than the receive can free them then
         * don't do the HW free.
         */
        if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
                pko_command.s.dontfree = 1;
                pko_command.s.reg0 = priv->fau + qos * 4;
        }

        cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
                                     CVMX_PKO_LOCK_CMD_QUEUE);

        /* Drop this packet if we have too many already queued to the HW */
        if (unlikely
            (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
                /*
                   DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name);
                 */
                dropped = 1;
        }
        /* Send the packet to the output queue */
        else if (unlikely
                 (cvmx_pko_send_packet_finish
                  (priv->port, priv->queue + qos, pko_command, hw_buffer,
                   CVMX_PKO_LOCK_CMD_QUEUE))) {
                DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
                dropped = 1;
        }

        if (USE_ASYNC_IOBDMA) {
                /* Restore the scratch area */
                cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
                cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
        }

        queue_it_up = 0;
        if (unlikely(dropped)) {
                dev_kfree_skb_any(skb);
                priv->stats.tx_dropped++;
        } else {
                if (USE_SKBUFFS_IN_HW) {
                        /* Put this packet on the queue to be freed later */
                        if (pko_command.s.dontfree)
                                queue_it_up = 1;
                        else
                                cvmx_fau_atomic_add32
                                    (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
                } else {
                        /* Put this packet on the queue to be freed later */
                        queue_it_up = 1;
                }
        }

        if (queue_it_up) {
                spin_lock(&priv->tx_free_list[qos].lock);
                __skb_queue_tail(&priv->tx_free_list[qos], skb);
                cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0);
                spin_unlock(&priv->tx_free_list[qos].lock);
        } else {
                cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
        }

        return 0;
}

/**
 * Packet transmit to the POW
 *
 * @skb:    Packet to send
 * @dev:    Device info structure
 * Returns Always returns zero
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        void *packet_buffer;
        void *copy_location;

        /* Get a work queue entry */
        cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
        if (unlikely(work == NULL)) {
                DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
                           dev->name);
                priv->stats.tx_dropped++;
                dev_kfree_skb(skb);
                return 0;
        }

        /* Get a packet buffer */
        packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
        if (unlikely(packet_buffer == NULL)) {
                DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
                           dev->name);
                cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
                priv->stats.tx_dropped++;
                dev_kfree_skb(skb);
                return 0;
        }

        /*
         * Calculate where we need to copy the data to. We need to
         * leave 8 bytes for a next pointer (unused). We also need to
         * include any configured skip. Then we need to align the IP
         * packet src and dest into the same 64bit word. The below
         * calculation may add a little extra, but that doesn't
         * hurt.
         */
        copy_location = packet_buffer + sizeof(uint64_t);
        copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
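        /*
         * Illustrative arithmetic (not part of the original source):
         * with the skip rounded up to a multiple of 8, copy_location
         * lands 6 bytes into a 64-bit word.  After the 14-byte Ethernet
         * header the IP header therefore starts 4 bytes into a word,
         * which places the 4-byte source address (IP offset 12) and the
         * 4-byte destination address (IP offset 16) together in a single
         * aligned 64-bit word, as the comment above intends.
         */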

        /*
         * We have to copy the packet since whoever processes this
         * packet will free it to a hardware pool. We can't use the
         * trick of counting outstanding packets like in
         * cvm_oct_xmit.
         */
        memcpy(copy_location, skb->data, skb->len);

        /*
         * Fill in some of the work queue fields. We may need to add
         * more if the software at the other end needs them.
         */
        work->hw_chksum = skb->csum;
        work->len = skb->len;
        work->ipprt = priv->port;
        work->qos = priv->port & 0x7;
        work->grp = pow_send_group;
        work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
        work->tag = pow_send_group;     /* FIXME */
        /* Default to zero; the explicit zero assignments below are commented out */
        work->word2.u64 = 0;
        work->word2.s.bufs = 1;
        work->packet_ptr.u64 = 0;
        work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
        work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
        work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
        work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
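        /*
         * Illustrative note (not part of the original source): the
         * "back" field is expressed in 128-byte cache blocks, hence the
         * shift by 7; it tells the hardware how far copy_location sits
         * past the start of the FPA buffer so the buffer can be freed
         * from the packet pointer alone.
         */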

        if (skb->protocol == htons(ETH_P_IP)) {
                work->word2.s.ip_offset = 14;
#if 0
                work->word2.s.vlan_valid = 0;   /* FIXME */
                work->word2.s.vlan_cfi = 0;     /* FIXME */
                work->word2.s.vlan_id = 0;      /* FIXME */
                work->word2.s.dec_ipcomp = 0;   /* FIXME */
#endif
                work->word2.s.tcp_or_udp =
                    (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
                    || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP);
#if 0
                /* FIXME */
                work->word2.s.dec_ipsec = 0;
                /* We only support IPv4 right now */
                work->word2.s.is_v6 = 0;
                /* Hardware would set to zero */
                work->word2.s.software = 0;
                /* No error, packet is internal */
                work->word2.s.L4_error = 0;
#endif
                work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
                                          || (ip_hdr(skb)->frag_off ==
                                              1 << 14));
#if 0
                /* Assume Linux is sending a good packet */
                work->word2.s.IP_exc = 0;
#endif
                work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
                work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
                /* This is an IP packet */
                work->word2.s.not_IP = 0;
                /* No error, packet is internal */
                work->word2.s.rcv_error = 0;
                /* No error, packet is internal */
                work->word2.s.err_code = 0;
#endif

                /*
                 * When copying the data, include 4 bytes of the
                 * ethernet header to align the same way hardware
                 * does.
                 */
                memcpy(work->packet_data, skb->data + 10,
                       sizeof(work->packet_data));
        } else {
#if 0
                work->word2.snoip.vlan_valid = 0;       /* FIXME */
                work->word2.snoip.vlan_cfi = 0; /* FIXME */
                work->word2.snoip.vlan_id = 0;  /* FIXME */
                work->word2.snoip.software = 0; /* Hardware would set to zero */
#endif
                work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
                work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
                work->word2.snoip.is_bcast =
                    (skb->pkt_type == PACKET_BROADCAST);
                work->word2.snoip.is_mcast =
                    (skb->pkt_type == PACKET_MULTICAST);
                work->word2.snoip.not_IP = 1;   /* IP was done up above */
#if 0
                /* No error, packet is internal */
                work->word2.snoip.rcv_error = 0;
                /* No error, packet is internal */
                work->word2.snoip.err_code = 0;
#endif
                memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
        }

        /* Submit the packet to the POW */
        cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
                             work->grp);
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += skb->len;
        dev_kfree_skb(skb);
        return 0;
}

/**
 * Transmit a work queue entry out of the ethernet port. Both
 * the work queue entry and the packet data can optionally be
 * freed. The work will be freed on error as well.
 *
 * @dev:     Device to transmit out.
 * @work_queue_entry:
 *                Work queue entry to send
 * @do_free: True if the work queue entry and packet data should be
 *                freed. If false, neither will be freed.
 * @qos:     Index into the queues for this port to transmit on. This
 *                is used to implement QoS if there are multiple queues per
 *                port. This parameter must be between 0 and the number of
 *                queues per port minus 1. Values outside of this range will
 *                be changed to zero.
 *
 * Returns Zero on success, negative on failure.
 */
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
                         int do_free, int qos)
{
        unsigned long flags;
        union cvmx_buf_ptr hw_buffer;
        cvmx_pko_command_word0_t pko_command;
        int dropped;
        struct octeon_ethernet *priv = netdev_priv(dev);
        cvmx_wqe_t *work = work_queue_entry;

        if (!(dev->flags & IFF_UP)) {
                DEBUGPRINT("%s: Device not up\n", dev->name);
                if (do_free)
                        cvm_oct_free_work(work);
                return -1;
        }

        /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
           remove "qos" in the event neither interface supports
           multiple queues per port */
        if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
            (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
                if (qos <= 0)
                        qos = 0;
                else if (qos >= cvmx_pko_get_num_queues(priv->port))
                        qos = 0;
        } else
                qos = 0;

        /* Start off assuming no drop */
        dropped = 0;

        local_irq_save(flags);
        cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
                                     CVMX_PKO_LOCK_CMD_QUEUE);

        /* Build the PKO buffer pointer */
        hw_buffer.u64 = 0;
        hw_buffer.s.addr = work->packet_ptr.s.addr;
        hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
        hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
        hw_buffer.s.back = work->packet_ptr.s.back;

        /* Build the PKO command */
        pko_command.u64 = 0;
        pko_command.s.n2 = 1;   /* Don't pollute L2 with the outgoing packet */
        pko_command.s.dontfree = !do_free;
        pko_command.s.segs = work->word2.s.bufs;
        pko_command.s.total_bytes = work->len;

        /* Check if we can use the hardware checksumming */
        if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc))
                pko_command.s.ipoffp1 = 0;
        else
                pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;

        /* Send the packet to the output queue */
        if (unlikely
            (cvmx_pko_send_packet_finish
             (priv->port, priv->queue + qos, pko_command, hw_buffer,
              CVMX_PKO_LOCK_CMD_QUEUE))) {
                DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
                dropped = -1;
        }
        local_irq_restore(flags);

        if (unlikely(dropped)) {
                if (do_free)
                        cvm_oct_free_work(work);
                priv->stats.tx_dropped++;
        } else if (do_free)
                cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

        return dropped;
}
EXPORT_SYMBOL(cvm_oct_transmit_qos);
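
/*
 * Example usage (illustrative only, not part of the original source):
 * a caller that already owns a work queue entry, for instance one taken
 * from the RX path, could send it back out of this device on queue 0
 * and let the driver free both the WQE and the packet data:
 *
 *	if (cvm_oct_transmit_qos(dev, work, 1, 0) < 0)
 *		printk(KERN_DEBUG "packet dropped; work already freed\n");
 */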

/**
 * This function frees all skbs that are currently queued for TX.
 *
 * @dev:    Device being shut down
 */
void cvm_oct_tx_shutdown(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        unsigned long flags;
        int qos;

        for (qos = 0; qos < 16; qos++) {
                spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
                while (skb_queue_len(&priv->tx_free_list[qos]))
                        dev_kfree_skb_any(__skb_dequeue
                                          (&priv->tx_free_list[qos]));
                spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
        }
}