linux/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

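/* Release everything held by a single ring: unmap and free each descriptor
 * data entry, release the cached Rx header/buffer pages, and free the
 * coherent descriptor memory.
 */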
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
                           struct xgbe_ring *ring)
{
        struct xgbe_ring_data *rdata;
        unsigned int i;

        if (!ring)
                return;

        if (ring->rdata) {
                for (i = 0; i < ring->rdesc_count; i++) {
                        rdata = XGBE_GET_DESC_DATA(ring, i);
                        xgbe_unmap_rdata(pdata, rdata);
                }

                kfree(ring->rdata);
                ring->rdata = NULL;
        }

        if (ring->rx_hdr_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
                               ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_hdr_pa.pages);

                ring->rx_hdr_pa.pages = NULL;
                ring->rx_hdr_pa.pages_len = 0;
                ring->rx_hdr_pa.pages_offset = 0;
                ring->rx_hdr_pa.pages_dma = 0;
        }

        if (ring->rx_buf_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
                               ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_buf_pa.pages);

                ring->rx_buf_pa.pages = NULL;
                ring->rx_buf_pa.pages_len = 0;
                ring->rx_buf_pa.pages_offset = 0;
                ring->rx_buf_pa.pages_dma = 0;
        }

        if (ring->rdesc) {
                dma_free_coherent(pdata->dev,
                                  (sizeof(struct xgbe_ring_desc) *
                                   ring->rdesc_count),
                                  ring->rdesc, ring->rdesc_dma);
                ring->rdesc = NULL;
        }
}

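/* Free the Tx and Rx descriptor rings of every DMA channel. */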
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_free_ring_resources\n");

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                xgbe_free_ring(pdata, channel->tx_ring);
                xgbe_free_ring(pdata, channel->rx_ring);
        }

        DBGPR("<--xgbe_free_ring_resources\n");
}

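/* Allocate zeroed memory on the requested NUMA node, falling back to an
 * allocation from any node if node-local memory is unavailable.
 */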
static void *xgbe_alloc_node(size_t size, int node)
{
        void *mem;

        mem = kzalloc_node(size, GFP_KERNEL, node);
        if (!mem)
                mem = kzalloc(size, GFP_KERNEL);

        return mem;
}

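/* Allocate coherent DMA memory preferring the requested NUMA node by
 * temporarily overriding the device's node, then retry with the device's
 * original node if that allocation fails.
 */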
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
                                 dma_addr_t *dma, int node)
{
        void *mem;
        int cur_node = dev_to_node(dev);

        set_dev_node(dev, node);
        mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
        set_dev_node(dev, cur_node);

        if (!mem)
                mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

        return mem;
}

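/* Allocate the hardware descriptor memory and the per-descriptor tracking
 * data for a single ring.
 */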
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
                          struct xgbe_ring *ring, unsigned int rdesc_count)
{
        size_t size;

        if (!ring)
                return 0;

        /* Descriptors */
        size = rdesc_count * sizeof(struct xgbe_ring_desc);

        ring->rdesc_count = rdesc_count;
        ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
                                          ring->node);
        if (!ring->rdesc)
                return -ENOMEM;

        /* Descriptor information */
        size = rdesc_count * sizeof(struct xgbe_ring_data);

        ring->rdata = xgbe_alloc_node(size, ring->node);
        if (!ring->rdata)
                return -ENOMEM;

        netif_dbg(pdata, drv, pdata->netdev,
                  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
                  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

        return 0;
}

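/* Allocate the Tx and Rx rings for every channel; on any failure, release
 * whatever was already allocated.
 */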
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;
        int ret;

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
                          channel->name);

                ret = xgbe_init_ring(pdata, channel->tx_ring,
                                     pdata->tx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Tx ring\n");
                        goto err_ring;
                }

                netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
                          channel->name);

                ret = xgbe_init_ring(pdata, channel->rx_ring,
                                     pdata->rx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Rx ring\n");
                        goto err_ring;
                }
        }

        return 0;

err_ring:
        xgbe_free_ring_resources(pdata);

        return ret;
}

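/* Allocate a group of pages for Rx buffers, dropping to smaller orders if
 * the requested order cannot be satisfied, and map the result for DMA.  If
 * no node-local pages can be found, retry without a node preference.
 */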
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
                            struct xgbe_page_alloc *pa, int alloc_order,
                            int node)
{
        struct page *pages = NULL;
        dma_addr_t pages_dma;
        gfp_t gfp;
        int order, ret;

again:
        order = alloc_order;

        /* Try to obtain pages, decreasing order if necessary */
        gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
        while (order >= 0) {
                pages = alloc_pages_node(node, gfp, order);
                if (pages)
                        break;

                order--;
        }

        /* If we couldn't get local pages, try getting from anywhere */
        if (!pages && (node != NUMA_NO_NODE)) {
                node = NUMA_NO_NODE;
                goto again;
        }

        if (!pages)
                return -ENOMEM;

        /* Map the pages */
        pages_dma = dma_map_page(pdata->dev, pages, 0,
                                 PAGE_SIZE << order, DMA_FROM_DEVICE);
        ret = dma_mapping_error(pdata->dev, pages_dma);
        if (ret) {
                put_page(pages);
                return ret;
        }

        pa->pages = pages;
        pa->pages_len = PAGE_SIZE << order;
        pa->pages_offset = 0;
        pa->pages_dma = pages_dma;

        return 0;
}

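/* Carve a buffer of the requested length out of the current page allocation
 * and record its DMA base, offset and length in the buffer descriptor.  When
 * the remaining space cannot hold another buffer of this size, hand ownership
 * of the pages to this descriptor for later unmapping and clear the
 * allocation so a fresh one is obtained next time.
 */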
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
                                 struct xgbe_page_alloc *pa,
                                 unsigned int len)
{
        get_page(pa->pages);
        bd->pa = *pa;

        bd->dma_base = pa->pages_dma;
        bd->dma_off = pa->pages_offset;
        bd->dma_len = len;

        pa->pages_offset += len;
        if ((pa->pages_offset + len) > pa->pages_len) {
                /* This data descriptor is responsible for unmapping page(s) */
                bd->pa_unmap = *pa;

                /* Get a new allocation next time */
                pa->pages = NULL;
                pa->pages_len = 0;
                pa->pages_offset = 0;
                pa->pages_dma = 0;
        }
}

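/* Make sure the ring has header and data page allocations available, then
 * assign a header buffer and a data buffer to one Rx descriptor entry.
 */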
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
                              struct xgbe_ring *ring,
                              struct xgbe_ring_data *rdata)
{
        int ret;

        if (!ring->rx_hdr_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
                if (ret)
                        return ret;
        }

        if (!ring->rx_buf_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
                                       PAGE_ALLOC_COSTLY_ORDER, ring->node);
                if (ret)
                        return ret;
        }

        /* Set up the header page info */
        xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
                             XGBE_SKB_ALLOC_SIZE);

        /* Set up the buffer page info */
        xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
                             pdata->rx_buf_size);

        return 0;
}

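/* Point each Tx descriptor data entry at its hardware descriptor, reset the
 * ring state and then let the hardware interface initialize the Tx ring.
 */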
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        dma_addr_t rdesc_dma;
        unsigned int i, j;

        DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                ring = channel->tx_ring;
                if (!ring)
                        break;

                rdesc = ring->rdesc;
                rdesc_dma = ring->rdesc_dma;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);

                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;

                        rdesc++;
                        rdesc_dma += sizeof(struct xgbe_ring_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;
                memset(&ring->tx, 0, sizeof(ring->tx));

                hw_if->tx_desc_init(channel);
        }

        DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

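/* Point each Rx descriptor data entry at its hardware descriptor, attach Rx
 * buffers, reset the ring state and then let the hardware interface
 * initialize the Rx ring.
 */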
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_ring_data *rdata;
        dma_addr_t rdesc_dma;
        unsigned int i, j;

        DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];
                ring = channel->rx_ring;
                if (!ring)
                        break;

                rdesc = ring->rdesc;
                rdesc_dma = ring->rdesc_dma;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);

                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;

                        if (xgbe_map_rx_buffer(pdata, ring, rdata))
                                break;

                        rdesc++;
                        rdesc_dma += sizeof(struct xgbe_ring_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;

                hw_if->rx_desc_init(channel);
        }

        DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

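/* Release everything attached to a single descriptor data entry: Tx DMA
 * mappings, the skb itself, Rx page references and any page group this
 * entry was responsible for unmapping, then clear the saved state.
 */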
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
                             struct xgbe_ring_data *rdata)
{
        if (rdata->skb_dma) {
                if (rdata->mapped_as_page) {
                        dma_unmap_page(pdata->dev, rdata->skb_dma,
                                       rdata->skb_dma_len, DMA_TO_DEVICE);
                } else {
                        dma_unmap_single(pdata->dev, rdata->skb_dma,
                                         rdata->skb_dma_len, DMA_TO_DEVICE);
                }
                rdata->skb_dma = 0;
                rdata->skb_dma_len = 0;
        }

        if (rdata->skb) {
                dev_kfree_skb_any(rdata->skb);
                rdata->skb = NULL;
        }

        if (rdata->rx.hdr.pa.pages)
                put_page(rdata->rx.hdr.pa.pages);

        if (rdata->rx.hdr.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
                               rdata->rx.hdr.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(rdata->rx.hdr.pa_unmap.pages);
        }

        if (rdata->rx.buf.pa.pages)
                put_page(rdata->rx.buf.pa.pages);

        if (rdata->rx.buf.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
                               rdata->rx.buf.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(rdata->rx.buf.pa_unmap.pages);
        }

        memset(&rdata->tx, 0, sizeof(rdata->tx));
        memset(&rdata->rx, 0, sizeof(rdata->rx));

        rdata->mapped_as_page = 0;

        if (rdata->state_saved) {
                rdata->state_saved = 0;
                rdata->state.skb = NULL;
                rdata->state.len = 0;
                rdata->state.error = 0;
        }
}

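/* Map an skb for transmission.  Reserve space for a context descriptor when
 * the TSO MSS or VLAN tag changes, map the TSO header, the linear data and
 * every fragment, record the skb on the last used entry and return the
 * number of descriptors consumed (0 after unwinding on a mapping failure).
 */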
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
        struct skb_frag_struct *frag;
        dma_addr_t skb_dma;
        unsigned int start_index, cur_index;
        unsigned int offset, tso, vlan, datalen, len;
        unsigned int i;

        DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

        offset = 0;
        start_index = ring->cur;
        cur_index = ring->cur;

        packet = &ring->packet_data;
        packet->rdesc_count = 0;
        packet->length = 0;

        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);

        /* Save space for a context descriptor if needed */
        if ((tso && (packet->mss != ring->tx.cur_mss)) ||
            (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
                cur_index++;
        rdata = XGBE_GET_DESC_DATA(ring, cur_index);

        if (tso) {
                /* Map the TSO header */
                skb_dma = dma_map_single(pdata->dev, skb->data,
                                         packet->header_len, DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = packet->header_len;
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "skb header: index=%u, dma=%pad, len=%u\n",
                          cur_index, &skb_dma, packet->header_len);

                offset = packet->header_len;

                packet->length += packet->header_len;

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }

        /* Map the (remainder of the) packet */
        for (datalen = skb_headlen(skb) - offset; datalen; ) {
                len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

                skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
                                         DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = len;
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "skb data: index=%u, dma=%pad, len=%u\n",
                          cur_index, &skb_dma, len);

                datalen -= len;
                offset += len;

                packet->length += len;

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "mapping frag %u\n", i);

                frag = &skb_shinfo(skb)->frags[i];
                offset = 0;

                for (datalen = skb_frag_size(frag); datalen; ) {
                        len = min_t(unsigned int, datalen,
                                    XGBE_TX_MAX_BUF_SIZE);

                        skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
                                                   len, DMA_TO_DEVICE);
                        if (dma_mapping_error(pdata->dev, skb_dma)) {
                                netdev_alert(pdata->netdev,
                                             "skb_frag_dma_map failed\n");
                                goto err_out;
                        }
                        rdata->skb_dma = skb_dma;
                        rdata->skb_dma_len = len;
                        rdata->mapped_as_page = 1;
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "skb frag: index=%u, dma=%pad, len=%u\n",
                                  cur_index, &skb_dma, len);

                        datalen -= len;
                        offset += len;

                        packet->length += len;

                        cur_index++;
                        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                }
        }

        /* Save the skb address in the last entry. We always have some data
         * that has been mapped so rdata is always advanced past the last
         * piece of mapped data - use the entry pointed to by cur_index - 1.
         */
        rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
        rdata->skb = skb;

        /* Save the number of descriptor entries used */
        packet->rdesc_count = cur_index - start_index;

        DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

        return packet->rdesc_count;

err_out:
        while (start_index < cur_index) {
                rdata = XGBE_GET_DESC_DATA(ring, start_index++);
                xgbe_unmap_rdata(pdata, rdata);
        }

        DBGPR("<--xgbe_map_tx_skb: count=0\n");

        return 0;
}

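/* Populate the descriptor interface with the functions defined above. */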
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
        DBGPR("-->xgbe_init_function_ptrs_desc\n");

        desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
        desc_if->free_ring_resources = xgbe_free_ring_resources;
        desc_if->map_tx_skb = xgbe_map_tx_skb;
        desc_if->map_rx_buffer = xgbe_map_rx_buffer;
        desc_if->unmap_rdata = xgbe_unmap_rdata;
        desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
        desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

        DBGPR("<--xgbe_init_function_ptrs_desc\n");
}