linux/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

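/* Release everything associated with a single descriptor ring: the
 * per-descriptor data (unmapping any DMA buffers), the ring's shared Rx
 * page allocations and the coherent descriptor memory itself.
 */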
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
                           struct xgbe_ring *ring)
{
        struct xgbe_ring_data *rdata;
        unsigned int i;

        if (!ring)
                return;

        if (ring->rdata) {
                for (i = 0; i < ring->rdesc_count; i++) {
                        rdata = XGBE_GET_DESC_DATA(ring, i);
                        xgbe_unmap_rdata(pdata, rdata);
                }

                kfree(ring->rdata);
                ring->rdata = NULL;
        }

        if (ring->rx_hdr_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
                               ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_hdr_pa.pages);

                ring->rx_hdr_pa.pages = NULL;
                ring->rx_hdr_pa.pages_len = 0;
                ring->rx_hdr_pa.pages_offset = 0;
                ring->rx_hdr_pa.pages_dma = 0;
        }

        if (ring->rx_buf_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
                               ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_buf_pa.pages);

                ring->rx_buf_pa.pages = NULL;
                ring->rx_buf_pa.pages_len = 0;
                ring->rx_buf_pa.pages_offset = 0;
                ring->rx_buf_pa.pages_dma = 0;
        }

        if (ring->rdesc) {
                dma_free_coherent(pdata->dev,
                                  (sizeof(struct xgbe_ring_desc) *
                                   ring->rdesc_count),
                                  ring->rdesc, ring->rdesc_dma);
                ring->rdesc = NULL;
        }
}

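/* Free the Tx and Rx descriptor rings of every channel */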
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_free_ring_resources\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                xgbe_free_ring(pdata, channel->tx_ring);
                xgbe_free_ring(pdata, channel->rx_ring);
        }

        DBGPR("<--xgbe_free_ring_resources\n");
}

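/* Allocate the coherent descriptor memory and the driver-private
 * xgbe_ring_data array for a single ring.  A partial allocation is
 * cleaned up by the caller via xgbe_free_ring_resources().
 */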
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
                          struct xgbe_ring *ring, unsigned int rdesc_count)
{
        DBGPR("-->xgbe_init_ring\n");

        if (!ring)
                return 0;

        /* Descriptors */
        ring->rdesc_count = rdesc_count;
        ring->rdesc = dma_alloc_coherent(pdata->dev,
                                         (sizeof(struct xgbe_ring_desc) *
                                          rdesc_count), &ring->rdesc_dma,
                                         GFP_KERNEL);
        if (!ring->rdesc)
                return -ENOMEM;

        /* Descriptor information */
        ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
                              GFP_KERNEL);
        if (!ring->rdata)
                return -ENOMEM;

        DBGPR("    rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
              ring->rdesc, ring->rdesc_dma, ring->rdata);

        DBGPR("<--xgbe_init_ring\n");

        return 0;
}

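/* Allocate the Tx and Rx descriptor rings for every channel */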
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;
        int ret;

        DBGPR("-->xgbe_alloc_ring_resources\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                DBGPR("  %s - tx_ring:\n", channel->name);
                ret = xgbe_init_ring(pdata, channel->tx_ring,
                                     pdata->tx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Tx ring\n");
                        goto err_ring;
                }

                DBGPR("  %s - rx_ring:\n", channel->name);
                ret = xgbe_init_ring(pdata, channel->rx_ring,
                                     pdata->rx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Rx ring\n");
                        goto err_ring;
                }
        }

        DBGPR("<--xgbe_alloc_ring_resources\n");

        return 0;

err_ring:
        xgbe_free_ring_resources(pdata);

        return ret;
}

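/* Allocate a (possibly compound) page and map it for Rx DMA, falling back
 * to smaller orders when a higher-order allocation cannot be satisfied.
 */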
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
                            struct xgbe_page_alloc *pa, gfp_t gfp, int order)
{
        struct page *pages = NULL;
        dma_addr_t pages_dma;
        int ret;

        /* Try to obtain pages, decreasing order if necessary */
        gfp |= __GFP_COLD | __GFP_COMP;
        while (order >= 0) {
                pages = alloc_pages(gfp, order);
                if (pages)
                        break;

                order--;
        }
        if (!pages)
                return -ENOMEM;

        /* Map the pages */
        pages_dma = dma_map_page(pdata->dev, pages, 0,
                                 PAGE_SIZE << order, DMA_FROM_DEVICE);
        ret = dma_mapping_error(pdata->dev, pages_dma);
        if (ret) {
                put_page(pages);
                return ret;
        }

        pa->pages = pages;
        pa->pages_len = PAGE_SIZE << order;
        pa->pages_offset = 0;
        pa->pages_dma = pages_dma;

        return 0;
}

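/* Carve a buffer of 'len' bytes out of the current page allocation and
 * record it in the buffer data.  When the page cannot provide another
 * buffer of this size, this descriptor takes ownership of the final
 * unmap and the page allocation is reset for reallocation.
 */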
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
                                 struct xgbe_page_alloc *pa,
                                 unsigned int len)
{
        get_page(pa->pages);
        bd->pa = *pa;

        bd->dma = pa->pages_dma + pa->pages_offset;
        bd->dma_len = len;

        pa->pages_offset += len;
        if ((pa->pages_offset + len) > pa->pages_len) {
                /* This data descriptor is responsible for unmapping page(s) */
                bd->pa_unmap = *pa;

                /* Get a new allocation next time */
                pa->pages = NULL;
                pa->pages_len = 0;
                pa->pages_offset = 0;
                pa->pages_dma = 0;
        }
}

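/* Assign header and data buffers to an Rx descriptor, replenishing the
 * ring's shared header and buffer page allocations as needed.
 */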
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
                              struct xgbe_ring *ring,
                              struct xgbe_ring_data *rdata)
{
        int order, ret;

        if (!ring->rx_hdr_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
                if (ret)
                        return ret;
        }

        if (!ring->rx_buf_pa.pages) {
                order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
                ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
                                       order);
                if (ret)
                        return ret;
        }

        /* Set up the header page info */
        xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
                             XGBE_SKB_ALLOC_SIZE);

        /* Set up the buffer page info */
        xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
                             pdata->rx_buf_size);

        return 0;
}

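/* Distribute the coherent Tx descriptor memory across the ring data
 * entries and let the hardware layer initialize the Tx descriptors.
 */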
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        dma_addr_t rdesc_dma;
        unsigned int i, j;

        DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->tx_ring;
                if (!ring)
                        break;

                rdesc = ring->rdesc;
                rdesc_dma = ring->rdesc_dma;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);

                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;

                        rdesc++;
                        rdesc_dma += sizeof(struct xgbe_ring_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;
                memset(&ring->tx, 0, sizeof(ring->tx));

                hw_if->tx_desc_init(channel);
        }

        DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

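/* Distribute the coherent Rx descriptor memory across the ring data
 * entries, attach Rx buffers to each descriptor and let the hardware
 * layer initialize the Rx descriptors.
 */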
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_ring_data *rdata;
        dma_addr_t rdesc_dma;
        unsigned int i, j;

        DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->rx_ring;
                if (!ring)
                        break;

                rdesc = ring->rdesc;
                rdesc_dma = ring->rdesc_dma;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);

                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;

                        if (xgbe_map_rx_buffer(pdata, ring, rdata))
                                break;

                        rdesc++;
                        rdesc_dma += sizeof(struct xgbe_ring_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;
                memset(&ring->rx, 0, sizeof(ring->rx));

                hw_if->rx_desc_init(channel);
        }

        DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

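/* Unmap and release everything referenced by a single descriptor data
 * entry (Tx skb mappings, the skb itself, Rx page references) and reset
 * its bookkeeping fields so the entry can be reused.
 */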
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
                             struct xgbe_ring_data *rdata)
{
        if (rdata->skb_dma) {
                if (rdata->mapped_as_page) {
                        dma_unmap_page(pdata->dev, rdata->skb_dma,
                                       rdata->skb_dma_len, DMA_TO_DEVICE);
                } else {
                        dma_unmap_single(pdata->dev, rdata->skb_dma,
                                         rdata->skb_dma_len, DMA_TO_DEVICE);
                }
                rdata->skb_dma = 0;
                rdata->skb_dma_len = 0;
        }

        if (rdata->skb) {
                dev_kfree_skb_any(rdata->skb);
                rdata->skb = NULL;
        }

        if (rdata->rx.hdr.pa.pages)
                put_page(rdata->rx.hdr.pa.pages);

        if (rdata->rx.hdr.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
                               rdata->rx.hdr.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(rdata->rx.hdr.pa_unmap.pages);
        }

        if (rdata->rx.buf.pa.pages)
                put_page(rdata->rx.buf.pa.pages);

        if (rdata->rx.buf.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
                               rdata->rx.buf.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(rdata->rx.buf.pa_unmap.pages);
        }

        memset(&rdata->tx, 0, sizeof(rdata->tx));
        memset(&rdata->rx, 0, sizeof(rdata->rx));

        rdata->mapped_as_page = 0;

        if (rdata->state_saved) {
                rdata->state_saved = 0;
                rdata->state.incomplete = 0;
                rdata->state.context_next = 0;
                rdata->state.skb = NULL;
                rdata->state.len = 0;
                rdata->state.error = 0;
        }
}

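/* Map an skb for transmission: the TSO header (if any), the linear data
 * and every fragment, splitting pieces larger than XGBE_TX_MAX_BUF_SIZE
 * across multiple descriptors.  Returns the number of descriptor entries
 * used, or 0 after undoing any partial mappings on a mapping failure.
 */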
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
        struct skb_frag_struct *frag;
        dma_addr_t skb_dma;
        unsigned int start_index, cur_index;
        unsigned int offset, tso, vlan, datalen, len;
        unsigned int i;

        DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

        offset = 0;
        start_index = ring->cur;
        cur_index = ring->cur;

        packet = &ring->packet_data;
        packet->rdesc_count = 0;
        packet->length = 0;

        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);

        /* Save space for a context descriptor if needed */
        if ((tso && (packet->mss != ring->tx.cur_mss)) ||
            (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
                cur_index++;
        rdata = XGBE_GET_DESC_DATA(ring, cur_index);

        if (tso) {
                DBGPR("  TSO packet\n");

                /* Map the TSO header */
                skb_dma = dma_map_single(pdata->dev, skb->data,
                                         packet->header_len, DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = packet->header_len;

                offset = packet->header_len;

                packet->length += packet->header_len;

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }

        /* Map the (remainder of the) packet */
        for (datalen = skb_headlen(skb) - offset; datalen; ) {
                len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

                skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
                                         DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = len;
                DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
                      cur_index, skb_dma, len);

                datalen -= len;
                offset += len;

                packet->length += len;

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                DBGPR("  mapping frag %u\n", i);

                frag = &skb_shinfo(skb)->frags[i];
                offset = 0;

                for (datalen = skb_frag_size(frag); datalen; ) {
                        len = min_t(unsigned int, datalen,
                                    XGBE_TX_MAX_BUF_SIZE);

                        skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
                                                   len, DMA_TO_DEVICE);
                        if (dma_mapping_error(pdata->dev, skb_dma)) {
                                netdev_alert(pdata->netdev,
                                             "skb_frag_dma_map failed\n");
                                goto err_out;
                        }
                        rdata->skb_dma = skb_dma;
                        rdata->skb_dma_len = len;
                        rdata->mapped_as_page = 1;
                        DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
                              cur_index, skb_dma, len);

                        datalen -= len;
                        offset += len;

                        packet->length += len;

                        cur_index++;
                        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                }
        }

        /* Save the skb address in the last entry. We always have some data
         * that has been mapped so rdata is always advanced past the last
         * piece of mapped data - use the entry pointed to by cur_index - 1.
         */
        rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
        rdata->skb = skb;

        /* Save the number of descriptor entries used */
        packet->rdesc_count = cur_index - start_index;

        DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

        return packet->rdesc_count;

err_out:
        while (start_index < cur_index) {
                rdata = XGBE_GET_DESC_DATA(ring, start_index++);
                xgbe_unmap_rdata(pdata, rdata);
        }

        DBGPR("<--xgbe_map_tx_skb: count=0\n");

        return 0;
}

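/* Re-arm the Rx descriptors that have been processed ('dirty' counts
 * them) by attaching fresh buffers and resetting the hardware
 * descriptors, starting at the ring's realloc index.
 */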
static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        int i;

        DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
              ring->rx.realloc_index);

        for (i = 0; i < ring->dirty; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);

                /* Reset rdata values */
                xgbe_unmap_rdata(pdata, rdata);

                if (xgbe_map_rx_buffer(pdata, ring, rdata))
                        break;

                hw_if->rx_desc_reset(rdata);

                ring->rx.realloc_index++;
        }
        ring->dirty = 0;

        DBGPR("<--xgbe_realloc_rx_buffer\n");
}

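/* Populate the descriptor interface used by the rest of the driver */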
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
        DBGPR("-->xgbe_init_function_ptrs_desc\n");

        desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
        desc_if->free_ring_resources = xgbe_free_ring_resources;
        desc_if->map_tx_skb = xgbe_map_tx_skb;
        desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
        desc_if->unmap_rdata = xgbe_unmap_rdata;
        desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
        desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

        DBGPR("<--xgbe_init_function_ptrs_desc\n");
}