/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_rx.c - Routines used to perform data reception
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"

#include "et1310_rx.h"


void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);

/**
 * et131x_rx_dma_memory_alloc - allocate the receive DMA memory
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 *
 * Allocates Free Buffer Ring 1 (always), Free Buffer Ring 0 (if required),
 * and the Packet Status Ring.
 */
int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
        uint32_t OuterLoop, InnerLoop;
        uint32_t bufsize;
        uint32_t pktStatRingSize, FBRChunkSize;
        RX_RING_t *rx_ring;

        /* Setup some convenience pointers */
        rx_ring = (RX_RING_t *) &adapter->RxRing;

        /* Alloc memory for the lookup table */
#ifdef USE_FBR0
        rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
#endif

        rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);

        /* The first thing we will do is configure the sizes of the buffer
         * rings. These will change based on jumbo packet support.  Larger
         * jumbo packets increase the size of each entry in FBR0, and the
         * number of entries in FBR0, while at the same time decreasing the
         * number of entries in FBR1.
         *
         * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
         * entries are huge in order to accommodate a "jumbo" frame, then it
         * will have fewer entries.  Conversely, FBR0 will then be relied
         * upon to carry more "normal" frames, so its entry size also
         * increases and its number of entries goes up too (since it now
         * carries "small" + "regular" packets).
         *
         * In this scheme, we try to maintain 512 entries between the two
         * rings. Also, FBR1's overall footprint stays roughly constant -
         * when its entry size doubles, its number of entries halves.  FBR0,
         * however, grows in both entry size and entry count.
         */

        if (adapter->RegistryJumboPacket < 2048) {
#ifdef USE_FBR0
                rx_ring->Fbr0BufferSize = 256;
                rx_ring->Fbr0NumEntries = 512;
#endif
                rx_ring->Fbr1BufferSize = 2048;
                rx_ring->Fbr1NumEntries = 512;
        } else if (adapter->RegistryJumboPacket < 4096) {
#ifdef USE_FBR0
                rx_ring->Fbr0BufferSize = 512;
                rx_ring->Fbr0NumEntries = 1024;
#endif
                rx_ring->Fbr1BufferSize = 4096;
                rx_ring->Fbr1NumEntries = 512;
        } else {
#ifdef USE_FBR0
                rx_ring->Fbr0BufferSize = 1024;
                rx_ring->Fbr0NumEntries = 768;
#endif
                rx_ring->Fbr1BufferSize = 16384;
                rx_ring->Fbr1NumEntries = 128;
        }
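
        /* Illustrative summary of the configurations chosen above:
         *   jumbo < 2048:  FBR0  256 B x 512,  FBR1  2048 B x 512
         *   jumbo < 4096:  FBR0  512 B x 1024, FBR1  4096 B x 512
         *   otherwise:     FBR0 1024 B x 768,  FBR1 16384 B x 128
         * (FBR0 values apply only when USE_FBR0 is defined.)
         */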

#ifdef USE_FBR0
        adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
            adapter->RxRing.Fbr1NumEntries;
#else
        adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
#endif

        /* Allocate an area of memory for Free Buffer Ring 1 */
        bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;
        rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
                                                    bufsize,
                                                    &rx_ring->pFbr1RingPa);
        if (!rx_ring->pFbr1RingVa) {
                dev_err(&adapter->pdev->dev,
                          "Cannot alloc memory for Free Buffer Ring 1\n");
                return -ENOMEM;
        }

        /* Save physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here
         * before storing the adjusted address.
         */
        rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;

        /* Align Free Buffer Ring 1 on a 4K boundary */
        et131x_align_allocated_memory(adapter,
                                      &rx_ring->Fbr1Realpa,
                                      &rx_ring->Fbr1offset, 0x0FFF);

        rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
                                        rx_ring->Fbr1offset);
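
        /* Illustrative example of the alignment fixup above (hypothetical
         * addresses): if pci_alloc_consistent() returned physical address
         * 0x12345678, et131x_align_allocated_memory() rounds Fbr1Realpa up
         * to the next 4K boundary (0x12346000) and reports the difference
         * (0x988) in Fbr1offset; the same offset is then added to the
         * virtual address, so VA and PA keep referring to the same byte of
         * the (oversized) allocation.
         */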

#ifdef USE_FBR0
        /* Allocate an area of memory for Free Buffer Ring 0 */
        bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;
        rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
                                                    bufsize,
                                                    &rx_ring->pFbr0RingPa);
        if (!rx_ring->pFbr0RingVa) {
                dev_err(&adapter->pdev->dev,
                          "Cannot alloc memory for Free Buffer Ring 0\n");
                return -ENOMEM;
        }

        /* Save physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;

        /* Align Free Buffer Ring 0 on a 4K boundary */
        et131x_align_allocated_memory(adapter,
                                      &rx_ring->Fbr0Realpa,
                                      &rx_ring->Fbr0offset, 0x0FFF);

        rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
                                        rx_ring->Fbr0offset);
#endif

        for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
             OuterLoop++) {
                uint64_t Fbr1Offset;
                uint64_t Fbr1TempPa;
                uint32_t Fbr1Align;

                /* This code allocates an area of memory big enough for N
                 * free buffers + (buffer_size - 1) so that the buffers can
                 * be aligned on their required boundaries (4K at most).  If
                 * each buffer were aligned individually to a buffer_size
                 * boundary, the effect would be to double the size of FBR1.
                 * By allocating N buffers at once, we reduce this overhead.
                 */
                if (rx_ring->Fbr1BufferSize > 4096)
                        Fbr1Align = 4096;
                else
                        Fbr1Align = rx_ring->Fbr1BufferSize;

                FBRChunkSize =
                    (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
                rx_ring->Fbr1MemVa[OuterLoop] =
                    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
                                         &rx_ring->Fbr1MemPa[OuterLoop]);

                if (!rx_ring->Fbr1MemVa[OuterLoop]) {
                        dev_err(&adapter->pdev->dev,
                                "Could not alloc memory\n");
                        return -ENOMEM;
                }

                /* See NOTE in "Save Physical Address" comment above */
                Fbr1TempPa = rx_ring->Fbr1MemPa[OuterLoop];

                et131x_align_allocated_memory(adapter,
                                              &Fbr1TempPa,
                                              &Fbr1Offset, (Fbr1Align - 1));

                for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
                        uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;

                        /* Save the Virtual address of this index for quick
                         * access later
                         */
                        rx_ring->Fbr[1]->Va[index] =
                            (uint8_t *) rx_ring->Fbr1MemVa[OuterLoop] +
                            (InnerLoop * rx_ring->Fbr1BufferSize) + Fbr1Offset;

                        /* now store the physical address in the descriptor
                         * so the device can access it
                         */
                        rx_ring->Fbr[1]->PAHigh[index] =
                            (uint32_t) (Fbr1TempPa >> 32);
                        rx_ring->Fbr[1]->PALow[index] = (uint32_t) Fbr1TempPa;

                        Fbr1TempPa += rx_ring->Fbr1BufferSize;

                        rx_ring->Fbr[1]->Buffer1[index] =
                            rx_ring->Fbr[1]->Va[index];
                        rx_ring->Fbr[1]->Buffer2[index] =
                            rx_ring->Fbr[1]->Va[index] - 4;
                }
        }
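
        /* A rough illustration of the saving described above (example
         * numbers): with a 2048-byte FBR1 entry, aligning each buffer
         * individually could waste up to 2047 bytes per buffer, whereas one
         * chunk of FBR_CHUNKS buffers pays that worst-case 2047-byte
         * alignment penalty only once per chunk.
         */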

#ifdef USE_FBR0
        /* Same for FBR0 (if in use) */
        for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
             OuterLoop++) {
                uint64_t Fbr0Offset;
                uint64_t Fbr0TempPa;

                FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
                rx_ring->Fbr0MemVa[OuterLoop] =
                    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
                                         &rx_ring->Fbr0MemPa[OuterLoop]);

                if (!rx_ring->Fbr0MemVa[OuterLoop]) {
                        dev_err(&adapter->pdev->dev,
                                "Could not alloc memory\n");
                        return -ENOMEM;
                }

                /* See NOTE in "Save Physical Address" comment above */
                Fbr0TempPa = rx_ring->Fbr0MemPa[OuterLoop];

                et131x_align_allocated_memory(adapter,
                                              &Fbr0TempPa,
                                              &Fbr0Offset,
                                              rx_ring->Fbr0BufferSize - 1);

                for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
                        uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;

                        rx_ring->Fbr[0]->Va[index] =
                            (uint8_t *) rx_ring->Fbr0MemVa[OuterLoop] +
                            (InnerLoop * rx_ring->Fbr0BufferSize) + Fbr0Offset;

                        rx_ring->Fbr[0]->PAHigh[index] =
                            (uint32_t) (Fbr0TempPa >> 32);
                        rx_ring->Fbr[0]->PALow[index] = (uint32_t) Fbr0TempPa;

                        Fbr0TempPa += rx_ring->Fbr0BufferSize;

                        rx_ring->Fbr[0]->Buffer1[index] =
                            rx_ring->Fbr[0]->Va[index];
                        rx_ring->Fbr[0]->Buffer2[index] =
                            rx_ring->Fbr[0]->Va[index] - 4;
                }
        }
#endif

        /* Allocate an area of memory for FIFO of Packet Status ring entries */
        pktStatRingSize =
            sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

        rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
                                                  pktStatRingSize + 0x0fff,
                                                  &rx_ring->pPSRingPa);

        if (!rx_ring->pPSRingVa) {
                dev_err(&adapter->pdev->dev,
                          "Cannot alloc memory for Packet Status Ring\n");
                return -ENOMEM;
        }

        /* Save physical address
         *
         * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        rx_ring->pPSRingRealPa = rx_ring->pPSRingPa;

        /* Align Packet Status Ring on a 4K boundary */
        et131x_align_allocated_memory(adapter,
                                      &rx_ring->pPSRingRealPa,
                                      &rx_ring->pPSRingOffset, 0x0FFF);

        rx_ring->pPSRingVa = (void *)((uint8_t *) rx_ring->pPSRingVa +
                                      rx_ring->pPSRingOffset);

        /* Allocate an area of memory for writeback of status information */
        rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
                                                    sizeof(RX_STATUS_BLOCK_t) +
                                                    0x7, &rx_ring->pRxStatusPa);
        if (!rx_ring->pRxStatusVa) {
                dev_err(&adapter->pdev->dev,
                          "Cannot alloc memory for Status Block\n");
                return -ENOMEM;
        }

        /* Save physical address */
        rx_ring->RxStatusRealPA = rx_ring->pRxStatusPa;

        /* Align write back on an 8 byte boundary */
        et131x_align_allocated_memory(adapter,
                                      &rx_ring->RxStatusRealPA,
                                      &rx_ring->RxStatusOffset, 0x07);

        rx_ring->pRxStatusVa = (void *)((uint8_t *) rx_ring->pRxStatusVa +
                                        rx_ring->RxStatusOffset);
        rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;

        /* Recv lookaside list:
         * kmem_cache_create() sets up a slab cache from which fixed-size
         * blocks can be allocated and freed efficiently.
         * RFDs will be allocated from this pool.
         */
        rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
                                                   sizeof(MP_RFD),
                                                   0,
                                                   SLAB_CACHE_DMA |
                                                   SLAB_HWCACHE_ALIGN,
                                                   NULL);
        if (!rx_ring->RecvLookaside) {
                dev_err(&adapter->pdev->dev,
                          "Cannot alloc memory for RFD lookaside list\n");
                return -ENOMEM;
        }

        adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;

        /* The RFDs are going to be put on lists later on, so initialize the
         * lists now.
         */
        INIT_LIST_HEAD(&rx_ring->RecvList);
        INIT_LIST_HEAD(&rx_ring->RecvPendingList);
        return 0;
}

/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
        uint32_t index;
        uint32_t bufsize;
        uint32_t pktStatRingSize;
        PMP_RFD pMpRfd;
        RX_RING_t *rx_ring;

        /* Setup some convenience pointers */
        rx_ring = (RX_RING_t *) &adapter->RxRing;

        /* Free RFDs and associated packet descriptors */
        WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

        while (!list_empty(&rx_ring->RecvList)) {
                pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
                                               MP_RFD, list_node);

                list_del(&pMpRfd->list_node);
                et131x_rfd_resources_free(adapter, pMpRfd);
        }

        while (!list_empty(&rx_ring->RecvPendingList)) {
                pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvPendingList.next,
                                               MP_RFD, list_node);
                list_del(&pMpRfd->list_node);
                et131x_rfd_resources_free(adapter, pMpRfd);
        }

        /* Free Free Buffer Ring 1 */
        if (rx_ring->pFbr1RingVa) {
                /* First the packet memory */
                for (index = 0; index <
                     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
                        if (rx_ring->Fbr1MemVa[index]) {
                                uint32_t Fbr1Align;

                                if (rx_ring->Fbr1BufferSize > 4096)
                                        Fbr1Align = 4096;
                                else
                                        Fbr1Align = rx_ring->Fbr1BufferSize;

                                bufsize =
                                    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
                                    Fbr1Align - 1;

                                pci_free_consistent(adapter->pdev,
                                                    bufsize,
                                                    rx_ring->Fbr1MemVa[index],
                                                    rx_ring->Fbr1MemPa[index]);

                                rx_ring->Fbr1MemVa[index] = NULL;
                        }
                }

                /* Now the FIFO itself */
                rx_ring->pFbr1RingVa = (void *)((uint8_t *)
                                rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

                bufsize =
                    (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;

                pci_free_consistent(adapter->pdev,
                                    bufsize,
                                    rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

                rx_ring->pFbr1RingVa = NULL;
        }

#ifdef USE_FBR0
        /* Now the same for Free Buffer Ring 0 */
        if (rx_ring->pFbr0RingVa) {
                /* First the packet memory */
                for (index = 0; index <
                     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
                        if (rx_ring->Fbr0MemVa[index]) {
                                bufsize =
                                    (rx_ring->Fbr0BufferSize *
                                     (FBR_CHUNKS + 1)) - 1;

                                pci_free_consistent(adapter->pdev,
                                                    bufsize,
                                                    rx_ring->Fbr0MemVa[index],
                                                    rx_ring->Fbr0MemPa[index]);

                                rx_ring->Fbr0MemVa[index] = NULL;
                        }
                }

                /* Now the FIFO itself */
                rx_ring->pFbr0RingVa = (void *)((uint8_t *)
                                rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

                bufsize =
                    (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;

                pci_free_consistent(adapter->pdev,
                                    bufsize,
                                    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

                rx_ring->pFbr0RingVa = NULL;
        }
#endif

        /* Free Packet Status Ring */
        if (rx_ring->pPSRingVa) {
                rx_ring->pPSRingVa = (void *)((uint8_t *) rx_ring->pPSRingVa -
                                              rx_ring->pPSRingOffset);

                pktStatRingSize =
                    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

                pci_free_consistent(adapter->pdev,
                                    pktStatRingSize + 0x0fff,
                                    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

                rx_ring->pPSRingVa = NULL;
        }

        /* Free area of memory for the writeback of status information */
        if (rx_ring->pRxStatusVa) {
                rx_ring->pRxStatusVa = (void *)((uint8_t *)
                                rx_ring->pRxStatusVa - rx_ring->RxStatusOffset);

                pci_free_consistent(adapter->pdev,
                                sizeof(RX_STATUS_BLOCK_t) + 0x7,
                                rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);

                rx_ring->pRxStatusVa = NULL;
        }

        /* Free receive buffer pool */

        /* Free receive packet pool */

        /* Destroy the lookaside (RFD) pool */
        if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
                kmem_cache_destroy(rx_ring->RecvLookaside);
                adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
        }

        /* Free the FBR Lookup Table */
#ifdef USE_FBR0
        kfree(rx_ring->Fbr[0]);
#endif

        kfree(rx_ring->Fbr[1]);

        /* Reset Counters */
        rx_ring->nReadyRecv = 0;
}

/**
 * et131x_init_recv - Initialize receive data structures.
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 */
int et131x_init_recv(struct et131x_adapter *adapter)
{
        int status = -ENOMEM;
        PMP_RFD pMpRfd = NULL;
        uint32_t RfdCount;
        uint32_t TotalNumRfd = 0;
        RX_RING_t *rx_ring = NULL;

        /* Setup some convenience pointers */
        rx_ring = (RX_RING_t *) &adapter->RxRing;

        /* Setup each RFD */
        for (RfdCount = 0; RfdCount < rx_ring->NumRfd; RfdCount++) {
                pMpRfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
                                                     GFP_ATOMIC | GFP_DMA);

                if (!pMpRfd) {
                        dev_err(&adapter->pdev->dev,
                                  "Couldn't alloc RFD out of kmem_cache\n");
                        status = -ENOMEM;
                        continue;
                }

                status = et131x_rfd_resources_alloc(adapter, pMpRfd);
                if (status != 0) {
                        dev_err(&adapter->pdev->dev,
                                  "Couldn't alloc packet for RFD\n");
                        kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
                        continue;
                }

                /* Add this RFD to the RecvList */
                list_add_tail(&pMpRfd->list_node, &rx_ring->RecvList);

                /* Increment both the available RFD's, and the total RFD's. */
                rx_ring->nReadyRecv++;
                TotalNumRfd++;
        }

        if (TotalNumRfd > NIC_MIN_NUM_RFD)
                status = 0;

        rx_ring->NumRfd = TotalNumRfd;

        if (status != 0) {
                kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
                dev_err(&adapter->pdev->dev,
                          "Allocation problems in et131x_init_recv\n");
        }
        return status;
}

/**
 * et131x_rfd_resources_alloc - Allocate packet resources for the given RFD
 * @adapter: pointer to our private adapter structure
 * @pMpRfd: pointer to a RFD
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 */
int et131x_rfd_resources_alloc(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
{
        pMpRfd->Packet = NULL;

        return 0;
}

/**
 * et131x_rfd_resources_free - Free the packet allocated for the given RFD
 * @adapter: pointer to our private adapter structure
 * @pMpRfd: pointer to a RFD
 */
void et131x_rfd_resources_free(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
{
        pMpRfd->Packet = NULL;
        kmem_cache_free(adapter->RxRing.RecvLookaside, pMpRfd);
}

/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @etdev: pointer to our adapter structure
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
        struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
        struct _rx_ring_t *pRxLocal = &etdev->RxRing;
        PFBR_DESC_t fbr_entry;
        uint32_t entry;
        RXDMA_PSR_NUM_DES_t psr_num_des;
        unsigned long flags;

        /* Halt RXDMA to perform the reconfigure.  */
        et131x_rx_dma_disable(etdev);

        /* Load the completion writeback physical address
         *
         * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here
         * before storing the adjusted address.
         */
        writel((uint32_t) (pRxLocal->RxStatusRealPA >> 32),
               &rx_dma->dma_wb_base_hi);
        writel((uint32_t) pRxLocal->RxStatusRealPA, &rx_dma->dma_wb_base_lo);

        memset(pRxLocal->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));

        /* Set the address and parameters of the packet status ring into the
         * 1310's registers
         */
        writel((uint32_t) (pRxLocal->pPSRingRealPa >> 32),
               &rx_dma->psr_base_hi);
        writel((uint32_t) pRxLocal->pPSRingRealPa, &rx_dma->psr_base_lo);
        writel(pRxLocal->PsrNumEntries - 1, &rx_dma->psr_num_des.value);
        writel(0, &rx_dma->psr_full_offset.value);

        psr_num_des.value = readl(&rx_dma->psr_num_des.value);
        writel((psr_num_des.bits.psr_ndes * LO_MARK_PERCENT_FOR_PSR) / 100,
               &rx_dma->psr_min_des.value);

        spin_lock_irqsave(&etdev->RcvLock, flags);

        /* These local variables track the PSR in the adapter structure */
        pRxLocal->local_psr_full.bits.psr_full = 0;
        pRxLocal->local_psr_full.bits.psr_full_wrap = 0;

        /* Now's the best time to initialize FBR1 contents */
        fbr_entry = (PFBR_DESC_t) pRxLocal->pFbr1RingVa;
        for (entry = 0; entry < pRxLocal->Fbr1NumEntries; entry++) {
                fbr_entry->addr_hi = pRxLocal->Fbr[1]->PAHigh[entry];
                fbr_entry->addr_lo = pRxLocal->Fbr[1]->PALow[entry];
                fbr_entry->word2.bits.bi = entry;
                fbr_entry++;
        }

        /* Set the address and parameters of Free buffer ring 1 (and 0 if
         * required) into the 1310's registers
         */
        writel((uint32_t) (pRxLocal->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
        writel((uint32_t) pRxLocal->Fbr1Realpa, &rx_dma->fbr1_base_lo);
        writel(pRxLocal->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des.value);
        writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

        /* This variable tracks the free buffer ring 1 full position, so it
         * has to match the above.
         */
        pRxLocal->local_Fbr1_full = ET_DMA10_WRAP;
        writel(((pRxLocal->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
               &rx_dma->fbr1_min_des.value);

#ifdef USE_FBR0
        /* Now's the best time to initialize FBR0 contents */
        fbr_entry = (PFBR_DESC_t) pRxLocal->pFbr0RingVa;
        for (entry = 0; entry < pRxLocal->Fbr0NumEntries; entry++) {
                fbr_entry->addr_hi = pRxLocal->Fbr[0]->PAHigh[entry];
                fbr_entry->addr_lo = pRxLocal->Fbr[0]->PALow[entry];
                fbr_entry->word2.bits.bi = entry;
                fbr_entry++;
        }

        writel((uint32_t) (pRxLocal->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
        writel((uint32_t) pRxLocal->Fbr0Realpa, &rx_dma->fbr0_base_lo);
        writel(pRxLocal->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des.value);
        writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

        /* This variable tracks the free buffer ring 0 full position, so it
         * has to match the above.
         */
        pRxLocal->local_Fbr0_full = ET_DMA10_WRAP;
        writel(((pRxLocal->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
               &rx_dma->fbr0_min_des.value);
#endif

        /* Program the number of packets we will receive before generating an
         * interrupt.
         * For version B silicon, this value gets updated once autoneg is
         * complete.
         */
        writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done.value);

        /* The "time_done" is not working correctly to coalesce interrupts
         * after a given time period, but rather is giving us an interrupt
         * regardless of whether we have received packets.
         * This value gets updated once autoneg is complete.
         */
        writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time.value);

        spin_unlock_irqrestore(&etdev->RcvLock, flags);
}

/**
 * SetRxDmaTimer - Set the heartbeat timer according to line rate.
 * @etdev: pointer to our adapter structure
 */
void SetRxDmaTimer(struct et131x_adapter *etdev)
{
        /* For version B silicon, we do not use the RxDMA timer for 10 and 100
         * Mbits/s line rates, and we do not enable RxDMA interrupt coalescing.
         */
        if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
            (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
                writel(0, &etdev->regs->rxdma.max_pkt_time.value);
                writel(1, &etdev->regs->rxdma.num_pkt_done.value);
        }
}

/**
 * et131x_rx_dma_disable - Stop Rx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_rx_dma_disable(struct et131x_adapter *etdev)
{
        RXDMA_CSR_t csr;

        /* Setup the receive dma configuration register */
        writel(0x00002001, &etdev->regs->rxdma.csr.value);
        csr.value = readl(&etdev->regs->rxdma.csr.value);
        if (csr.bits.halt_status != 1) {
                udelay(5);
                csr.value = readl(&etdev->regs->rxdma.csr.value);
                if (csr.bits.halt_status != 1)
                        dev_err(&etdev->pdev->dev,
                                "RX Dma failed to enter halt state. CSR 0x%08x\n",
                                csr.value);
        }
}

/**
 * et131x_rx_dma_enable - Re-start Rx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_rx_dma_enable(struct et131x_adapter *etdev)
{
        if (etdev->RegistryPhyLoopbk)
                /* RxDMA is disabled for loopback operation. */
                writel(0x1, &etdev->regs->rxdma.csr.value);
        else {
                /* Setup the receive dma configuration register for normal
                 * operation
                 */
                RXDMA_CSR_t csr = { 0 };

                csr.bits.fbr1_enable = 1;
                if (etdev->RxRing.Fbr1BufferSize == 4096)
                        csr.bits.fbr1_size = 1;
                else if (etdev->RxRing.Fbr1BufferSize == 8192)
                        csr.bits.fbr1_size = 2;
                else if (etdev->RxRing.Fbr1BufferSize == 16384)
                        csr.bits.fbr1_size = 3;
#ifdef USE_FBR0
                csr.bits.fbr0_enable = 1;
                if (etdev->RxRing.Fbr0BufferSize == 256)
                        csr.bits.fbr0_size = 1;
                else if (etdev->RxRing.Fbr0BufferSize == 512)
                        csr.bits.fbr0_size = 2;
                else if (etdev->RxRing.Fbr0BufferSize == 1024)
                        csr.bits.fbr0_size = 3;
#endif
                writel(csr.value, &etdev->regs->rxdma.csr.value);

                csr.value = readl(&etdev->regs->rxdma.csr.value);
                if (csr.bits.halt_status != 0) {
                        udelay(5);
                        csr.value = readl(&etdev->regs->rxdma.csr.value);
                        if (csr.bits.halt_status != 0) {
                                dev_err(&etdev->pdev->dev,
                                        "RX Dma failed to exit halt state.  CSR 0x%08x\n",
                                        csr.value);
                        }
                }
        }
}

/**
 * nic_rx_pkts - Checks the hardware for available packets
 * @etdev: pointer to our adapter
 *
 * Returns a pointer to the MP_RFD describing the packet, or NULL if none
 * is available.
 *
 * Checks the hardware for available packets, using the completion ring.
 * If a packet is available, it gets an RFD from the RecvList, attaches
 * the packet to it, recycles the RFD via nic_return_rfd(), and returns
 * a pointer to the RFD.
 */
PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
{
        struct _rx_ring_t *pRxLocal = &etdev->RxRing;
        PRX_STATUS_BLOCK_t pRxStatusBlock;
        PPKT_STAT_DESC_t pPSREntry;
        PMP_RFD pMpRfd;
        uint32_t nIndex;
        uint8_t *pBufVa;
        unsigned long flags;
        struct list_head *element;
        uint8_t ringIndex;
        uint16_t bufferIndex;
        uint32_t localLen;
        PKT_STAT_DESC_WORD0_t Word0;

        /* RX Status block is written by the DMA engine prior to every
         * interrupt. It contains the next to be used entry in the Packet
         * Status Ring, and also the two Free Buffer rings.
         */
        pRxStatusBlock = (PRX_STATUS_BLOCK_t) pRxLocal->pRxStatusVa;

        if (pRxStatusBlock->Word1.bits.PSRoffset ==
                        pRxLocal->local_psr_full.bits.psr_full &&
                        pRxStatusBlock->Word1.bits.PSRwrap ==
                        pRxLocal->local_psr_full.bits.psr_full_wrap) {
                /* Looks like this ring is not updated yet */
                return NULL;
        }

        /* The packet status ring indicates that data is available. */
        pPSREntry = (PPKT_STAT_DESC_t) (pRxLocal->pPSRingVa) +
                        pRxLocal->local_psr_full.bits.psr_full;

        /* Grab any information that is required once the PSR is
         * advanced, since we can no longer rely on the memory being
         * accurate
         */
        localLen = pPSREntry->word1.bits.length;
        ringIndex = (uint8_t) pPSREntry->word1.bits.ri;
        bufferIndex = (uint16_t) pPSREntry->word1.bits.bi;
        Word0 = pPSREntry->word0;

        /* Indicate that we have used this PSR entry. */
        if (++pRxLocal->local_psr_full.bits.psr_full >
            pRxLocal->PsrNumEntries - 1) {
                pRxLocal->local_psr_full.bits.psr_full = 0;
                pRxLocal->local_psr_full.bits.psr_full_wrap ^= 1;
        }

        writel(pRxLocal->local_psr_full.value,
               &etdev->regs->rxdma.psr_full_offset.value);
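
        /* local_psr_full mirrors the hardware's position in the Packet
         * Status Ring: the index wraps to zero at PsrNumEntries and the
         * wrap bit toggles on every pass, so an index/wrap pair equal to
         * the one reported in the status block (checked at the top of this
         * function) means no new entries are waiting.
         */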

#ifndef USE_FBR0
        if (ringIndex != 1) {
                return NULL;
        }
#endif

#ifdef USE_FBR0
        if (ringIndex > 1 ||
                (ringIndex == 0 &&
                bufferIndex > pRxLocal->Fbr0NumEntries - 1) ||
                (ringIndex == 1 &&
                bufferIndex > pRxLocal->Fbr1NumEntries - 1))
#else
        if (ringIndex != 1 ||
                bufferIndex > pRxLocal->Fbr1NumEntries - 1)
#endif
        {
                /* Illegal buffer or ring index cannot be used by S/W */
                dev_err(&etdev->pdev->dev,
                          "NICRxPkts PSR Entry %d indicates "
                          "length of %d and/or bad bi(%d)\n",
                          pRxLocal->local_psr_full.bits.psr_full,
                          localLen, bufferIndex);
                return NULL;
        }

        /* Get and fill the RFD. */
        spin_lock_irqsave(&etdev->RcvLock, flags);

        /* list_entry() never returns NULL, so check for an empty RecvList
         * explicitly before dereferencing its first element.
         */
        if (list_empty(&pRxLocal->RecvList)) {
                spin_unlock_irqrestore(&etdev->RcvLock, flags);
                return NULL;
        }

        element = pRxLocal->RecvList.next;
        pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);

        list_del(&pMpRfd->list_node);
        pRxLocal->nReadyRecv--;

        spin_unlock_irqrestore(&etdev->RcvLock, flags);

        pMpRfd->bufferindex = bufferIndex;
        pMpRfd->ringindex = ringIndex;

        /* In V1 silicon, there is a bug which screws up filtering of
         * runt packets.  Therefore runt packet filtering is disabled
         * in the MAC and the packets are dropped here.  They are
         * also counted here.
         */
        if (localLen < (NIC_MIN_PACKET_SIZE + 4)) {
                etdev->Stats.other_errors++;
                localLen = 0;
        }

        if (localLen) {
                if (etdev->ReplicaPhyLoopbk == 1) {
                        pBufVa = pRxLocal->Fbr[ringIndex]->Va[bufferIndex];

                        if (memcmp(&pBufVa[6], &etdev->CurrentAddress[0],
                                   ETH_ALEN) == 0) {
                                if (memcmp(&pBufVa[42], "Replica packet",
                                           ETH_HLEN)) {
                                        etdev->ReplicaPhyLoopbkPF = 1;
                                }
                        }
                }

                /* Determine if this is a multicast packet coming in */
                if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
                    !(Word0.value & ALCATEL_BROADCAST_PKT)) {
                        /* Promiscuous mode and Multicast mode are
                         * not mutually exclusive as was first
                         * thought.  I guess Promiscuous is just
                         * considered a super-set of the other
                         * filters. Generally filter is 0x2b when in
                         * promiscuous mode.
                         */
                        if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
                            && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
                            && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
                                pBufVa = pRxLocal->Fbr[ringIndex]->
                                                Va[bufferIndex];

                                /* Loop through our list to see if the
                                 * destination address of this packet
                                 * matches one in our list.
                                 */
                                for (nIndex = 0;
                                     nIndex < etdev->MCAddressCount;
                                     nIndex++) {
                                        if (pBufVa[0] ==
                                            etdev->MCList[nIndex][0]
                                            && pBufVa[1] ==
                                            etdev->MCList[nIndex][1]
                                            && pBufVa[2] ==
                                            etdev->MCList[nIndex][2]
                                            && pBufVa[3] ==
                                            etdev->MCList[nIndex][3]
                                            && pBufVa[4] ==
                                            etdev->MCList[nIndex][4]
                                            && pBufVa[5] ==
                                            etdev->MCList[nIndex][5]) {
                                                break;
                                        }
                                }

                                /* If our index is equal to the number
                                 * of Multicast address we have, then
                                 * this means we did not find this
                                 * packet's matching address in our
                                 * list.  Set the PacketSize to zero,
                                 * so we free our RFD when we return
                                 * from this function.
                                 */
                                if (nIndex == etdev->MCAddressCount)
                                        localLen = 0;
                        }

                        if (localLen > 0)
                                etdev->Stats.multircv++;
                } else if (Word0.value & ALCATEL_BROADCAST_PKT)
                        etdev->Stats.brdcstrcv++;
                else
                        /* Not sure what this counter measures in
                         * promiscuous mode. Perhaps we should check
                         * the MAC address to see if it is directed
                         * to us in promiscuous mode.
                         */
                        etdev->Stats.unircv++;
        }

        if (localLen > 0) {
                struct sk_buff *skb = NULL;

                /* pMpRfd->PacketSize = localLen - 4; */
                pMpRfd->PacketSize = localLen;

                skb = dev_alloc_skb(pMpRfd->PacketSize + 2);
                if (!skb) {
                        dev_err(&etdev->pdev->dev,
                                  "Couldn't alloc an SKB for Rx\n");
                        /* Return the RFD so it is not leaked on failure */
                        nic_return_rfd(etdev, pMpRfd);
                        return NULL;
                }

                etdev->net_stats.rx_bytes += pMpRfd->PacketSize;

                memcpy(skb_put(skb, pMpRfd->PacketSize),
                       pRxLocal->Fbr[ringIndex]->Va[bufferIndex],
                       pMpRfd->PacketSize);

                skb->dev = etdev->netdev;
                skb->protocol = eth_type_trans(skb, etdev->netdev);
                skb->ip_summed = CHECKSUM_NONE;

                netif_rx(skb);
        } else {
                pMpRfd->PacketSize = 0;
        }

        nic_return_rfd(etdev, pMpRfd);
        return pMpRfd;
}

/**
 * et131x_reset_recv - Reset the receive list
 * @etdev: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 */
void et131x_reset_recv(struct et131x_adapter *etdev)
{
        PMP_RFD pMpRfd;
        struct list_head *element;

        WARN_ON(list_empty(&etdev->RxRing.RecvList));

        /* Take all the RFD's from the pending list, and stick them on the
         * RecvList.
         */
        while (!list_empty(&etdev->RxRing.RecvPendingList)) {
                element = etdev->RxRing.RecvPendingList.next;

                pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);

                list_move_tail(&pMpRfd->list_node, &etdev->RxRing.RecvList);
        }
}

/**
 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
 * @etdev: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 */
void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
{
        PMP_RFD pMpRfd = NULL;
        struct sk_buff *PacketArray[NUM_PACKETS_HANDLED];
        PMP_RFD RFDFreeArray[NUM_PACKETS_HANDLED];
        uint32_t PacketArrayCount = 0;
        uint32_t PacketsToHandle;
        uint32_t PacketFreeCount = 0;
        bool TempUnfinishedRec = false;

        PacketsToHandle = NUM_PACKETS_HANDLED;

        /* Process up to available RFD's */
        while (PacketArrayCount < PacketsToHandle) {
                if (list_empty(&etdev->RxRing.RecvList)) {
                        WARN_ON(etdev->RxRing.nReadyRecv != 0);
                        TempUnfinishedRec = true;
                        break;
                }

                pMpRfd = nic_rx_pkts(etdev);

                if (pMpRfd == NULL)
                        break;

                /* Do not receive any packets until a filter has been set.
                 * Do not receive any packets until we have link.
                 * If length is zero, return the RFD in order to advance the
                 * Free buffer ring.
                 */
                if (!etdev->PacketFilter ||
                    !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
                    pMpRfd->PacketSize == 0) {
                        continue;
                }

                /* Increment the number of packets we received */
                etdev->Stats.ipackets++;

                /* Set the status on the packet, either resources or success */
                if (etdev->RxRing.nReadyRecv >= RFD_LOW_WATER_MARK) {
                        /* Put this RFD on the pending list
                         *
                         * NOTE: nic_rx_pkts() above is already returning the
                         * RFD to the RecvList, so don't additionally do that
                         * here.
                         * Besides, we don't really need (at this point) the
                         * pending list anyway.
                         */
                } else {
                        RFDFreeArray[PacketFreeCount] = pMpRfd;
                        PacketFreeCount++;

                        dev_warn(&etdev->pdev->dev,
                                    "RFD's are running out\n");
                }

                PacketArray[PacketArrayCount] = pMpRfd->Packet;
                PacketArrayCount++;
        }

        if ((PacketArrayCount == NUM_PACKETS_HANDLED) || TempUnfinishedRec) {
                etdev->RxRing.UnfinishedReceives = true;
                writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
                       &etdev->regs->global.watchdog_timer);
        } else {
                /* Watchdog timer will disable itself if appropriate. */
                etdev->RxRing.UnfinishedReceives = false;
        }
}

static inline u32 bump_fbr(u32 *fbr, u32 limit)
{
        u32 v = *fbr;
        v++;
        /* This works for all cases where limit < 1024. The 1023 case
           works because 1023++ is 1024 which means the if condition is not
           taken but the carry of the bit into the wrap bit toggles the wrap
           value correctly */
        if ((v & ET_DMA10_MASK) > limit) {
                v &= ~ET_DMA10_MASK;
                v ^= ET_DMA10_WRAP;
        }
        /* For the 1023 case */
        v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
        *fbr = v;
        return v;
}
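
/* Worked example of bump_fbr(), assuming ET_DMA10_MASK covers the low ten
 * bits and ET_DMA10_WRAP is the bit directly above them (as the comment
 * inside the function relies on): with limit = 511, advancing from index
 * 511 gives 512, which exceeds the limit, so the index is cleared and the
 * wrap bit is toggled explicitly; with limit = 1023, advancing from 1023
 * carries straight into the wrap bit, so the index becomes 0 and the wrap
 * bit toggles without taking the "if" at all.
 */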

/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @pMpRfd: pointer to the RFD
 */
void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
{
        struct _rx_ring_t *rx_local = &etdev->RxRing;
        struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
        uint16_t bi = pMpRfd->bufferindex;
        uint8_t ri = pMpRfd->ringindex;
        unsigned long flags;

        /* We don't use any of the OOB data besides status. Otherwise, we
         * need to clean up OOB data
         */
        if (
#ifdef USE_FBR0
            (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
            (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
                spin_lock_irqsave(&etdev->FbrLock, flags);

                if (ri == 1) {
                        PFBR_DESC_t pNextDesc =
                            (PFBR_DESC_t) (rx_local->pFbr1RingVa) +
                            INDEX10(rx_local->local_Fbr1_full);

                        /* Handle the Free Buffer Ring advancement here. Write
                         * the PA / Buffer Index for the returned buffer into
                         * the oldest (next to be freed) FBR entry
                         */
                        pNextDesc->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
                        pNextDesc->addr_lo = rx_local->Fbr[1]->PALow[bi];
                        pNextDesc->word2.value = bi;

                        writel(bump_fbr(&rx_local->local_Fbr1_full,
                                rx_local->Fbr1NumEntries - 1),
                                &rx_dma->fbr1_full_offset);
                }
#ifdef USE_FBR0
                else {
                        PFBR_DESC_t pNextDesc =
                            (PFBR_DESC_t) rx_local->pFbr0RingVa +
                            INDEX10(rx_local->local_Fbr0_full);

                        /* Handle the Free Buffer Ring advancement here. Write
                         * the PA / Buffer Index for the returned buffer into
                         * the oldest (next to be freed) FBR entry
                         */
                        pNextDesc->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
                        pNextDesc->addr_lo = rx_local->Fbr[0]->PALow[bi];
                        pNextDesc->word2.value = bi;

                        writel(bump_fbr(&rx_local->local_Fbr0_full,
                                        rx_local->Fbr0NumEntries - 1),
                               &rx_dma->fbr0_full_offset);
                }
#endif
                spin_unlock_irqrestore(&etdev->FbrLock, flags);
        } else {
                dev_err(&etdev->pdev->dev,
                          "NICReturnRFD illegal Buffer Index returned\n");
        }

        /* The processing on this RFD is done, so put it back on the tail of
         * our list
         */
        spin_lock_irqsave(&etdev->RcvLock, flags);
        list_add_tail(&pMpRfd->list_node, &rx_local->RecvList);
        rx_local->nReadyRecv++;
        spin_unlock_irqrestore(&etdev->RcvLock, flags);

        WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}
