linux/drivers/net/vxge/vxge-traffic.c
/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. This function must be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;

        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_status);

        val64 = readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */

        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
                &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0,
                &vp_reg->vpath_general_int_mask);
exit:
        return status;
}
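
/*
 * Example (illustrative sketch, not part of the driver): enabling vpath
 * interrupts as the final step of vpath bring-up. The helper below and
 * its error handling are hypothetical; vxge_hw_vpath_open() is the open
 * routine declared in vxge-config.h.
 */
#if 0
static enum vxge_hw_status example_vpath_bringup(struct __vxge_hw_device *hldev,
        struct vxge_hw_vpath_attr *attr, struct __vxge_hw_vpath_handle **vp)
{
        enum vxge_hw_status status;

        status = vxge_hw_vpath_open(hldev, attr, vp);
        if (status != VXGE_HW_OK)
                return status;

        /* interrupts are enabled last, once the vpath is fully set up */
        return vxge_hw_vpath_intr_enable(*vp);
}
#endif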

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is typically executed as part of
 * the vpath teardown sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;

        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper(
                (u32)VXGE_HW_INTR_MASK_ALL,
                &vp_reg->vpath_general_int_mask);

        val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
                        0, 32),
                &channel->common_reg->set_msix_mask_vect[msix_id%4]);
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
                        0, 32),
                &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
}
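
/*
 * Example (illustrative sketch, not part of the driver): a typical MSI-X
 * vector handler masks its vector on entry and unmasks it after the
 * completions are drained, so the vector cannot re-fire mid-processing.
 * The handler, the dev_id wiring and the vector number are hypothetical.
 */
#if 0
static irqreturn_t example_fifo_msix_handler(int irq, void *dev_id)
{
        struct __vxge_hw_fifo *fifo = dev_id;   /* hypothetical wiring */
        int msix_id = 0;                        /* vector assigned to this fifo */

        vxge_hw_channel_msix_mask(&fifo->channel, msix_id);
        /* ... drain completions via vxge_hw_fifo_txdl_next_completed() ... */
        vxge_hw_channel_msix_unmask(&fifo->channel, msix_id);

        return IRQ_HANDLED;
}
#endif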

/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *              with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
        if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
           (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
           (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
           (intr_mode != VXGE_HW_INTR_MODE_DEF))
                intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

        hldev->config.intr_mode = intr_mode;
        return intr_mode;
}
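
/*
 * Example (illustrative sketch): requesting MSI-X before device
 * initialization. The helper sanitizes unsupported values, so the
 * return value tells the caller which mode was actually configured.
 */
#if 0
static void example_choose_intr_mode(struct __vxge_hw_device *hldev)
{
        if (vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX) !=
            VXGE_HW_INTR_MODE_MSIX)
                pr_info("vxge: MSI-X not selected, using INTA\n");
}
#endif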

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. This function must be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u32 i;
        u64 val64;
        u32 val32;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_enable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
                val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

                if (val64 != 0) {
                        writeq(val64, &hldev->common_reg->tim_int_status0);

                        writeq(~val64, &hldev->common_reg->tim_int_mask0);
                }

                val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

                if (val32 != 0) {
                        __vxge_hw_pio_mem_write32_upper(val32,
                                        &hldev->common_reg->tim_int_status1);

                        __vxge_hw_pio_mem_write32_upper(~val32,
                                        &hldev->common_reg->tim_int_mask1);
                }
        }

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        u32 i;

        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                &hldev->common_reg->tim_int_mask1);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_disable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                                &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = 0;

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
                val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
        u32 val32;

        val32 = readl(&hldev->common_reg->titan_general_int_status);
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *      general_int_status register.
 *
 * The function performs two actions: it first checks whether (in case of a
 * shared IRQ) the interrupt was raised by the device, and it then masks
 * the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note that
 * in this case the device interrupts remain enabled and @reason is set to
 * 0). Otherwise, the 64-bit general interrupt status is returned through
 * @reason and the function returns the processing status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
                                             u32 skip_alarms, u64 *reason)
{
        u32 i;
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (unlikely(!val64)) {
                /* not Titan interrupt */
                *reason = 0;
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

        if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        __vxge_hw_device_handle_error(hldev,
                                NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
                        *reason = 0;
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        hldev->stats.sw_dev_info_stats.total_intr_cnt++;

        *reason = val64;

        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);

        if (val64 &
            VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
                hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

                return VXGE_HW_OK;
        }

        hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

        if (unlikely(val64 &
                        VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

                enum vxge_hw_status error_level = VXGE_HW_OK;

                hldev->stats.sw_dev_err_stats.vpath_alarms++;

                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                        if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                                continue;

                        ret = __vxge_hw_vpath_alarm_process(
                                &hldev->virtual_paths[i], skip_alarms);

                        error_level = VXGE_HW_SET_LEVEL(ret, error_level);

                        if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
                                (ret == VXGE_HW_ERR_SLOT_FREEZE)))
                                break;
                }

                ret = error_level;
        }
exit:
        return ret;
}
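
/*
 * Example (illustrative sketch, not part of the driver): the classic INTA
 * top half built on vxge_hw_device_begin_irq(). Interrupts are masked
 * while the completions are handled and re-enabled afterwards; how hldev
 * reaches the handler through dev_id is hypothetical.
 */
#if 0
static irqreturn_t example_inta_isr(int irq, void *dev_id)
{
        struct __vxge_hw_device *hldev = dev_id;
        u64 reason;

        if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
            VXGE_HW_ERR_WRONG_IRQ)
                return IRQ_NONE;        /* shared line, not our interrupt */

        vxge_hw_device_mask_all(hldev);
        vxge_hw_device_clear_tx_rx(hldev);      /* ack the Tx/Rx condition */
        /* ... process ring/fifo completions, e.g. by scheduling NAPI ... */
        vxge_hw_device_unmask_all(hldev);

        return IRQ_HANDLED;
}
#endif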

/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for a programmable amount of time.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link is already up, there is nothing to do.
         */
        if (hldev->link_state == VXGE_HW_LINK_UP)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_UP;

        /* notify driver */
        if (hldev->uld_callbacks.link_up)
                hldev->uld_callbacks.link_up(hldev);
exit:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link is already down, there is nothing to do.
         */
        if (hldev->link_state == VXGE_HW_LINK_DOWN)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_DOWN;

        /* notify driver */
        if (hldev->uld_callbacks.link_down)
                hldev->uld_callbacks.link_down(hldev);
exit:
        return VXGE_HW_OK;
}

/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
enum vxge_hw_status
__vxge_hw_device_handle_error(
                struct __vxge_hw_device *hldev,
                u32 vp_id,
                enum vxge_hw_event type)
{
        switch (type) {
        case VXGE_HW_EVENT_UNKNOWN:
                break;
        case VXGE_HW_EVENT_RESET_START:
        case VXGE_HW_EVENT_RESET_COMPLETE:
        case VXGE_HW_EVENT_LINK_DOWN:
        case VXGE_HW_EVENT_LINK_UP:
                goto out;
        case VXGE_HW_EVENT_ALARM_CLEARED:
                goto out;
        case VXGE_HW_EVENT_ECCERR:
        case VXGE_HW_EVENT_MRPCIM_ECCERR:
                goto out;
        case VXGE_HW_EVENT_FIFO_ERR:
        case VXGE_HW_EVENT_VPATH_ERR:
        case VXGE_HW_EVENT_CRITICAL_ERR:
        case VXGE_HW_EVENT_SERR:
                break;
        case VXGE_HW_EVENT_SRPCIM_SERR:
        case VXGE_HW_EVENT_MRPCIM_SERR:
                goto out;
        case VXGE_HW_EVENT_SLOT_FREEZE:
                break;
        default:
                vxge_assert(0);
                goto out;
        }

        /* notify driver */
        if (hldev->uld_callbacks.crit_err)
                hldev->uld_callbacks.crit_err(hldev, type, vp_id);
out:
        return VXGE_HW_OK;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                                 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                                (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                                 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status1);
        }
}

/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
        void **tmp_arr;

        if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
                *dtrh = channel->reserve_arr[--channel->reserve_ptr];

                return VXGE_HW_OK;
        }

        /* switch between empty and full arrays */

        /* The idea behind this design is that keeping the free and
         * reserve arrays separate also separates the irq and non-irq
         * parts, i.e. no additional locking is needed when a resource
         * is freed. */

        if (channel->length - channel->free_ptr > 0) {

                tmp_arr = channel->reserve_arr;
                channel->reserve_arr = channel->free_arr;
                channel->free_arr = tmp_arr;
                channel->reserve_ptr = channel->length;
                channel->reserve_top = channel->free_ptr;
                channel->free_ptr = channel->length;

                channel->stats->reserve_free_swaps_cnt++;

                goto _alloc_after_swap;
        }

        channel->stats->full_cnt++;

        *dtrh = NULL;
        return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
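
/*
 * Example (illustrative sketch): the descriptor lifecycle behind the
 * reserve/work/free arrays. A dtr is allocated from the reserve array,
 * posted to the work array, completed, and returned to the free array;
 * the two-array split is what lets the allocating (non-irq) and freeing
 * (irq) sides run without a shared lock.
 */
#if 0
static void example_dtr_lifecycle(struct __vxge_hw_channel *channel)
{
        void *dtrh;

        if (vxge_hw_channel_dtr_alloc(channel, &dtrh) != VXGE_HW_OK)
                return;                         /* out of descriptors */

        vxge_hw_channel_dtr_post(channel, dtrh);        /* hand to hardware */

        /* later, from the completion path: */
        vxge_hw_channel_dtr_try_complete(channel, &dtrh);
        if (dtrh != NULL) {
                vxge_hw_channel_dtr_complete(channel);
                vxge_hw_channel_dtr_free(channel, dtrh);
        }
}
#endif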

/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to the work array.
 *
 */
void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
        vxge_assert(channel->work_arr[channel->post_index] == NULL);

        channel->work_arr[channel->post_index++] = dtrh;

        /* wrap-around */
        if (channel->post_index == channel->length)
                channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
        vxge_assert(channel->compl_index < channel->length);

        *dtrh = channel->work_arr[channel->compl_index];
        prefetch(*dtrh);
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from the work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
        channel->work_arr[channel->compl_index] = NULL;

        /* wrap-around */
        if (++channel->compl_index == channel->length)
                channel->compl_index = 0;

        channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to the free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
        channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve the number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
        return (channel->reserve_ptr - channel->reserve_top) +
                (channel->length - channel->free_ptr);
}

/**
 * vxge_hw_ring_rxd_reserve     - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
        void **rxdh)
{
        enum vxge_hw_status status;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        status = vxge_hw_channel_dtr_alloc(channel, rxdh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_ring_rxd_1 *rxdp =
                        (struct vxge_hw_ring_rxd_1 *)*rxdh;

                rxdp->control_0 = rxdp->control_1 = 0;
        }

        return status;
}

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted     (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_free(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(channel, rxdh);

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}
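
/*
 * Example (illustrative sketch): replenishing the receive ring. Each pass
 * reserves an rxd, attaches a fresh buffer and posts the descriptor; the
 * buffer allocation/mapping step is elided, and vxge_hw_ring_rxd_1b_set()
 * is assumed to be the single-buffer setter from vxge-traffic.h.
 */
#if 0
static void example_ring_replenish(struct __vxge_hw_ring *ring,
        dma_addr_t dma_addr, u32 buf_size)
{
        void *rxdh;

        while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
                /* ... allocate and dma-map a buffer, then: */
                vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);
                vxge_hw_ring_rxd_post(ring, rxdh);      /* give it to the NIC */
        }
}
#endif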

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        wmb();
        vxge_hw_ring_rxd_post_post(ring, rxdh);
}

/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code:     Transfer code, as per Titan User Guide,
 *       Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
        struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_ring_rxd_1 *rxdp;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        vxge_hw_channel_dtr_try_complete(channel, rxdh);

        rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
        if (rxdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        /* check whether it is not the end */
        if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {

                vxge_assert(rxdp->host_control != 0);

                ++ring->cmpl_cnt;
                vxge_hw_channel_dtr_complete(channel);

                *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);

                vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

                ring->stats->common_stats.usage_cnt++;
                if (ring->stats->common_stats.usage_max <
                                ring->stats->common_stats.usage_cnt)
                        ring->stats->common_stats.usage_max =
                                ring->stats->common_stats.usage_cnt;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* reset it, since we don't want to return
         * garbage to the driver */
        *rxdh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}
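
/*
 * Example (illustrative sketch): draining receive completions. Each
 * completed rxd is vetted through vxge_hw_ring_handle_tcode() and then
 * recycled; unmapping the buffer and passing the packet up are elided.
 */
#if 0
static void example_ring_poll(struct __vxge_hw_ring *ring)
{
        void *rxdh;
        u8 t_code;

        while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
               VXGE_HW_OK) {
                if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
                    VXGE_HW_OK) {
                        vxge_hw_ring_rxd_free(ring, rxdh);      /* bad rxd */
                        continue;
                }
                /* ... unmap the buffer and hand the packet to the stack ... */
                vxge_hw_ring_rxd_free(ring, rxdh);
        }
}
#endif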

/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK                   - for success.
 * VXGE_HW_ERR_CRITICAL         - when a critical error is encountered.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
        struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        /* If the t_code is not supported and is not 0x5 (an unparseable
         * packet, such as an unknown IPv6 header), drop it !!!
         */

        if (t_code == 0 || t_code == 5) {
                status = VXGE_HW_OK;
                goto exit;
        }

        if (t_code > 0xF) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to the doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
        u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
                VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
                &fifo->nofl_db->control_0);

        mmiowb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        mmiowb();
}

/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
        return vxge_hw_channel_dtr_count(&fifoh->channel);
}

/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
        struct __vxge_hw_fifo *fifo,
        void **txdlh, void **txdl_priv)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status;
        int i;

        channel = &fifo->channel;

        status = vxge_hw_channel_dtr_alloc(channel, txdlh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_fifo_txd *txdp =
                        (struct vxge_hw_fifo_txd *)*txdlh;
                struct __vxge_hw_fifo_txdl_priv *priv;

                priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

                /* reset the TxDL's private */
                priv->align_dma_offset = 0;
                priv->align_vaddr_start = priv->align_vaddr;
                priv->align_used_frags = 0;
                priv->frags = 0;
                priv->alloc_frags = fifo->config->max_frags;
                priv->next_txdl_priv = NULL;

                *txdl_priv = (void *)(size_t)txdp->host_control;

                for (i = 0; i < fifo->config->max_frags; i++) {
                        txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
                        txdp->control_0 = txdp->control_1 = 0;
                }
        }

        return status;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
                                  void *txdlh, u32 frag_idx,
                                  dma_addr_t dma_pointer, u32 size)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp, *txdp_last;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

        if (frag_idx != 0)
                txdp->control_0 = txdp->control_1 = 0;
        else {
                txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                        VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
                txdp->control_1 |= fifo->interrupt_type;
                txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
                        fifo->tx_intr_num);
                if (txdl_priv->frags) {
                        txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
                                (txdl_priv->frags - 1);
                        txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
                                VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
                }
        }

        vxge_assert(frag_idx < txdl_priv->alloc_frags);

        txdp->buffer_pointer = (u64)dma_pointer;
        txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
        fifo->stats->total_buffers++;
        txdl_priv->frags++;
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        struct vxge_hw_fifo_txd *txdp_last;
        struct vxge_hw_fifo_txd *txdp_first;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
        txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

        txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
        txdp_last->control_0 |=
              VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
        txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

        __vxge_hw_non_offload_db_post(fifo,
                (u64)(size_t)txdl_priv->dma_addr,
                txdl_priv->frags - 1,
                fifo->no_snoop_bits);

        fifo->stats->total_posts++;
        fifo->stats->common_stats.usage_cnt++;
        if (fifo->stats->common_stats.usage_max <
                fifo->stats->common_stats.usage_cnt)
                fifo->stats->common_stats.usage_max =
                        fifo->stats->common_stats.usage_cnt;
}
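
/*
 * Example (illustrative sketch): the transmit fast path. One TxDL is
 * reserved, each DMA-mapped fragment is attached in order, and the list
 * is posted; dma_addr/len stand in for a real scatter-gather walk.
 */
#if 0
static enum vxge_hw_status example_fifo_xmit(struct __vxge_hw_fifo *fifo,
        dma_addr_t dma_addr, u32 len)
{
        void *txdlh, *txdl_priv;        /* txdl_priv unused in this sketch */
        enum vxge_hw_status status;

        status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
        if (status != VXGE_HW_OK)
                return status;          /* fifo full: stop the tx queue */

        /* fragment 0; repeat with frag_idx 1..n-1 for multi-buffer sends */
        vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);

        vxge_hw_fifo_txdl_post(fifo, txdlh);
        return VXGE_HW_OK;
}
#endif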

/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
        struct __vxge_hw_fifo *fifo, void **txdlh,
        enum vxge_hw_fifo_tcode *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_fifo_txd *txdp;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        vxge_hw_channel_dtr_try_complete(channel, txdlh);

        txdp = (struct vxge_hw_fifo_txd *)*txdlh;
        if (txdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        /* check whether host owns it */
        if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

                vxge_assert(txdp->host_control != 0);

                vxge_hw_channel_dtr_complete(channel);

                *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

                if (fifo->stats->common_stats.usage_cnt > 0)
                        fifo->stats->common_stats.usage_cnt--;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* no more completions */
        *txdlh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}
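
/*
 * Example (illustrative sketch): reclaiming completed TxDLs. Any non-zero
 * t_code is funneled through vxge_hw_fifo_handle_tcode() before the
 * descriptor is freed for reuse; unmapping and skb freeing are elided,
 * and VXGE_HW_FIFO_T_CODE_OK is assumed to be the success value from
 * enum vxge_hw_fifo_tcode in vxge-traffic.h.
 */
#if 0
static void example_fifo_reclaim(struct __vxge_hw_fifo *fifo)
{
        enum vxge_hw_fifo_tcode t_code;
        void *txdlh;

        while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
               VXGE_HW_OK) {
                if (t_code != VXGE_HW_FIFO_T_CODE_OK)
                        vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
                /* ... dma_unmap and free the skb(s) for this TxDL ... */
                vxge_hw_fifo_txdl_free(fifo, txdlh);
        }
}
#endif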

/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
                                              void *txdlh,
                                              enum vxge_hw_fifo_tcode t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &fifo->channel;

        /* (t_code & 0x7) is never negative, so only the upper bound matters */
        if ((t_code & 0x7) > 0x4) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
        struct __vxge_hw_fifo_txdl_priv *txdl_priv;
        u32 max_frags;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
                        (struct vxge_hw_fifo_txd *)txdlh);

        max_frags = fifo->config->max_frags;

        vxge_hw_channel_dtr_free(channel, txdlh);
}

/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN],
        enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        for (i = 0; i < ETH_ALEN; i++) {
                data1 <<= 8;
                data1 |= (u8)macaddr[i];

                data2 <<= 8;
                data2 |= (u8)macaddr_mask[i];
        }

        switch (duplicate_mode) {
        case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
                i = 0;
                break;
        case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
                i = 1;
                break;
        case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
                i = 2;
                break;
        default:
                i = 0;
                break;
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0,
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
        return status;
}
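
/*
 * Example (illustrative sketch): programming a unicast address into the
 * vpath's DA table. An all-ones mask makes the match exact; dev_addr is
 * a hypothetical six-byte station address.
 */
#if 0
static enum vxge_hw_status example_add_unicast(
        struct __vxge_hw_vpath_handle *vp, const u8 *dev_addr)
{
        u8 macaddr[ETH_ALEN];
        u8 macaddr_mask[ETH_ALEN];

        memcpy(macaddr, dev_addr, ETH_ALEN);
        memset(macaddr_mask, 0xFF, ETH_ALEN);   /* exact match */

        return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
                        VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
}
#endif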

/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data1, &data2);

        if (status != VXGE_HW_OK)
                goto exit;

        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

        for (i = ETH_ALEN; i > 0; i--) {
                macaddr[i-1] = (u8)(data1 & 0xFF);
                data1 >>= 8;

                macaddr_mask[i-1] = (u8)(data2 & 0xFF);
                data2 >>= 8;
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
        struct __vxge_hw_vpath_handle *vp,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN])
{
        u32 i;
        u64 data1 = 0ULL;
        u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_get(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
                        0, &data1, &data2);

        if (status != VXGE_HW_OK)
                goto exit;

        data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

        data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

        for (i = ETH_ALEN; i > 0; i--) {
                macaddr[i-1] = (u8)(data1 & 0xFF);
                data1 >>= 8;

                macaddr_mask[i-1] = (u8)(data2 & 0xFF);
                data2 >>= 8;
        }
exit:
        return status;
}
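
/*
 * Example (illustrative sketch): walking the vpath's MAC address table
 * with the first/next pair; the loop ends when the hardware reports no
 * further entries.
 */
#if 0
static void example_walk_mac_table(struct __vxge_hw_vpath_handle *vp)
{
        u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
        enum vxge_hw_status status;

        status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macaddr_mask);
        while (status == VXGE_HW_OK) {
                /* ... consume macaddr / macaddr_mask ... */
                status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
                                macaddr_mask);
        }
}
#endif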
1562
1563/**
1564 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1565 *               to MAC address table.
1566 * @vp: Vpath handle.
1567 * @macaddr: MAC address to be added for this vpath into the list
1568 * @macaddr_mask: MAC address mask for macaddr
1569 *
1570 * Delete the given mac address and mac address mask into the list for this
1571 * vpath.
1572 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1573 * vxge_hw_vpath_mac_addr_get_next
1574 *
1575 */
1576enum vxge_hw_status
1577vxge_hw_vpath_mac_addr_delete(
1578        struct __vxge_hw_vpath_handle *vp,
1579        u8 (macaddr)[ETH_ALEN],
1580        u8 (macaddr_mask)[ETH_ALEN])
1581{
1582        u32 i;
1583        u64 data1 = 0ULL;
1584        u64 data2 = 0ULL;
1585        enum vxge_hw_status status = VXGE_HW_OK;
1586
1587        if (vp == NULL) {
1588                status = VXGE_HW_ERR_INVALID_HANDLE;
1589                goto exit;
1590        }
1591
1592        for (i = 0; i < ETH_ALEN; i++) {
1593                data1 <<= 8;
1594                data1 |= (u8)macaddr[i];
1595
1596                data2 <<= 8;
1597                data2 |= (u8)macaddr_mask[i];
1598        }
1599
1600        status = __vxge_hw_vpath_rts_table_set(vp,
1601                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1602                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1603                        0,
1604                        VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1605                        VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1606exit:
1607        return status;
1608}
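/*
 * Worked example of the byte packing above (illustrative): for
 * macaddr = 00:11:22:33:44:55 the loop shifts each octet in from the
 * right, so data1 ends up as 0x001122334455ULL with the first octet in
 * the most significant of the six low bytes. macaddr_mask is packed
 * into data2 the same way before both are handed to
 * __vxge_hw_vpath_rts_table_set().
 */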
1609
1610/**
1611 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1612 *               to vlan id table.
1613 * @vp: Vpath handle.
1614 * @vid: vlan id to be added for this vpath into the list
1615 *
1616 * Adds the given vlan id into the list for this vpath.
1617 * See also: vxge_hw_vpath_vid_delete(), vxge_hw_vpath_vid_get() and
1618 * vxge_hw_vpath_vid_get_next()
1619 *
1620 */
1621enum vxge_hw_status
1622vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1623{
1624        enum vxge_hw_status status = VXGE_HW_OK;
1625
1626        if (vp == NULL) {
1627                status = VXGE_HW_ERR_INVALID_HANDLE;
1628                goto exit;
1629        }
1630
1631        status = __vxge_hw_vpath_rts_table_set(vp,
1632                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1633                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1634                        0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1635exit:
1636        return status;
1637}
1638
1639/**
1640 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1641 *               from vlan id table.
1642 * @vp: Vpath handle.
1643 * @vid: Buffer to return vlan id
1644 *
1645 * Returns the first vlan id in the list for this vpath.
1646 * See also: vxge_hw_vpath_vid_get_next()
1647 *
1648 */
1649enum vxge_hw_status
1650vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1651{
1652        u64 data;
1653        enum vxge_hw_status status = VXGE_HW_OK;
1654
1655        if (vp == NULL) {
1656                status = VXGE_HW_ERR_INVALID_HANDLE;
1657                goto exit;
1658        }
1659
1660        status = __vxge_hw_vpath_rts_table_get(vp,
1661                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1662                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1663                        0, vid, &data);
1664
1665        *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1666exit:
1667        return status;
1668}
1669
1670/**
1671 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1672 *               from vlan id table.
1673 * @vp: Vpath handle.
1674 * @vid: Buffer to return vlan id
1675 *
1676 * Returns the next vlan id in the list for this vpath.
1677 * See also: vxge_hw_vpath_vid_get()
1678 *
1679 */
1680enum vxge_hw_status
1681vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1682{
1683        u64 data;
1684        enum vxge_hw_status status = VXGE_HW_OK;
1685
1686        if (vp == NULL) {
1687                status = VXGE_HW_ERR_INVALID_HANDLE;
1688                goto exit;
1689        }
1690
1691        status = __vxge_hw_vpath_rts_table_get(vp,
1692                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1693                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1694                        0, vid, &data);
1695
1696        *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1697exit:
1698        return status;
1699}
1700
1701/**
1702 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1703 *               from the vlan id table.
1704 * @vp: Vpath handle.
1705 * @vid: vlan id to be deleted from the list for this vpath
1706 *
1707 * Deletes the given vlan id from the list for this vpath.
1708 * See also: vxge_hw_vpath_vid_add(), vxge_hw_vpath_vid_get() and
1709 * vxge_hw_vpath_vid_get_next()
1710 *
1711 */
1712enum vxge_hw_status
1713vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1714{
1715        enum vxge_hw_status status = VXGE_HW_OK;
1716
1717        if (vp == NULL) {
1718                status = VXGE_HW_ERR_INVALID_HANDLE;
1719                goto exit;
1720        }
1721
1722        status = __vxge_hw_vpath_rts_table_set(vp,
1723                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1724                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1725                        0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1726exit:
1727        return status;
1728}
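/*
 * Usage sketch (illustrative): mirroring a VLAN filter change into the
 * vpath's vlan id table. "vp" is assumed to be a valid open vpath
 * handle and "vid" a 12-bit VLAN id widened to u64 by the caller.
 *
 *      status = vxge_hw_vpath_vid_add(vp, vid);     // filter added
 *      ...
 *      status = vxge_hw_vpath_vid_delete(vp, vid);  // filter removed
 */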
1729
1730/**
1731 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1732 * @vp: Vpath handle.
1733 *
1734 * Enable promiscuous mode of Titan-e operation.
1735 *
1736 * See also: vxge_hw_vpath_promisc_disable().
1737 */
1738enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1739                        struct __vxge_hw_vpath_handle *vp)
1740{
1741        u64 val64;
1742        struct __vxge_hw_virtualpath *vpath;
1743        enum vxge_hw_status status = VXGE_HW_OK;
1744
1745        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1746                status = VXGE_HW_ERR_INVALID_HANDLE;
1747                goto exit;
1748        }
1749
1750        vpath = vp->vpath;
1751
1752        /* Enable promiscuous mode for function 0 only */
1753        if (!(vpath->hldev->access_rights &
1754                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1755                return VXGE_HW_OK;
1756
1757        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1758
1759        if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1760
1761                val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1762                         VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1763                         VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1764                         VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1765
1766                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1767        }
1768exit:
1769        return status;
1770}
1771
1772/**
1773 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1774 * @vp: Vpath handle.
1775 *
1776 * Disable promiscuous mode of Titan-e operation.
1777 *
1778 * See also: vxge_hw_vpath_promisc_enable().
1779 */
1780enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1781                        struct __vxge_hw_vpath_handle *vp)
1782{
1783        u64 val64;
1784        struct __vxge_hw_virtualpath *vpath;
1785        enum vxge_hw_status status = VXGE_HW_OK;
1786
1787        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1788                status = VXGE_HW_ERR_INVALID_HANDLE;
1789                goto exit;
1790        }
1791
1792        vpath = vp->vpath;
1793
1794        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1795
1796        if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1797
1798                val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1799                           VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1800                           VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1801
1802                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1803        }
1804exit:
1805        return status;
1806}
1807
1808/*
1809 * vxge_hw_vpath_bcast_enable - Enable broadcast
1810 * @vp: Vpath handle.
1811 *
1812 * Enable receiving broadcasts.
1813 */
1814enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1815                        struct __vxge_hw_vpath_handle *vp)
1816{
1817        u64 val64;
1818        struct __vxge_hw_virtualpath *vpath;
1819        enum vxge_hw_status status = VXGE_HW_OK;
1820
1821        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1822                status = VXGE_HW_ERR_INVALID_HANDLE;
1823                goto exit;
1824        }
1825
1826        vpath = vp->vpath;
1827
1828        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1829
1830        if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1831                val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1832                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1833        }
1834exit:
1835        return status;
1836}
1837
1838/**
1839 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1840 * @vp: Vpath handle.
1841 *
1842 * Enable Titan-e multicast addresses.
1843 * Returns: VXGE_HW_OK on success.
1844 *
1845 */
1846enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1847                        struct __vxge_hw_vpath_handle *vp)
1848{
1849        u64 val64;
1850        struct __vxge_hw_virtualpath *vpath;
1851        enum vxge_hw_status status = VXGE_HW_OK;
1852
1853        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1854                status = VXGE_HW_ERR_INVALID_HANDLE;
1855                goto exit;
1856        }
1857
1858        vpath = vp->vpath;
1859
1860        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1861
1862        if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1863                val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1864                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1865        }
1866exit:
1867        return status;
1868}
1869
1870/**
1871 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1872 * @vp: Vpath handle.
1873 *
1874 * Disable Titan-e multicast addresses.
1875 * Returns: VXGE_HW_OK - success.
1876 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
1877 *
1878 */
1879enum vxge_hw_status
1880vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1881{
1882        u64 val64;
1883        struct __vxge_hw_virtualpath *vpath;
1884        enum vxge_hw_status status = VXGE_HW_OK;
1885
1886        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1887                status = VXGE_HW_ERR_INVALID_HANDLE;
1888                goto exit;
1889        }
1890
1891        vpath = vp->vpath;
1892
1893        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1894
1895        if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1896                val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1897                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1898        }
1899exit:
1900        return status;
1901}
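/*
 * Note: promisc_enable/disable, bcast_enable and mcast_enable/disable
 * above all share one read-modify-write pattern on rxmac_vcfg0,
 * sketched here with a placeholder bit name (illustrative only):
 *
 *      val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 *      if (!(val64 & SOME_EN_BIT)) {   // touch hardware only on change
 *              val64 |= SOME_EN_BIT;   // or &= ~SOME_EN_BIT to clear
 *              writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
 *      }
 */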
1902
1903/*
1904 * __vxge_hw_vpath_alarm_process - Process Alarms.
1905 * @vpath: Virtual Path.
1906 * @skip_alarms: Do not clear the alarms
1907 *
1908 * Process vpath alarms.
1909 *
1910 */
1911enum vxge_hw_status __vxge_hw_vpath_alarm_process(
1912                        struct __vxge_hw_virtualpath *vpath,
1913                        u32 skip_alarms)
1914{
1915        u64 val64;
1916        u64 alarm_status;
1917        u64 pic_status;
1918        struct __vxge_hw_device *hldev = NULL;
1919        enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1920        u64 mask64;
1921        struct vxge_hw_vpath_stats_sw_info *sw_stats;
1922        struct vxge_hw_vpath_reg __iomem *vp_reg;
1923
1924        if (vpath == NULL) {
1925                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1926                        alarm_event);
1927                goto out2;
1928        }
1929
1930        hldev = vpath->hldev;
1931        vp_reg = vpath->vp_reg;
1932        alarm_status = readq(&vp_reg->vpath_general_int_status);
1933
1934        if (alarm_status == VXGE_HW_ALL_FOXES) {
1935                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1936                        alarm_event);
1937                goto out;
1938        }
1939
1940        sw_stats = vpath->sw_stats;
1941
1942        if (alarm_status & ~(
1943                VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1944                VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1945                VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1946                VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1947                sw_stats->error_stats.unknown_alarms++;
1948
1949                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1950                        alarm_event);
1951                goto out;
1952        }
1953
1954        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1955
1956                val64 = readq(&vp_reg->xgmac_vp_int_status);
1957
1958                if (val64 &
1959                VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1960
1961                        val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1962
1963                        if (((val64 &
1964                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1965                            (!(val64 &
1966                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1967                            ((val64 &
1968                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1969                                && (!(val64 &
1970                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1971                        ))) {
1972                                sw_stats->error_stats.network_sustained_fault++;
1973
1974                                writeq(
1975                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1976                                        &vp_reg->asic_ntwk_vp_err_mask);
1977
1978                                __vxge_hw_device_handle_link_down_ind(hldev);
1979                                alarm_event = VXGE_HW_SET_LEVEL(
1980                                        VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1981                        }
1982
1983                        if (((val64 &
1984                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1985                            (!(val64 &
1986                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1987                            ((val64 &
1988                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1989                                && (!(val64 &
1990                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1991                        ))) {
1992
1993                                sw_stats->error_stats.network_sustained_ok++;
1994
1995                                writeq(
1996                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
1997                                        &vp_reg->asic_ntwk_vp_err_mask);
1998
1999                                __vxge_hw_device_handle_link_up_ind(hldev);
2000                                alarm_event = VXGE_HW_SET_LEVEL(
2001                                        VXGE_HW_EVENT_LINK_UP, alarm_event);
2002                        }
2003
2004                        writeq(VXGE_HW_INTR_MASK_ALL,
2005                                &vp_reg->asic_ntwk_vp_err_reg);
2006
2007                        alarm_event = VXGE_HW_SET_LEVEL(
2008                                VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
2009
2010                        if (skip_alarms)
2011                                return VXGE_HW_OK;
2012                }
2013        }
2014
2015        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
2016
2017                pic_status = readq(&vp_reg->vpath_ppif_int_status);
2018
2019                if (pic_status &
2020                    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
2021
2022                        val64 = readq(&vp_reg->general_errors_reg);
2023                        mask64 = readq(&vp_reg->general_errors_mask);
2024
2025                        if ((val64 &
2026                                VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
2027                                ~mask64) {
2028                                sw_stats->error_stats.ini_serr_det++;
2029
2030                                alarm_event = VXGE_HW_SET_LEVEL(
2031                                        VXGE_HW_EVENT_SERR, alarm_event);
2032                        }
2033
2034                        if ((val64 &
2035                            VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2036                                ~mask64) {
2037                                sw_stats->error_stats.dblgen_fifo0_overflow++;
2038
2039                                alarm_event = VXGE_HW_SET_LEVEL(
2040                                        VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2041                        }
2042
2043                        if ((val64 &
2044                            VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2045                                ~mask64)
2046                                sw_stats->error_stats.statsb_pif_chain_error++;
2047
2048                        if ((val64 &
2049                           VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2050                                ~mask64)
2051                                sw_stats->error_stats.statsb_drop_timeout++;
2052
2053                        if ((val64 &
2054                                VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2055                                ~mask64)
2056                                sw_stats->error_stats.target_illegal_access++;
2057
2058                        if (!skip_alarms) {
2059                                writeq(VXGE_HW_INTR_MASK_ALL,
2060                                        &vp_reg->general_errors_reg);
2061                                alarm_event = VXGE_HW_SET_LEVEL(
2062                                        VXGE_HW_EVENT_ALARM_CLEARED,
2063                                        alarm_event);
2064                        }
2065                }
2066
2067                if (pic_status &
2068                    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2069
2070                        val64 = readq(&vp_reg->kdfcctl_errors_reg);
2071                        mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2072
2073                        if ((val64 &
2074                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2075                                ~mask64) {
2076                                sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2077
2078                                alarm_event = VXGE_HW_SET_LEVEL(
2079                                        VXGE_HW_EVENT_FIFO_ERR,
2080                                        alarm_event);
2081                        }
2082
2083                        if ((val64 &
2084                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2085                                ~mask64) {
2086                                sw_stats->error_stats.kdfcctl_fifo0_poison++;
2087
2088                                alarm_event = VXGE_HW_SET_LEVEL(
2089                                        VXGE_HW_EVENT_FIFO_ERR,
2090                                        alarm_event);
2091                        }
2092
2093                        if ((val64 &
2094                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2095                                ~mask64) {
2096                                sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2097
2098                                alarm_event = VXGE_HW_SET_LEVEL(
2099                                        VXGE_HW_EVENT_FIFO_ERR,
2100                                        alarm_event);
2101                        }
2102
2103                        if (!skip_alarms) {
2104                                writeq(VXGE_HW_INTR_MASK_ALL,
2105                                        &vp_reg->kdfcctl_errors_reg);
2106                                alarm_event = VXGE_HW_SET_LEVEL(
2107                                        VXGE_HW_EVENT_ALARM_CLEARED,
2108                                        alarm_event);
2109                        }
2110                }
2111
2112        }
2113
2114        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2115
2116                val64 = readq(&vp_reg->wrdma_alarm_status);
2117
2118                if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2119
2120                        val64 = readq(&vp_reg->prc_alarm_reg);
2121                        mask64 = readq(&vp_reg->prc_alarm_mask);
2122
2123                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
2124                                ~mask64)
2125                                sw_stats->error_stats.prc_ring_bumps++;
2126
2127                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2128                                ~mask64) {
2129                                sw_stats->error_stats.prc_rxdcm_sc_err++;
2130
2131                                alarm_event = VXGE_HW_SET_LEVEL(
2132                                        VXGE_HW_EVENT_VPATH_ERR,
2133                                        alarm_event);
2134                        }
2135
2136                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2137                                & ~mask64) {
2138                                sw_stats->error_stats.prc_rxdcm_sc_abort++;
2139
2140                                alarm_event = VXGE_HW_SET_LEVEL(
2141                                                VXGE_HW_EVENT_VPATH_ERR,
2142                                                alarm_event);
2143                        }
2144
2145                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2146                                 & ~mask64) {
2147                                sw_stats->error_stats.prc_quanta_size_err++;
2148
2149                                alarm_event = VXGE_HW_SET_LEVEL(
2150                                        VXGE_HW_EVENT_VPATH_ERR,
2151                                        alarm_event);
2152                        }
2153
2154                        if (!skip_alarms) {
2155                                writeq(VXGE_HW_INTR_MASK_ALL,
2156                                        &vp_reg->prc_alarm_reg);
2157                                alarm_event = VXGE_HW_SET_LEVEL(
2158                                                VXGE_HW_EVENT_ALARM_CLEARED,
2159                                                alarm_event);
2160                        }
2161                }
2162        }
2163out:
2164        hldev->stats.sw_dev_err_stats.vpath_alarms++;
2165out2:
2166        if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2167                (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2168                return VXGE_HW_OK;
2169
2170        __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2171
2172        if (alarm_event == VXGE_HW_EVENT_SERR)
2173                return VXGE_HW_ERR_CRITICAL;
2174
2175        return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2176                VXGE_HW_ERR_SLOT_FREEZE :
2177                (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2178                VXGE_HW_ERR_VPATH;
2179}
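/*
 * Note on the masking idiom used throughout the handler above:
 * "(val64 & SOME_ERR_BIT) & ~mask64" is non-zero only when the alarm
 * bit is both asserted in the error register and not masked in the
 * corresponding mask register. For example (illustrative), with
 * val64 = 0x6 and mask64 = 0x4, the 0x4 alarm is suppressed while the
 * 0x2 alarm is still counted.
 */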
2180
2181/*
2182 * vxge_hw_vpath_alarm_process - Process Alarms.
2183 * @vpath: Virtual Path.
2184 * @skip_alarms: Do not clear the alarms
2185 *
2186 * Process vpath alarms.
2187 *
2188 */
2189enum vxge_hw_status vxge_hw_vpath_alarm_process(
2190                        struct __vxge_hw_vpath_handle *vp,
2191                        u32 skip_alarms)
2192{
2193        enum vxge_hw_status status = VXGE_HW_OK;
2194
2195        if (vp == NULL) {
2196                status = VXGE_HW_ERR_INVALID_HANDLE;
2197                goto exit;
2198        }
2199
2200        status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2201exit:
2202        return status;
2203}
2204
2205/**
2206 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2207 *                          alarms
2208 * @vp: Virtual Path handle.
2209 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2210 *             interrupts (can be repeated). If the fifo or ring is not
2211 *             enabled, the MSIX vector for it should be set to 0.
2212 * @alarm_msix_id: MSIX vector for the alarm.
2213 *
2214 * This API associates the given MSIX vector numbers with the four TIM
2215 * interrupts and the alarm interrupt.
2216 */
2217enum vxge_hw_status
2218vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2219                       int alarm_msix_id)
2220{
2221        u64 val64;
2222        struct __vxge_hw_virtualpath *vpath = vp->vpath;
2223        struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2224        u32 first_vp_id = vpath->hldev->first_vp_id;
2225
2226        val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2227                        (first_vp_id * 4) + tim_msix_id[0]) |
2228                VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2229                        (first_vp_id * 4) + tim_msix_id[1]) |
2230                VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
2231                        (first_vp_id * 4) + tim_msix_id[2]) |
2232                VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
2233                        (first_vp_id * 4) + tim_msix_id[3]);
2234
2235
2236        writeq(val64, &vp_reg->interrupt_cfg0);
2237
2238        writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2239                        (first_vp_id * 4) + alarm_msix_id),
2240                        &vp_reg->interrupt_cfg2);
2241
2242        if (vpath->hldev->config.intr_mode ==
2243                                        VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2244                /* One shot mode: program the one shot enable for all
2245                 * three one shot vectors under a single check. */
2246                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2247                                VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2248                                0, 32), &vp_reg->one_shot_vect1_en);
2249
2250                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2251                                VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2252                                0, 32), &vp_reg->one_shot_vect2_en);
2253
2254                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2255                                VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2256                                0, 32), &vp_reg->one_shot_vect3_en);
2257        }
2258
2259
2260        return VXGE_HW_OK;
2261}
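/*
 * Worked example of the vector numbering above (illustrative): with
 * first_vp_id = 1, tim_msix_id = {0, 1, 0, 0} and alarm_msix_id = 2,
 * TIM groups 0, 2 and 3 map to absolute MSIX vector (1 * 4) + 0 = 4,
 * TIM group 1 to (1 * 4) + 1 = 5, and the alarm to (1 * 4) + 2 = 6;
 * i.e. the relative ids are offset by first_vp_id * 4 to obtain
 * absolute vector numbers.
 */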
2262
2263/**
2264 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2265 * @vp: Virtual Path handle.
2266 * @msix_id: MSIX ID
2267 *
2268 * The function masks the msix interrupt for the given msix_id by setting
2269 * the corresponding bit in the device's set_msix_mask_vect register.
2270 * The function returns nothing.
2271 *
2272 * See also: vxge_hw_vpath_msix_unmask(),
2273 * vxge_hw_vpath_msix_mask_all()
2274 */
2275void
2276vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2277{
2278        struct __vxge_hw_device *hldev = vp->vpath->hldev;
2279        __vxge_hw_pio_mem_write32_upper(
2280                (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2281                        (msix_id  / 4)), 0, 32),
2282                &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2283
2284        return;
2285}
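/*
 * Example of the id-to-register mapping above (illustrative): for
 * msix_id = 6 and first_vp_id = 1 the write lands in
 * set_msix_mask_vect[6 % 4] = set_msix_mask_vect[2], with the bit
 * selected by vxge_mBIT(1 + 6 / 4) = vxge_mBIT(2). The clear/unmask
 * helpers below use the same mapping.
 */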
2286
2287/**
2288 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2289 * @vp: Virtual Path handle.
2290 * @msix_id: MSIX ID
2291 *
2292 * The function clears the msix interrupt for the given msix_id. In one
2293 * shot mode it clears the one shot vector, re-arming it; otherwise it
2294 * simply unmasks the vector. The function returns nothing.
2295 *
2296 * See also: vxge_hw_vpath_msix_mask(),
2297 * vxge_hw_vpath_msix_unmask()
2298 */
2299void
2300vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2301{
2302        struct __vxge_hw_device *hldev = vp->vpath->hldev;
2303        if (hldev->config.intr_mode ==
2304                        VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2305                __vxge_hw_pio_mem_write32_upper(
2306                        (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2307                                (msix_id/4)), 0, 32),
2308                                &hldev->common_reg->
2309                                        clr_msix_one_shot_vec[msix_id%4]);
2310        } else {
2311                __vxge_hw_pio_mem_write32_upper(
2312                        (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2313                                (msix_id/4)), 0, 32),
2314                                &hldev->common_reg->
2315                                        clear_msix_mask_vect[msix_id%4]);
2316        }
2317
2318        return;
2319}
2320
2321/**
2322 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2323 * @vp: Virtual Path handle.
2324 * @msix_id: MSIX ID
2325 *
2326 * The function unmasks the msix interrupt for the given msix_id by
2327 * setting the corresponding bit in the device's clear_msix_mask_vect
2328 * register. The function returns nothing.
2329 *
2330 * See also: vxge_hw_vpath_msix_mask(),
2331 * vxge_hw_vpath_msix_clear()
2332 */
2333void
2334vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2335{
2336        struct __vxge_hw_device *hldev = vp->vpath->hldev;
2337        __vxge_hw_pio_mem_write32_upper(
2338                        (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
2339                        (msix_id/4)), 0, 32),
2340                        &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2341
2342        return;
2343}
2344
2345/**
2346 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2347 * @vp: Virtual Path handle.
2348 *
2349 * The function masks all the msix interrupts for the given vpath.
2350 *
2351 */
2352void
2353vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2354{
2355
2356        __vxge_hw_pio_mem_write32_upper(
2357                (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2358                &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2359
2360        return;
2361}
2362
2363/**
2364 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2365 * @vp: Virtual Path handle.
2366 *
2367 * Mask Tx and Rx vpath interrupts.
2368 *
2369 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2370 */
2371void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2372{
2373        u64     tim_int_mask0[4] = {[0 ...3] = 0};
2374        u32     tim_int_mask1[4] = {[0 ...3] = 0};
2375        u64     val64;
2376        struct __vxge_hw_device *hldev = vp->vpath->hldev;
2377
2378        VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2379                tim_int_mask1, vp->vpath->vp_id);
2380
2381        val64 = readq(&hldev->common_reg->tim_int_mask0);
2382
2383        if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2384                (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2385                writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2386                        tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2387                        &hldev->common_reg->tim_int_mask0);
2388        }
2389
2390        val64 = readl(&hldev->common_reg->tim_int_mask1);
2391
2392        if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2393                (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2394                __vxge_hw_pio_mem_write32_upper(
2395                        (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2396                        tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2397                        &hldev->common_reg->tim_int_mask1);
2398        }
2399
2400        return;
2401}
2402
2403/**
2404 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2405 * @vp: Virtual Path handle.
2406 *
2407 * Unmask Tx and Rx vpath interrupts.
2408 *
2409 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2410 */
2411void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2412{
2413        u64     tim_int_mask0[4] = {[0 ...3] = 0};
2414        u32     tim_int_mask1[4] = {[0 ...3] = 0};
2415        u64     val64;
2416        struct __vxge_hw_device *hldev = vp->vpath->hldev;
2417
2418        VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2419                tim_int_mask1, vp->vpath->vp_id);
2420
2421        val64 = readq(&hldev->common_reg->tim_int_mask0);
2422
2423        if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2424           (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2425                writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2426                        tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2427                        &hldev->common_reg->tim_int_mask0);
2428        }
2429
2430        if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2431           (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2432                __vxge_hw_pio_mem_write32_upper(
2433                        (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2434                          tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2435                        &hldev->common_reg->tim_int_mask1);
2436        }
2437
2438        return;
2439}
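/*
 * Typical usage sketch (an assumption about caller flow, not mandated
 * by this file): an INTA ISR masks its Tx/Rx sources before deferring
 * work and unmasks them once that work is done:
 *
 *      vxge_hw_vpath_inta_mask_tx_rx(vp);      // in the ISR
 *      ...                                     // process completions
 *      vxge_hw_vpath_inta_unmask_tx_rx(vp);    // when done
 */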
2440
2441/**
2442 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2443 * descriptors and process the same.
2444 * @ring: Handle to the ring object used for receive
2445 *
2446 * The function polls the Rx for the completed descriptors and calls
2447 * the driver via the supplied completion callback.
2448 *
2449 * Returns: VXGE_HW_OK, if the polling completed successfully.
2450 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2451 * descriptors available which are yet to be processed.
2452 *
2453 * See also: vxge_hw_vpath_poll_tx()
2454 */
2455enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2456{
2457        u8 t_code;
2458        enum vxge_hw_status status = VXGE_HW_OK;
2459        void *first_rxdh;
2460        u64 val64 = 0;
2461        int new_count = 0;
2462
2463        ring->cmpl_cnt = 0;
2464
2465        status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2466        if (status == VXGE_HW_OK)
2467                ring->callback(ring, first_rxdh,
2468                        t_code, ring->channel.userdata);
2469
2470        if (ring->cmpl_cnt != 0) {
2471                ring->doorbell_cnt += ring->cmpl_cnt;
2472                if (ring->doorbell_cnt >= ring->rxds_limit) {
2473                        /*
2474                         * Each RxD is of 4 qwords, update the number of
2475                         * qwords replenished
2476                         */
2477                        new_count = (ring->doorbell_cnt * 4);
2478
2479                        /* For each block add 4 more qwords */
2480                        ring->total_db_cnt += ring->doorbell_cnt;
2481                        if (ring->total_db_cnt >= ring->rxds_per_block) {
2482                                new_count += 4;
2483                                /* Reset total count */
2484                                ring->total_db_cnt %= ring->rxds_per_block;
2485                        }
2486                        writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2487                                &ring->vp_reg->prc_rxd_doorbell);
2488                        val64 =
2489                          readl(&ring->common_reg->titan_general_int_status);
2490                        ring->doorbell_cnt = 0;
2491                }
2492        }
2493
2494        return status;
2495}
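/*
 * Worked example of the doorbell arithmetic above (illustrative): with
 * rxds_limit = 16 and doorbell_cnt reaching 16, new_count starts at
 * 16 * 4 = 64 qwords, since each RxD occupies 4 qwords; if
 * total_db_cnt then reaches rxds_per_block, 4 extra qwords are added
 * for the block and total_db_cnt wraps modulo rxds_per_block.
 */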
2496
2497/**
2498 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2499 * the same.
2500 * @fifo: Handle to the fifo object used for non offload send
2501 *
2502 * The function polls the Tx for the completed descriptors and calls
2503 * the driver via the supplied completion callback.
2504 *
2505 * Returns: VXGE_HW_OK, if the polling completed successfully.
2506 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2507 * descriptors available which are yet to be processed.
2508 *
2509 * See also: vxge_hw_vpath_poll_rx().
2510 */
2511enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2512                                        struct sk_buff ***skb_ptr, int nr_skb,
2513                                        int *more)
2514{
2515        enum vxge_hw_fifo_tcode t_code;
2516        void *first_txdlh;
2517        enum vxge_hw_status status = VXGE_HW_OK;
2518        struct __vxge_hw_channel *channel;
2519
2520        channel = &fifo->channel;
2521
2522        status = vxge_hw_fifo_txdl_next_completed(fifo,
2523                                &first_txdlh, &t_code);
2524        if (status == VXGE_HW_OK)
2525                if (fifo->callback(fifo, first_txdlh, t_code,
2526                        channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2527                        status = VXGE_HW_COMPLETIONS_REMAIN;
2528
2529        return status;
2530}
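/*
 * Usage sketch (illustrative): draining Tx completions in a loop, with
 * hypothetical "skbs"/"budget" bookkeeping owned by the caller. "fifo"
 * is assumed to be a valid fifo handle.
 *
 *      struct sk_buff **skb_head = skbs;
 *      int more = 0;
 *
 *      do {
 *              vxge_hw_vpath_poll_tx(fifo, &skb_head, budget, &more);
 *              // free the completed skbs collected in "skbs" here
 *      } while (more);
 */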
2531