/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <linux/list.h>
#include <linux/slab.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
#endif

#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif

#define VXGE_HW_MIN_MTU		68
#define VXGE_HW_MAX_MTU		9600
#define VXGE_HW_DEFAULT_MTU	1500

#define VXGE_HW_MAX_ROM_IMAGES 8

struct eprom_image {
	u8 is_valid:1;
	u8 index;
	u8 type;
	u16 version;
};

#ifdef VXGE_DEBUG_ASSERT
/**
 * vxge_assert
 * @test: C-condition to check
 * @fmt: printf like format string
 *
 * This macro implements a traditional assert. Assertions are enabled by
 * default and can be disabled by leaving the VXGE_DEBUG_ASSERT macro
 * undefined at compile time.
 */
#define vxge_assert(test) BUG_ON(!(test))
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */

/**
 * enum vxge_debug_level
 * @VXGE_NONE: debug disabled
 * @VXGE_ERR: all errors are logged
 * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
 *	are logged. Very noisy.
 *
 * This enumeration is used to switch between debug levels at runtime,
 * provided the DEBUG macro was defined during compilation. If DEBUG is
 * not defined, the debug code is compiled out.
 */
enum vxge_debug_level {
	VXGE_NONE = 0,
	VXGE_TRACE = 1,
	VXGE_ERR = 2
};

#define NULL_VPID 0xFFFFFFFF
#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
#define VXGE_DEBUG_MODULE_MASK 0xffffffff
#define VXGE_DEBUG_TRACE_MASK 0xffffffff
#define VXGE_DEBUG_ERR_MASK 0xffffffff
#define VXGE_DEBUG_MASK 0x000001ff
#else
#define VXGE_DEBUG_MODULE_MASK 0x20000000
#define VXGE_DEBUG_TRACE_MASK 0x20000000
#define VXGE_DEBUG_ERR_MASK 0x20000000
#define VXGE_DEBUG_MASK 0x00000001
#endif

/*
 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
 *
 * This enumeration is used to distinguish modules or libraries during
 * compilation and at runtime. The Makefile must declare the
 * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
 */
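/*
 * Illustrative note (not part of the original documentation): VXGE_ALIGN()
 * above evaluates to the number of pad bytes needed to bring an address up
 * to the next 'size' boundary, where 'size' is assumed to be a power of two.
 * For example, with the 128-byte cache line configured by default:
 *
 *	VXGE_ALIGN(0x1000, VXGE_CACHE_LINE_SIZE) == 0	(already aligned)
 *	VXGE_ALIGN(0x1005, VXGE_CACHE_LINE_SIZE) == 123	(pads up to 0x1080)
 */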
#define VXGE_COMPONENT_LL 0x20000000
#define VXGE_COMPONENT_ALL 0xffffffff

#define VXGE_HW_BASE_INF	100
#define VXGE_HW_BASE_ERR	200
#define VXGE_HW_BASE_BADCFG	300

enum vxge_hw_status {
	VXGE_HW_OK = 0,
	VXGE_HW_FAIL = 1,
	VXGE_HW_PENDING = 2,
	VXGE_HW_COMPLETIONS_REMAIN = 3,

	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
	VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,

	VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
	VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
	VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
	VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
	VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
	VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
	VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
	VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
	VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
	VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
	VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
	VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
	VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
	VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
	VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
	VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
	VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
	VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
	VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
	VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
	VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
	VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,

	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
	VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
	VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
	VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
	VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,

	VXGE_HW_EOF_TRACE_BUF = -1
};

/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 *
 */
enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};

/**
 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
 * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
 * @VXGE_HW_FW_UPGRADE_ERR: upload error
 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
 *
 */
enum vxge_hw_fw_upgrade_code {
	VXGE_HW_FW_UPGRADE_OK = 0,
	VXGE_HW_FW_UPGRADE_DONE = 1,
	VXGE_HW_FW_UPGRADE_ERR = 2,
	VXGE_FW_UPGRADE_BYTES2SKIP = 3
};

/**
 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
176 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data 177 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow 178 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file 179 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file 180 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file 181 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file 182 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data 183 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file 184 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type 185 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash image check failed 186 */ 187enum vxge_hw_fw_upgrade_err_code { 188 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1, 189 VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2, 190 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3, 191 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4, 192 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5, 193 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6, 194 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7, 195 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8, 196 VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9, 197 VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10 198}; 199 200/** 201 * struct vxge_hw_device_date - Date Format 202 * @day: Day 203 * @month: Month 204 * @year: Year 205 * @date: Date in string format 206 * 207 * Structure for returning date 208 */ 209 210#define VXGE_HW_FW_STRLEN 32 211struct vxge_hw_device_date { 212 u32 day; 213 u32 month; 214 u32 year; 215 char date[VXGE_HW_FW_STRLEN]; 216}; 217 218struct vxge_hw_device_version { 219 u32 major; 220 u32 minor; 221 u32 build; 222 char version[VXGE_HW_FW_STRLEN]; 223}; 224 225/** 226 * struct vxge_hw_fifo_config - Configuration of fifo. 227 * @enable: Is this fifo to be commissioned 228 * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors) 229 * blocks per queue. 230 * @max_frags: Max number of Tx buffers per TxDL (that is, per single 231 * transmit operation). 232 * No more than 256 transmit buffers can be specified. 233 * @memblock_size: Fifo descriptors are allocated in blocks of @mem_block_size 234 * bytes. Setting @memblock_size to page size ensures 235 * by-page allocation of descriptors. 128K bytes is the 236 * maximum supported block size. 237 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data 238 * (e.g., to align on a cache line). 239 * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL. 240 * Use 0 otherwise. 241 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation, 242 * which generally improves latency of the host bridge operation 243 * (see PCI specification). For valid values please refer 244 * to struct vxge_hw_fifo_config{} in the driver sources. 245 * Configuration of all Titan fifos. 246 * Note: Valid (min, max) range for each attribute is specified in the body of 247 * the struct vxge_hw_fifo_config{} structure. 
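 *
 * A minimal fill-in sketch (illustrative only; the variable name is
 * hypothetical and the values are simply legal per the min/max/default
 * macros defined in the structure body below):
 *
 *	struct vxge_hw_fifo_config fifo_cfg = {
 *		.enable		= VXGE_HW_FIFO_ENABLE,
 *		.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,
 *		.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,
 *		.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
 *		.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
 *		.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
 *		.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
 *	};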
248 */ 249struct vxge_hw_fifo_config { 250 u32 enable; 251#define VXGE_HW_FIFO_ENABLE 1 252#define VXGE_HW_FIFO_DISABLE 0 253 254 u32 fifo_blocks; 255#define VXGE_HW_MIN_FIFO_BLOCKS 2 256#define VXGE_HW_MAX_FIFO_BLOCKS 128 257 258 u32 max_frags; 259#define VXGE_HW_MIN_FIFO_FRAGS 1 260#define VXGE_HW_MAX_FIFO_FRAGS 256 261 262 u32 memblock_size; 263#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE 264#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072 265#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096 266 267 u32 alignment_size; 268#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0 269#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536 270#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE 271 272 u32 intr; 273#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1 274#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0 275#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0 276 277 u32 no_snoop_bits; 278#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0 279#define VXGE_HW_FIFO_NO_SNOOP_TXD 1 280#define VXGE_HW_FIFO_NO_SNOOP_FRM 2 281#define VXGE_HW_FIFO_NO_SNOOP_ALL 3 282#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0 283 284}; 285/** 286 * struct vxge_hw_ring_config - Ring configurations. 287 * @enable: Is this ring to be commissioned 288 * @ring_blocks: Numbers of RxD blocks in the ring 289 * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer 290 * to Titan User Guide. 291 * @scatter_mode: Titan supports two receive scatter modes: A and B. 292 * For details please refer to Titan User Guide. 293 * @rx_timer_val: The number of 32ns periods that would be counted between two 294 * timer interrupts. 295 * @greedy_return: If Set it forces the device to return absolutely all RxD 296 * that are consumed and still on board when a timer interrupt 297 * triggers. If Clear, then if the device has already returned 298 * RxD before current timer interrupt trigerred and after the 299 * previous timer interrupt triggered, then the device is not 300 * forced to returned the rest of the consumed RxD that it has 301 * on board which account for a byte count less than the one 302 * programmed into PRC_CFG6.RXD_CRXDT field 303 * @rx_timer_ci: TBD 304 * @backoff_interval_us: Time (in microseconds), after which Titan 305 * tries to download RxDs posted by the host. 306 * Note that the "backoff" does not happen if host posts receive 307 * descriptors in the timely fashion. 308 * Ring configuration. 309 */ 310struct vxge_hw_ring_config { 311 u32 enable; 312#define VXGE_HW_RING_ENABLE 1 313#define VXGE_HW_RING_DISABLE 0 314#define VXGE_HW_RING_DEFAULT 1 315 316 u32 ring_blocks; 317#define VXGE_HW_MIN_RING_BLOCKS 1 318#define VXGE_HW_MAX_RING_BLOCKS 128 319#define VXGE_HW_DEF_RING_BLOCKS 2 320 321 u32 buffer_mode; 322#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 323#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3 324#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5 325#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1 326 327 u32 scatter_mode; 328#define VXGE_HW_RING_SCATTER_MODE_A 0 329#define VXGE_HW_RING_SCATTER_MODE_B 1 330#define VXGE_HW_RING_SCATTER_MODE_C 2 331#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff 332 333 u64 rxds_limit; 334#define VXGE_HW_DEF_RING_RXDS_LIMIT 44 335}; 336 337/** 338 * struct vxge_hw_vp_config - Configuration of virtual path 339 * @vp_id: Virtual Path Id 340 * @min_bandwidth: Minimum Guaranteed bandwidth 341 * @ring: See struct vxge_hw_ring_config{}. 342 * @fifo: See struct vxge_hw_fifo_config{}. 343 * @tti: Configuration of interrupt associated with Transmit. 
 *	see struct vxge_hw_tim_intr_config{};
 * @rti: Configuration of interrupt associated with Receive.
 *	see struct vxge_hw_tim_intr_config{};
 * @mtu: MTU size used on this port.
 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
 *	remove the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *	0 - Do not strip the VLAN tag.
 *	1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
 *	always placed into the RxDMA descriptor.
 *
 * This structure is used by the driver to pass the configuration parameters
 * used to configure a Virtual Path.
 */
struct vxge_hw_vp_config {
	u32 vp_id;

#define VXGE_HW_VPATH_PRIORITY_MIN		0
#define VXGE_HW_VPATH_PRIORITY_MAX		16
#define VXGE_HW_VPATH_PRIORITY_DEFAULT		0

	u32 min_bandwidth;
#define VXGE_HW_VPATH_BANDWIDTH_MIN		0
#define VXGE_HW_VPATH_BANDWIDTH_MAX		100
#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT		0

	struct vxge_hw_ring_config ring;
	struct vxge_hw_fifo_config fifo;
	struct vxge_hw_tim_intr_config tti;
	struct vxge_hw_tim_intr_config rti;

	u32 mtu;
#define VXGE_HW_VPATH_MIN_INITIAL_MTU	VXGE_HW_MIN_MTU
#define VXGE_HW_VPATH_MAX_INITIAL_MTU	VXGE_HW_MAX_MTU
#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU	0xffffffff

	u32 rpa_strip_vlan_tag;
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE		1
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE	0
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT	0xffffffff

};
/**
 * struct vxge_hw_device_config - Device configuration.
 * @dma_blockpool_initial: Initial size of DMA Pool
 * @dma_blockpool_max: Maximum blocks in DMA pool
 * @intr_mode: Line, or MSI-X interrupt.
 *
 * @rth_en: Enable Receive Traffic Hashing (RTH) using the Indirection
 *	Table (IT).
 * @rth_it_type: RTH IT table programming type
 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
 * @vp_config: Configuration for virtual paths
 * @device_poll_millis: Specify the interval (in milliseconds)
 *	to wait for register reads
 *
 * Titan configuration.
 * Contains per-device configuration parameters, including:
 * - stats sampling interval, etc.
 *
 * In addition, struct vxge_hw_device_config{} includes "subordinate"
 * configurations, including:
 * - fifos and rings;
 * - MAC (done at firmware level).
 *
 * See Titan User Guide for more details.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_device_config{} structure. Please refer to the
 * corresponding include file.
 * See also: struct vxge_hw_tim_intr_config{}.
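 *
 * A minimal fill-in sketch (illustrative only; the variable name is
 * hypothetical and the values come from the macros defined in the structure
 * body below):
 *
 *	struct vxge_hw_device_config dev_cfg = {
 *		.intr_mode		= VXGE_HW_INTR_MODE_MSIX,
 *		.rth_en			= VXGE_HW_RTH_ENABLE,
 *		.rth_it_type		= VXGE_HW_RTH_IT_TYPE_DEFAULT,
 *		.device_poll_millis	= VXGE_HW_DEF_DEVICE_POLL_MILLIS,
 *	};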
 */
struct vxge_hw_device_config {
	u32 dma_blockpool_initial;
	u32 dma_blockpool_max;
#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE		0
#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE	0
#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE	4
#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE		4096

#define VXGE_HW_MAX_PAYLOAD_SIZE_512		2

	u32 intr_mode;
#define VXGE_HW_INTR_MODE_IRQLINE		0
#define VXGE_HW_INTR_MODE_MSIX			1
#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT		2

#define VXGE_HW_INTR_MODE_DEF			0

	u32 rth_en;
#define VXGE_HW_RTH_DISABLE			0
#define VXGE_HW_RTH_ENABLE			1
#define VXGE_HW_RTH_DEFAULT			0

	u32 rth_it_type;
#define VXGE_HW_RTH_IT_TYPE_SOLO_IT		0
#define VXGE_HW_RTH_IT_TYPE_MULTI_IT		1
#define VXGE_HW_RTH_IT_TYPE_DEFAULT		0

	u32 rts_mac_en;
#define VXGE_HW_RTS_MAC_DISABLE			0
#define VXGE_HW_RTS_MAC_ENABLE			1
#define VXGE_HW_RTS_MAC_DEFAULT			0

	struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];

	u32 device_poll_millis;
#define VXGE_HW_MIN_DEVICE_POLL_MILLIS		1
#define VXGE_HW_MAX_DEVICE_POLL_MILLIS		100000
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS		1000

};

/**
 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
 * @devh: HW device handle.
 *
 * Link-up notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_link_down_f - Link-Down callback provided by driver.
 * @devh: HW device handle.
 *
 * Link-Down notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_crit_err_f - Critical Error notification callback.
 * @devh: HW device handle (typically obtained at HW device
 *	initialization time).
 * @type: Enumerated hw error, e.g.: double ECC.
 * @serr_data: Titan status.
 * @ext_data: Extended data. The contents depend on the @type.
 *
 * Critical error notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
 * vxge_hw_driver_initialize().
 */

/**
 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
 * @link_up: See vxge_uld_link_up_f{}.
 * @link_down: See vxge_uld_link_down_f{}.
 * @crit_err: See vxge_uld_crit_err_f{}.
 *
 * Driver slow-path (per-driver) callbacks.
 * Implemented by the driver and provided to HW via
 * vxge_hw_driver_initialize().
 * Note that these callbacks are not mandatory: HW will not invoke
 * a callback if NULL is specified.
 *
 * See also: vxge_hw_driver_initialize().
 */
struct vxge_hw_uld_cbs {
	void (*link_up)(struct __vxge_hw_device *devh);
	void (*link_down)(struct __vxge_hw_device *devh);
	void (*crit_err)(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data);
};

/*
 * struct __vxge_hw_blockpool_entry - Block private data structure
 * @item: List header used to link.
 * @length: Length of the block
 * @memblock: Virtual address block
 * @dma_addr: DMA Address of the block.
 * @dma_handle: DMA handle of the block.
520 * @acc_handle: DMA acc handle 521 * 522 * Block is allocated with a header to put the blocks into list. 523 * 524 */ 525struct __vxge_hw_blockpool_entry { 526 struct list_head item; 527 u32 length; 528 void *memblock; 529 dma_addr_t dma_addr; 530 struct pci_dev *dma_handle; 531 struct pci_dev *acc_handle; 532}; 533 534/* 535 * struct __vxge_hw_blockpool - Block Pool 536 * @hldev: HW device 537 * @block_size: size of each block. 538 * @Pool_size: Number of blocks in the pool 539 * @pool_max: Maximum number of blocks above which to free additional blocks 540 * @req_out: Number of block requests with OS out standing 541 * @free_block_list: List of free blocks 542 * 543 * Block pool contains the DMA blocks preallocated. 544 * 545 */ 546struct __vxge_hw_blockpool { 547 struct __vxge_hw_device *hldev; 548 u32 block_size; 549 u32 pool_size; 550 u32 pool_max; 551 u32 req_out; 552 struct list_head free_block_list; 553 struct list_head free_entry_list; 554}; 555 556/* 557 * enum enum __vxge_hw_channel_type - Enumerated channel types. 558 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel. 559 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo. 560 * @VXGE_HW_CHANNEL_TYPE_RING: ring. 561 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported 562 * (and recognized) channel types. Currently: 2. 563 * 564 * Enumerated channel types. Currently there are only two link-layer 565 * channels - Titan fifo and Titan ring. In the future the list will grow. 566 */ 567enum __vxge_hw_channel_type { 568 VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0, 569 VXGE_HW_CHANNEL_TYPE_FIFO = 1, 570 VXGE_HW_CHANNEL_TYPE_RING = 2, 571 VXGE_HW_CHANNEL_TYPE_MAX = 3 572}; 573 574/* 575 * struct __vxge_hw_channel 576 * @item: List item; used to maintain a list of open channels. 577 * @type: Channel type. See enum vxge_hw_channel_type{}. 578 * @devh: Device handle. HW device object that contains _this_ channel. 579 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel. 580 * @length: Channel length. Currently allocated number of descriptors. 581 * The channel length "grows" when more descriptors get allocated. 582 * See _hw_mempool_grow. 583 * @reserve_arr: Reserve array. Contains descriptors that can be reserved 584 * by driver for the subsequent send or receive operation. 585 * See vxge_hw_fifo_txdl_reserve(), 586 * vxge_hw_ring_rxd_reserve(). 587 * @reserve_ptr: Current pointer in the resrve array 588 * @reserve_top: Reserve top gives the maximum number of dtrs available in 589 * reserve array. 590 * @work_arr: Work array. Contains descriptors posted to the channel. 591 * Note that at any point in time @work_arr contains 3 types of 592 * descriptors: 593 * 1) posted but not yet consumed by Titan device; 594 * 2) consumed but not yet completed; 595 * 3) completed but not yet freed 596 * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free()) 597 * @post_index: Post index. At any point in time points on the 598 * position in the channel, which'll contain next to-be-posted 599 * descriptor. 600 * @compl_index: Completion index. At any point in time points on the 601 * position in the channel, which will contain next 602 * to-be-completed descriptor. 603 * @free_arr: Free array. Contains completed descriptors that were freed 604 * (i.e., handed over back to HW) by driver. 605 * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free(). 606 * @free_ptr: current pointer in free array 607 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize 608 * to store per-operation control information. 
609 * @stats: Pointer to common statistics 610 * @userdata: Per-channel opaque (void*) user-defined context, which may be 611 * driver object, ULP connection, etc. 612 * Once channel is open, @userdata is passed back to user via 613 * vxge_hw_channel_callback_f. 614 * 615 * HW channel object. 616 * 617 * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag 618 */ 619struct __vxge_hw_channel { 620 struct list_head item; 621 enum __vxge_hw_channel_type type; 622 struct __vxge_hw_device *devh; 623 struct __vxge_hw_vpath_handle *vph; 624 u32 length; 625 u32 vp_id; 626 void **reserve_arr; 627 u32 reserve_ptr; 628 u32 reserve_top; 629 void **work_arr; 630 u32 post_index ____cacheline_aligned; 631 u32 compl_index ____cacheline_aligned; 632 void **free_arr; 633 u32 free_ptr; 634 void **orig_arr; 635 u32 per_dtr_space; 636 void *userdata; 637 struct vxge_hw_common_reg __iomem *common_reg; 638 u32 first_vp_id; 639 struct vxge_hw_vpath_stats_sw_common_info *stats; 640 641} ____cacheline_aligned; 642 643/* 644 * struct __vxge_hw_virtualpath - Virtual Path 645 * 646 * @vp_id: Virtual path id 647 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver 648 * @hldev: Hal device 649 * @vp_config: Virtual Path Config 650 * @vp_reg: VPATH Register map address in BAR0 651 * @vpmgmt_reg: VPATH_MGMT register map address 652 * @max_mtu: Max mtu that can be supported 653 * @vsport_number: vsport attached to this vpath 654 * @max_kdfc_db: Maximum kernel mode doorbells 655 * @max_nofl_db: Maximum non offload doorbells 656 * @tx_intr_num: Interrupt Number associated with the TX 657 658 * @ringh: Ring Queue 659 * @fifoh: FIFO Queue 660 * @vpath_handles: Virtual Path handles list 661 * @stats_block: Memory for DMAing stats 662 * @stats: Vpath statistics 663 * 664 * Virtual path structure to encapsulate the data related to a virtual path. 665 * Virtual paths are allocated by the HW upon getting configuration from the 666 * driver and inserted into the list of virtual paths. 667 */ 668struct __vxge_hw_virtualpath { 669 u32 vp_id; 670 671 u32 vp_open; 672#define VXGE_HW_VP_NOT_OPEN 0 673#define VXGE_HW_VP_OPEN 1 674 675 struct __vxge_hw_device *hldev; 676 struct vxge_hw_vp_config *vp_config; 677 struct vxge_hw_vpath_reg __iomem *vp_reg; 678 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; 679 struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db; 680 681 u32 max_mtu; 682 u32 vsport_number; 683 u32 max_kdfc_db; 684 u32 max_nofl_db; 685 686 struct __vxge_hw_ring *____cacheline_aligned ringh; 687 struct __vxge_hw_fifo *____cacheline_aligned fifoh; 688 struct list_head vpath_handles; 689 struct __vxge_hw_blockpool_entry *stats_block; 690 struct vxge_hw_vpath_stats_hw_info *hw_stats; 691 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; 692 struct vxge_hw_vpath_stats_sw_info *sw_stats; 693 spinlock_t lock; 694}; 695 696/* 697 * struct __vxge_hw_vpath_handle - List item to store callback information 698 * @item: List head to keep the item in linked list 699 * @vpath: Virtual path to which this item belongs 700 * 701 * This structure is used to store the callback information. 702 */ 703struct __vxge_hw_vpath_handle { 704 struct list_head item; 705 struct __vxge_hw_virtualpath *vpath; 706}; 707 708/* 709 * struct __vxge_hw_device 710 * 711 * HW device object. 712 */ 713/** 714 * struct __vxge_hw_device - Hal device object 715 * @magic: Magic Number 716 * @bar0: BAR0 virtual address. 
717 * @pdev: Physical device handle 718 * @config: Confguration passed by the LL driver at initialization 719 * @link_state: Link state 720 * 721 * HW device object. Represents Titan adapter 722 */ 723struct __vxge_hw_device { 724 u32 magic; 725#define VXGE_HW_DEVICE_MAGIC 0x12345678 726#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD 727 void __iomem *bar0; 728 struct pci_dev *pdev; 729 struct net_device *ndev; 730 struct vxge_hw_device_config config; 731 enum vxge_hw_device_link_state link_state; 732 733 struct vxge_hw_uld_cbs uld_callbacks; 734 735 u32 host_type; 736 u32 func_id; 737 u32 access_rights; 738#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1 739#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2 740#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4 741 struct vxge_hw_legacy_reg __iomem *legacy_reg; 742 struct vxge_hw_toc_reg __iomem *toc_reg; 743 struct vxge_hw_common_reg __iomem *common_reg; 744 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; 745 struct vxge_hw_srpcim_reg __iomem *srpcim_reg \ 746 [VXGE_HW_TITAN_SRPCIM_REG_SPACES]; 747 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \ 748 [VXGE_HW_TITAN_VPMGMT_REG_SPACES]; 749 struct vxge_hw_vpath_reg __iomem *vpath_reg \ 750 [VXGE_HW_TITAN_VPATH_REG_SPACES]; 751 u8 __iomem *kdfc; 752 u8 __iomem *usdc; 753 struct __vxge_hw_virtualpath virtual_paths \ 754 [VXGE_HW_MAX_VIRTUAL_PATHS]; 755 u64 vpath_assignments; 756 u64 vpaths_deployed; 757 u32 first_vp_id; 758 u64 tim_int_mask0[4]; 759 u32 tim_int_mask1[4]; 760 761 struct __vxge_hw_blockpool block_pool; 762 struct vxge_hw_device_stats stats; 763 u32 debug_module_mask; 764 u32 debug_level; 765 u32 level_err; 766 u32 level_trace; 767 u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES]; 768}; 769 770#define VXGE_HW_INFO_LEN 64 771/** 772 * struct vxge_hw_device_hw_info - Device information 773 * @host_type: Host Type 774 * @func_id: Function Id 775 * @vpath_mask: vpath bit mask 776 * @fw_version: Firmware version 777 * @fw_date: Firmware Date 778 * @flash_version: Firmware version 779 * @flash_date: Firmware Date 780 * @mac_addrs: Mac addresses for each vpath 781 * @mac_addr_masks: Mac address masks for each vpath 782 * 783 * Returns the vpath mask that has the bits set for each vpath allocated 784 * for the driver and the first mac address for each vpath 785 */ 786struct vxge_hw_device_hw_info { 787 u32 host_type; 788#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0 789#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1 790#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2 791#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3 792#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4 793#define VXGE_HW_SR_VH_FUNCTION0 5 794#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6 795#define VXGE_HW_VH_NORMAL_FUNCTION 7 796 u64 function_mode; 797#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0 798#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1 799#define VXGE_HW_FUNCTION_MODE_SRIOV 2 800#define VXGE_HW_FUNCTION_MODE_MRIOV 3 801#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4 802#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5 803#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6 804#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7 805#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8 806#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9 807#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10 808 809 u32 func_id; 810 u64 vpath_mask; 811 struct vxge_hw_device_version fw_version; 812 struct vxge_hw_device_date fw_date; 813 struct vxge_hw_device_version flash_version; 814 struct vxge_hw_device_date flash_date; 815 u8 serial_number[VXGE_HW_INFO_LEN]; 816 u8 part_number[VXGE_HW_INFO_LEN]; 817 u8 
product_desc[VXGE_HW_INFO_LEN]; 818 u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; 819 u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; 820}; 821 822/** 823 * struct vxge_hw_device_attr - Device memory spaces. 824 * @bar0: BAR0 virtual address. 825 * @pdev: PCI device object. 826 * 827 * Device memory spaces. Includes configuration, BAR0 etc. per device 828 * mapped memories. Also, includes a pointer to OS-specific PCI device object. 829 */ 830struct vxge_hw_device_attr { 831 void __iomem *bar0; 832 struct pci_dev *pdev; 833 struct vxge_hw_uld_cbs uld_callbacks; 834}; 835 836#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls) 837 838#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \ 839 if (i < 16) { \ 840 m0[0] |= vxge_vBIT(0x8, (i*4), 4); \ 841 m0[1] |= vxge_vBIT(0x4, (i*4), 4); \ 842 } \ 843 else { \ 844 m1[0] = 0x80000000; \ 845 m1[1] = 0x40000000; \ 846 } \ 847} 848 849#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \ 850 if (i < 16) { \ 851 m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \ 852 m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \ 853 } \ 854 else { \ 855 m1[0] = 0; \ 856 m1[1] = 0; \ 857 } \ 858} 859 860#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \ 861 status = vxge_hw_mrpcim_stats_access(hldev, \ 862 VXGE_HW_STATS_OP_READ, \ 863 loc, \ 864 offset, \ 865 &val64); \ 866 if (status != VXGE_HW_OK) \ 867 return status; \ 868} 869 870/* 871 * struct __vxge_hw_ring - Ring channel. 872 * @channel: Channel "base" of this ring, the common part of all HW 873 * channels. 874 * @mempool: Memory pool, the pool from which descriptors get allocated. 875 * (See vxge_hw_mm.h). 876 * @config: Ring configuration, part of device configuration 877 * (see struct vxge_hw_device_config{}). 878 * @ring_length: Length of the ring 879 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode, 880 * as per Titan User Guide. 881 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec, 882 * 1-buffer mode descriptor is 32 byte long, etc. 883 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep 884 * per-descriptor data (e.g., DMA handle for Solaris) 885 * @per_rxd_space: Per rxd space requested by driver 886 * @rxds_per_block: Number of descriptors per hardware-defined RxD 887 * block. Depends on the (1-, 3-, 5-) buffer mode. 888 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal 889 * usage. Not to confuse with @rxd_priv_size. 890 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR. 891 * @callback: Channel completion callback. HW invokes the callback when there 892 * are new completions on that channel. In many implementations 893 * the @callback executes in the hw interrupt context. 894 * @rxd_init: Channel's descriptor-initialize callback. 895 * See vxge_hw_ring_rxd_init_f{}. 896 * If not NULL, HW invokes the callback when opening 897 * the ring. 898 * @rxd_term: Channel's descriptor-terminate callback. If not NULL, 899 * HW invokes the callback when closing the corresponding channel. 900 * See also vxge_hw_channel_rxd_term_f{}. 901 * @stats: Statistics for ring 902 * Ring channel. 903 * 904 * Note: The structure is cache line aligned to better utilize 905 * CPU cache performance. 
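 *
 * A completion-callback sketch matching the @callback prototype below
 * (illustrative only; the function name, the t_code check against 0
 * ("Transfer OK") and the elided processing are hypothetical):
 *
 *	static enum vxge_hw_status
 *	my_rx_complete(struct __vxge_hw_ring *ringh, void *rxdh,
 *		       u8 t_code, void *userdata)
 *	{
 *		if (t_code != 0)
 *			... handle the reported transfer error ...
 *		... pass the buffer up, replenish and re-post the RxD ...
 *		return VXGE_HW_OK;
 *	}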
906 */ 907struct __vxge_hw_ring { 908 struct __vxge_hw_channel channel; 909 struct vxge_hw_mempool *mempool; 910 struct vxge_hw_vpath_reg __iomem *vp_reg; 911 struct vxge_hw_common_reg __iomem *common_reg; 912 u32 ring_length; 913 u32 buffer_mode; 914 u32 rxd_size; 915 u32 rxd_priv_size; 916 u32 per_rxd_space; 917 u32 rxds_per_block; 918 u32 rxdblock_priv_size; 919 u32 cmpl_cnt; 920 u32 vp_id; 921 u32 doorbell_cnt; 922 u32 total_db_cnt; 923 u64 rxds_limit; 924 925 enum vxge_hw_status (*callback)( 926 struct __vxge_hw_ring *ringh, 927 void *rxdh, 928 u8 t_code, 929 void *userdata); 930 931 enum vxge_hw_status (*rxd_init)( 932 void *rxdh, 933 void *userdata); 934 935 void (*rxd_term)( 936 void *rxdh, 937 enum vxge_hw_rxd_state state, 938 void *userdata); 939 940 struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned; 941 struct vxge_hw_ring_config *config; 942} ____cacheline_aligned; 943 944/** 945 * enum enum vxge_hw_txdl_state - Descriptor (TXDL) state. 946 * @VXGE_HW_TXDL_STATE_NONE: Invalid state. 947 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation. 948 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the 949 * device. 950 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for 951 * filling-in and posting later. 952 * 953 * Titan/HW descriptor states. 954 * 955 */ 956enum vxge_hw_txdl_state { 957 VXGE_HW_TXDL_STATE_NONE = 0, 958 VXGE_HW_TXDL_STATE_AVAIL = 1, 959 VXGE_HW_TXDL_STATE_POSTED = 2, 960 VXGE_HW_TXDL_STATE_FREED = 3 961}; 962/* 963 * struct __vxge_hw_fifo - Fifo. 964 * @channel: Channel "base" of this fifo, the common part of all HW 965 * channels. 966 * @mempool: Memory pool, from which descriptors get allocated. 967 * @config: Fifo configuration, part of device configuration 968 * (see struct vxge_hw_device_config{}). 969 * @interrupt_type: Interrupt type to be used 970 * @no_snoop_bits: See struct vxge_hw_fifo_config{}. 971 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. 972 * on TxDL please refer to Titan UG. 973 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus 974 * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv). 975 * @priv_size: Per-Tx descriptor space reserved for driver 976 * usage. 977 * @per_txdl_space: Per txdl private space for the driver 978 * @callback: Fifo completion callback. HW invokes the callback when there 979 * are new completions on that fifo. In many implementations 980 * the @callback executes in the hw interrupt context. 981 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL, 982 * HW invokes the callback when closing the corresponding fifo. 983 * See also vxge_hw_fifo_txdl_term_f{}. 984 * @stats: Statistics of this fifo 985 * 986 * Fifo channel. 987 * Note: The structure is cache line aligned. 988 */ 989struct __vxge_hw_fifo { 990 struct __vxge_hw_channel channel; 991 struct vxge_hw_mempool *mempool; 992 struct vxge_hw_fifo_config *config; 993 struct vxge_hw_vpath_reg __iomem *vp_reg; 994 struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db; 995 u64 interrupt_type; 996 u32 no_snoop_bits; 997 u32 txdl_per_memblock; 998 u32 txdl_size; 999 u32 priv_size; 1000 u32 per_txdl_space;
1001 u32 vp_id; 1002 u32 tx_intr_num; 1003 1004 enum vxge_hw_status (*callback)( 1005 struct __vxge_hw_fifo *fifo_handle, 1006 void *txdlh, 1007 enum vxge_hw_fifo_tcode t_code, 1008 void *userdata, 1009 struct sk_buff ***skb_ptr, 1010 int nr_skb, 1011 int *more); 1012 1013 void (*txdl_term)( 1014 void *txdlh, 1015 enum vxge_hw_txdl_state state, 1016 void *userdata); 1017 1018 struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned; 1019} ____cacheline_aligned; 1020 1021/* 1022 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data. 1023 * @dma_addr: DMA (mapped) address of _this_ descriptor. 1024 * @dma_handle: DMA handle used to map the descriptor onto device. 1025 * @dma_offset: Descriptor's offset in the memory block. HW allocates 1026 * descriptors in memory blocks (see struct vxge_hw_fifo_config{}) 1027 * Each memblock is a contiguous block of DMA-able memory. 1028 * @frags: Total number of fragments (that is, contiguous data buffers) 1029 * carried by this TxDL. 1030 * @align_vaddr_start: Aligned virtual address start 1031 * @align_vaddr: Virtual address of the per-TxDL area in memory used for 1032 * alignement. Used to place one or more mis-aligned fragments 1033 * @align_dma_addr: DMA address translated from the @align_vaddr. 1034 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr. 1035 * @align_dma_acch: DMA access handle corresponds to @align_dma_addr. 1036 * @align_dma_offset: The current offset into the @align_vaddr area. 1037 * Grows while filling the descriptor, gets reset. 1038 * @align_used_frags: Number of fragments used. 1039 * @alloc_frags: Total number of fragments allocated. 1040 * @unused: TODO 1041 * @next_txdl_priv: (TODO). 1042 * @first_txdp: (TODO). 1043 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous 1044 * TxDL list. 1045 * @txdlh: Corresponding txdlh to this TxDL. 1046 * @memblock: Pointer to the TxDL memory block or memory page. 1047 * on the next send operation. 1048 * @dma_object: DMA address and handle of the memory block that contains 1049 * the descriptor. This member is used only in the "checked" 1050 * version of the HW (to enforce certain assertions); 1051 * otherwise it gets compiled out. 1052 * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage. 1053 * 1054 * Per-transmit decsriptor HW-private data. HW uses the space to keep DMA 1055 * information associated with the descriptor. Note that driver can ask HW 1056 * to allocate additional per-descriptor space for its own (driver-specific) 1057 * purposes. 1058 * 1059 * See also: struct vxge_hw_ring_rxd_priv{}. 1060 */ 1061struct __vxge_hw_fifo_txdl_priv { 1062 dma_addr_t dma_addr; 1063 struct pci_dev *dma_handle; 1064 ptrdiff_t dma_offset; 1065 u32 frags; 1066 u8 *align_vaddr_start; 1067 u8 *align_vaddr; 1068 dma_addr_t align_dma_addr; 1069 struct pci_dev *align_dma_handle; 1070 struct pci_dev *align_dma_acch; 1071 ptrdiff_t align_dma_offset; 1072 u32 align_used_frags; 1073 u32 alloc_frags; 1074 u32 unused; 1075 struct __vxge_hw_fifo_txdl_priv *next_txdl_priv; 1076 struct vxge_hw_fifo_txd *first_txdp; 1077 void *memblock; 1078}; 1079 1080/* 1081 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper 1082 * @control_0: Bits 0 to 7 - Doorbell type. 1083 * Bits 8 to 31 - Reserved. 1084 * Bits 32 to 39 - The highest TxD in this TxDL. 1085 * Bits 40 to 47 - Reserved. 1086 * Bits 48 to 55 - Reserved. 1087 * Bits 56 to 63 - No snoop flags. 
1088 * @txdl_ptr: The starting location of the TxDL in host memory. 1089 * 1090 * Created by the host and written to the adapter via PIO to a Kernel Doorbell 1091 * FIFO. All non-offload doorbell wrapper fields must be written by the host as 1092 * part of a doorbell write. Consumed by the adapter but is not written by the 1093 * adapter. 1094 */ 1095struct __vxge_hw_non_offload_db_wrapper { 1096 u64 control_0; 1097#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8) 1098#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8) 1099#define VXGE_HW_NODBW_TYPE_NODBW 0 1100 1101#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8) 1102#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8) 1103 1104#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8) 1105#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8) 1106#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2 1107#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1 1108 1109 u64 txdl_ptr; 1110}; 1111 1112/* 1113 * TX Descriptor 1114 */ 1115 1116/** 1117 * struct vxge_hw_fifo_txd - Transmit Descriptor 1118 * @control_0: Bits 0 to 6 - Reserved. 1119 * Bit 7 - List Ownership. This field should be initialized 1120 * to '1' by the driver before the transmit list pointer is 1121 * written to the adapter. This field will be set to '0' by the 1122 * adapter once it has completed transmitting the frame or frames in 1123 * the list. Note - This field is only valid in TxD0. Additionally, 1124 * for multi-list sequences, the driver should not release any 1125 * buffers until the ownership of the last list in the multi-list 1126 * sequence has been returned to the host. 1127 * Bits 8 to 11 - Reserved 1128 * Bits 12 to 15 - Transfer_Code. This field is only valid in 1129 * TxD0. It is used to describe the status of the transmit data 1130 * buffer transfer. This field is always overwritten by the 1131 * adapter, so this field may be initialized to any value. 1132 * Bits 16 to 17 - Host steering. This field allows the host to 1133 * override the selection of the physical transmit port. 1134 * Attention: 1135 * Normal sounds as if learned from the switch rather than from 1136 * the aggregation algorythms. 1137 * 00: Normal. Use Destination/MAC Address 1138 * lookup to determine the transmit port. 1139 * 01: Send on physical Port1. 1140 * 10: Send on physical Port0. 1141 * 11: Send on both ports. 1142 * Bits 18 to 21 - Reserved 1143 * Bits 22 to 23 - Gather_Code. This field is set by the host and 1144 * is used to describe how individual buffers comprise a frame. 1145 * 10: First descriptor of a frame. 1146 * 00: Middle of a multi-descriptor frame. 1147 * 01: Last descriptor of a frame. 1148 * 11: First and last descriptor of a frame (the entire frame 1149 * resides in a single buffer). 1150 * For multi-descriptor frames, the only valid gather code sequence 1151 * is {10, [00], 01}. In other words, the descriptors must be placed 1152 * in the list in the correct order. 1153 * Bits 24 to 27 - Reserved 1154 * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation 1155 * definition. Only valid in TxD0. This field allows the host to 1156 * indicate the Ethernet encapsulation of an outbound LSO packet. 
1157 * 00 - classic mode (best guess) 1158 * 01 - LLC 1159 * 10 - SNAP 1160 * 11 - DIX 1161 * If "classic mode" is selected, the adapter will attempt to 1162 * decode the frame's Ethernet encapsulation by examining the L/T 1163 * field as follows: 1164 * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine 1165 * if packet is IPv4 or IPv6. 1166 * 0x8870 Jumbo-SNAP encoding. 1167 * 0x0800 IPv4 DIX encoding 1168 * 0x86DD IPv6 DIX encoding 1169 * others illegal encapsulation 1170 * Bits 30 - LSO_ Flag. Large Send Offload (LSO) flag. 1171 * Set to 1 to perform segmentation offload for TCP/UDP. 1172 * This field is valid only in TxD0. 1173 * Bits 31 to 33 - Reserved. 1174 * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size 1175 * This field is meaningful only when LSO_Control is non-zero. 1176 * When LSO_Control is set to TCP_LSO, the single (possibly large) 1177 * TCP segment described by this TxDL will be sent as a series of 1178 * TCP segments each of which contains no more than LSO_MSS 1179 * payload bytes. 1180 * When LSO_Control is set to UDP_LSO, the single (possibly large) 1181 * UDP datagram described by this TxDL will be sent as a series of 1182 * UDP datagrams each of which contains no more than LSO_MSS 1183 * payload bytes. 1184 * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP 1185 * or TCP payload, with the exception of the last, which will have 1186 * <= LSO_MSS bytes of payload. 1187 * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the 1188 * buffer to be read by the adapter. This field is written by the 1189 * host. A value of 0 is illegal. 1190 * Bits 32 to 63 - This value is written by the adapter upon 1191 * completion of a UDP or TCP LSO operation and indicates the number 1192 * of UDP or TCP payload bytes that were transmitted. 0x0000 will be 1193 * returned for any non-LSO operation. 1194 * @control_1: Bits 0 to 4 - Reserved. 1195 * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum 1196 * offload. This field is only valid in the first TxD of a frame. 1197 * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload. 1198 * This field is only valid in the first TxD of a frame (the TxD's 1199 * gather code must be 10 or 11). The driver should only set this 1200 * bit if it can guarantee that TCP is present. 1201 * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload. 1202 * This field is only valid in the first TxD of a frame (the TxD's 1203 * gather code must be 10 or 11). The driver should only set this 1204 * bit if it can guarantee that UDP is present. 1205 * Bits 8 to 14 - Reserved. 1206 * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to 1207 * instruct the adapter to insert the VLAN tag specified by the 1208 * Tx_VLAN_Tag field. This field is only valid in the first TxD of 1209 * a frame. 1210 * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag 1211 * to be inserted into the frame by the adapter (the first two bytes 1212 * of a VLAN tag are always 0x8100). This field is only valid if the 1213 * Tx_VLAN_Enable field is set to '1'. 1214 * Bits 32 to 33 - Reserved. 1215 * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt 1216 * number the frame associated with. This field is written by the 1217 * host. It is only valid in the first TxD of a frame. 1218 * Bits 40 to 42 - Reserved. 1219 * Bit 43 - Set to 1 to exclude the frame from bandwidth metering 1220 * functions. This field is valid only in the first TxD 1221 * of a frame. 1222 * Bits 44 to 45 - Reserved. 
1223 * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to 1224 * generate an interrupt as soon as all of the frames in the list 1225 * have been transmitted. In order to have per-frame interrupts, 1226 * the driver should place a maximum of one frame per list. This 1227 * field is only valid in the first TxD of a frame. 1228 * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter 1229 * to count the frame toward the utilization interrupt specified in 1230 * the Tx_Int_Number field. This field is only valid in the first 1231 * TxD of a frame. 1232 * Bits 48 to 63 - Reserved. 1233 * @buffer_pointer: Buffer start address. 1234 * @host_control: Host_Control.Opaque 64bit data stored by driver inside the 1235 * Titan descriptor prior to posting the latter on the fifo 1236 * via vxge_hw_fifo_txdl_post().The %host_control is returned as is 1237 * to the driver with each completed descriptor. 1238 * 1239 * Transmit descriptor (TxD).Fifo descriptor contains configured number 1240 * (list) of TxDs. * For more details please refer to Titan User Guide, 1241 * Section 5.4.2 "Transmit Descriptor (TxD) Format". 1242 */ 1243struct vxge_hw_fifo_txd { 1244 u64 control_0; 1245#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7) 1246 1247#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4) 1248#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4) 1249#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED 1250 1251 1252#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2) 1253#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST 1254#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST 1255 1256 1257#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30) 1258 1259#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14) 1260 1261#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16) 1262 1263 u64 control_1; 1264#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5) 1265#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6) 1266#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7) 1267#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15) 1268 1269#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16) 1270 1271#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6) 1272 1273#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46) 1274#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47) 1275 1276 u64 buffer_pointer; 1277 1278 u64 host_control; 1279}; 1280 1281/** 1282 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring 1283 * @host_control: This field is exclusively for host use and is "readonly" 1284 * from the adapter's perspective. 1285 * @control_0:Bits 0 to 6 - RTH_Bucket get 1286 * Bit 7 - Own Descriptor ownership bit. This bit is set to 1 1287 * by the host, and is set to 0 by the adapter. 1288 * 0 - Host owns RxD and buffer. 1289 * 1 - The adapter owns RxD and buffer. 1290 * Bit 8 - Fast_Path_Eligible When set, indicates that the 1291 * received frame meets all of the criteria for fast path processing. 
 *	The required criteria are as follows:
 *	!SYN &
 *	(Transfer_Code == "Transfer OK") &
 *	(!Is_IP_Fragment) &
 *	((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
 *	(Is_IPv6)) &
 *	((Is_TCP & computed_L4_checksum == 0xFFFF) |
 *	(Is_UDP & (computed_L4_checksum == 0xFFFF |
 *	computed_L4_checksum == 0x0000)))
 *	(same meaning for all RxD buffer modes)
 *	Bit 9 - L3 Checksum Correct
 *	Bit 10 - L4 Checksum Correct
 *	Bit 11 - Reserved
 *	Bits 12 to 15 - This field is written by the adapter. It is
 *	used to report the status of the frame transfer to the host.
 *	0x0 - Transfer OK
 *	0x4 - RDA Failure During Transfer
 *	0x5 - Unparseable Packet, such as unknown IPv6 header.
 *	0x6 - Frame integrity error (FCS or ECC).
 *	0x7 - Buffer Size Error. The provided buffer(s) were not
 *	appropriately sized and data loss occurred.
 *	0x8 - Internal ECC Error. RxD corrupted.
 *	0x9 - IPv4 Checksum error
 *	0xA - TCP/UDP Checksum error
 *	0xF - Unknown Error or Multiple Error. Indicates an
 *	unknown problem or that more than one of the transfer codes is set.
 *	Bit 16 - SYN The adapter sets this field to indicate that
 *	the incoming frame contained a TCP segment with its SYN bit
 *	set and its ACK bit NOT set. (same meaning for all RxD buffer
 *	modes)
 *	Bit 17 - Is ICMP
 *	Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
 *	Socket Pair Direct Match Table and the frame was steered based
 *	on SPDM.
 *	Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
 *	Indirection Table and the frame was steered based on hash
 *	indirection.
 *	Bits 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
 *	type) that was used to calculate the hash.
 *	Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
 *	tagged.
 *	Bits 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
 *	of the received frame.
 *	0x0 - Ethernet DIX
 *	0x1 - LLC
 *	0x2 - SNAP (includes Jumbo-SNAP)
 *	0x3 - IPX
 *	Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
 *	Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
 *	Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
 *	IP packet.
 *	Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
 *	Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
 *	Bits 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
 *	arrived with the frame. If the resulting computed IPv4 header
 *	checksum for the frame did not produce the expected 0xFFFF value,
 *	then the transfer code would be set to 0x9.
 *	Bits 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
 *	arrived with the frame. If the resulting computed TCP/UDP checksum
 *	for the frame did not produce the expected 0xFFFF value, then the
 *	transfer code would be set to 0xA.
 * @control_1: Bits 0 to 1 - Reserved
 *	Bits 2 to 15 - Buffer0_Size. This field is set by the host and
 *	eventually overwritten by the adapter. The host writes the
 *	available buffer size in bytes when it passes the descriptor to
 *	the adapter. When a frame is delivered to the host, the adapter
 *	populates this field with the number of bytes written into the
 *	buffer. The largest supported buffer is 16,383 bytes.
 *	Bits 16 to 47 - RTH Hash Value 32-bit RTH hash value.
Only valid if 1361 * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero. 1362 * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion 1363 * of the VLAN tag, if one was detected by the adapter. This field is 1364 * populated even if VLAN-tag stripping is enabled. 1365 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver. 1366 * 1367 * One buffer mode RxD for ring structure 1368 */ 1369struct vxge_hw_ring_rxd_1 { 1370 u64 host_control; 1371 u64 control_0; 1372#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7) 1373 1374#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7) 1375 1376#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1) 1377 1378#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1) 1379 1380#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1) 1381 1382#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4) 1383#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4) 1384 1385#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED 1386 1387#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1) 1388 1389#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1) 1390 1391#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1) 1392 1393#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1) 1394 1395#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4) 1396 1397#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1) 1398 1399#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2) 1400 1401#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5) 1402 1403#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16) 1404 1405#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16) 1406 1407 u64 control_1; 1408 1409#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14) 1410#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14) 1411#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14) 1412 1413#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32) 1414 1415#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16) 1416 1417 u64 buffer0_ptr; 1418}; 1419 1420enum vxge_hw_rth_algoritms { 1421 RTH_ALG_JENKINS = 0, 1422 RTH_ALG_MS_RSS = 1, 1423 RTH_ALG_CRC32C = 2 1424}; 1425 1426/** 1427 * struct vxge_hw_rth_hash_types - RTH hash types. 1428 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4 1429 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4 1430 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6 1431 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6 1432 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex 1433 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex 1434 * 1435 * Used to pass RTH hash types to rts_rts_set. 1436 * 1437 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). 
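 *
 * A minimal fill-in sketch (illustrative only; the variable name is
 * hypothetical) enabling hashing on IPv4 and TCP-over-IPv4 headers:
 *
 *	struct vxge_hw_rth_hash_types hash_types = {
 *		.hash_type_tcpipv4_en	= 1,
 *		.hash_type_ipv4_en	= 1,
 *	};
 *
 * The structure would then be handed to vxge_hw_vpath_rts_rth_set(),
 * typically together with one of the enum vxge_hw_rth_algoritms values
 * declared above.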
1438 */ 1439struct vxge_hw_rth_hash_types { 1440 u8 hash_type_tcpipv4_en:1, 1441 hash_type_ipv4_en:1, 1442 hash_type_tcpipv6_en:1, 1443 hash_type_ipv6_en:1, 1444 hash_type_tcpipv6ex_en:1, 1445 hash_type_ipv6ex_en:1; 1446}; 1447 1448void vxge_hw_device_debug_set( 1449 struct __vxge_hw_device *devh, 1450 enum vxge_debug_level level, 1451 u32 mask); 1452 1453u32 1454vxge_hw_device_error_level_get(struct __vxge_hw_device *devh); 1455 1456u32 1457vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh); 1458 1459/** 1460 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor. 1461 * @buf_mode: Buffer mode (1, 3 or 5) 1462 * 1463 * This function returns the size of RxD for given buffer mode 1464 */ 1465static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode) 1466{ 1467 return sizeof(struct vxge_hw_ring_rxd_1); 1468} 1469 1470/** 1471 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block. 1472 * @buf_mode: Buffer mode (1 buffer mode only) 1473 * 1474 * This function returns the number of RxD for RxD block for given buffer mode 1475 */ 1476static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode) 1477{ 1478 return (u32)((VXGE_HW_BLOCK_SIZE-16) / 1479 sizeof(struct vxge_hw_ring_rxd_1)); 1480} 1481 1482/** 1483 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor. 1484 * @rxdh: Descriptor handle. 1485 * @dma_pointer: DMA address of a single receive buffer this descriptor 1486 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called, 1487 * the receive buffer should be already mapped to the device 1488 * @size: Size of the receive @dma_pointer buffer. 1489 * 1490 * Prepare 1-buffer-mode Rx descriptor for posting 1491 * (via vxge_hw_ring_rxd_post()). 1492 * 1493 * This inline helper-function does not return any parameters and always 1494 * succeeds. 1495 * 1496 */ 1497static inline 1498void vxge_hw_ring_rxd_1b_set( 1499 void *rxdh, 1500 dma_addr_t dma_pointer, 1501 u32 size) 1502{ 1503 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; 1504 rxdp->buffer0_ptr = dma_pointer; 1505 rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK; 1506 rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size); 1507} 1508 1509/** 1510 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf 1511 * descriptor. 1512 * @vpath_handle: Virtual Path handle. 1513 * @rxdh: Descriptor handle. 1514 * @dma_pointer: DMA address of a single receive buffer this descriptor 1515 * carries. Returned by HW. 1516 * @pkt_length: Length (in bytes) of the data in the buffer pointed by 1517 * 1518 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor. 1519 * This inline helper-function uses completed descriptor to populate receive 1520 * buffer pointer and other "out" parameters. The function always succeeds. 1521 * 1522 */ 1523static inline 1524void vxge_hw_ring_rxd_1b_get( 1525 struct __vxge_hw_ring *ring_handle, 1526 void *rxdh, 1527 u32 *pkt_length) 1528{ 1529 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; 1530 1531 *pkt_length = 1532 (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1); 1533} 1534 1535/** 1536 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with 1537 * a completed receive descriptor for 1b mode. 1538 * @vpath_handle: Virtual Path handle. 1539 * @rxdh: Descriptor handle. 1540 * @rxd_info: Descriptor information 1541 * 1542 * Retrieve extended information associated with a completed receive descriptor. 
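 *
 * A usage sketch (illustrative only; 'ring', 'rxdh' and 'info' are
 * hypothetical locals available in a completion handler):
 *
 *	struct vxge_hw_ring_rxd_info info;
 *
 *	vxge_hw_ring_rxd_1b_info_get(ring, rxdh, &info);
 *	if (info.l3_cksum_valid && info.l4_cksum_valid)
 *		... the adapter verified the L3/L4 checksums ...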
void vxge_hw_device_debug_set(
	struct __vxge_hw_device *devh,
	enum vxge_debug_level level,
	u32 mask);

u32
vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);

u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);

/**
 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
 * @buf_mode: Buffer mode (1, 3 or 5)
 *
 * This function returns the size of an RxD for the given buffer mode
 */
static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
{
	return sizeof(struct vxge_hw_ring_rxd_1);
}

/**
 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
 * @buf_mode: Buffer mode (1 buffer mode only)
 *
 * This function returns the number of RxDs per RxD block for the given
 * buffer mode
 */
static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
{
	return (u32)((VXGE_HW_BLOCK_SIZE-16) /
		sizeof(struct vxge_hw_ring_rxd_1));
}

/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 * the receive buffer must already be mapped to the device.
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 */
static inline
void vxge_hw_ring_rxd_1b_set(
	void *rxdh,
	dma_addr_t dma_pointer,
	u32 size)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_1	&= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
	rxdp->control_1	|= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}

/**
 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @ring_handle: Ring handle.
 * @rxdh: Descriptor handle.
 * @pkt_length: Returned length (in bytes) of the data carried by the
 * buffer of the completed descriptor.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the "out" parameters. The function always succeeds.
 *
 */
static inline
void vxge_hw_ring_rxd_1b_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	u32 *pkt_length)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	*pkt_length =
		(u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
}

/**
 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @ring_handle: Ring handle.
 * @rxdh: Descriptor handle.
 * @rxd_info: Descriptor information
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 */
static inline
void vxge_hw_ring_rxd_1b_info_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	struct vxge_hw_ring_rxd_info *rxd_info)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	rxd_info->syn_flag =
		(u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
	rxd_info->is_icmp =
		(u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
	rxd_info->fast_path_eligible =
		(u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
	rxd_info->l3_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l3_cksum =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
	rxd_info->l4_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l4_cksum =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
	rxd_info->frame =
		(u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
	rxd_info->proto =
		(u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
	rxd_info->is_vlan =
		(u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
	rxd_info->vlan =
		(u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
	rxd_info->rth_bucket =
		(u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
	rxd_info->rth_it_hit =
		(u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
	rxd_info->rth_spdm_hit =
		(u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
	rxd_info->rth_hash_type =
		(u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
	rxd_info->rth_value =
		(u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
}

/**
 * vxge_hw_ring_rxd_private_get - Get driver-private per-descriptor data
 * of a ring descriptor.
 * @rxdh: Descriptor handle.
 *
 * Returns: private driver info associated with the descriptor.
 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
 *
 */
static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	return (void *)(size_t)rxdp->host_control;
}
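/*
 * Example (illustrative sketch, not part of the API): preparing and
 * completing a 1-buffer-mode descriptor.  The example_rx_priv structure,
 * the callback names and the buffer mapping step are hypothetical; only
 * the vxge_hw_ring_rxd_* calls are part of this API.  The callbacks match
 * the prototypes expected by struct vxge_hw_ring_attr (defined below).
 *
 *	struct example_rx_priv {
 *		dma_addr_t data_dma;
 *		u32 data_size;
 *	};
 *
 *	static enum vxge_hw_status example_rxd_init(void *rxdh, void *userdata)
 *	{
 *		struct example_rx_priv *priv = vxge_hw_ring_rxd_private_get(rxdh);
 *
 *		(map a receive buffer and fill priv->data_dma, priv->data_size)
 *		vxge_hw_ring_rxd_1b_set(rxdh, priv->data_dma, priv->data_size);
 *		return VXGE_HW_OK;
 *	}
 *
 *	static enum vxge_hw_status example_rx_compl(struct __vxge_hw_ring *ringh,
 *						    void *rxdh, u8 t_code,
 *						    void *userdata)
 *	{
 *		u32 pkt_length;
 *
 *		vxge_hw_ring_rxd_1b_get(ringh, rxdh, &pkt_length);
 *		(hand pkt_length bytes up the stack, then re-post the RxD)
 *		return VXGE_HW_OK;
 *	}
 */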
/**
 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
 * @txdlh: Descriptor handle.
 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
 * and/or TCP and/or UDP.
 *
 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
 * descriptor.
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
 * and vxge_hw_fifo_txdl_buffer_set().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
	txdp->control_1 |= cksum_bits;
}

/**
 * vxge_hw_fifo_txdl_mss_set - Set MSS.
 * @txdlh: Descriptor handle.
 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
 * driver, which in turn inserts the MSS into the @txdlh.
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
 * and vxge_hw_fifo_txdl_cksum_set_bits().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
}

/**
 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
 * @txdlh: Descriptor handle.
 * @vlan_tag: 16-bit VLAN tag.
 *
 * Insert VLAN tag into specified transmit descriptor.
 * The actual insertion of the tag into outgoing frame is done by the hardware.
 */
static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
}

/**
 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
 * @txdlh: Descriptor handle.
 *
 * Retrieve per-descriptor private data.
 * Note that the driver requests per-descriptor space via
 * struct vxge_hw_fifo_attr passed to
 * vxge_hw_vpath_open().
 *
 * Returns: private driver data associated with the descriptor.
 */
static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	return (void *)(size_t)txdp->host_control;
}
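/*
 * Example (illustrative sketch, not part of the API): typical TxDL
 * preparation before vxge_hw_fifo_txdl_post().  The checksum-enable bit
 * names are assumed to be defined elsewhere in the driver (they are not
 * part of this file), and txdlh/mss/vlan_tag are placeholders.
 *
 *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
 *					 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *					 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
 *	if (mss)
 *		vxge_hw_fifo_txdl_mss_set(txdlh, mss);
 *	if (vlan_tag)
 *		vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
 */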
/**
 * struct vxge_hw_ring_attr - Ring open "template".
 * @callback: Ring completion callback. HW invokes the callback when there
 * are new completions on that ring. In many implementations
 * the @callback executes in the hw interrupt context.
 * @rxd_init: Ring's descriptor-initialize callback.
 * See vxge_hw_ring_rxd_init_f{}.
 * If not NULL, HW invokes the callback when opening
 * the ring.
 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding ring.
 * See also vxge_hw_ring_rxd_term_f{}.
 * @userdata: User-defined "context" of _that_ ring. Passed back to the
 * user as one of the @callback, @rxd_init, and @rxd_term arguments.
 * @per_rxd_space: If specified (i.e., greater than zero): extra space
 * reserved by HW for each receive descriptor. Can be used to store,
 * and retrieve on completion, information specific to the driver.
 *
 * Ring open "template". User fills the structure with ring
 * attributes and passes it to vxge_hw_vpath_open().
 */
struct vxge_hw_ring_attr {
	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	void *userdata;
	u32 per_rxd_space;
};

/**
 * function vxge_hw_fifo_callback_f - FIFO callback.
 * @fifo_handle: Fifo containing one or more completed
 * descriptors.
 * @txdlh: First completed descriptor.
 * @txdl_priv: Pointer to the per-txdl space allocated
 * @t_code: Transfer code, as per Titan User Guide.
 * Returned by HW.
 * @host_control: Opaque 64bit data stored by driver inside the Titan
 * descriptor prior to posting the latter on the fifo
 * via vxge_hw_fifo_txdl_post(). The @host_control is returned
 * as is to the driver with each completed descriptor.
 * @userdata: Opaque per-fifo data specified at fifo open
 * time, via vxge_hw_vpath_open().
 *
 * Fifo completion callback (type declaration). A single per-fifo
 * callback is specified at fifo open time, via
 * vxge_hw_vpath_open(). Typically gets called as part of the processing
 * of the Interrupt Service Routine.
 *
 * Fifo callback gets called by HW if, and only if, there is at least
 * one new completion on a given fifo. Upon processing the first @txdlh the
 * driver is _supposed_ to continue consuming completions using:
 *    - vxge_hw_fifo_txdl_next_completed()
 *
 * Note that failure to process new completions in a timely fashion
 * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
 *
 * Non-zero @t_code means failure to process the transmit descriptor.
 *
 * In the "transmit" case the failure could happen, for instance, when the
 * link is down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
 */
/**
 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
 * @txdlh: First completed descriptor.
 * @txdl_priv: Pointer to the per-txdl space allocated
 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
 * @userdata: Per-fifo user data (a.k.a. context) specified at
 * fifo open time, via vxge_hw_vpath_open().
 *
 * Terminate descriptor callback. Unless NULL is specified in the
 * struct vxge_hw_fifo_attr{} passed to vxge_hw_vpath_open(),
 * HW invokes the callback as part of closing the fifo, prior to
 * de-allocating the ring and associated data structures
 * (including descriptors).
 * The driver should use the callback to (for instance) unmap
 * and free DMA data buffers associated with the posted (state =
 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
 * as well as to perform other relevant cleanup.
 *
 * See also: struct vxge_hw_fifo_attr{}
 */
/**
 * struct vxge_hw_fifo_attr - Fifo open "template".
 * @callback: Fifo completion callback. HW invokes the callback when there
 * are new completions on that fifo. In many implementations
 * the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding fifo.
 * See also vxge_hw_fifo_txdl_term_f{}.
 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
 * user as one of the @callback, and @txdl_term arguments.
 * @per_txdl_space: If specified (i.e., greater than zero): extra space
 * reserved by HW for each transmit descriptor. Can be used to
 * store, and retrieve on completion, information specific
 * to the driver.
 *
 * Fifo open "template". User fills the structure with fifo
 * attributes and passes it to vxge_hw_vpath_open().
 */
struct vxge_hw_fifo_attr {

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb, int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	void *userdata;
	u32 per_txdl_space;
};

/**
 * struct vxge_hw_vpath_attr - Attributes of virtual path
 * @vp_id: Identifier of Virtual Path
 * @ring_attr: Attributes of ring for non-offload receive
 * @fifo_attr: Attributes of fifo for non-offload transmit
 *
 * Attributes of virtual path. This structure is passed as parameter
 * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
 */
struct vxge_hw_vpath_attr {
	u32				vp_id;
	struct vxge_hw_ring_attr	ring_attr;
	struct vxge_hw_fifo_attr	fifo_attr;
};
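/*
 * Example (illustrative sketch, not part of the API): opening a virtual
 * path with ring and fifo attributes.  The callbacks and context pointers
 * are hypothetical (example_rxd_init/example_rx_compl are sketched above,
 * example_tx_compl is assumed to match the fifo callback prototype), and
 * the per-RxD space size is only an illustration.
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp_handle;
 *	enum vxge_hw_status status;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = vp_id;
 *	attr.ring_attr.callback = example_rx_compl;
 *	attr.ring_attr.rxd_init = example_rxd_init;
 *	attr.ring_attr.userdata = my_ring_ctx;
 *	attr.ring_attr.per_rxd_space = sizeof(struct example_rx_priv);
 *	attr.fifo_attr.callback = example_tx_compl;
 *	attr.fifo_attr.userdata = my_fifo_ctx;
 *
 *	status = vxge_hw_vpath_open(devh, &attr, &vp_handle);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */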
enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
	void __iomem *bar0,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
	struct vxge_hw_device_config *device_config);

/**
 * vxge_hw_device_link_state_get - Get link state.
 * @devh: HW device handle.
 *
 * Get link state.
 * Returns: link state.
 */
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
	struct __vxge_hw_device *devh)
{
	return devh->link_state;
}

void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);

enum vxge_hw_status __devinit vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config);

enum vxge_hw_status vxge_hw_device_getpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 *tx,
	u32 *rx);

enum vxge_hw_status vxge_hw_device_setpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 tx,
	u32 rx);

/*
 * vxge_os_dma_malloc - Allocate a cache-line aligned memory block.
 *
 * The block is over-allocated if needed and the returned pointer is
 * advanced to the next VXGE_CACHE_LINE_SIZE boundary; the applied offset
 * is stored through @p_dma_acch so that vxge_os_dma_free() can recover
 * the original kmalloc() pointer.
 */
static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
					unsigned long size,
					struct pci_dev **p_dmah,
					struct pci_dev **p_dma_acch)
{
	gfp_t flags;
	void *vaddr;
	unsigned long misaligned = 0;
	int realloc_flag = 0;
	*p_dma_acch = *p_dmah = NULL;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;
realloc:
	vaddr = kmalloc(size, flags);
	if (vaddr == NULL)
		return vaddr;
	misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
				VXGE_CACHE_LINE_SIZE);
	if (realloc_flag)
		goto out;

	if (misaligned) {
		/* misaligned, free current one and try allocating
		 * size + VXGE_CACHE_LINE_SIZE memory
		 */
		kfree(vaddr);
		size += VXGE_CACHE_LINE_SIZE;
		realloc_flag = 1;
		goto realloc;
	}
out:
	*(unsigned long *)p_dma_acch = misaligned;
	vaddr = (void *)((u8 *)vaddr + misaligned);
	return vaddr;
}

static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
			struct pci_dev **p_dma_acch)
{
	unsigned long misaligned = *(unsigned long *)p_dma_acch;
	u8 *tmp = (u8 *)vaddr;
	tmp -= misaligned;
	kfree((void *)tmp);
}
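/*
 * Example (illustrative sketch, not part of the API): vxge_os_dma_malloc()
 * hands back a cache-line aligned pointer and records the alignment offset
 * through @p_dma_acch; the same cookie must be passed back to
 * vxge_os_dma_free().  The block size of 4096 is arbitrary.
 *
 *	struct pci_dev *dmah, *dma_acch;
 *	void *block;
 *
 *	block = vxge_os_dma_malloc(pdev, 4096, &dmah, &dma_acch);
 *	if (!block)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *	...
 *	vxge_os_dma_free(pdev, block, &dma_acch);
 */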
/*
 * __vxge_hw_mempool_item_priv - Return a pointer to the per-item private
 * space of a mempool item.
 */
static inline void*
__vxge_hw_mempool_item_priv(
	struct vxge_hw_mempool *mempool,
	u32 memblock_idx,
	void *item,
	u32 *memblock_item_idx)
{
	ptrdiff_t offset;
	void *memblock = mempool->memblocks_arr[memblock_idx];

	offset = (u32)((u8 *)item - (u8 *)memblock);
	vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);

	(*memblock_item_idx) = (u32) offset / mempool->item_size;
	vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);

	return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
			(*memblock_item_idx) * mempool->items_priv_size;
}

/*
 * __vxge_hw_fifo_txdl_priv - Return the per-TxDL private structure
 * associated with a TxD of the given fifo.
 * @fifo: Fifo
 * @txdp: Pointer to a TxD
 */
static inline struct __vxge_hw_fifo_txdl_priv *
__vxge_hw_fifo_txdl_priv(
	struct __vxge_hw_fifo *fifo,
	struct vxge_hw_fifo_txd *txdp)
{
	return (struct __vxge_hw_fifo_txdl_priv *)
			(((char *)((ulong)txdp->host_control)) +
				fifo->per_txdl_space);
}

enum vxge_hw_status vxge_hw_vpath_open(
	struct __vxge_hw_device *devh,
	struct vxge_hw_vpath_attr *attr,
	struct __vxge_hw_vpath_handle **vpath_handle);

enum vxge_hw_status vxge_hw_vpath_close(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);

enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);

enum vxge_hw_status vxge_hw_vpath_mtu_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);

#ifndef readq
static inline u64 readq(void __iomem *addr)
{
	u64 ret = 0;
	ret = readl(addr + 4);
	ret <<= 32;
	ret |= readl(addr);

	return ret;
}
#endif

#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
	writel((u32) (val), addr);
	writel((u32) (val >> 32), (addr + 4));
}
#endif
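/*
 * Note (illustrative sketch, not part of the API): on platforms without
 * native readq/writeq the fallbacks above issue two 32-bit accesses, so a
 * 64-bit register access is not atomic.  A typical read-modify-write of a
 * 64-bit register looks like the following, where the register pointer and
 * bit mask are placeholders for definitions found elsewhere in the driver:
 *
 *	u64 val64;
 *
 *	val64 = readq(&vp_reg->example_register);
 *	val64 |= example_bit_mask;
 *	writeq(val64, &vp_reg->example_register);
 */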
static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
	writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
	writel(val, addr);
}

enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);

/**
 * vxge_debug_ll
 * @level: level of debug verbosity.
 * @mask: mask for the debug
 * @fmt: printf like format string
 *
 * Provides logging facilities. Can be customized on a per-module basis
 * and/or with debug levels. Input parameters, except module and level,
 * are the same as POSIX printf. This function may be compiled out if the
 * DEBUG macro was never defined.
 * See also: enum vxge_debug_level{}.
 */
#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
#define vxge_debug_ll(level, mask, fmt, ...) do {			       \
	if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
	    (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
		if ((mask & VXGE_DEBUG_MASK) == mask)			       \
			printk(fmt "\n", __VA_ARGS__);			       \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
#endif

enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
	struct __vxge_hw_vpath_handle **vpath_handles,
	u32 vpath_count,
	u8 *mtable,
	u8 *itable,
	u32 itable_size);

enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	enum vxge_hw_rth_algoritms algorithm,
	struct vxge_hw_rth_hash_types *hash_type,
	u16 bucket_size);

enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);

#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
#define VXGE_HW_MAX_POLLING_COUNT 100

void
vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);

enum vxge_hw_status
vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
			     u32 *minor, u32 *build);

enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);

enum vxge_hw_status
vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
		     int size);

enum vxge_hw_status
vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
				struct eprom_image *eprom_image_data);

int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
#endif