/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

/* Common definitions for all Efx net driver code */

#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H

#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/i2c.h>

#include "enum.h"
#include "bitfield.h"

/**************************************************************************
 *
 * Build definitions
 *
 **************************************************************************/
#ifndef EFX_DRIVER_NAME
#define EFX_DRIVER_NAME "sfc"
#endif
#define EFX_DRIVER_VERSION "2.3"

#ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
/* In non-debug builds the paranoid checks compile away entirely;
 * note the argument is then not evaluated at all. */
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif

/* Un-rate-limited logging */
#define EFX_ERR(efx, fmt, args...) \
dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)

#define EFX_INFO(efx, fmt, args...) \
dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)

#ifdef EFX_ENABLE_DEBUG
/* Debug builds always emit debug messages (via dev_info); normal builds
 * route them through dev_dbg(), which is usually compiled out. */
#define EFX_LOG(efx, fmt, args...) \
dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
#else
#define EFX_LOG(efx, fmt, args...) \
dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
#endif

/* Trace and register-dump logging are compiled out; arguments are
 * discarded unevaluated. */
#define EFX_TRACE(efx, fmt, args...) do {} while (0)

#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)

/* Rate-limited logging */
#define EFX_ERR_RL(efx, fmt, args...) \
do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)

#define EFX_INFO_RL(efx, fmt, args...) \
do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)

#define EFX_LOG_RL(efx, fmt, args...) \
do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)

/**************************************************************************
 *
 * Efx data structures
 *
 **************************************************************************/

#define EFX_MAX_CHANNELS 32
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS

/* One TX queue with checksum offload, one without */
#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
#define EFX_TX_QUEUE_NO_CSUM 1
#define EFX_TX_QUEUE_COUNT 2

/**
 * struct efx_special_buffer - An Efx special buffer
 * @addr: CPU base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 * @index: Buffer index within controller's buffer table
 * @entries: Number of buffer table entries
 *
 * Special buffers are used for the event queues and the TX and RX
 * descriptor queues for each channel.  They are *not* used for the
 * actual transmit and receive buffers.
 *
 * Note that for Falcon, TX and RX descriptor queues live in host memory.
 * Allocation and freeing procedures must take this into account.
 */
struct efx_special_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
	int index;
	int entries;
};

/**
 * struct efx_tx_buffer - An Efx TX buffer
 * @skb: The associated socket buffer.
 *	Set only on the final fragment of a packet; %NULL for all other
 *	fragments.  When this fragment completes, then we can free this
 *	skb.
 * @tsoh: The associated TSO header structure, or %NULL if this
 *	buffer is not a TSO header.
 * @dma_addr: DMA address of the fragment.
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
 * @continuation: True if this fragment is not the end of a packet.
 * @unmap_single: True if pci_unmap_single should be used.
 * @unmap_len: Length of this fragment to unmap
 */
struct efx_tx_buffer {
	const struct sk_buff *skb;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	unsigned short len;
	bool continuation;
	bool unmap_single;
	unsigned short unmap_len;
};

/**
 * struct efx_tx_queue - An Efx TX queue
 *
 * This is a ring buffer of TX fragments.
 * Since the TX completion path always executes on the same
 * CPU and the xmit path can operate on different CPUs,
 * performance is increased by ensuring that the completion
 * path and the xmit path operate on different cache lines.
 * This is particularly important if the xmit path is always
 * executing on one CPU which is different from the completion
 * path.  There is also a cache line for members which are
 * read but not written on the fast path.
 *
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @channel: The associated channel
 * @nic: The associated Efx NIC.
 *	NOTE(review): appears to duplicate @efx — confirm before relying
 *	on either being authoritative.
 * @buffer: The software buffer ring
 * @txd: The hardware descriptor ring
 * @flushed: Used when handling queue flushing
 * @read_count: Current read pointer.
 *	This is the number of buffers that have been removed from both rings.
 * @stopped: Stopped count.
 *	Set if this TX queue is currently stopping its port.
 * @insert_count: Current insert pointer
 *	This is the number of buffers that have been added to the
 *	software ring.
 * @write_count: Current write pointer
 *	This is the number of buffers that have been added to the
 *	hardware ring.
 * @old_read_count: The value of read_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of read_count if this
 *	variable indicates that the queue is full.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_headers_free: A list of TSO headers allocated for this TX queue
 *	that are not in use, and so available for new TSO sends.  The list
 *	is protected by the TX queue lock.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	int queue;
	struct efx_channel *channel;
	struct efx_nic *nic;
	struct efx_tx_buffer *buffer;
	struct efx_special_buffer txd;
	bool flushed;

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;
	int stopped;

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	struct efx_tso_header *tso_headers_free;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
};

/**
 * struct efx_rx_buffer - An Efx RX data buffer
 * @dma_addr: DMA base address of the buffer
 * @skb: The associated socket buffer, if any.
 *	If both this and page are %NULL, the buffer slot is currently free.
 * @page: The associated page buffer, if any.
 *	If both this and skb are %NULL, the buffer slot is currently free.
 * @data: Pointer to ethernet header
 * @len: Buffer length, in bytes.
 * @unmap_addr: DMA address to unmap
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	struct page *page;
	char *data;
	unsigned int len;
	dma_addr_t unmap_addr;
};

/**
 * struct efx_rx_queue - An Efx RX queue
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @channel: The associated channel
 * @buffer: The software buffer ring
 * @rxd: The hardware descriptor ring
 * @added_count: Number of buffers added to the receive queue.
 * @notified_count: Number of buffers given to NIC (<= @added_count).
 * @removed_count: Number of buffers removed from the receive queue.
 * @add_lock: Receive queue descriptor add spin lock.
 *	This lock must be held in order to add buffers to the RX
 *	descriptor ring (rxd and buffer) and to update added_count (but
 *	not removed_count).
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @fast_fill_limit: The level to which a fast fill will fill
 *	(@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.
 * @min_overfill: RX descriptor minimum overflow fill level.
 *	This records the minimum fill level at which RX queue
 *	overflow was observed.  It should never be set.
 * @alloc_page_count: RX allocation strategy counter.
 * @alloc_skb_count: RX allocation strategy counter.
 * @work: Descriptor push work thread
 * @slow_fill_count: NOTE(review): not documented in the original;
 *	presumably counts refills performed via @work rather than the
 *	fast-fill path — confirm against the refill code.
 * @buf_page: Page for next RX buffer.
 *	We can use a single page for multiple RX buffers.  This tracks
 *	the remaining space in the allocation.
 * @buf_dma_addr: Page's DMA address.
 * @buf_data: Page's host address.
 * @flushed: Used when handling queue flushing
 */
struct efx_rx_queue {
	struct efx_nic *efx;
	int queue;
	struct efx_channel *channel;
	struct efx_rx_buffer *buffer;
	struct efx_special_buffer rxd;

	int added_count;
	int notified_count;
	int removed_count;
	spinlock_t add_lock;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int fast_fill_limit;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int alloc_page_count;
	unsigned int alloc_skb_count;
	struct delayed_work work;
	unsigned int slow_fill_count;

	struct page *buf_page;
	dma_addr_t buf_dma_addr;
	char *buf_data;
	bool flushed;
};

/**
 * struct efx_buffer - An Efx general-purpose buffer
 * @addr: host base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 *
 * Falcon uses these buffers for its interrupt status registers and
 * MAC stats dumps.
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};


/* Flags for channel->used_flags */
#define EFX_USED_BY_RX 1
#define EFX_USED_BY_TX 2
#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)

enum efx_rx_alloc_method {
	RX_ALLOC_METHOD_AUTO = 0,
	RX_ALLOC_METHOD_SKB = 1,
	RX_ALLOC_METHOD_PAGE = 2,
};

/**
 * struct efx_channel - An Efx channel
 *
 * A channel comprises an event queue, at least one TX queue, at least
 * one RX queue, and an associated tasklet for processing the event
 * queue.
 *
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @name: Name for channel and IRQ
 * @used_flags: Channel is used by net driver
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in us)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
 * @work_pending: Is work pending via NAPI?
 * @eventq: Event queue buffer
 * @eventq_read_ptr: Event queue read pointer
 * @last_eventq_read_ptr: Last event queue read pointer value.
 * @eventq_magic: Event queue magic value for driver-generated test events
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
 *	and diagnostic counters
 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
 *	descriptors
 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
 * @n_rx_ip_frag_err: Count of RX IP fragment errors
 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 */
struct efx_channel {
	struct efx_nic *efx;
	int channel;
	char name[IFNAMSIZ + 6];
	int used_flags;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
	bool work_pending;
	struct efx_special_buffer eventq;
	unsigned int eventq_read_ptr;
	unsigned int last_eventq_read_ptr;
	unsigned int eventq_magic;

	unsigned int irq_count;
	unsigned int irq_mod_score;

	int rx_alloc_level;
	int rx_alloc_push_pages;

	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_frag_err;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;

	/* Used to pipeline received packets in order to optimise memory
	 * access with prefetches.
	 */
	struct efx_rx_buffer *rx_pkt;
	bool rx_pkt_csummed;

};

/**
 * struct efx_blinker - S/W LED blinking context
 * @state: Current state - on or off
 * @resubmit: Timer resubmission flag
 * @timer: Control timer for blinking
 */
struct efx_blinker {
	bool state;
	bool resubmit;
	struct timer_list timer;
};


/**
 * struct efx_board - board information
 * @type: Board model type
 * @major: Major rev. ('A', 'B' ...)
 * @minor: Minor rev. (0, 1, ...)
 * @init: Initialisation function
 * @init_leds: Sets up board LEDs.  May be called repeatedly.
 * @set_id_led: Turns the identification LED on or off
 * @blink: Starts/stops blinking
 * @monitor: Board-specific health check function
 * @fini: Cleanup function
 * @blinker: used to blink LEDs in software
 * @hwmon_client: I2C client for hardware monitor
 * @ioexp_client: I2C client for power/port control
 */
struct efx_board {
	int type;
	int major;
	int minor;
	int (*init) (struct efx_nic *nic);
	/* As the LEDs are typically attached to the PHY, LEDs
	 * have a separate init callback that happens later than
	 * board init. */
	void (*init_leds)(struct efx_nic *efx);
	void (*set_id_led) (struct efx_nic *efx, bool state);
	int (*monitor) (struct efx_nic *nic);
	void (*blink) (struct efx_nic *efx, bool start);
	void (*fini) (struct efx_nic *nic);
	struct efx_blinker blinker;
	struct i2c_client *hwmon_client, *ioexp_client;
};

/* Look up a name string for @val in the table <member>_names */
#define STRING_TABLE_LOOKUP(val, member) \
	member ## _names[val]

enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX	/* Insert any new items before this */
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

enum phy_type {
	PHY_TYPE_NONE = 0,
	PHY_TYPE_TXC43128 = 1,
	PHY_TYPE_88E1111 = 2,
	PHY_TYPE_SFX7101 = 3,
	PHY_TYPE_QT2022C2 = 4,
	PHY_TYPE_PM8358 = 6,
	PHY_TYPE_SFT9001A = 8,
	PHY_TYPE_QT2025C = 9,
	PHY_TYPE_SFT9001B = 10,
	PHY_TYPE_MAX	/* Insert any new items before this */
};

#define EFX_IS10G(efx) ((efx)->link_speed == 10000)

enum nic_state {
	STATE_INIT = 0,
	STATE_RUNNING = 1,
	STATE_FINI = 2,
	STATE_DISABLED = 3,
	STATE_MAX,
};

/*
 * Alignment of page-allocated RX buffers
 *
 * Controls the number of bytes inserted at the start of an RX buffer.
 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
 * of the skb->head for hardware DMA].
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFX_PAGE_IP_ALIGN 0
#else
#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
#endif

/*
 * Alignment of the skb->head which wraps a page-allocated RX buffer
 *
 * The skb allocated to wrap an rx_buffer can have this alignment.  Since
 * the data is memcpy'd from the rx_buf, it does not need to be equal to
 * EFX_PAGE_IP_ALIGN.
 */
#define EFX_PAGE_SKB_ALIGN 2

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field */
enum efx_fc_type {
	EFX_FC_RX = FLOW_CTRL_RX,
	EFX_FC_TX = FLOW_CTRL_TX,
	EFX_FC_AUTO = 4,
};

/* Supported MAC bit-mask */
enum efx_mac_type {
	EFX_GMAC = 1,
	EFX_XMAC = 2,
};

/* Resolve the flow control to use: the wanted flags directly, or - if
 * EFX_FC_AUTO is set - the result of autonegotiation against the link
 * partner's advertisement word @lpa. */
static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc,
					      unsigned int lpa)
{
	BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));

	if (!(wanted_fc & EFX_FC_AUTO))
		return wanted_fc;

	return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa);
}

/**
 * struct efx_mac_operations - Efx MAC operations table
 * @reconfigure: Reconfigure MAC.  Serialised by the mac_lock
 * @update_stats: Update statistics
 * @irq: Hardware MAC event callback.  Serialised by the mac_lock
 * @poll: Poll for hardware state.  Serialised by the mac_lock
 */
struct efx_mac_operations {
	void (*reconfigure) (struct efx_nic *efx);
	void (*update_stats) (struct efx_nic *efx);
	void (*irq) (struct efx_nic *efx);
	void (*poll) (struct efx_nic *efx);
};

/**
 * struct efx_phy_operations - Efx PHY operations table
 * @macs: Bitmask of MACs this PHY can be used with (&enum efx_mac_type)
 * @init: Initialise PHY
 * @fini: Shut down PHY
 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
 * @clear_interrupt: Clear down interrupt
 * @poll: Poll for hardware state.  Serialised by the mac_lock.
 * @get_settings: Get ethtool settings.  Serialised by the mac_lock.
 * @set_settings: Set ethtool settings.  Serialised by the mac_lock.
 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @num_tests: Number of PHY-specific tests/results
 * @test_names: Names of the tests/results
 * @run_tests: Run tests and record results as appropriate.
 *	Flags are the ethtool tests flags.
 * @mmds: MMD presence mask
 * @loopbacks: Supported loopback modes mask
 */
struct efx_phy_operations {
	enum efx_mac_type macs;
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*reconfigure) (struct efx_nic *efx);
	void (*clear_interrupt) (struct efx_nic *efx);
	void (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	u32 num_tests;
	const char *const *test_names;
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int mmds;
	unsigned loopbacks;
};

/**
 * enum efx_phy_mode - PHY operating mode flags
 * @PHY_MODE_NORMAL: on and should pass traffic
 * @PHY_MODE_TX_DISABLED: on with TX disabled
 * @PHY_MODE_LOW_POWER: set to low power through MDIO
 * @PHY_MODE_OFF: switched off through external control
 * @PHY_MODE_SPECIAL: on but will not pass traffic
 */
enum efx_phy_mode {
	PHY_MODE_NORMAL		= 0,
	PHY_MODE_TX_DISABLED	= 1,
	PHY_MODE_LOW_POWER	= 2,
	PHY_MODE_OFF		= 4,
	PHY_MODE_SPECIAL	= 8,
};

/* The PHY is considered disabled if any mode flag other than
 * TX_DISABLED is set (i.e. low power, off or special). */
static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
{
	return !!(mode & ~PHY_MODE_TX_DISABLED);
}

/*
 * Efx extended statistics
 *
 * Not all statistics are provided by all supported MACs.  The purpose
 * of this structure is to contain the raw statistics provided by each
 * MAC.
 */
struct efx_mac_stats {
	u64 tx_bytes;
	u64 tx_good_bytes;
	u64 tx_bad_bytes;
	unsigned long tx_packets;
	unsigned long tx_bad;
	unsigned long tx_pause;
	unsigned long tx_control;
	unsigned long tx_unicast;
	unsigned long tx_multicast;
	unsigned long tx_broadcast;
	unsigned long tx_lt64;
	unsigned long tx_64;
	unsigned long tx_65_to_127;
	unsigned long tx_128_to_255;
	unsigned long tx_256_to_511;
	unsigned long tx_512_to_1023;
	unsigned long tx_1024_to_15xx;
	unsigned long tx_15xx_to_jumbo;
	unsigned long tx_gtjumbo;
	unsigned long tx_collision;
	unsigned long tx_single_collision;
	unsigned long tx_multiple_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_deferred;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_deferred;
	unsigned long tx_non_tcpudp;
	unsigned long tx_mac_src_error;
	unsigned long tx_ip_src_error;
	u64 rx_bytes;
	u64 rx_good_bytes;
	u64 rx_bad_bytes;
	unsigned long rx_packets;
	unsigned long rx_good;
	unsigned long rx_bad;
	unsigned long rx_pause;
	unsigned long rx_control;
	unsigned long rx_unicast;
	unsigned long rx_multicast;
	unsigned long rx_broadcast;
	unsigned long rx_lt64;
	unsigned long rx_64;
	unsigned long rx_65_to_127;
	unsigned long rx_128_to_255;
	unsigned long rx_256_to_511;
	unsigned long rx_512_to_1023;
	unsigned long rx_1024_to_15xx;
	unsigned long rx_15xx_to_jumbo;
	unsigned long rx_gtjumbo;
	unsigned long rx_bad_lt64;
	unsigned long rx_bad_64_to_15xx;
	unsigned long rx_bad_15xx_to_jumbo;
	unsigned long rx_bad_gtjumbo;
	unsigned long rx_overflow;
	unsigned long rx_missed;
	unsigned long rx_false_carrier;
	unsigned long rx_symbol_error;
	unsigned long rx_align_error;
	unsigned long rx_length_error;
	unsigned long rx_internal_error;
	unsigned long rx_good_lt64;
};

/* Number of bits used in a multicast filter hash address */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in a multicast filter hash */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* An Efx multicast filter hash */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};

/**
 * struct efx_nic - an Efx NIC
 * @name: Device name (net device name or bus id before net device registered)
 * @pci_dev: The PCI device
 * @type: Controller type attributes
 * @legacy_irq: IRQ number
 * @workqueue: Workqueue for port reconfigures and the HW monitor.
 *	Work items do not hold and must not acquire RTNL.
 * @workqueue_name: Name of workqueue
 * @reset_work: Scheduled reset workitem
 * @monitor_work: Hardware monitor workitem
 * @membase_phys: Memory BAR value as physical address
 * @membase: Memory BAR value
 * @biu_lock: BIU (bus interface unit) lock
 * @interrupt_mode: Interrupt mode
 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
 * @irq_rx_moderation: IRQ moderation time for RX event queues
 * @i2c_adap: I2C adapter
 * @board_info: Board-level information
 * @state: Device state flag.  Serialised by the rtnl_lock.
 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
 * @tx_queue: TX DMA queues
 * @rx_queue: RX DMA queues
 * @channel: Channels
 * @n_rx_queues: Number of RX queues
 * @n_channels: Number of channels in use
 * @rx_buffer_len: RX buffer length
 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 * @irq_status: Interrupt status buffer
 * @last_irq_cpu: Last CPU to handle interrupt.
 *	This register is written with the SMP processor ID whenever an
 *	interrupt is handled.  It is used by falcon_test_interrupt()
 *	to verify that an interrupt has occurred.
 * @spi_flash: SPI flash device
 *	This field will be %NULL if no flash device is present.
 * @spi_eeprom: SPI EEPROM device
 *	This field will be %NULL if no EEPROM device is present.
 * @spi_lock: SPI bus lock
 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 * @nic_data: Hardware dependent state
 * @mac_lock: MAC access lock.  Protects @port_enabled, @phy_mode,
 *	@port_inhibited, efx_monitor() and efx_reconfigure_port()
 * @port_enabled: Port enabled indicator.
 *	Serialises efx_stop_all(), efx_start_all(), efx_monitor(),
 *	efx_phy_work(), and efx_mac_work() with kernel interfaces.  Safe to
 *	read under any one of the rtnl_lock, mac_lock, or netif_tx_lock, but
 *	all three must be held to modify it.
 * @port_inhibited: If set, the netif_carrier is always off.  Hold the mac_lock
 * @port_initialized: Port initialized?
 * @net_dev: Operating system network device.  Consider holding the rtnl lock
 * @rx_checksum_enabled: RX checksumming enabled
 * @netif_stop_count: Port stop count
 * @netif_stop_lock: Port stop lock
 * @mac_stats: MAC statistics.  These include all statistics the MACs
 *	can provide.  Generic code converts these into a standard
 *	&struct net_device_stats.
 * @stats_buffer: DMA buffer for statistics
 * @stats_lock: Statistics update lock.  Serialises statistics fetches
 * @stats_disable_count: Nest count for disabling statistics fetches
 * @mac_op: MAC interface
 * @mac_address: Permanent MAC address
 * @phy_type: PHY type
 * @phy_lock: PHY access lock
 * @phy_op: PHY interface
 * @phy_data: PHY private data (including PHY-specific stats)
 * @mdio: PHY MDIO interface
 * @phy_mode: PHY operating mode.  Serialised by @mac_lock.
 * @mac_up: MAC link state
 * @link_up: Link status
 * @link_fd: Link is full duplex
 * @link_fc: Actual flow control flags
 * @link_speed: Link speed (Mbps)
 * @n_link_state_changes: Number of times the link has changed state
 * @promiscuous: Promiscuous flag.  Protected by netif_tx_lock.
 * @multicast_hash: Multicast hash table
 * @wanted_fc: Wanted flow control flags
 * @rx_reset: NOTE(review): not documented in the original; presumably
 *	tracks an in-progress RX reset — confirm against the reset code.
 * @phy_work: work item for dealing with PHY events
 * @mac_work: work item for dealing with MAC events
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 *
 * The @priv field of the corresponding &struct net_device points to
 * this.
 */
struct efx_nic {
	char name[IFNAMSIZ];
	struct pci_dev *pci_dev;
	const struct efx_nic_type *type;
	int legacy_irq;
	struct workqueue_struct *workqueue;
	char workqueue_name[16];
	struct work_struct reset_work;
	struct delayed_work monitor_work;
	resource_size_t membase_phys;
	void __iomem *membase;
	spinlock_t biu_lock;
	enum efx_int_mode interrupt_mode;
	bool irq_rx_adaptive;
	unsigned int irq_rx_moderation;

	struct i2c_adapter i2c_adap;
	struct efx_board board_info;

	enum nic_state state;
	enum reset_type reset_pending;

	struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
	struct efx_channel channel[EFX_MAX_CHANNELS];

	int n_rx_queues;
	int n_channels;
	unsigned int rx_buffer_len;
	unsigned int rx_buffer_order;

	struct efx_buffer irq_status;
	volatile signed int last_irq_cpu;

	struct efx_spi_device *spi_flash;
	struct efx_spi_device *spi_eeprom;
	struct mutex spi_lock;

	unsigned n_rx_nodesc_drop_cnt;

	struct falcon_nic_data *nic_data;

	struct mutex mac_lock;
	struct work_struct mac_work;
	bool port_enabled;
	bool port_inhibited;

	bool port_initialized;
	struct net_device *net_dev;
	bool rx_checksum_enabled;

	atomic_t netif_stop_count;
	spinlock_t netif_stop_lock;

	struct efx_mac_stats mac_stats;
	struct efx_buffer stats_buffer;
	spinlock_t stats_lock;
	unsigned int stats_disable_count;

	struct efx_mac_operations *mac_op;
	unsigned char mac_address[ETH_ALEN];

	enum phy_type phy_type;
	spinlock_t phy_lock;
	struct work_struct phy_work;
	struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mdio_if_info mdio;
	enum efx_phy_mode phy_mode;

	bool mac_up;
	bool link_up;
	bool link_fd;
	enum efx_fc_type link_fc;
	unsigned int link_speed;
	unsigned int n_link_state_changes;

	bool promiscuous;
	union efx_multicast_hash multicast_hash;
	enum efx_fc_type wanted_fc;

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;
	unsigned int loopback_modes;

	void *loopback_selftest;
};

/* Has the net device been registered with the kernel yet? */
static inline int efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}

/* Net device name, for inclusion in log messages if it has been registered.
 * Use efx->name not efx->net_dev->name so that races with (un)registration
 * are harmless.
 */
static inline const char *efx_dev_name(struct efx_nic *efx)
{
	return efx_dev_registered(efx) ? efx->name : "";
}

/**
 * struct efx_nic_type - Efx device type definition
 * @mem_bar: Memory BAR number
 * @mem_map_size: Memory BAR mapped size
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
 * @evq_ptr_tbl_base: Event queue pointer table base address
 * @evq_rptr_tbl_base: Event queue read-pointer table base address
 * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
 * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
 * @evq_size: Event queue size (must be a power of two)
 * @max_dma_mask: Maximum possible DMA mask
 * @tx_dma_mask: TX DMA mask
 * @bug5391_mask: Address mask for bug 5391 workaround
 * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
 * @rx_xon_thresh: RX FIFO XON watermark (bytes)
 * @rx_buffer_padding: Padding added to each RX buffer
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_int_mode.
 * @phys_addr_channels: Number of channels with physically addressed
 *	descriptors
 */
struct efx_nic_type {
	unsigned int mem_bar;
	unsigned int mem_map_size;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;

	unsigned int txd_ring_mask;
	unsigned int rxd_ring_mask;
	unsigned int evq_size;
	u64 max_dma_mask;
	unsigned int tx_dma_mask;
	unsigned bug5391_mask;

	int rx_xoff_thresh;
	int rx_xon_thresh;
	unsigned int rx_buffer_padding;
	unsigned int max_interrupt_mode;
	unsigned int phys_addr_channels;
};

/**************************************************************************
 *
 * Prototypes and inline functions
 *
 *************************************************************************/

/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = &_efx->channel[0];				\
	     _channel < &_efx->channel[EFX_MAX_CHANNELS];		\
	     _channel++)						\
		if (!_channel->used_flags)				\
			continue;					\
		else

/* Iterate over all used TX queues */
#define efx_for_each_tx_queue(_tx_queue, _efx)				\
	for (_tx_queue = &_efx->tx_queue[0];				\
	     _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT];		\
	     _tx_queue++)

/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	for (_tx_queue = &_channel->efx->tx_queue[0];			\
	     _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT];	\
	     _tx_queue++)						\
		if (_tx_queue->channel != _channel)			\
			continue;					\
		else

/* Iterate over all used RX queues */
#define efx_for_each_rx_queue(_rx_queue, _efx)				\
	for (_rx_queue = &_efx->rx_queue[0];				\
	     _rx_queue < &_efx->rx_queue[_efx->n_rx_queues];		\
	     _rx_queue++)

/* Iterate over all RX queues belonging to a channel.  There is at most
 * one RX queue per channel (index == channel number), so this "loop"
 * executes its body at most once. */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	for (_rx_queue = &_channel->efx->rx_queue[_channel->channel];	\
	     _rx_queue;							\
	     _rx_queue = NULL)						\
		if (_rx_queue->channel != _channel)			\
			continue;					\
		else

/* Returns a pointer to the specified receive buffer in the RX
 * descriptor queue.
 */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return (&rx_queue->buffer[index]);
}

/* Set bit in a little-endian bitfield */
static inline void set_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] |= (1 << (nr % 8));
}

/* Clear bit in a little-endian bitfield */
static inline void clear_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] &= ~(1 << (nr % 8));
}


/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 *
 * This calculates the maximum frame length that will be used for a
 * given MTU.  The frame length will be equal to the MTU plus a
 * constant amount of header space and padding.  This is the quantity
 * that the net driver will program into the MAC as the maximum frame
 * length.
 *
 * The 10G MAC used in Falcon requires 8-byte alignment on the frame
 * length, so we round up to the nearest 8.
 *
 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
 * XGMII cycle).  If the frame length reaches the maximum value in the
 * same cycle, the XMAC can miss the IPG altogether.  We work around
 * this by adding a further 16 bytes.
 */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)


#endif /* EFX_NET_DRIVER_H */