   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
 * drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
   4 *
   5 * Copyright (c) 2009-2010 Micrel, Inc.
   6 *      Tristram Ha <Tristram.Ha@micrel.com>
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/init.h>
  12#include <linux/interrupt.h>
  13#include <linux/kernel.h>
  14#include <linux/module.h>
  15#include <linux/ioport.h>
  16#include <linux/pci.h>
  17#include <linux/proc_fs.h>
  18#include <linux/mii.h>
  19#include <linux/platform_device.h>
  20#include <linux/ethtool.h>
  21#include <linux/etherdevice.h>
  22#include <linux/in.h>
  23#include <linux/ip.h>
  24#include <linux/if_vlan.h>
  25#include <linux/crc32.h>
  26#include <linux/sched.h>
  27#include <linux/slab.h>
  28#include <linux/micrel_phy.h>
  29
  30
  31/* DMA Registers */
  32
  33#define KS_DMA_TX_CTRL                  0x0000
  34#define DMA_TX_ENABLE                   0x00000001
  35#define DMA_TX_CRC_ENABLE               0x00000002
  36#define DMA_TX_PAD_ENABLE               0x00000004
  37#define DMA_TX_LOOPBACK                 0x00000100
  38#define DMA_TX_FLOW_ENABLE              0x00000200
  39#define DMA_TX_CSUM_IP                  0x00010000
  40#define DMA_TX_CSUM_TCP                 0x00020000
  41#define DMA_TX_CSUM_UDP                 0x00040000
  42#define DMA_TX_BURST_SIZE               0x3F000000
  43
  44#define KS_DMA_RX_CTRL                  0x0004
  45#define DMA_RX_ENABLE                   0x00000001
  46#define KS884X_DMA_RX_MULTICAST         0x00000002
  47#define DMA_RX_PROMISCUOUS              0x00000004
  48#define DMA_RX_ERROR                    0x00000008
  49#define DMA_RX_UNICAST                  0x00000010
  50#define DMA_RX_ALL_MULTICAST            0x00000020
  51#define DMA_RX_BROADCAST                0x00000040
  52#define DMA_RX_FLOW_ENABLE              0x00000200
  53#define DMA_RX_CSUM_IP                  0x00010000
  54#define DMA_RX_CSUM_TCP                 0x00020000
  55#define DMA_RX_CSUM_UDP                 0x00040000
  56#define DMA_RX_BURST_SIZE               0x3F000000
  57
  58#define DMA_BURST_SHIFT                 24
  59#define DMA_BURST_DEFAULT               8
  60
  61#define KS_DMA_TX_START                 0x0008
  62#define KS_DMA_RX_START                 0x000C
  63#define DMA_START                       0x00000001
  64
  65#define KS_DMA_TX_ADDR                  0x0010
  66#define KS_DMA_RX_ADDR                  0x0014
  67
  68#define DMA_ADDR_LIST_MASK              0xFFFFFFFC
  69#define DMA_ADDR_LIST_SHIFT             2
  70
  71/* MTR0 */
  72#define KS884X_MULTICAST_0_OFFSET       0x0020
  73#define KS884X_MULTICAST_1_OFFSET       0x0021
  74#define KS884X_MULTICAST_2_OFFSET       0x0022
  75#define KS884x_MULTICAST_3_OFFSET       0x0023
  76/* MTR1 */
  77#define KS884X_MULTICAST_4_OFFSET       0x0024
  78#define KS884X_MULTICAST_5_OFFSET       0x0025
  79#define KS884X_MULTICAST_6_OFFSET       0x0026
  80#define KS884X_MULTICAST_7_OFFSET       0x0027
  81
  82/* Interrupt Registers */
  83
  84/* INTEN */
  85#define KS884X_INTERRUPTS_ENABLE        0x0028
  86/* INTST */
  87#define KS884X_INTERRUPTS_STATUS        0x002C
  88
  89#define KS884X_INT_RX_STOPPED           0x02000000
  90#define KS884X_INT_TX_STOPPED           0x04000000
  91#define KS884X_INT_RX_OVERRUN           0x08000000
  92#define KS884X_INT_TX_EMPTY             0x10000000
  93#define KS884X_INT_RX                   0x20000000
  94#define KS884X_INT_TX                   0x40000000
  95#define KS884X_INT_PHY                  0x80000000
  96
  97#define KS884X_INT_RX_MASK              \
  98        (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
  99#define KS884X_INT_TX_MASK              \
 100        (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
 101#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
 102
 103/* MAC Additional Station Address */
 104
 105/* MAAL0 */
 106#define KS_ADD_ADDR_0_LO                0x0080
 107/* MAAH0 */
 108#define KS_ADD_ADDR_0_HI                0x0084
 109/* MAAL1 */
 110#define KS_ADD_ADDR_1_LO                0x0088
 111/* MAAH1 */
 112#define KS_ADD_ADDR_1_HI                0x008C
 113/* MAAL2 */
 114#define KS_ADD_ADDR_2_LO                0x0090
 115/* MAAH2 */
 116#define KS_ADD_ADDR_2_HI                0x0094
 117/* MAAL3 */
 118#define KS_ADD_ADDR_3_LO                0x0098
 119/* MAAH3 */
 120#define KS_ADD_ADDR_3_HI                0x009C
 121/* MAAL4 */
 122#define KS_ADD_ADDR_4_LO                0x00A0
 123/* MAAH4 */
 124#define KS_ADD_ADDR_4_HI                0x00A4
 125/* MAAL5 */
 126#define KS_ADD_ADDR_5_LO                0x00A8
 127/* MAAH5 */
 128#define KS_ADD_ADDR_5_HI                0x00AC
 129/* MAAL6 */
 130#define KS_ADD_ADDR_6_LO                0x00B0
 131/* MAAH6 */
 132#define KS_ADD_ADDR_6_HI                0x00B4
 133/* MAAL7 */
 134#define KS_ADD_ADDR_7_LO                0x00B8
 135/* MAAH7 */
 136#define KS_ADD_ADDR_7_HI                0x00BC
 137/* MAAL8 */
 138#define KS_ADD_ADDR_8_LO                0x00C0
 139/* MAAH8 */
 140#define KS_ADD_ADDR_8_HI                0x00C4
 141/* MAAL9 */
 142#define KS_ADD_ADDR_9_LO                0x00C8
 143/* MAAH9 */
 144#define KS_ADD_ADDR_9_HI                0x00CC
 145/* MAAL10 */
 146#define KS_ADD_ADDR_A_LO                0x00D0
 147/* MAAH10 */
 148#define KS_ADD_ADDR_A_HI                0x00D4
 149/* MAAL11 */
 150#define KS_ADD_ADDR_B_LO                0x00D8
 151/* MAAH11 */
 152#define KS_ADD_ADDR_B_HI                0x00DC
 153/* MAAL12 */
 154#define KS_ADD_ADDR_C_LO                0x00E0
 155/* MAAH12 */
 156#define KS_ADD_ADDR_C_HI                0x00E4
 157/* MAAL13 */
 158#define KS_ADD_ADDR_D_LO                0x00E8
 159/* MAAH13 */
 160#define KS_ADD_ADDR_D_HI                0x00EC
 161/* MAAL14 */
 162#define KS_ADD_ADDR_E_LO                0x00F0
 163/* MAAH14 */
 164#define KS_ADD_ADDR_E_HI                0x00F4
 165/* MAAL15 */
 166#define KS_ADD_ADDR_F_LO                0x00F8
 167/* MAAH15 */
 168#define KS_ADD_ADDR_F_HI                0x00FC
 169
 170#define ADD_ADDR_HI_MASK                0x0000FFFF
 171#define ADD_ADDR_ENABLE                 0x80000000
 172#define ADD_ADDR_INCR                   8
 173
 174/* Miscellaneous Registers */
 175
 176/* MARL */
 177#define KS884X_ADDR_0_OFFSET            0x0200
 178#define KS884X_ADDR_1_OFFSET            0x0201
 179/* MARM */
 180#define KS884X_ADDR_2_OFFSET            0x0202
 181#define KS884X_ADDR_3_OFFSET            0x0203
 182/* MARH */
 183#define KS884X_ADDR_4_OFFSET            0x0204
 184#define KS884X_ADDR_5_OFFSET            0x0205
 185
 186/* OBCR */
 187#define KS884X_BUS_CTRL_OFFSET          0x0210
 188
 189#define BUS_SPEED_125_MHZ               0x0000
 190#define BUS_SPEED_62_5_MHZ              0x0001
 191#define BUS_SPEED_41_66_MHZ             0x0002
 192#define BUS_SPEED_25_MHZ                0x0003
 193
 194/* EEPCR */
 195#define KS884X_EEPROM_CTRL_OFFSET       0x0212
 196
 197#define EEPROM_CHIP_SELECT              0x0001
 198#define EEPROM_SERIAL_CLOCK             0x0002
 199#define EEPROM_DATA_OUT                 0x0004
 200#define EEPROM_DATA_IN                  0x0008
 201#define EEPROM_ACCESS_ENABLE            0x0010
 202
 203/* MBIR */
 204#define KS884X_MEM_INFO_OFFSET          0x0214
 205
 206#define RX_MEM_TEST_FAILED              0x0008
 207#define RX_MEM_TEST_FINISHED            0x0010
 208#define TX_MEM_TEST_FAILED              0x0800
 209#define TX_MEM_TEST_FINISHED            0x1000
 210
 211/* GCR */
 212#define KS884X_GLOBAL_CTRL_OFFSET       0x0216
 213#define GLOBAL_SOFTWARE_RESET           0x0001
 214
 215#define KS8841_POWER_MANAGE_OFFSET      0x0218
 216
 217/* WFCR */
 218#define KS8841_WOL_CTRL_OFFSET          0x021A
 219#define KS8841_WOL_MAGIC_ENABLE         0x0080
 220#define KS8841_WOL_FRAME3_ENABLE        0x0008
 221#define KS8841_WOL_FRAME2_ENABLE        0x0004
 222#define KS8841_WOL_FRAME1_ENABLE        0x0002
 223#define KS8841_WOL_FRAME0_ENABLE        0x0001
 224
 225/* WF0 */
 226#define KS8841_WOL_FRAME_CRC_OFFSET     0x0220
 227#define KS8841_WOL_FRAME_BYTE0_OFFSET   0x0224
 228#define KS8841_WOL_FRAME_BYTE2_OFFSET   0x0228
 229
 230/* IACR */
 231#define KS884X_IACR_P                   0x04A0
 232#define KS884X_IACR_OFFSET              KS884X_IACR_P
 233
 234/* IADR1 */
 235#define KS884X_IADR1_P                  0x04A2
 236#define KS884X_IADR2_P                  0x04A4
 237#define KS884X_IADR3_P                  0x04A6
 238#define KS884X_IADR4_P                  0x04A8
 239#define KS884X_IADR5_P                  0x04AA
 240
 241#define KS884X_ACC_CTRL_SEL_OFFSET      KS884X_IACR_P
 242#define KS884X_ACC_CTRL_INDEX_OFFSET    (KS884X_ACC_CTRL_SEL_OFFSET + 1)
 243
 244#define KS884X_ACC_DATA_0_OFFSET        KS884X_IADR4_P
 245#define KS884X_ACC_DATA_1_OFFSET        (KS884X_ACC_DATA_0_OFFSET + 1)
 246#define KS884X_ACC_DATA_2_OFFSET        KS884X_IADR5_P
 247#define KS884X_ACC_DATA_3_OFFSET        (KS884X_ACC_DATA_2_OFFSET + 1)
 248#define KS884X_ACC_DATA_4_OFFSET        KS884X_IADR2_P
 249#define KS884X_ACC_DATA_5_OFFSET        (KS884X_ACC_DATA_4_OFFSET + 1)
 250#define KS884X_ACC_DATA_6_OFFSET        KS884X_IADR3_P
 251#define KS884X_ACC_DATA_7_OFFSET        (KS884X_ACC_DATA_6_OFFSET + 1)
 252#define KS884X_ACC_DATA_8_OFFSET        KS884X_IADR1_P
 253
 254/* P1MBCR */
 255#define KS884X_P1MBCR_P                 0x04D0
 256#define KS884X_P1MBSR_P                 0x04D2
 257#define KS884X_PHY1ILR_P                0x04D4
 258#define KS884X_PHY1IHR_P                0x04D6
 259#define KS884X_P1ANAR_P                 0x04D8
 260#define KS884X_P1ANLPR_P                0x04DA
 261
 262/* P2MBCR */
 263#define KS884X_P2MBCR_P                 0x04E0
 264#define KS884X_P2MBSR_P                 0x04E2
 265#define KS884X_PHY2ILR_P                0x04E4
 266#define KS884X_PHY2IHR_P                0x04E6
 267#define KS884X_P2ANAR_P                 0x04E8
 268#define KS884X_P2ANLPR_P                0x04EA
 269
 270#define KS884X_PHY_1_CTRL_OFFSET        KS884X_P1MBCR_P
 271#define PHY_CTRL_INTERVAL               (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
 272
 273#define KS884X_PHY_CTRL_OFFSET          0x00
 274
 275#define KS884X_PHY_STATUS_OFFSET        0x02
 276
 277#define KS884X_PHY_ID_1_OFFSET          0x04
 278#define KS884X_PHY_ID_2_OFFSET          0x06
 279
 280#define KS884X_PHY_AUTO_NEG_OFFSET      0x08
 281
 282#define KS884X_PHY_REMOTE_CAP_OFFSET    0x0A
 283
 284/* P1VCT */
 285#define KS884X_P1VCT_P                  0x04F0
 286#define KS884X_P1PHYCTRL_P              0x04F2
 287
 288/* P2VCT */
 289#define KS884X_P2VCT_P                  0x04F4
 290#define KS884X_P2PHYCTRL_P              0x04F6
 291
 292#define KS884X_PHY_SPECIAL_OFFSET       KS884X_P1VCT_P
 293#define PHY_SPECIAL_INTERVAL            (KS884X_P2VCT_P - KS884X_P1VCT_P)
 294
 295#define KS884X_PHY_LINK_MD_OFFSET       0x00
 296
 297#define PHY_START_CABLE_DIAG            0x8000
 298#define PHY_CABLE_DIAG_RESULT           0x6000
 299#define PHY_CABLE_STAT_NORMAL           0x0000
 300#define PHY_CABLE_STAT_OPEN             0x2000
 301#define PHY_CABLE_STAT_SHORT            0x4000
 302#define PHY_CABLE_STAT_FAILED           0x6000
 303#define PHY_CABLE_10M_SHORT             0x1000
 304#define PHY_CABLE_FAULT_COUNTER         0x01FF
 305
 306#define KS884X_PHY_PHY_CTRL_OFFSET      0x02
 307
 308#define PHY_STAT_REVERSED_POLARITY      0x0020
 309#define PHY_STAT_MDIX                   0x0010
 310#define PHY_FORCE_LINK                  0x0008
 311#define PHY_POWER_SAVING_DISABLE        0x0004
 312#define PHY_REMOTE_LOOPBACK             0x0002
 313
 314/* SIDER */
 315#define KS884X_SIDER_P                  0x0400
 316#define KS884X_CHIP_ID_OFFSET           KS884X_SIDER_P
 317#define KS884X_FAMILY_ID_OFFSET         (KS884X_CHIP_ID_OFFSET + 1)
 318
 319#define REG_FAMILY_ID                   0x88
 320
 321#define REG_CHIP_ID_41                  0x8810
 322#define REG_CHIP_ID_42                  0x8800
 323
 324#define KS884X_CHIP_ID_MASK_41          0xFF10
 325#define KS884X_CHIP_ID_MASK             0xFFF0
 326#define KS884X_CHIP_ID_SHIFT            4
 327#define KS884X_REVISION_MASK            0x000E
 328#define KS884X_REVISION_SHIFT           1
 329#define KS8842_START                    0x0001
 330
 331#define CHIP_IP_41_M                    0x8810
 332#define CHIP_IP_42_M                    0x8800
 333#define CHIP_IP_61_M                    0x8890
 334#define CHIP_IP_62_M                    0x8880
 335
 336#define CHIP_IP_41_P                    0x8850
 337#define CHIP_IP_42_P                    0x8840
 338#define CHIP_IP_61_P                    0x88D0
 339#define CHIP_IP_62_P                    0x88C0
 340
 341/* SGCR1 */
 342#define KS8842_SGCR1_P                  0x0402
 343#define KS8842_SWITCH_CTRL_1_OFFSET     KS8842_SGCR1_P
 344
 345#define SWITCH_PASS_ALL                 0x8000
 346#define SWITCH_TX_FLOW_CTRL             0x2000
 347#define SWITCH_RX_FLOW_CTRL             0x1000
 348#define SWITCH_CHECK_LENGTH             0x0800
 349#define SWITCH_AGING_ENABLE             0x0400
 350#define SWITCH_FAST_AGING               0x0200
 351#define SWITCH_AGGR_BACKOFF             0x0100
 352#define SWITCH_PASS_PAUSE               0x0008
 353#define SWITCH_LINK_AUTO_AGING          0x0001
 354
 355/* SGCR2 */
 356#define KS8842_SGCR2_P                  0x0404
 357#define KS8842_SWITCH_CTRL_2_OFFSET     KS8842_SGCR2_P
 358
 359#define SWITCH_VLAN_ENABLE              0x8000
 360#define SWITCH_IGMP_SNOOP               0x4000
 361#define IPV6_MLD_SNOOP_ENABLE           0x2000
 362#define IPV6_MLD_SNOOP_OPTION           0x1000
 363#define PRIORITY_SCHEME_SELECT          0x0800
 364#define SWITCH_MIRROR_RX_TX             0x0100
 365#define UNICAST_VLAN_BOUNDARY           0x0080
 366#define MULTICAST_STORM_DISABLE         0x0040
 367#define SWITCH_BACK_PRESSURE            0x0020
 368#define FAIR_FLOW_CTRL                  0x0010
 369#define NO_EXC_COLLISION_DROP           0x0008
 370#define SWITCH_HUGE_PACKET              0x0004
 371#define SWITCH_LEGAL_PACKET             0x0002
 372#define SWITCH_BUF_RESERVE              0x0001
 373
 374/* SGCR3 */
 375#define KS8842_SGCR3_P                  0x0406
 376#define KS8842_SWITCH_CTRL_3_OFFSET     KS8842_SGCR3_P
 377
 378#define BROADCAST_STORM_RATE_LO         0xFF00
 379#define SWITCH_REPEATER                 0x0080
 380#define SWITCH_HALF_DUPLEX              0x0040
 381#define SWITCH_FLOW_CTRL                0x0020
 382#define SWITCH_10_MBIT                  0x0010
 383#define SWITCH_REPLACE_NULL_VID         0x0008
 384#define BROADCAST_STORM_RATE_HI         0x0007
 385
 386#define BROADCAST_STORM_RATE            0x07FF
 387
 388/* SGCR4 */
 389#define KS8842_SGCR4_P                  0x0408
 390
 391/* SGCR5 */
 392#define KS8842_SGCR5_P                  0x040A
 393#define KS8842_SWITCH_CTRL_5_OFFSET     KS8842_SGCR5_P
 394
 395#define LED_MODE                        0x8200
 396#define LED_SPEED_DUPLEX_ACT            0x0000
 397#define LED_SPEED_DUPLEX_LINK_ACT       0x8000
 398#define LED_DUPLEX_10_100               0x0200
 399
 400/* SGCR6 */
 401#define KS8842_SGCR6_P                  0x0410
 402#define KS8842_SWITCH_CTRL_6_OFFSET     KS8842_SGCR6_P
 403
 404#define KS8842_PRIORITY_MASK            3
 405#define KS8842_PRIORITY_SHIFT           2
 406
 407/* SGCR7 */
 408#define KS8842_SGCR7_P                  0x0412
 409#define KS8842_SWITCH_CTRL_7_OFFSET     KS8842_SGCR7_P
 410
 411#define SWITCH_UNK_DEF_PORT_ENABLE      0x0008
 412#define SWITCH_UNK_DEF_PORT_3           0x0004
 413#define SWITCH_UNK_DEF_PORT_2           0x0002
 414#define SWITCH_UNK_DEF_PORT_1           0x0001
 415
 416/* MACAR1 */
 417#define KS8842_MACAR1_P                 0x0470
 418#define KS8842_MACAR2_P                 0x0472
 419#define KS8842_MACAR3_P                 0x0474
 420#define KS8842_MAC_ADDR_1_OFFSET        KS8842_MACAR1_P
 421#define KS8842_MAC_ADDR_0_OFFSET        (KS8842_MAC_ADDR_1_OFFSET + 1)
 422#define KS8842_MAC_ADDR_3_OFFSET        KS8842_MACAR2_P
 423#define KS8842_MAC_ADDR_2_OFFSET        (KS8842_MAC_ADDR_3_OFFSET + 1)
 424#define KS8842_MAC_ADDR_5_OFFSET        KS8842_MACAR3_P
 425#define KS8842_MAC_ADDR_4_OFFSET        (KS8842_MAC_ADDR_5_OFFSET + 1)
 426
 427/* TOSR1 */
 428#define KS8842_TOSR1_P                  0x0480
 429#define KS8842_TOSR2_P                  0x0482
 430#define KS8842_TOSR3_P                  0x0484
 431#define KS8842_TOSR4_P                  0x0486
 432#define KS8842_TOSR5_P                  0x0488
 433#define KS8842_TOSR6_P                  0x048A
 434#define KS8842_TOSR7_P                  0x0490
 435#define KS8842_TOSR8_P                  0x0492
 436#define KS8842_TOS_1_OFFSET             KS8842_TOSR1_P
 437#define KS8842_TOS_2_OFFSET             KS8842_TOSR2_P
 438#define KS8842_TOS_3_OFFSET             KS8842_TOSR3_P
 439#define KS8842_TOS_4_OFFSET             KS8842_TOSR4_P
 440#define KS8842_TOS_5_OFFSET             KS8842_TOSR5_P
 441#define KS8842_TOS_6_OFFSET             KS8842_TOSR6_P
 442
 443#define KS8842_TOS_7_OFFSET             KS8842_TOSR7_P
 444#define KS8842_TOS_8_OFFSET             KS8842_TOSR8_P
 445
 446/* P1CR1 */
 447#define KS8842_P1CR1_P                  0x0500
 448#define KS8842_P1CR2_P                  0x0502
 449#define KS8842_P1VIDR_P                 0x0504
 450#define KS8842_P1CR3_P                  0x0506
 451#define KS8842_P1IRCR_P                 0x0508
 452#define KS8842_P1ERCR_P                 0x050A
 453#define KS884X_P1SCSLMD_P               0x0510
 454#define KS884X_P1CR4_P                  0x0512
 455#define KS884X_P1SR_P                   0x0514
 456
 457/* P2CR1 */
 458#define KS8842_P2CR1_P                  0x0520
 459#define KS8842_P2CR2_P                  0x0522
 460#define KS8842_P2VIDR_P                 0x0524
 461#define KS8842_P2CR3_P                  0x0526
 462#define KS8842_P2IRCR_P                 0x0528
 463#define KS8842_P2ERCR_P                 0x052A
 464#define KS884X_P2SCSLMD_P               0x0530
 465#define KS884X_P2CR4_P                  0x0532
 466#define KS884X_P2SR_P                   0x0534
 467
 468/* P3CR1 */
 469#define KS8842_P3CR1_P                  0x0540
 470#define KS8842_P3CR2_P                  0x0542
 471#define KS8842_P3VIDR_P                 0x0544
 472#define KS8842_P3CR3_P                  0x0546
 473#define KS8842_P3IRCR_P                 0x0548
 474#define KS8842_P3ERCR_P                 0x054A
 475
 476#define KS8842_PORT_1_CTRL_1            KS8842_P1CR1_P
 477#define KS8842_PORT_2_CTRL_1            KS8842_P2CR1_P
 478#define KS8842_PORT_3_CTRL_1            KS8842_P3CR1_P
 479
 480#define PORT_CTRL_ADDR(port, addr)              \
 481        (addr = KS8842_PORT_1_CTRL_1 + (port) * \
 482                (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
 483
 484#define KS8842_PORT_CTRL_1_OFFSET       0x00
 485
 486#define PORT_BROADCAST_STORM            0x0080
 487#define PORT_DIFFSERV_ENABLE            0x0040
 488#define PORT_802_1P_ENABLE              0x0020
 489#define PORT_BASED_PRIORITY_MASK        0x0018
 490#define PORT_BASED_PRIORITY_BASE        0x0003
 491#define PORT_BASED_PRIORITY_SHIFT       3
 492#define PORT_BASED_PRIORITY_0           0x0000
 493#define PORT_BASED_PRIORITY_1           0x0008
 494#define PORT_BASED_PRIORITY_2           0x0010
 495#define PORT_BASED_PRIORITY_3           0x0018
 496#define PORT_INSERT_TAG                 0x0004
 497#define PORT_REMOVE_TAG                 0x0002
 498#define PORT_PRIO_QUEUE_ENABLE          0x0001
 499
 500#define KS8842_PORT_CTRL_2_OFFSET       0x02
 501
 502#define PORT_INGRESS_VLAN_FILTER        0x4000
 503#define PORT_DISCARD_NON_VID            0x2000
 504#define PORT_FORCE_FLOW_CTRL            0x1000
 505#define PORT_BACK_PRESSURE              0x0800
 506#define PORT_TX_ENABLE                  0x0400
 507#define PORT_RX_ENABLE                  0x0200
 508#define PORT_LEARN_DISABLE              0x0100
 509#define PORT_MIRROR_SNIFFER             0x0080
 510#define PORT_MIRROR_RX                  0x0040
 511#define PORT_MIRROR_TX                  0x0020
 512#define PORT_USER_PRIORITY_CEILING      0x0008
 513#define PORT_VLAN_MEMBERSHIP            0x0007
 514
 515#define KS8842_PORT_CTRL_VID_OFFSET     0x04
 516
 517#define PORT_DEFAULT_VID                0x0001
 518
 519#define KS8842_PORT_CTRL_3_OFFSET       0x06
 520
 521#define PORT_INGRESS_LIMIT_MODE         0x000C
 522#define PORT_INGRESS_ALL                0x0000
 523#define PORT_INGRESS_UNICAST            0x0004
 524#define PORT_INGRESS_MULTICAST          0x0008
 525#define PORT_INGRESS_BROADCAST          0x000C
 526#define PORT_COUNT_IFG                  0x0002
 527#define PORT_COUNT_PREAMBLE             0x0001
 528
 529#define KS8842_PORT_IN_RATE_OFFSET      0x08
 530#define KS8842_PORT_OUT_RATE_OFFSET     0x0A
 531
 532#define PORT_PRIORITY_RATE              0x0F
 533#define PORT_PRIORITY_RATE_SHIFT        4
 534
 535#define KS884X_PORT_LINK_MD             0x10
 536
 537#define PORT_CABLE_10M_SHORT            0x8000
 538#define PORT_CABLE_DIAG_RESULT          0x6000
 539#define PORT_CABLE_STAT_NORMAL          0x0000
 540#define PORT_CABLE_STAT_OPEN            0x2000
 541#define PORT_CABLE_STAT_SHORT           0x4000
 542#define PORT_CABLE_STAT_FAILED          0x6000
 543#define PORT_START_CABLE_DIAG           0x1000
 544#define PORT_FORCE_LINK                 0x0800
 545#define PORT_POWER_SAVING_DISABLE       0x0400
 546#define PORT_PHY_REMOTE_LOOPBACK        0x0200
 547#define PORT_CABLE_FAULT_COUNTER        0x01FF
 548
 549#define KS884X_PORT_CTRL_4_OFFSET       0x12
 550
 551#define PORT_LED_OFF                    0x8000
 552#define PORT_TX_DISABLE                 0x4000
 553#define PORT_AUTO_NEG_RESTART           0x2000
 554#define PORT_REMOTE_FAULT_DISABLE       0x1000
 555#define PORT_POWER_DOWN                 0x0800
 556#define PORT_AUTO_MDIX_DISABLE          0x0400
 557#define PORT_FORCE_MDIX                 0x0200
 558#define PORT_LOOPBACK                   0x0100
 559#define PORT_AUTO_NEG_ENABLE            0x0080
 560#define PORT_FORCE_100_MBIT             0x0040
 561#define PORT_FORCE_FULL_DUPLEX          0x0020
 562#define PORT_AUTO_NEG_SYM_PAUSE         0x0010
 563#define PORT_AUTO_NEG_100BTX_FD         0x0008
 564#define PORT_AUTO_NEG_100BTX            0x0004
 565#define PORT_AUTO_NEG_10BT_FD           0x0002
 566#define PORT_AUTO_NEG_10BT              0x0001
 567
 568#define KS884X_PORT_STATUS_OFFSET       0x14
 569
 570#define PORT_HP_MDIX                    0x8000
 571#define PORT_REVERSED_POLARITY          0x2000
 572#define PORT_RX_FLOW_CTRL               0x0800
 573#define PORT_TX_FLOW_CTRL               0x1000
 574#define PORT_STATUS_SPEED_100MBIT       0x0400
 575#define PORT_STATUS_FULL_DUPLEX         0x0200
 576#define PORT_REMOTE_FAULT               0x0100
 577#define PORT_MDIX_STATUS                0x0080
 578#define PORT_AUTO_NEG_COMPLETE          0x0040
 579#define PORT_STATUS_LINK_GOOD           0x0020
 580#define PORT_REMOTE_SYM_PAUSE           0x0010
 581#define PORT_REMOTE_100BTX_FD           0x0008
 582#define PORT_REMOTE_100BTX              0x0004
 583#define PORT_REMOTE_10BT_FD             0x0002
 584#define PORT_REMOTE_10BT                0x0001
 585
 586/*
 587#define STATIC_MAC_TABLE_ADDR           00-0000FFFF-FFFFFFFF
 588#define STATIC_MAC_TABLE_FWD_PORTS      00-00070000-00000000
 589#define STATIC_MAC_TABLE_VALID          00-00080000-00000000
 590#define STATIC_MAC_TABLE_OVERRIDE       00-00100000-00000000
 591#define STATIC_MAC_TABLE_USE_FID        00-00200000-00000000
 592#define STATIC_MAC_TABLE_FID            00-03C00000-00000000
 593*/
 594
 595#define STATIC_MAC_TABLE_ADDR           0x0000FFFF
 596#define STATIC_MAC_TABLE_FWD_PORTS      0x00070000
 597#define STATIC_MAC_TABLE_VALID          0x00080000
 598#define STATIC_MAC_TABLE_OVERRIDE       0x00100000
 599#define STATIC_MAC_TABLE_USE_FID        0x00200000
 600#define STATIC_MAC_TABLE_FID            0x03C00000
 601
 602#define STATIC_MAC_FWD_PORTS_SHIFT      16
 603#define STATIC_MAC_FID_SHIFT            22
 604
 605/*
 606#define VLAN_TABLE_VID                  00-00000000-00000FFF
 607#define VLAN_TABLE_FID                  00-00000000-0000F000
 608#define VLAN_TABLE_MEMBERSHIP           00-00000000-00070000
 609#define VLAN_TABLE_VALID                00-00000000-00080000
 610*/
 611
 612#define VLAN_TABLE_VID                  0x00000FFF
 613#define VLAN_TABLE_FID                  0x0000F000
 614#define VLAN_TABLE_MEMBERSHIP           0x00070000
 615#define VLAN_TABLE_VALID                0x00080000
 616
 617#define VLAN_TABLE_FID_SHIFT            12
 618#define VLAN_TABLE_MEMBERSHIP_SHIFT     16
 619
 620/*
 621#define DYNAMIC_MAC_TABLE_ADDR          00-0000FFFF-FFFFFFFF
 622#define DYNAMIC_MAC_TABLE_FID           00-000F0000-00000000
 623#define DYNAMIC_MAC_TABLE_SRC_PORT      00-00300000-00000000
 624#define DYNAMIC_MAC_TABLE_TIMESTAMP     00-00C00000-00000000
 625#define DYNAMIC_MAC_TABLE_ENTRIES       03-FF000000-00000000
 626#define DYNAMIC_MAC_TABLE_MAC_EMPTY     04-00000000-00000000
 627#define DYNAMIC_MAC_TABLE_RESERVED      78-00000000-00000000
 628#define DYNAMIC_MAC_TABLE_NOT_READY     80-00000000-00000000
 629*/
 630
 631#define DYNAMIC_MAC_TABLE_ADDR          0x0000FFFF
 632#define DYNAMIC_MAC_TABLE_FID           0x000F0000
 633#define DYNAMIC_MAC_TABLE_SRC_PORT      0x00300000
 634#define DYNAMIC_MAC_TABLE_TIMESTAMP     0x00C00000
 635#define DYNAMIC_MAC_TABLE_ENTRIES       0xFF000000
 636
 637#define DYNAMIC_MAC_TABLE_ENTRIES_H     0x03
 638#define DYNAMIC_MAC_TABLE_MAC_EMPTY     0x04
 639#define DYNAMIC_MAC_TABLE_RESERVED      0x78
 640#define DYNAMIC_MAC_TABLE_NOT_READY     0x80
 641
 642#define DYNAMIC_MAC_FID_SHIFT           16
 643#define DYNAMIC_MAC_SRC_PORT_SHIFT      20
 644#define DYNAMIC_MAC_TIMESTAMP_SHIFT     22
 645#define DYNAMIC_MAC_ENTRIES_SHIFT       24
 646#define DYNAMIC_MAC_ENTRIES_H_SHIFT     8
 647
 648/*
 649#define MIB_COUNTER_VALUE               00-00000000-3FFFFFFF
 650#define MIB_COUNTER_VALID               00-00000000-40000000
 651#define MIB_COUNTER_OVERFLOW            00-00000000-80000000
 652*/
 653
 654#define MIB_COUNTER_VALUE               0x3FFFFFFF
 655#define MIB_COUNTER_VALID               0x40000000
 656#define MIB_COUNTER_OVERFLOW            0x80000000
 657
 658#define MIB_PACKET_DROPPED              0x0000FFFF
 659
 660#define KS_MIB_PACKET_DROPPED_TX_0      0x100
 661#define KS_MIB_PACKET_DROPPED_TX_1      0x101
 662#define KS_MIB_PACKET_DROPPED_TX        0x102
 663#define KS_MIB_PACKET_DROPPED_RX_0      0x103
 664#define KS_MIB_PACKET_DROPPED_RX_1      0x104
 665#define KS_MIB_PACKET_DROPPED_RX        0x105
 666
 667/* Change default LED mode. */
 668#define SET_DEFAULT_LED                 LED_SPEED_DUPLEX_ACT
 669
 670#define MAC_ADDR_ORDER(i)               (ETH_ALEN - 1 - (i))
 671
 672#define MAX_ETHERNET_BODY_SIZE          1500
 673#define ETHERNET_HEADER_SIZE            (14 + VLAN_HLEN)
 674
 675#define MAX_ETHERNET_PACKET_SIZE        \
 676        (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
 677
 678#define REGULAR_RX_BUF_SIZE             (MAX_ETHERNET_PACKET_SIZE + 4)
 679#define MAX_RX_BUF_SIZE                 (1912 + 4)
 680
 681#define ADDITIONAL_ENTRIES              16
 682#define MAX_MULTICAST_LIST              32
 683
 684#define HW_MULTICAST_SIZE               8
 685
 686#define HW_TO_DEV_PORT(port)            (port - 1)
 687
/* Link/media state values kept per port. */
enum {
        media_connected,
        media_disconnected
};
 692
/*
 * Indices for the driver's internal error counters.
 * OID_COUNTER_FIRST/OID_COUNTER_LAST bracket the real counters so the
 * counter array can be sized and iterated by range.
 */
enum {
        /* unknown/unclassified counter; note: "UNKOWN" misspelling is
         * preserved because the identifier may be referenced elsewhere
         */
        OID_COUNTER_UNKOWN,

        OID_COUNTER_FIRST,

        /* total transmit errors */
        OID_COUNTER_XMIT_ERROR,

        /* total receive errors */
        OID_COUNTER_RCV_ERROR,

        OID_COUNTER_LAST
};
 706
 707/*
 708 * Hardware descriptor definitions
 709 */
 710
 711#define DESC_ALIGNMENT                  16
 712#define BUFFER_ALIGNMENT                8
 713
 714#define NUM_OF_RX_DESC                  64
 715#define NUM_OF_TX_DESC                  64
 716
 717#define KS_DESC_RX_FRAME_LEN            0x000007FF
 718#define KS_DESC_RX_FRAME_TYPE           0x00008000
 719#define KS_DESC_RX_ERROR_CRC            0x00010000
 720#define KS_DESC_RX_ERROR_RUNT           0x00020000
 721#define KS_DESC_RX_ERROR_TOO_LONG       0x00040000
 722#define KS_DESC_RX_ERROR_PHY            0x00080000
 723#define KS884X_DESC_RX_PORT_MASK        0x00300000
 724#define KS_DESC_RX_MULTICAST            0x01000000
 725#define KS_DESC_RX_ERROR                0x02000000
 726#define KS_DESC_RX_ERROR_CSUM_UDP       0x04000000
 727#define KS_DESC_RX_ERROR_CSUM_TCP       0x08000000
 728#define KS_DESC_RX_ERROR_CSUM_IP        0x10000000
 729#define KS_DESC_RX_LAST                 0x20000000
 730#define KS_DESC_RX_FIRST                0x40000000
 731#define KS_DESC_RX_ERROR_COND           \
 732        (KS_DESC_RX_ERROR_CRC |         \
 733        KS_DESC_RX_ERROR_RUNT |         \
 734        KS_DESC_RX_ERROR_PHY |          \
 735        KS_DESC_RX_ERROR_TOO_LONG)
 736
 737#define KS_DESC_HW_OWNED                0x80000000
 738
 739#define KS_DESC_BUF_SIZE                0x000007FF
 740#define KS884X_DESC_TX_PORT_MASK        0x00300000
 741#define KS_DESC_END_OF_RING             0x02000000
 742#define KS_DESC_TX_CSUM_GEN_UDP         0x04000000
 743#define KS_DESC_TX_CSUM_GEN_TCP         0x08000000
 744#define KS_DESC_TX_CSUM_GEN_IP          0x10000000
 745#define KS_DESC_TX_LAST                 0x20000000
 746#define KS_DESC_TX_FIRST                0x40000000
 747#define KS_DESC_TX_INTERRUPT            0x80000000
 748
 749#define KS_DESC_PORT_SHIFT              20
 750
 751#define KS_DESC_RX_MASK                 (KS_DESC_BUF_SIZE)
 752
 753#define KS_DESC_TX_MASK                 \
 754        (KS_DESC_TX_INTERRUPT |         \
 755        KS_DESC_TX_FIRST |              \
 756        KS_DESC_TX_LAST |               \
 757        KS_DESC_TX_CSUM_GEN_IP |        \
 758        KS_DESC_TX_CSUM_GEN_TCP |       \
 759        KS_DESC_TX_CSUM_GEN_UDP |       \
 760        KS_DESC_BUF_SIZE)
 761
/*
 * struct ksz_desc_rx_stat - Bitfield view of the receive descriptor
 * status/control word.
 *
 * The field positions mirror the KS_DESC_RX_* / KS_DESC_HW_OWNED bit
 * masks defined above (e.g. frame_len <-> KS_DESC_RX_FRAME_LEN,
 * hw_owned <-> KS_DESC_HW_OWNED).  Both bitfield orders are provided so
 * the struct overlays the same 32-bit hardware layout regardless of the
 * host's bitfield endianness.  Do not reorder or resize these fields.
 */
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
        u32 hw_owned:1;
        u32 first_desc:1;
        u32 last_desc:1;
        u32 csum_err_ip:1;
        u32 csum_err_tcp:1;
        u32 csum_err_udp:1;
        u32 error:1;
        u32 multicast:1;
        u32 src_port:4;
        u32 err_phy:1;
        u32 err_too_long:1;
        u32 err_runt:1;
        u32 err_crc:1;
        u32 frame_type:1;
        u32 reserved1:4;
        u32 frame_len:11;
#else
        u32 frame_len:11;       /* KS_DESC_RX_FRAME_LEN */
        u32 reserved1:4;
        u32 frame_type:1;       /* KS_DESC_RX_FRAME_TYPE */
        u32 err_crc:1;          /* KS_DESC_RX_ERROR_CRC */
        u32 err_runt:1;         /* KS_DESC_RX_ERROR_RUNT */
        u32 err_too_long:1;     /* KS_DESC_RX_ERROR_TOO_LONG */
        u32 err_phy:1;          /* KS_DESC_RX_ERROR_PHY */
        u32 src_port:4;         /* starts at KS884X_DESC_RX_PORT_MASK bits */
        u32 multicast:1;        /* KS_DESC_RX_MULTICAST */
        u32 error:1;            /* KS_DESC_RX_ERROR */
        u32 csum_err_udp:1;     /* KS_DESC_RX_ERROR_CSUM_UDP */
        u32 csum_err_tcp:1;     /* KS_DESC_RX_ERROR_CSUM_TCP */
        u32 csum_err_ip:1;      /* KS_DESC_RX_ERROR_CSUM_IP */
        u32 last_desc:1;        /* KS_DESC_RX_LAST */
        u32 first_desc:1;       /* KS_DESC_RX_FIRST */
        u32 hw_owned:1;         /* KS_DESC_HW_OWNED */
#endif
};
 799
/*
 * struct ksz_desc_tx_stat - Bitfield view of the transmit descriptor
 * status word.  Only the ownership bit (KS_DESC_HW_OWNED) is defined;
 * all other bits are reserved.
 */
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
        u32 hw_owned:1;
        u32 reserved1:31;
#else
        u32 reserved1:31;
        u32 hw_owned:1;         /* KS_DESC_HW_OWNED */
#endif
};
 809
/*
 * struct ksz_desc_rx_buf - Bitfield view of the receive descriptor
 * buffer/control word.  buf_size corresponds to KS_DESC_BUF_SIZE and
 * end_of_ring to KS_DESC_END_OF_RING.  Field layout is fixed by
 * hardware; do not reorder or resize.
 */
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
        u32 reserved4:6;
        u32 end_of_ring:1;
        u32 reserved3:14;
        u32 buf_size:11;
#else
        u32 buf_size:11;        /* KS_DESC_BUF_SIZE */
        u32 reserved3:14;
        u32 end_of_ring:1;      /* KS_DESC_END_OF_RING */
        u32 reserved4:6;
#endif
};
 823
/*
 * struct ksz_desc_tx_buf - Bitfield view of the transmit descriptor
 * buffer/control word.
 *
 * Field positions mirror the KS_DESC_TX_* / KS_DESC_BUF_SIZE masks
 * above (e.g. intr <-> KS_DESC_TX_INTERRUPT); dest_port begins at
 * KS_DESC_PORT_SHIFT.  Field layout is fixed by hardware; do not
 * reorder or resize.
 */
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
        u32 intr:1;
        u32 first_seg:1;
        u32 last_seg:1;
        u32 csum_gen_ip:1;
        u32 csum_gen_tcp:1;
        u32 csum_gen_udp:1;
        u32 end_of_ring:1;
        u32 reserved4:1;
        u32 dest_port:4;
        u32 reserved3:9;
        u32 buf_size:11;
#else
        u32 buf_size:11;        /* KS_DESC_BUF_SIZE */
        u32 reserved3:9;
        u32 dest_port:4;        /* at KS_DESC_PORT_SHIFT */
        u32 reserved4:1;
        u32 end_of_ring:1;      /* KS_DESC_END_OF_RING */
        u32 csum_gen_udp:1;     /* KS_DESC_TX_CSUM_GEN_UDP */
        u32 csum_gen_tcp:1;     /* KS_DESC_TX_CSUM_GEN_TCP */
        u32 csum_gen_ip:1;      /* KS_DESC_TX_CSUM_GEN_IP */
        u32 last_seg:1;         /* KS_DESC_TX_LAST */
        u32 first_seg:1;        /* KS_DESC_TX_FIRST */
        u32 intr:1;             /* KS_DESC_TX_INTERRUPT */
#endif
};
 851
/* Status word viewed as RX status, TX status, or raw 32-bit data. */
union desc_stat {
	struct ksz_desc_rx_stat rx;
	struct ksz_desc_tx_stat tx;
	u32 data;
};
 857
/* Buffer word viewed as RX buffer, TX buffer, or raw 32-bit data. */
union desc_buf {
	struct ksz_desc_rx_buf rx;
	struct ksz_desc_tx_buf tx;
	u32 data;
};
 863
 864/**
 865 * struct ksz_hw_desc - Hardware descriptor data structure
 866 * @ctrl:       Descriptor control value.
 867 * @buf:        Descriptor buffer value.
 868 * @addr:       Physical address of memory buffer.
 869 * @next:       Pointer to next hardware descriptor.
 870 */
struct ksz_hw_desc {
	union desc_stat ctrl;	/* stored little-endian (see cpu_to_le32 users) */
	union desc_buf buf;	/* stored little-endian */
	u32 addr;		/* DMA address of the data buffer */
	u32 next;		/* DMA address of the next descriptor */
};
 877
 878/**
 879 * struct ksz_sw_desc - Software descriptor data structure
 880 * @ctrl:       Descriptor control value.
 881 * @buf:        Descriptor buffer value.
 882 * @buf_size:   Current buffers size value in hardware descriptor.
 883 */
struct ksz_sw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 buf_size;	/* last buf value pushed to hardware; lets
			 * release_desc() skip redundant writes */
};
 889
 890/**
 891 * struct ksz_dma_buf - OS dependent DMA buffer data structure
 892 * @skb:        Associated socket buffer.
 893 * @dma:        Associated physical DMA address.
 894 * @len:        Actual len used.
 895 */
struct ksz_dma_buf {
	struct sk_buff *skb;	/* socket buffer backing the descriptor */
	dma_addr_t dma;		/* mapped DMA address of the buffer */
	int len;		/* actual length used */
};
 901
 902/**
 903 * struct ksz_desc - Descriptor structure
 904 * @phw:        Hardware descriptor pointer to uncached physical memory.
 905 * @sw:         Cached memory to hold hardware descriptor values for
 906 *              manipulation.
 907 * @dma_buf:    Operating system dependent data structure to hold physical
 908 *              memory buffer allocation information.
 909 */
struct ksz_desc {
	struct ksz_hw_desc *phw;	/* hardware descriptor (uncached memory) */
	struct ksz_sw_desc sw;		/* cached copy for manipulation */
	struct ksz_dma_buf dma_buf;	/* OS buffer bookkeeping */
};
 915
 916#define DMA_BUFFER(desc)  ((struct ksz_dma_buf *)(&(desc)->dma_buf))
 917
 918/**
 919 * struct ksz_desc_info - Descriptor information data structure
 920 * @ring:       First descriptor in the ring.
 921 * @cur:        Current descriptor being manipulated.
 922 * @ring_virt:  First hardware descriptor in the ring.
 923 * @ring_phys:  The physical address of the first descriptor of the ring.
 924 * @size:       Size of hardware descriptor.
 925 * @alloc:      Number of descriptors allocated.
 926 * @avail:      Number of descriptors available for use.
 927 * @last:       Index for last descriptor released to hardware.
 928 * @next:       Index for next descriptor available for use.
 929 * @mask:       Mask for index wrapping.
 930 */
struct ksz_desc_info {
	struct ksz_desc *ring;		/* first software descriptor */
	struct ksz_desc *cur;		/* descriptor being manipulated */
	struct ksz_hw_desc *ring_virt;	/* first hardware descriptor */
	u32 ring_phys;			/* physical address of first descriptor */
	int size;			/* size of one hardware descriptor */
	int alloc;			/* number of descriptors allocated */
	int avail;			/* descriptors still free for use */
	int last;			/* last descriptor released to hardware */
	int next;			/* next descriptor available for use */
	int mask;	/* index wrap mask; ring length is a power of two */
};
 943
 944/*
 945 * KSZ8842 switch definitions
 946 */
 947
/* Table selectors for the switch indirect access control register. */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};
 954
 955#define LEARNED_MAC_TABLE_ENTRIES       1024
 956#define STATIC_MAC_TABLE_ENTRIES        8
 957
 958/**
 959 * struct ksz_mac_table - Static MAC table data structure
 960 * @mac_addr:   MAC address to filter.
 961 * @vid:        VID value.
 962 * @fid:        FID value.
 963 * @ports:      Port membership.
 964 * @override:   Override setting.
 965 * @use_fid:    FID use setting.
 966 * @valid:      Valid setting indicating the entry is being used.
 967 */
struct ksz_mac_table {
	u8 mac_addr[ETH_ALEN];
	u16 vid;
	u8 fid;
	u8 ports;	/* bitmap of member ports */
	u8 override:1;	/* override port receive/transmit settings */
	u8 use_fid:1;	/* FID field is valid */
	u8 valid:1;	/* entry is in use */
};
 977
 978#define VLAN_TABLE_ENTRIES              16
 979
 980/**
 981 * struct ksz_vlan_table - VLAN table data structure
 982 * @vid:        VID value.
 983 * @fid:        FID value.
 984 * @member:     Port membership.
 985 */
struct ksz_vlan_table {
	u16 vid;
	u8 fid;
	u8 member;	/* bitmap of member ports */
};
 991
 992#define DIFFSERV_ENTRIES                64
 993#define PRIO_802_1P_ENTRIES             8
 994#define PRIO_QUEUES                     4
 995
 996#define SWITCH_PORT_NUM                 2
 997#define TOTAL_PORT_NUM                  (SWITCH_PORT_NUM + 1)
 998#define HOST_MASK                       (1 << SWITCH_PORT_NUM)
 999#define PORT_MASK                       7
1000
1001#define MAIN_PORT                       0
1002#define OTHER_PORT                      1
1003#define HOST_PORT                       SWITCH_PORT_NUM
1004
1005#define PORT_COUNTER_NUM                0x20
1006#define TOTAL_PORT_COUNTER_NUM          (PORT_COUNTER_NUM + 2)
1007
1008#define MIB_COUNTER_RX_LO_PRIORITY      0x00
1009#define MIB_COUNTER_RX_HI_PRIORITY      0x01
1010#define MIB_COUNTER_RX_UNDERSIZE        0x02
1011#define MIB_COUNTER_RX_FRAGMENT         0x03
1012#define MIB_COUNTER_RX_OVERSIZE         0x04
1013#define MIB_COUNTER_RX_JABBER           0x05
1014#define MIB_COUNTER_RX_SYMBOL_ERR       0x06
1015#define MIB_COUNTER_RX_CRC_ERR          0x07
1016#define MIB_COUNTER_RX_ALIGNMENT_ERR    0x08
1017#define MIB_COUNTER_RX_CTRL_8808        0x09
1018#define MIB_COUNTER_RX_PAUSE            0x0A
1019#define MIB_COUNTER_RX_BROADCAST        0x0B
1020#define MIB_COUNTER_RX_MULTICAST        0x0C
1021#define MIB_COUNTER_RX_UNICAST          0x0D
1022#define MIB_COUNTER_RX_OCTET_64         0x0E
1023#define MIB_COUNTER_RX_OCTET_65_127     0x0F
1024#define MIB_COUNTER_RX_OCTET_128_255    0x10
1025#define MIB_COUNTER_RX_OCTET_256_511    0x11
1026#define MIB_COUNTER_RX_OCTET_512_1023   0x12
1027#define MIB_COUNTER_RX_OCTET_1024_1522  0x13
1028#define MIB_COUNTER_TX_LO_PRIORITY      0x14
1029#define MIB_COUNTER_TX_HI_PRIORITY      0x15
1030#define MIB_COUNTER_TX_LATE_COLLISION   0x16
1031#define MIB_COUNTER_TX_PAUSE            0x17
1032#define MIB_COUNTER_TX_BROADCAST        0x18
1033#define MIB_COUNTER_TX_MULTICAST        0x19
1034#define MIB_COUNTER_TX_UNICAST          0x1A
1035#define MIB_COUNTER_TX_DEFERRED         0x1B
1036#define MIB_COUNTER_TX_TOTAL_COLLISION  0x1C
1037#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
1038#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
1039#define MIB_COUNTER_TX_MULTI_COLLISION  0x1F
1040
1041#define MIB_COUNTER_RX_DROPPED_PACKET   0x20
1042#define MIB_COUNTER_TX_DROPPED_PACKET   0x21
1043
1044/**
1045 * struct ksz_port_mib - Port MIB data structure
1046 * @cnt_ptr:    Current pointer to MIB counter index.
1047 * @link_down:  Indication the link has just gone down.
1048 * @state:      Connection status of the port.
1049 * @mib_start:  The starting counter index.  Some ports do not start at 0.
1050 * @counter:    64-bit MIB counter value.
1051 * @dropped:    Temporary buffer to remember last read packet dropped values.
1052 *
 * MIB counters need to be read periodically so that counters do not get
1054 * overflowed and give incorrect values.  A right balance is needed to
1055 * satisfy this condition and not waste too much CPU time.
1056 *
1057 * It is pointless to read MIB counters when the port is disconnected.  The
1058 * @state provides the connection status so that MIB counters are read only
1059 * when the port is connected.  The @link_down indicates the port is just
1060 * disconnected so that all MIB counters are read one last time to update the
1061 * information.
1062 */
struct ksz_port_mib {
	u8 cnt_ptr;	/* current MIB counter index being read */
	u8 link_down;	/* link just went down; read counters one last time */
	u8 state;	/* port connection status */
	u8 mib_start;	/* first counter index for this port */

	u64 counter[TOTAL_PORT_COUNTER_NUM];
	u32 dropped[2];	/* last raw RX/TX dropped counts, for delta detection */
};
1072
1073/**
1074 * struct ksz_port_cfg - Port configuration data structure
1075 * @vid:        VID value.
1076 * @member:     Port membership.
1077 * @port_prio:  Port priority.
1078 * @rx_rate:    Receive priority rate.
1079 * @tx_rate:    Transmit priority rate.
1080 * @stp_state:  Current Spanning Tree Protocol state.
1081 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;	/* bitmap of member ports */
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];	/* per-priority-queue receive rate */
	u32 tx_rate[PRIO_QUEUES];	/* per-priority-queue transmit rate */
	int stp_state;	/* current Spanning Tree Protocol state */
};
1090
1091/**
1092 * struct ksz_switch - KSZ8842 switch data structure
1093 * @mac_table:  MAC table entries information.
1094 * @vlan_table: VLAN table entries information.
1095 * @port_cfg:   Port configuration information.
1096 * @diffserv:   DiffServ priority settings.  Possible values from 6-bit of ToS
1097 *              (bit7 ~ bit2) field.
1098 * @p_802_1p:   802.1P priority settings.  Possible values from 3-bit of 802.1p
1099 *              Tag priority field.
1100 * @br_addr:    Bridge address.  Used for STP.
1101 * @other_addr: Other MAC address.  Used for multiple network device mode.
1102 * @broad_per:  Broadcast storm percentage.
1103 * @member:     Current port membership.  Used for STP.
1104 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];

	u8 diffserv[DIFFSERV_ENTRIES];		/* ToS bits 7..2 -> priority */
	u8 p_802_1p[PRIO_802_1P_ENTRIES];	/* 802.1p tag -> priority */

	u8 br_addr[ETH_ALEN];	/* bridge address, used for STP */
	u8 other_addr[ETH_ALEN];	/* for multiple network device mode */

	u8 broad_per;	/* broadcast storm percentage */
	u8 member;	/* current port membership, used for STP */
};
1119
1120#define TX_RATE_UNIT                    10000
1121
1122/**
1123 * struct ksz_port_info - Port information data structure
1124 * @state:      Connection status of the port.
1125 * @tx_rate:    Transmit rate divided by 10000 to get Mbit.
1126 * @duplex:     Duplex mode.
1127 * @advertised: Advertised auto-negotiation setting.  Used to determine link.
1128 * @partner:    Auto-negotiation partner setting.  Used to determine link.
1129 * @port_id:    Port index to access actual hardware register.
1130 * @pdev:       Pointer to OS dependent network device.
1131 */
struct ksz_port_info {
	uint state;	/* connection status */
	uint tx_rate;	/* divide by TX_RATE_UNIT to get Mbit */
	u8 duplex;
	u8 advertised;	/* advertised auto-negotiation settings */
	u8 partner;	/* auto-negotiation partner settings */
	u8 port_id;	/* port index for hardware register access */
	void *pdev;	/* OS dependent network device */
};
1141
1142#define MAX_TX_HELD_SIZE                52000
1143
1144/* Hardware features and bug fixes. */
1145#define LINK_INT_WORKING                (1 << 0)
1146#define SMALL_PACKET_TX_BUG             (1 << 1)
1147#define HALF_DUPLEX_SIGNAL_BUG          (1 << 2)
1148#define RX_HUGE_FRAME                   (1 << 4)
1149#define STP_SUPPORT                     (1 << 8)
1150
1151/* Software overrides. */
1152#define PAUSE_FLOW_CTRL                 (1 << 0)
1153#define FAST_AGING                      (1 << 1)
1154
1155/**
1156 * struct ksz_hw - KSZ884X hardware data structure
1157 * @io:                 Virtual address assigned.
1158 * @ksz_switch:         Pointer to KSZ8842 switch.
1159 * @port_info:          Port information.
1160 * @port_mib:           Port MIB information.
1161 * @dev_count:          Number of network devices this hardware supports.
1162 * @dst_ports:          Destination ports in switch for transmission.
1163 * @id:                 Hardware ID.  Used for display only.
1164 * @mib_cnt:            Number of MIB counters this hardware has.
1165 * @mib_port_cnt:       Number of ports with MIB counters.
1166 * @tx_cfg:             Cached transmit control settings.
1167 * @rx_cfg:             Cached receive control settings.
1168 * @intr_mask:          Current interrupt mask.
 * @intr_set:           Current interrupt set.
1170 * @intr_blocked:       Interrupt blocked.
1171 * @rx_desc_info:       Receive descriptor information.
1172 * @tx_desc_info:       Transmit descriptor information.
1173 * @tx_int_cnt:         Transmit interrupt count.  Used for TX optimization.
1174 * @tx_int_mask:        Transmit interrupt mask.  Used for TX optimization.
1175 * @tx_size:            Transmit data size.  Used for TX optimization.
1176 *                      The maximum is defined by MAX_TX_HELD_SIZE.
1177 * @perm_addr:          Permanent MAC address.
1178 * @override_addr:      Overridden MAC address.
1179 * @address:            Additional MAC address entries.
1180 * @addr_list_size:     Additional MAC address list size.
1181 * @mac_override:       Indication of MAC address overridden.
1182 * @promiscuous:        Counter to keep track of promiscuous mode set.
1183 * @all_multi:          Counter to keep track of all multicast mode set.
1184 * @multi_list:         Multicast address entries.
1185 * @multi_bits:         Cached multicast hash table settings.
1186 * @multi_list_size:    Multicast address list size.
1187 * @enabled:            Indication of hardware enabled.
1188 * @rx_stop:            Indication of receive process stop.
1189 * @reserved2:          none
1190 * @features:           Hardware features to enable.
1191 * @overrides:          Hardware features to override.
1192 * @parent:             Pointer to parent, network device private structure.
1193 */
struct ksz_hw {
	void __iomem *io;	/* mapped register base */

	struct ksz_switch *ksz_switch;
	struct ksz_port_info port_info[SWITCH_PORT_NUM];
	struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
	int dev_count;
	int dst_ports;
	int id;
	int mib_cnt;
	int mib_port_cnt;

	/* Cached copies of hardware control register values. */
	u32 tx_cfg;
	u32 rx_cfg;
	u32 intr_mask;
	u32 intr_set;
	uint intr_blocked;

	struct ksz_desc_info rx_desc_info;
	struct ksz_desc_info tx_desc_info;

	/* Transmit interrupt optimization state. */
	int tx_int_cnt;
	int tx_int_mask;
	int tx_size;

	/* MAC address and receive filtering state. */
	u8 perm_addr[ETH_ALEN];
	u8 override_addr[ETH_ALEN];
	u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
	u8 addr_list_size;
	u8 mac_override;
	u8 promiscuous;
	u8 all_multi;
	u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
	u8 multi_bits[HW_MULTICAST_SIZE];
	u8 multi_list_size;

	u8 enabled;
	u8 rx_stop;
	u8 reserved2[1];

	uint features;	/* hardware features to enable */
	uint overrides;	/* software overrides of hardware behavior */

	void *parent;	/* network device private structure */
};
1239
/* Flow control modes stored in the flow_ctrl field of struct ksz_port. */
enum {
	PHY_NO_FLOW_CTRL,
	PHY_FLOW_CTRL,
	PHY_TX_ONLY,
	PHY_RX_ONLY
};
1246
1247/**
1248 * struct ksz_port - Virtual port data structure
1249 * @duplex:             Duplex mode setting.  1 for half duplex, 2 for full
1250 *                      duplex, and 0 for auto, which normally results in full
1251 *                      duplex.
1252 * @speed:              Speed setting.  10 for 10 Mbit, 100 for 100 Mbit, and
1253 *                      0 for auto, which normally results in 100 Mbit.
1254 * @force_link:         Force link setting.  0 for auto-negotiation, and 1 for
1255 *                      force.
1256 * @flow_ctrl:          Flow control setting.  PHY_NO_FLOW_CTRL for no flow
1257 *                      control, and PHY_FLOW_CTRL for flow control.
1258 *                      PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
1259 *                      Mbit PHY.
1260 * @first_port:         Index of first port this port supports.
1261 * @mib_port_cnt:       Number of ports with MIB counters.
1262 * @port_cnt:           Number of ports this port supports.
1263 * @counter:            Port statistics counter.
1264 * @hw:                 Pointer to hardware structure.
1265 * @linked:             Pointer to port information linked to this port.
1266 */
struct ksz_port {
	u8 duplex;	/* 0 = auto, 1 = half, 2 = full */
	u8 speed;	/* 0 = auto, otherwise 10 or 100 (Mbit) */
	u8 force_link;	/* 0 = auto-negotiation, 1 = force */
	u8 flow_ctrl;	/* one of the PHY_* flow control modes */

	int first_port;	/* index of first port this port supports */
	int mib_port_cnt;
	int port_cnt;
	u64 counter[OID_COUNTER_LAST];

	struct ksz_hw *hw;
	struct ksz_port_info *linked;	/* port info linked to this port */
};
1281
1282/**
1283 * struct ksz_timer_info - Timer information data structure
1284 * @timer:      Kernel timer.
1285 * @cnt:        Running timer counter.
1286 * @max:        Number of times to run timer; -1 for infinity.
1287 * @period:     Timer period in jiffies.
1288 */
struct ksz_timer_info {
	struct timer_list timer;
	int cnt;	/* number of times the timer has run */
	int max;	/* run count limit; -1 means run forever */
	int period;	/* timer period in jiffies */
};
1295
1296/**
1297 * struct ksz_shared_mem - OS dependent shared memory data structure
1298 * @dma_addr:   Physical DMA address allocated.
1299 * @alloc_size: Allocation size.
1300 * @phys:       Actual physical address used.
1301 * @alloc_virt: Virtual address allocated.
1302 * @virt:       Actual virtual address used.
1303 */
struct ksz_shared_mem {
	dma_addr_t dma_addr;	/* physical DMA address allocated */
	uint alloc_size;	/* allocation size */
	uint phys;		/* actual physical address used */
	u8 *alloc_virt;		/* virtual address allocated */
	u8 *virt;		/* actual virtual address used */
};
1311
1312/**
1313 * struct ksz_counter_info - OS dependent counter information data structure
1314 * @counter:    Wait queue to wakeup after counters are read.
1315 * @time:       Next time in jiffies to read counter.
1316 * @read:       Indication of counters read in full or not.
1317 */
struct ksz_counter_info {
	wait_queue_head_t counter;	/* woken after counters are read */
	unsigned long time;		/* next counter read time, in jiffies */
	int read;			/* counters read in full or not */
};
1323
1324/**
1325 * struct dev_info - Network device information data structure
1326 * @dev:                Pointer to network device.
1327 * @pdev:               Pointer to PCI device.
1328 * @hw:                 Hardware structure.
1329 * @desc_pool:          Physical memory used for descriptor pool.
1330 * @hwlock:             Spinlock to prevent hardware from accessing.
1331 * @lock:               Mutex lock to prevent device from accessing.
1332 * @dev_rcv:            Receive process function used.
1333 * @last_skb:           Socket buffer allocated for descriptor rx fragments.
1334 * @skb_index:          Buffer index for receiving fragments.
1335 * @skb_len:            Buffer length for receiving fragments.
1336 * @mib_read:           Workqueue to read MIB counters.
1337 * @mib_timer_info:     Timer to read MIB counters.
1338 * @counter:            Used for MIB reading.
1339 * @mtu:                Current MTU used.  The default is REGULAR_RX_BUF_SIZE;
1340 *                      the maximum is MAX_RX_BUF_SIZE.
1341 * @opened:             Counter to keep track of device open.
1342 * @rx_tasklet:         Receive processing tasklet.
1343 * @tx_tasklet:         Transmit processing tasklet.
1344 * @wol_enable:         Wake-on-LAN enable set by ethtool.
1345 * @wol_support:        Wake-on-LAN support used by ethtool.
1346 * @pme_wait:           Used for KSZ8841 power management.
1347 */
struct dev_info {
	struct net_device *dev;
	struct pci_dev *pdev;

	struct ksz_hw hw;
	struct ksz_shared_mem desc_pool;	/* descriptor pool memory */

	spinlock_t hwlock;	/* guards hardware access */
	struct mutex lock;	/* guards device access */

	int (*dev_rcv)(struct dev_info *);	/* receive process function */

	/* State for receiving frames split across descriptor fragments. */
	struct sk_buff *last_skb;
	int skb_index;
	int skb_len;

	struct work_struct mib_read;
	struct ksz_timer_info mib_timer_info;
	struct ksz_counter_info counter[TOTAL_PORT_NUM];

	int mtu;
	int opened;	/* counter tracking device open */

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;

	int wol_enable;		/* Wake-on-LAN enable set by ethtool */
	int wol_support;	/* Wake-on-LAN support used by ethtool */
	unsigned long pme_wait;	/* used for KSZ8841 power management */
};
1378
1379/**
1380 * struct dev_priv - Network device private data structure
1381 * @adapter:            Adapter device information.
1382 * @port:               Port information.
1383 * @monitor_timer_info: Timer to monitor ports.
1384 * @proc_sem:           Semaphore for proc accessing.
1385 * @id:                 Device ID.
1386 * @mii_if:             MII interface information.
1387 * @advertising:        Temporary variable to store advertised settings.
1388 * @msg_enable:         The message flags controlling driver output.
1389 * @media_state:        The connection status of the device.
1390 * @multicast:          The all multicast state of the device.
1391 * @promiscuous:        The promiscuous state of the device.
1392 */
struct dev_priv {
	struct dev_info *adapter;
	struct ksz_port port;
	struct ksz_timer_info monitor_timer_info;	/* port monitor timer */

	struct semaphore proc_sem;	/* for proc accessing */
	int id;

	struct mii_if_info mii_if;
	u32 advertising;	/* scratch copy of advertised settings */

	u32 msg_enable;	/* message flags controlling driver output */
	int media_state;	/* connection status of the device */
	int multicast;	/* all-multicast state of the device */
	int promiscuous;	/* promiscuous state of the device */
};
1409
1410#define DRV_NAME                "KSZ884X PCI"
1411#define DEVICE_NAME             "KSZ884x PCI"
1412#define DRV_VERSION             "1.0.0"
1413#define DRV_RELDATE             "Feb 8, 2010"
1414
1415static char version[] =
1416        "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
1417
1418static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
1419
1420/*
1421 * Interrupt processing primary routines
1422 */
1423
/* Acknowledge (clear) the given interrupt bits in the status register. */
static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
{
	writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
}
1428
/*
 * Disable all interrupts, remembering the current mask in intr_blocked.
 * The enable register is read back into intr_set so the cached value
 * matches the hardware.
 */
static inline void hw_dis_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = hw->intr_mask;
	writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
}
1435
/* Program the interrupt enable register and cache the value in intr_set. */
static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
{
	hw->intr_set = interrupt;
	writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
}
1441
/* Unblock interrupts and re-enable everything in the current mask. */
static inline void hw_ena_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = 0;
	hw_set_intr(hw, hw->intr_mask);
}
1447
/* Clear @bit in the cached interrupt mask; does not touch the hardware. */
static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
{
	hw->intr_mask &= ~(bit);
}
1452
1453static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
1454{
1455        u32 read_intr;
1456
1457        read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
1458        hw->intr_set = read_intr & ~interrupt;
1459        writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
1460        hw_dis_intr_bit(hw, interrupt);
1461}
1462
1463/**
1464 * hw_turn_on_intr - turn on specified interrupts
1465 * @hw:         The hardware instance.
1466 * @bit:        The interrupt bits to be on.
1467 *
1468 * This routine turns on the specified interrupts in the interrupt mask so that
1469 * those interrupts will be enabled.
1470 */
1471static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
1472{
1473        hw->intr_mask |= bit;
1474
1475        if (!hw->intr_blocked)
1476                hw_set_intr(hw, hw->intr_mask);
1477}
1478
1479static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
1480{
1481        u32 read_intr;
1482
1483        read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
1484        hw->intr_set = read_intr | interrupt;
1485        writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
1486}
1487
1488static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
1489{
1490        *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
1491        *status = *status & hw->intr_set;
1492}
1493
/*
 * Re-enable interrupts if @interrupt (the value previously returned by
 * hw_block_intr) is non-zero, i.e. if the caller actually blocked them.
 */
static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
{
	if (interrupt)
		hw_ena_intr(hw);
}
1499
1500/**
1501 * hw_block_intr - block hardware interrupts
1502 * @hw: The hardware instance.
1503 *
1504 * This function blocks all interrupts of the hardware and returns the current
1505 * interrupt enable mask so that interrupts can be restored later.
1506 *
1507 * Return the current interrupt enable mask.
1508 */
1509static uint hw_block_intr(struct ksz_hw *hw)
1510{
1511        uint interrupt = 0;
1512
1513        if (!hw->intr_blocked) {
1514                hw_dis_intr(hw);
1515                interrupt = hw->intr_blocked;
1516        }
1517        return interrupt;
1518}
1519
1520/*
1521 * Hardware descriptor routines
1522 */
1523
/*
 * Return a descriptor to software ownership.  @status is passed by
 * value; hw_owned occupies the same bit position in the rx and tx
 * views, so clearing it through the rx view works for both.
 */
static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
{
	status.rx.hw_owned = 0;
	desc->phw->ctrl.data = cpu_to_le32(status.data);
}
1529
/*
 * Hand a descriptor to the hardware.  The buffer word is written first
 * (and only when it changed since the last release), and the control
 * word carrying the ownership bit is written last so the hardware never
 * sees ownership set alongside a stale buffer value.
 */
static inline void release_desc(struct ksz_desc *desc)
{
	desc->sw.ctrl.tx.hw_owned = 1;
	if (desc->sw.buf_size != desc->sw.buf.data) {
		desc->sw.buf_size = desc->sw.buf.data;
		desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
	}
	desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
}
1539
1540static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
1541{
1542        *desc = &info->ring[info->last];
1543        info->last++;
1544        info->last &= info->mask;
1545        info->avail--;
1546        (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
1547}
1548
/* Set the DMA address of the receive descriptor's data buffer. */
static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
1553
/* Record the receive buffer length in the cached buffer word only. */
static inline void set_rx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.rx.buf_size = len;
}
1558
1559static inline void get_tx_pkt(struct ksz_desc_info *info,
1560        struct ksz_desc **desc)
1561{
1562        *desc = &info->ring[info->next];
1563        info->next++;
1564        info->next &= info->mask;
1565        info->avail--;
1566        (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
1567}
1568
/* Set the DMA address of the transmit descriptor's data buffer. */
static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
1573
/* Record the transmit buffer length in the cached buffer word only. */
static inline void set_tx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.tx.buf_size = len;
}
1578
1579/* Switch functions */
1580
1581#define TABLE_READ                      0x10
1582#define TABLE_SEL_SHIFT                 2
1583
/* Dummy register read used as a short delay after writing a command. */
#define HW_DELAY(hw, reg)			\
	do {					\
		readw(hw->io + reg);		\
	} while (0)
1588
1589/**
1590 * sw_r_table - read 4 bytes of data from switch table
1591 * @hw:         The hardware instance.
1592 * @table:      The table selector.
1593 * @addr:       The address of the table entry.
1594 * @data:       Buffer to store the read data.
1595 *
1596 * This routine reads 4 bytes of data from the table of the switch.
1597 * Hardware interrupts are disabled to minimize corruption of read data.
1598 */
1599static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
1600{
1601        u16 ctrl_addr;
1602        uint interrupt;
1603
1604        ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
1605
1606        interrupt = hw_block_intr(hw);
1607
1608        writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
1609        HW_DELAY(hw, KS884X_IACR_OFFSET);
1610        *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
1611
1612        hw_restore_intr(hw, interrupt);
1613}
1614
1615/**
1616 * sw_w_table_64 - write 8 bytes of data to the switch table
1617 * @hw:         The hardware instance.
1618 * @table:      The table selector.
1619 * @addr:       The address of the table entry.
1620 * @data_hi:    The high part of data to be written (bit63 ~ bit32).
1621 * @data_lo:    The low part of data to be written (bit31 ~ bit0).
1622 *
1623 * This routine writes 8 bytes of data to the table of the switch.
1624 * Hardware interrupts are disabled to minimize corruption of written data.
1625 */
static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
	u32 data_lo)
{
	u16 ctrl_addr;
	uint interrupt;

	ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;

	interrupt = hw_block_intr(hw);

	/* Load the data registers before issuing the write command. */
	writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
	writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);

	/* Writing the control address triggers the table write. */
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	hw_restore_intr(hw, interrupt);
}
1644
1645/**
1646 * sw_w_sta_mac_table - write to the static MAC table
1647 * @hw:         The hardware instance.
1648 * @addr:       The address of the table entry.
1649 * @mac_addr:   The MAC address.
1650 * @ports:      The port members.
1651 * @override:   The flag to override the port receive/transmit settings.
1652 * @valid:      The flag to indicate entry is valid.
1653 * @use_fid:    The flag to indicate the FID is valid.
1654 * @fid:        The FID value.
1655 *
1656 * This routine writes an entry of the static MAC table of the switch.  It
1657 * calls sw_w_table_64() to write the data.
1658 */
1659static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
1660        u8 ports, int override, int valid, int use_fid, u8 fid)
1661{
1662        u32 data_hi;
1663        u32 data_lo;
1664
1665        data_lo = ((u32) mac_addr[2] << 24) |
1666                ((u32) mac_addr[3] << 16) |
1667                ((u32) mac_addr[4] << 8) | mac_addr[5];
1668        data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
1669        data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
1670
1671        if (override)
1672                data_hi |= STATIC_MAC_TABLE_OVERRIDE;
1673        if (use_fid) {
1674                data_hi |= STATIC_MAC_TABLE_USE_FID;
1675                data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
1676        }
1677        if (valid)
1678                data_hi |= STATIC_MAC_TABLE_VALID;
1679
1680        sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
1681}
1682
1683/**
1684 * sw_r_vlan_table - read from the VLAN table
1685 * @hw:         The hardware instance.
1686 * @addr:       The address of the table entry.
1687 * @vid:        Buffer to store the VID.
 * @fid:        Buffer to store the FID.
1689 * @member:     Buffer to store the port membership.
1690 *
1691 * This function reads an entry of the VLAN table of the switch.  It calls
1692 * sw_r_table() to get the data.
1693 *
1694 * Return 0 if the entry is valid; otherwise -1.
1695 */
1696static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
1697        u8 *member)
1698{
1699        u32 data;
1700
1701        sw_r_table(hw, TABLE_VLAN, addr, &data);
1702        if (data & VLAN_TABLE_VALID) {
1703                *vid = (u16)(data & VLAN_TABLE_VID);
1704                *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
1705                *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
1706                        VLAN_TABLE_MEMBERSHIP_SHIFT);
1707                return 0;
1708        }
1709        return -1;
1710}
1711
1712/**
1713 * port_r_mib_cnt - read MIB counter
1714 * @hw:         The hardware instance.
1715 * @port:       The port index.
1716 * @addr:       The address of the counter.
1717 * @cnt:        Buffer to store the counter.
1718 *
1719 * This routine reads a MIB counter of the port.
1720 * Hardware interrupts are disabled to minimize corruption of read data.
1721 */
static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
{
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int timeout;

	/* Each port's counters occupy a PORT_COUNTER_NUM sized window. */
	ctrl_addr = addr + PORT_COUNTER_NUM * port;

	interrupt = hw_block_intr(hw);

	ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	/*
	 * Poll until the hardware marks the counter value valid.  If the
	 * valid bit never appears within 100 reads, *cnt is left unchanged.
	 */
	for (timeout = 100; timeout > 0; timeout--) {
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		if (data & MIB_COUNTER_VALID) {
			/* Overflow bit means the counter wrapped once. */
			if (data & MIB_COUNTER_OVERFLOW)
				*cnt += MIB_COUNTER_VALUE + 1;
			*cnt += data & MIB_COUNTER_VALUE;
			break;
		}
	}

	hw_restore_intr(hw, interrupt);
}
1750
1751/**
1752 * port_r_mib_pkt - read dropped packet counts
1753 * @hw:         The hardware instance.
1754 * @port:       The port index.
1755 * @last:       last one
1756 * @cnt:        Buffer to store the receive and transmit dropped packet counts.
1757 *
1758 * This routine reads the dropped packet counts of the port.
1759 * Hardware interrupts are disabled to minimize corruption of read data.
1760 */
1761static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
1762{
1763        u32 cur;
1764        u32 data;
1765        u16 ctrl_addr;
1766        uint interrupt;
1767        int index;
1768
1769        index = KS_MIB_PACKET_DROPPED_RX_0 + port;
1770        do {
1771                interrupt = hw_block_intr(hw);
1772
1773                ctrl_addr = (u16) index;
1774                ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
1775                        << 8);
1776                writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
1777                HW_DELAY(hw, KS884X_IACR_OFFSET);
1778                data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
1779
1780                hw_restore_intr(hw, interrupt);
1781
1782                data &= MIB_PACKET_DROPPED;
1783                cur = *last;
1784                if (data != cur) {
1785                        *last = data;
1786                        if (data < cur)
1787                                data += MIB_PACKET_DROPPED + 1;
1788                        data -= cur;
1789                        *cnt += data;
1790                }
1791                ++last;
1792                ++cnt;
1793                index -= KS_MIB_PACKET_DROPPED_TX -
1794                        KS_MIB_PACKET_DROPPED_TX_0 + 1;
1795        } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
1796}
1797
1798/**
1799 * port_r_cnt - read MIB counters periodically
1800 * @hw:         The hardware instance.
1801 * @port:       The port index.
1802 *
1803 * This routine is used to read the counters of the port periodically to avoid
1804 * counter overflow.  The hardware should be acquired first before calling this
1805 * routine.
1806 *
1807 * Return non-zero when not all counters not read.
1808 */
1809static int port_r_cnt(struct ksz_hw *hw, int port)
1810{
1811        struct ksz_port_mib *mib = &hw->port_mib[port];
1812
1813        if (mib->mib_start < PORT_COUNTER_NUM)
1814                while (mib->cnt_ptr < PORT_COUNTER_NUM) {
1815                        port_r_mib_cnt(hw, port, mib->cnt_ptr,
1816                                &mib->counter[mib->cnt_ptr]);
1817                        ++mib->cnt_ptr;
1818                }
1819        if (hw->mib_cnt > PORT_COUNTER_NUM)
1820                port_r_mib_pkt(hw, port, mib->dropped,
1821                        &mib->counter[PORT_COUNTER_NUM]);
1822        mib->cnt_ptr = 0;
1823        return 0;
1824}
1825
1826/**
1827 * port_init_cnt - initialize MIB counter values
1828 * @hw:         The hardware instance.
1829 * @port:       The port index.
1830 *
1831 * This routine is used to initialize all counters to zero if the hardware
1832 * cannot do it after reset.
1833 */
1834static void port_init_cnt(struct ksz_hw *hw, int port)
1835{
1836        struct ksz_port_mib *mib = &hw->port_mib[port];
1837
1838        mib->cnt_ptr = 0;
1839        if (mib->mib_start < PORT_COUNTER_NUM)
1840                do {
1841                        port_r_mib_cnt(hw, port, mib->cnt_ptr,
1842                                &mib->counter[mib->cnt_ptr]);
1843                        ++mib->cnt_ptr;
1844                } while (mib->cnt_ptr < PORT_COUNTER_NUM);
1845        if (hw->mib_cnt > PORT_COUNTER_NUM)
1846                port_r_mib_pkt(hw, port, mib->dropped,
1847                        &mib->counter[PORT_COUNTER_NUM]);
1848        memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
1849        mib->cnt_ptr = 0;
1850}
1851
1852/*
1853 * Port functions
1854 */
1855
1856/**
1857 * port_chk - check port register bits
1858 * @hw:         The hardware instance.
1859 * @port:       The port index.
1860 * @offset:     The offset of the port register.
1861 * @bits:       The data bits to check.
1862 *
1863 * This function checks whether the specified bits of the port register are set
1864 * or not.
1865 *
1866 * Return 0 if the bits are not set.
1867 */
1868static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
1869{
1870        u32 addr;
1871        u16 data;
1872
1873        PORT_CTRL_ADDR(port, addr);
1874        addr += offset;
1875        data = readw(hw->io + addr);
1876        return (data & bits) == bits;
1877}
1878
1879/**
1880 * port_cfg - set port register bits
1881 * @hw:         The hardware instance.
1882 * @port:       The port index.
1883 * @offset:     The offset of the port register.
1884 * @bits:       The data bits to set.
1885 * @set:        The flag indicating whether the bits are to be set or not.
1886 *
1887 * This routine sets or resets the specified bits of the port register.
1888 */
1889static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
1890        int set)
1891{
1892        u32 addr;
1893        u16 data;
1894
1895        PORT_CTRL_ADDR(port, addr);
1896        addr += offset;
1897        data = readw(hw->io + addr);
1898        if (set)
1899                data |= bits;
1900        else
1901                data &= ~bits;
1902        writew(data, hw->io + addr);
1903}
1904
1905/**
1906 * port_chk_shift - check port bit
1907 * @hw:         The hardware instance.
1908 * @port:       The port index.
1909 * @addr:       The offset of the register.
1910 * @shift:      Number of bits to shift.
1911 *
1912 * This function checks whether the specified port is set in the register or
1913 * not.
1914 *
1915 * Return 0 if the port is not set.
1916 */
1917static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
1918{
1919        u16 data;
1920        u16 bit = 1 << port;
1921
1922        data = readw(hw->io + addr);
1923        data >>= shift;
1924        return (data & bit) == bit;
1925}
1926
1927/**
1928 * port_cfg_shift - set port bit
1929 * @hw:         The hardware instance.
1930 * @port:       The port index.
1931 * @addr:       The offset of the register.
1932 * @shift:      Number of bits to shift.
1933 * @set:        The flag indicating whether the port is to be set or not.
1934 *
1935 * This routine sets or resets the specified port in the register.
1936 */
1937static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
1938        int set)
1939{
1940        u16 data;
1941        u16 bits = 1 << port;
1942
1943        data = readw(hw->io + addr);
1944        bits <<= shift;
1945        if (set)
1946                data |= bits;
1947        else
1948                data &= ~bits;
1949        writew(data, hw->io + addr);
1950}
1951
1952/**
1953 * port_r8 - read byte from port register
1954 * @hw:         The hardware instance.
1955 * @port:       The port index.
1956 * @offset:     The offset of the port register.
1957 * @data:       Buffer to store the data.
1958 *
1959 * This routine reads a byte from the port register.
1960 */
1961static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
1962{
1963        u32 addr;
1964
1965        PORT_CTRL_ADDR(port, addr);
1966        addr += offset;
1967        *data = readb(hw->io + addr);
1968}
1969
1970/**
1971 * port_r16 - read word from port register.
1972 * @hw:         The hardware instance.
1973 * @port:       The port index.
1974 * @offset:     The offset of the port register.
1975 * @data:       Buffer to store the data.
1976 *
1977 * This routine reads a word from the port register.
1978 */
1979static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
1980{
1981        u32 addr;
1982
1983        PORT_CTRL_ADDR(port, addr);
1984        addr += offset;
1985        *data = readw(hw->io + addr);
1986}
1987
1988/**
1989 * port_w16 - write word to port register.
1990 * @hw:         The hardware instance.
1991 * @port:       The port index.
1992 * @offset:     The offset of the port register.
1993 * @data:       Data to write.
1994 *
1995 * This routine writes a word to the port register.
1996 */
1997static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
1998{
1999        u32 addr;
2000
2001        PORT_CTRL_ADDR(port, addr);
2002        addr += offset;
2003        writew(data, hw->io + addr);
2004}
2005
2006/**
2007 * sw_chk - check switch register bits
2008 * @hw:         The hardware instance.
2009 * @addr:       The address of the switch register.
2010 * @bits:       The data bits to check.
2011 *
2012 * This function checks whether the specified bits of the switch register are
2013 * set or not.
2014 *
2015 * Return 0 if the bits are not set.
2016 */
2017static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
2018{
2019        u16 data;
2020
2021        data = readw(hw->io + addr);
2022        return (data & bits) == bits;
2023}
2024
2025/**
2026 * sw_cfg - set switch register bits
2027 * @hw:         The hardware instance.
2028 * @addr:       The address of the switch register.
2029 * @bits:       The data bits to set.
2030 * @set:        The flag indicating whether the bits are to be set or not.
2031 *
2032 * This function sets or resets the specified bits of the switch register.
2033 */
2034static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
2035{
2036        u16 data;
2037
2038        data = readw(hw->io + addr);
2039        if (set)
2040                data |= bits;
2041        else
2042                data &= ~bits;
2043        writew(data, hw->io + addr);
2044}
2045
2046/* Bandwidth */
2047
/* Set or clear the broadcast storm protection bit of port @p. */
static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
}
2053
/* Return non-zero if broadcast storm protection is enabled on port @p. */
static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
}
2059
2060/* Driver set switch broadcast storm protection at 10% rate. */
2061#define BROADCAST_STORM_PROTECTION_RATE 10
2062
2063/* 148,800 frames * 67 ms / 100 */
2064#define BROADCAST_STORM_VALUE           9969
2065
2066/**
2067 * sw_cfg_broad_storm - configure broadcast storm threshold
2068 * @hw:         The hardware instance.
2069 * @percent:    Broadcast storm threshold in percent of transmit rate.
2070 *
2071 * This routine configures the broadcast storm threshold of the switch.
2072 */
2073static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
2074{
2075        u16 data;
2076        u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
2077
2078        if (value > BROADCAST_STORM_RATE)
2079                value = BROADCAST_STORM_RATE;
2080
2081        data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2082        data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
2083        data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
2084        writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2085}
2086
2087/**
2088 * sw_get_broad_storm - get broadcast storm threshold
2089 * @hw:         The hardware instance.
2090 * @percent:    Buffer to store the broadcast storm threshold percentage.
2091 *
2092 * This routine retrieves the broadcast storm threshold of the switch.
2093 */
2094static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
2095{
2096        int num;
2097        u16 data;
2098
2099        data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2100        num = (data & BROADCAST_STORM_RATE_HI);
2101        num <<= 8;
2102        num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
2103        num = DIV_ROUND_CLOSEST(num * 100, BROADCAST_STORM_VALUE);
2104        *percent = (u8) num;
2105}
2106
2107/**
2108 * sw_dis_broad_storm - disable broadstorm
2109 * @hw:         The hardware instance.
2110 * @port:       The port index.
2111 *
2112 * This routine disables the broadcast storm limit function of the switch.
2113 */
2114static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
2115{
2116        port_cfg_broad_storm(hw, port, 0);
2117}
2118
2119/**
2120 * sw_ena_broad_storm - enable broadcast storm
2121 * @hw:         The hardware instance.
2122 * @port:       The port index.
2123 *
2124 * This routine enables the broadcast storm limit function of the switch.
2125 */
2126static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
2127{
2128        sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
2129        port_cfg_broad_storm(hw, port, 1);
2130}
2131
2132/**
2133 * sw_init_broad_storm - initialize broadcast storm
2134 * @hw:         The hardware instance.
2135 *
2136 * This routine initializes the broadcast storm limit function of the switch.
2137 */
2138static void sw_init_broad_storm(struct ksz_hw *hw)
2139{
2140        int port;
2141
2142        hw->ksz_switch->broad_per = 1;
2143        sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
2144        for (port = 0; port < TOTAL_PORT_NUM; port++)
2145                sw_dis_broad_storm(hw, port);
2146        sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
2147}
2148
2149/**
2150 * hw_cfg_broad_storm - configure broadcast storm
2151 * @hw:         The hardware instance.
2152 * @percent:    Broadcast storm threshold in percent of transmit rate.
2153 *
2154 * This routine configures the broadcast storm threshold of the switch.
2155 * It is called by user functions.  The hardware should be acquired first.
2156 */
2157static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
2158{
2159        if (percent > 100)
2160                percent = 100;
2161
2162        sw_cfg_broad_storm(hw, percent);
2163        sw_get_broad_storm(hw, &percent);
2164        hw->ksz_switch->broad_per = percent;
2165}
2166
2167/**
2168 * sw_dis_prio_rate - disable switch priority rate
2169 * @hw:         The hardware instance.
2170 * @port:       The port index.
2171 *
2172 * This routine disables the priority rate function of the switch.
2173 */
2174static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
2175{
2176        u32 addr;
2177
2178        PORT_CTRL_ADDR(port, addr);
2179        addr += KS8842_PORT_IN_RATE_OFFSET;
2180        writel(0, hw->io + addr);
2181}
2182
2183/**
2184 * sw_init_prio_rate - initialize switch prioirty rate
2185 * @hw:         The hardware instance.
2186 *
2187 * This routine initializes the priority rate function of the switch.
2188 */
2189static void sw_init_prio_rate(struct ksz_hw *hw)
2190{
2191        int port;
2192        int prio;
2193        struct ksz_switch *sw = hw->ksz_switch;
2194
2195        for (port = 0; port < TOTAL_PORT_NUM; port++) {
2196                for (prio = 0; prio < PRIO_QUEUES; prio++) {
2197                        sw->port_cfg[port].rx_rate[prio] =
2198                        sw->port_cfg[port].tx_rate[prio] = 0;
2199                }
2200                sw_dis_prio_rate(hw, port);
2201        }
2202}
2203
2204/* Communication */
2205
/* Set or clear the back pressure bit of port @p. */
static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
}
2211
/* Set or clear the forced flow control bit of port @p. */
static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
}
2217
/* Return non-zero if back pressure is enabled on port @p. */
static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
}
2223
/* Return non-zero if flow control is forced on port @p. */
static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
}
2229
2230/* Spanning Tree */
2231
/* Enable or disable receive on port @p (spanning tree state control). */
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
}
2237
/* Enable or disable transmit on port @p (spanning tree state control). */
static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
}
2243
/* Turn the switch's fast aging mode on or off. */
static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
{
        sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
}
2248
/* Flush the dynamic MAC table by pulsing fast aging for 1 ms.  Skipped
 * when the FAST_AGING override keeps fast aging permanently enabled.
 */
static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
{
        if (!(hw->overrides & FAST_AGING)) {
                sw_cfg_fast_aging(hw, 1);
                mdelay(1);
                sw_cfg_fast_aging(hw, 0);
        }
}
2257
2258/* VLAN */
2259
/* Enable or disable VLAN tag insertion on egress of port @p. */
static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
}
2265
/* Enable or disable VLAN tag removal on egress of port @p. */
static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
}
2271
/* Return non-zero if VLAN tag insertion is enabled on port @p. */
static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
}
2277
/* Return non-zero if VLAN tag removal is enabled on port @p. */
static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
}
2283
/* Enable or disable discarding of non-VID frames on port @p. */
static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
}
2289
/* Enable or disable ingress VLAN filtering on port @p. */
static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
}
2295
/* Return non-zero if non-VID frames are discarded on port @p. */
static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
}
2301
/* Return non-zero if ingress VLAN filtering is enabled on port @p. */
static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
}
2307
2308/* Mirroring */
2309
/* Mark or unmark port @p as the mirror sniffer (monitoring) port. */
static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
}
2315
/* Enable or disable mirroring of received frames on port @p. */
static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
}
2321
/* Enable or disable mirroring of transmitted frames on port @p. */
static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
}
2327
/* Set or clear the global mirror RX-and-TX mode of the switch. */
static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
{
        sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
}
2332
2333static void sw_init_mirror(struct ksz_hw *hw)
2334{
2335        int port;
2336
2337        for (port = 0; port < TOTAL_PORT_NUM; port++) {
2338                port_cfg_mirror_sniffer(hw, port, 0);
2339                port_cfg_mirror_rx(hw, port, 0);
2340                port_cfg_mirror_tx(hw, port, 0);
2341        }
2342        sw_cfg_mirror_rx_tx(hw, 0);
2343}
2344
/* Enable or disable delivery of unknown unicast frames to default ports. */
static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
{
        sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
                SWITCH_UNK_DEF_PORT_ENABLE, set);
}
2350
/* Return non-zero if unknown unicast delivery to default ports is enabled. */
static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
{
        return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
                SWITCH_UNK_DEF_PORT_ENABLE);
}
2356
/* Add or remove @port from the default-port set for unknown unicast. */
static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
{
        port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
}
2361
/* Return non-zero if @port is in the default-port set for unknown unicast. */
static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
{
        return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
}
2366
2367/* Priority */
2368
/* Enable or disable DiffServ priority classification on port @p. */
static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
}
2374
/* Enable or disable 802.1p priority classification on port @p. */
static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
}
2380
/* Set or clear the user-priority-ceiling (tag replacement) bit of port @p. */
static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
}
2386
/* Enable or disable multiple priority queues on port @p. */
static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
{
        port_cfg(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
}
2392
/* Return non-zero if DiffServ classification is enabled on port @p. */
static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
}
2398
/* Return non-zero if 802.1p classification is enabled on port @p. */
static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
}
2404
/* Return non-zero if the user-priority-ceiling bit is set on port @p. */
static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
}
2410
/* Return non-zero if multiple priority queues are enabled on port @p. */
static inline int port_chk_prio(struct ksz_hw *hw, int p)
{
        return port_chk(hw, p,
                KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
}
2416
2417/**
2418 * sw_dis_diffserv - disable switch DiffServ priority
2419 * @hw:         The hardware instance.
2420 * @port:       The port index.
2421 *
2422 * This routine disables the DiffServ priority function of the switch.
2423 */
2424static void sw_dis_diffserv(struct ksz_hw *hw, int port)
2425{
2426        port_cfg_diffserv(hw, port, 0);
2427}
2428
2429/**
2430 * sw_dis_802_1p - disable switch 802.1p priority
2431 * @hw:         The hardware instance.
2432 * @port:       The port index.
2433 *
2434 * This routine disables the 802.1p priority function of the switch.
2435 */
2436static void sw_dis_802_1p(struct ksz_hw *hw, int port)
2437{
2438        port_cfg_802_1p(hw, port, 0);
2439}
2440
2441/**
2442 * sw_cfg_replace_null_vid -
2443 * @hw:         The hardware instance.
2444 * @set:        The flag to disable or enable.
2445 *
2446 */
2447static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
2448{
2449        sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
2450}
2451
2452/**
2453 * sw_cfg_replace_vid - enable switch 802.10 priority re-mapping
2454 * @hw:         The hardware instance.
2455 * @port:       The port index.
2456 * @set:        The flag to disable or enable.
2457 *
2458 * This routine enables the 802.1p priority re-mapping function of the switch.
2459 * That allows 802.1p priority field to be replaced with the port's default
2460 * tag's priority value if the ingress packet's 802.1p priority has a higher
2461 * priority than port's default tag's priority.
2462 */
2463static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
2464{
2465        port_cfg_replace_vid(hw, port, set);
2466}
2467
2468/**
2469 * sw_cfg_port_based - configure switch port based priority
2470 * @hw:         The hardware instance.
2471 * @port:       The port index.
2472 * @prio:       The priority to set.
2473 *
2474 * This routine configures the port based priority of the switch.
2475 */
2476static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
2477{
2478        u16 data;
2479
2480        if (prio > PORT_BASED_PRIORITY_BASE)
2481                prio = PORT_BASED_PRIORITY_BASE;
2482
2483        hw->ksz_switch->port_cfg[port].port_prio = prio;
2484
2485        port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
2486        data &= ~PORT_BASED_PRIORITY_MASK;
2487        data |= prio << PORT_BASED_PRIORITY_SHIFT;
2488        port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
2489}
2490
2491/**
2492 * sw_dis_multi_queue - disable transmit multiple queues
2493 * @hw:         The hardware instance.
2494 * @port:       The port index.
2495 *
2496 * This routine disables the transmit multiple queues selection of the switch
2497 * port.  Only single transmit queue on the port.
2498 */
2499static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
2500{
2501        port_cfg_prio(hw, port, 0);
2502}
2503
2504/**
2505 * sw_init_prio - initialize switch priority
2506 * @hw:         The hardware instance.
2507 *
2508 * This routine initializes the switch QoS priority functions.
2509 */
2510static void sw_init_prio(struct ksz_hw *hw)
2511{
2512        int port;
2513        int tos;
2514        struct ksz_switch *sw = hw->ksz_switch;
2515
2516        /*
2517         * Init all the 802.1p tag priority value to be assigned to different
2518         * priority queue.
2519         */
2520        sw->p_802_1p[0] = 0;
2521        sw->p_802_1p[1] = 0;
2522        sw->p_802_1p[2] = 1;
2523        sw->p_802_1p[3] = 1;
2524        sw->p_802_1p[4] = 2;
2525        sw->p_802_1p[5] = 2;
2526        sw->p_802_1p[6] = 3;
2527        sw->p_802_1p[7] = 3;
2528
2529        /*
2530         * Init all the DiffServ priority value to be assigned to priority
2531         * queue 0.
2532         */
2533        for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
2534                sw->diffserv[tos] = 0;
2535
2536        /* All QoS functions disabled. */
2537        for (port = 0; port < TOTAL_PORT_NUM; port++) {
2538                sw_dis_multi_queue(hw, port);
2539                sw_dis_diffserv(hw, port);
2540                sw_dis_802_1p(hw, port);
2541                sw_cfg_replace_vid(hw, port, 0);
2542
2543                sw->port_cfg[port].port_prio = 0;
2544                sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
2545        }
2546        sw_cfg_replace_null_vid(hw, 0);
2547}
2548
2549/**
2550 * port_get_def_vid - get port default VID.
2551 * @hw:         The hardware instance.
2552 * @port:       The port index.
2553 * @vid:        Buffer to store the VID.
2554 *
2555 * This routine retrieves the default VID of the port.
2556 */
2557static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
2558{
2559        u32 addr;
2560
2561        PORT_CTRL_ADDR(port, addr);
2562        addr += KS8842_PORT_CTRL_VID_OFFSET;
2563        *vid = readw(hw->io + addr);
2564}
2565
2566/**
2567 * sw_init_vlan - initialize switch VLAN
2568 * @hw:         The hardware instance.
2569 *
2570 * This routine initializes the VLAN function of the switch.
2571 */
2572static void sw_init_vlan(struct ksz_hw *hw)
2573{
2574        int port;
2575        int entry;
2576        struct ksz_switch *sw = hw->ksz_switch;
2577
2578        /* Read 16 VLAN entries from device's VLAN table. */
2579        for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
2580                sw_r_vlan_table(hw, entry,
2581                        &sw->vlan_table[entry].vid,
2582                        &sw->vlan_table[entry].fid,
2583                        &sw->vlan_table[entry].member);
2584        }
2585
2586        for (port = 0; port < TOTAL_PORT_NUM; port++) {
2587                port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
2588                sw->port_cfg[port].member = PORT_MASK;
2589        }
2590}
2591
2592/**
2593 * sw_cfg_port_base_vlan - configure port-based VLAN membership
2594 * @hw:         The hardware instance.
2595 * @port:       The port index.
2596 * @member:     The port-based VLAN membership.
2597 *
2598 * This routine configures the port-based VLAN membership of the port.
2599 */
2600static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
2601{
2602        u32 addr;
2603        u8 data;
2604
2605        PORT_CTRL_ADDR(port, addr);
2606        addr += KS8842_PORT_CTRL_2_OFFSET;
2607
2608        data = readb(hw->io + addr);
2609        data &= ~PORT_VLAN_MEMBERSHIP;
2610        data |= (member & PORT_MASK);
2611        writeb(data, hw->io + addr);
2612
2613        hw->ksz_switch->port_cfg[port].member = member;
2614}
2615
2616/**
2617 * sw_get_addr - get the switch MAC address.
2618 * @hw:         The hardware instance.
2619 * @mac_addr:   Buffer to store the MAC address.
2620 *
2621 * This function retrieves the MAC address of the switch.
2622 */
2623static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
2624{
2625        int i;
2626
2627        for (i = 0; i < 6; i += 2) {
2628                mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
2629                mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
2630        }
2631}
2632
2633/**
2634 * sw_set_addr - configure switch MAC address
2635 * @hw:         The hardware instance.
2636 * @mac_addr:   The MAC address.
2637 *
2638 * This function configures the MAC address of the switch.
2639 */
2640static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
2641{
2642        int i;
2643
2644        for (i = 0; i < 6; i += 2) {
2645                writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
2646                writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
2647        }
2648}
2649
2650/**
2651 * sw_set_global_ctrl - set switch global control
2652 * @hw:         The hardware instance.
2653 *
2654 * This routine sets the global control of the switch function.
2655 */
2656static void sw_set_global_ctrl(struct ksz_hw *hw)
2657{
2658        u16 data;
2659
2660        /* Enable switch MII flow control. */
2661        data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2662        data |= SWITCH_FLOW_CTRL;
2663        writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
2664
2665        data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
2666
2667        /* Enable aggressive back off algorithm in half duplex mode. */
2668        data |= SWITCH_AGGR_BACKOFF;
2669
2670        /* Enable automatic fast aging when link changed detected. */
2671        data |= SWITCH_AGING_ENABLE;
2672        data |= SWITCH_LINK_AUTO_AGING;
2673
2674        if (hw->overrides & FAST_AGING)
2675                data |= SWITCH_FAST_AGING;
2676        else
2677                data &= ~SWITCH_FAST_AGING;
2678        writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
2679
2680        data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
2681
2682        /* Enable no excessive collision drop. */
2683        data |= NO_EXC_COLLISION_DROP;
2684        writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
2685}
2686
/* Spanning tree port states consumed by port_set_stp_state(). */
enum {
        STP_STATE_DISABLED = 0,
        STP_STATE_LISTENING,
        STP_STATE_LEARNING,
        STP_STATE_FORWARDING,
        STP_STATE_BLOCKED,
        STP_STATE_SIMPLE        /* forwarding with learning disabled */
};
2695
2696/**
2697 * port_set_stp_state - configure port spanning tree state
2698 * @hw:         The hardware instance.
2699 * @port:       The port index.
2700 * @state:      The spanning tree state.
2701 *
2702 * This routine configures the spanning tree state of the port.
2703 */
2704static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
2705{
2706        u16 data;
2707
2708        port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
2709        switch (state) {
2710        case STP_STATE_DISABLED:
2711                data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
2712                data |= PORT_LEARN_DISABLE;
2713                break;
2714        case STP_STATE_LISTENING:
2715/*
2716 * No need to turn on transmit because of port direct mode.
2717 * Turning on receive is required if static MAC table is not setup.
2718 */
2719                data &= ~PORT_TX_ENABLE;
2720                data |= PORT_RX_ENABLE;
2721                data |= PORT_LEARN_DISABLE;
2722                break;
2723        case STP_STATE_LEARNING:
2724                data &= ~PORT_TX_ENABLE;
2725                data |= PORT_RX_ENABLE;
2726                data &= ~PORT_LEARN_DISABLE;
2727                break;
2728        case STP_STATE_FORWARDING:
2729                data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
2730                data &= ~PORT_LEARN_DISABLE;
2731                break;
2732        case STP_STATE_BLOCKED:
2733/*
2734 * Need to setup static MAC table with override to keep receiving BPDU
2735 * messages.  See sw_init_stp routine.
2736 */
2737                data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
2738                data |= PORT_LEARN_DISABLE;
2739                break;
2740        case STP_STATE_SIMPLE:
2741                data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
2742                data |= PORT_LEARN_DISABLE;
2743                break;
2744        }
2745        port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
2746        hw->ksz_switch->port_cfg[port].stp_state = state;
2747}
2748
/* Fixed slot assignments in the switch static MAC address table. */
#define STP_ENTRY                       0
#define BROADCAST_ENTRY                 1
#define BRIDGE_ADDR_ENTRY               2
#define IPV6_ADDR_ENTRY                 3
2753
2754/**
2755 * sw_clr_sta_mac_table - clear static MAC table
2756 * @hw:         The hardware instance.
2757 *
2758 * This routine clears the static MAC table.
2759 */
2760static void sw_clr_sta_mac_table(struct ksz_hw *hw)
2761{
2762        struct ksz_mac_table *entry;
2763        int i;
2764
2765        for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
2766                entry = &hw->ksz_switch->mac_table[i];
2767                sw_w_sta_mac_table(hw, i,
2768                        entry->mac_addr, entry->ports,
2769                        entry->override, 0,
2770                        entry->use_fid, entry->fid);
2771        }
2772}
2773
2774/**
2775 * sw_init_stp - initialize switch spanning tree support
2776 * @hw:         The hardware instance.
2777 *
2778 * This routine initializes the spanning tree support of the switch.
2779 */
2780static void sw_init_stp(struct ksz_hw *hw)
2781{
2782        struct ksz_mac_table *entry;
2783
2784        entry = &hw->ksz_switch->mac_table[STP_ENTRY];
2785        entry->mac_addr[0] = 0x01;
2786        entry->mac_addr[1] = 0x80;
2787        entry->mac_addr[2] = 0xC2;
2788        entry->mac_addr[3] = 0x00;
2789        entry->mac_addr[4] = 0x00;
2790        entry->mac_addr[5] = 0x00;
2791        entry->ports = HOST_MASK;
2792        entry->override = 1;
2793        entry->valid = 1;
2794        sw_w_sta_mac_table(hw, STP_ENTRY,
2795                entry->mac_addr, entry->ports,
2796                entry->override, entry->valid,
2797                entry->use_fid, entry->fid);
2798}
2799
2800/**
2801 * sw_block_addr - block certain packets from the host port
2802 * @hw:         The hardware instance.
2803 *
2804 * This routine blocks certain packets from reaching to the host port.
2805 */
2806static void sw_block_addr(struct ksz_hw *hw)
2807{
2808        struct ksz_mac_table *entry;
2809        int i;
2810
2811        for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
2812                entry = &hw->ksz_switch->mac_table[i];
2813                entry->valid = 0;
2814                sw_w_sta_mac_table(hw, i,
2815                        entry->mac_addr, entry->ports,
2816                        entry->override, entry->valid,
2817                        entry->use_fid, entry->fid);
2818        }
2819}
2820
/* Thin accessors for the memory-mapped PHY registers.  @phy is the base
 * register offset of the PHY block for a port, not a MII PHY address.
 */
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
}

static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}

static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}

static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
}

/* Crossover control shares the register accessed by hw_r/w_phy_ctrl. */
static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}

static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}

static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}

static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}
2880
2881/**
2882 * hw_r_phy - read data from PHY register
2883 * @hw:         The hardware instance.
2884 * @port:       Port to read.
2885 * @reg:        PHY register to read.
2886 * @val:        Buffer to store the read data.
2887 *
2888 * This routine reads data from the PHY register.
2889 */
2890static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
2891{
2892        int phy;
2893
2894        phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
2895        *val = readw(hw->io + phy);
2896}
2897
2898/**
2899 * hw_w_phy - write data to PHY register
2900 * @hw:         The hardware instance.
2901 * @port:       Port to write.
2902 * @reg:        PHY register to write.
2903 * @val:        Word data to write.
2904 *
2905 * This routine writes data to the PHY register.
2906 */
2907static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
2908{
2909        int phy;
2910
2911        phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
2912        writew(val, hw->io + phy);
2913}
2914
2915/*
2916 * EEPROM access functions
2917 */
2918
2919#define AT93C_CODE                      0
2920#define AT93C_WR_OFF                    0x00
2921#define AT93C_WR_ALL                    0x10
2922#define AT93C_ER_ALL                    0x20
2923#define AT93C_WR_ON                     0x30
2924
2925#define AT93C_WRITE                     1
2926#define AT93C_READ                      2
2927#define AT93C_ERASE                     3
2928
2929#define EEPROM_DELAY                    4
2930
/* Bit-bang helpers for the EEPROM control register: clear, set, and read
 * the given GPIO bit(s).
 */
static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data &= ~gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data |= gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	return (u8)(data & gpio);
}

/* Generate one serial clock pulse on the EEPROM interface. */
static void eeprom_clk(struct ksz_hw *hw)
{
	raise_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
	drop_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
}
2964
2965static u16 spi_r(struct ksz_hw *hw)
2966{
2967        int i;
2968        u16 temp = 0;
2969
2970        for (i = 15; i >= 0; i--) {
2971                raise_gpio(hw, EEPROM_SERIAL_CLOCK);
2972                udelay(EEPROM_DELAY);
2973
2974                temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
2975
2976                drop_gpio(hw, EEPROM_SERIAL_CLOCK);
2977                udelay(EEPROM_DELAY);
2978        }
2979        return temp;
2980}
2981
2982static void spi_w(struct ksz_hw *hw, u16 data)
2983{
2984        int i;
2985
2986        for (i = 15; i >= 0; i--) {
2987                (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
2988                        drop_gpio(hw, EEPROM_DATA_OUT);
2989                eeprom_clk(hw);
2990        }
2991}
2992
2993static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
2994{
2995        int i;
2996
2997        /* Initial start bit */
2998        raise_gpio(hw, EEPROM_DATA_OUT);
2999        eeprom_clk(hw);
3000
3001        /* AT93C operation */
3002        for (i = 1; i >= 0; i--) {
3003                (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
3004                        drop_gpio(hw, EEPROM_DATA_OUT);
3005                eeprom_clk(hw);
3006        }
3007
3008        /* Address location */
3009        for (i = 5; i >= 0; i--) {
3010                (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
3011                        drop_gpio(hw, EEPROM_DATA_OUT);
3012                eeprom_clk(hw);
3013        }
3014}
3015
/* Word offsets of the configuration data stored in the EEPROM. */
#define EEPROM_DATA_RESERVED            0
#define EEPROM_DATA_MAC_ADDR_0          1
#define EEPROM_DATA_MAC_ADDR_1          2
#define EEPROM_DATA_MAC_ADDR_2          3
#define EEPROM_DATA_SUBSYS_ID           4
#define EEPROM_DATA_SUBSYS_VEN_ID       5
#define EEPROM_DATA_PM_CAP              6

/* User defined EEPROM data */
#define EEPROM_DATA_OTHER_MAC_ADDR      9
3026
3027/**
3028 * eeprom_read - read from AT93C46 EEPROM
3029 * @hw:         The hardware instance.
3030 * @reg:        The register offset.
3031 *
3032 * This function reads a word from the AT93C46 EEPROM.
3033 *
3034 * Return the data value.
3035 */
3036static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
3037{
3038        u16 data;
3039
3040        raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
3041
3042        spi_reg(hw, AT93C_READ, reg);
3043        data = spi_r(hw);
3044
3045        drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
3046
3047        return data;
3048}
3049
3050/**
3051 * eeprom_write - write to AT93C46 EEPROM
3052 * @hw:         The hardware instance.
3053 * @reg:        The register offset.
3054 * @data:       The data value.
3055 *
3056 * This procedure writes a word to the AT93C46 EEPROM.
3057 */
3058static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
3059{
3060        int timeout;
3061
3062        raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
3063
3064        /* Enable write. */
3065        spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
3066        drop_gpio(hw, EEPROM_CHIP_SELECT);
3067        udelay(1);
3068
3069        /* Erase the register. */
3070        raise_gpio(hw, EEPROM_CHIP_SELECT);
3071        spi_reg(hw, AT93C_ERASE, reg);
3072        drop_gpio(hw, EEPROM_CHIP_SELECT);
3073        udelay(1);
3074
3075        /* Check operation complete. */
3076        raise_gpio(hw, EEPROM_CHIP_SELECT);
3077        timeout = 8;
3078        mdelay(2);
3079        do {
3080                mdelay(1);
3081        } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
3082        drop_gpio(hw, EEPROM_CHIP_SELECT);
3083        udelay(1);
3084
3085        /* Write the register. */
3086        raise_gpio(hw, EEPROM_CHIP_SELECT);
3087        spi_reg(hw, AT93C_WRITE, reg);
3088        spi_w(hw, data);
3089        drop_gpio(hw, EEPROM_CHIP_SELECT);
3090        udelay(1);
3091
3092        /* Check operation complete. */
3093        raise_gpio(hw, EEPROM_CHIP_SELECT);
3094        timeout = 8;
3095        mdelay(2);
3096        do {
3097                mdelay(1);
3098        } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
3099        drop_gpio(hw, EEPROM_CHIP_SELECT);
3100        udelay(1);
3101
3102        /* Disable write. */
3103        raise_gpio(hw, EEPROM_CHIP_SELECT);
3104        spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
3105
3106        drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
3107}
3108
3109/*
3110 * Link detection routines
3111 */
3112
3113static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
3114{
3115        ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
3116        switch (port->flow_ctrl) {
3117        case PHY_FLOW_CTRL:
3118                ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
3119                break;
3120        /* Not supported. */
3121        case PHY_TX_ONLY:
3122        case PHY_RX_ONLY:
3123        default:
3124                break;
3125        }
3126        return ctrl;
3127}
3128
3129static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
3130{
3131        u32 rx_cfg;
3132        u32 tx_cfg;
3133
3134        rx_cfg = hw->rx_cfg;
3135        tx_cfg = hw->tx_cfg;
3136        if (rx)
3137                hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
3138        else
3139                hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
3140        if (tx)
3141                hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
3142        else
3143                hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
3144        if (hw->enabled) {
3145                if (rx_cfg != hw->rx_cfg)
3146                        writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
3147                if (tx_cfg != hw->tx_cfg)
3148                        writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
3149        }
3150}
3151
/* Resolve the pause configuration from the local and remote autoneg
 * advertisement words (MII register layout: ADVERTISE_*/LPA_* bits) and
 * apply it.  The decision table follows the standard IEEE 802.3
 * pause-resolution rules.  Does nothing when the user has overridden flow
 * control via PAUSE_FLOW_CTRL.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
	u16 local, u16 remote)
{
	int rx;
	int tx;

	/* User override: leave the forced setting alone. */
	if (hw->overrides & PAUSE_FLOW_CTRL)
		return;

	rx = tx = 0;
	if (port->force_link)
		rx = tx = 1;
	if (remote & LPA_PAUSE_CAP) {
		if (local & ADVERTISE_PAUSE_CAP) {
			/* Both sides support symmetric pause. */
			rx = tx = 1;
		} else if ((remote & LPA_PAUSE_ASYM) &&
			   (local &
			    (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) ==
			   ADVERTISE_PAUSE_ASYM) {
			/* Link partner receives pause; we may send it. */
			tx = 1;
		}
	} else if (remote & LPA_PAUSE_ASYM) {
		/* Link partner only sends pause; we may receive it. */
		if ((local & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
		    == (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
			rx = 1;
	}
	/* In switch mode flow control is handled per port elsewhere. */
	if (!hw->ksz_switch)
		set_flow_ctrl(hw, rx, tx);
}
3181
/* Apply the half-duplex workaround after a link change: chips with
 * HALF_DUPLEX_SIGNAL_BUG must not run transmit flow control in half
 * duplex mode.  @port and @link_status are currently unused but keep the
 * signature uniform with the link-change call site.
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
	struct ksz_port_info *info, u16 link_status)
{
	if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
			!(hw->overrides & PAUSE_FLOW_CTRL)) {
		u32 cfg = hw->tx_cfg;

		/* Disable flow control in the half duplex mode. */
		if (1 == info->duplex)
			hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
		/* Only write the register when the value changed. */
		if (hw->enabled && cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
3196
3197/**
3198 * port_get_link_speed - get current link status
3199 * @port:       The port instance.
3200 *
3201 * This routine reads PHY registers to determine the current link status of the
3202 * switch ports.
3203 */
3204static void port_get_link_speed(struct ksz_port *port)
3205{
3206        uint interrupt;
3207        struct ksz_port_info *info;
3208        struct ksz_port_info *linked = NULL;
3209        struct ksz_hw *hw = port->hw;
3210        u16 data;
3211        u16 status;
3212        u8 local;
3213        u8 remote;
3214        int i;
3215        int p;
3216        int change = 0;
3217
3218        interrupt = hw_block_intr(hw);
3219
3220        for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
3221                info = &hw->port_info[p];
3222                port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
3223                port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
3224
3225                /*
3226                 * Link status is changing all the time even when there is no
3227                 * cable connection!
3228                 */
3229                remote = status & (PORT_AUTO_NEG_COMPLETE |
3230                        PORT_STATUS_LINK_GOOD);
3231                local = (u8) data;
3232
3233                /* No change to status. */
3234                if (local == info->advertised && remote == info->partner)
3235                        continue;
3236
3237                info->advertised = local;
3238                info->partner = remote;
3239                if (status & PORT_STATUS_LINK_GOOD) {
3240
3241                        /* Remember the first linked port. */
3242                        if (!linked)
3243                                linked = info;
3244
3245                        info->tx_rate = 10 * TX_RATE_UNIT;
3246                        if (status & PORT_STATUS_SPEED_100MBIT)
3247                                info->tx_rate = 100 * TX_RATE_UNIT;
3248
3249                        info->duplex = 1;
3250                        if (status & PORT_STATUS_FULL_DUPLEX)
3251                                info->duplex = 2;
3252
3253                        if (media_connected != info->state) {
3254                                hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
3255                                        &data);
3256                                hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
3257                                        &status);
3258                                determine_flow_ctrl(hw, port, data, status);
3259                                if (hw->ksz_switch) {
3260                                        port_cfg_back_pressure(hw, p,
3261                                                (1 == info->duplex));
3262                                }
3263                                change |= 1 << i;
3264                                port_cfg_change(hw, port, info, status);
3265                        }
3266                        info->state = media_connected;
3267                } else {
3268                        if (media_disconnected != info->state) {
3269                                change |= 1 << i;
3270
3271                                /* Indicate the link just goes down. */
3272                                hw->port_mib[p].link_down = 1;
3273                        }
3274                        info->state = media_disconnected;
3275                }
3276                hw->port_mib[p].state = (u8) info->state;
3277        }
3278
3279        if (linked && media_disconnected == port->linked->state)
3280                port->linked = linked;
3281
3282        hw_restore_intr(hw, interrupt);
3283}
3284
/* Iterations to wait for a PHY reset to finish (used elsewhere). */
#define PHY_RESET_TIMEOUT               10

/**
 * port_set_link_speed - set port speed
 * @port:       The port instance.
 *
 * This routine sets the link speed of the switch ports via the
 * autonegotiation advertisement register.  Autonegotiation is only
 * restarted when the desired advertisement differs from the current one
 * on a port whose link is up.
 */
static void port_set_link_speed(struct ksz_port *port)
{
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 cfg;
	u8 status;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/* Remember the current setting only when the link is up, so
		 * a down link always triggers an autoneg restart below.
		 */
		cfg = 0;
		if (status & PORT_STATUS_LINK_GOOD)
			cfg = data;

		data |= PORT_AUTO_NEG_ENABLE;
		data = advertised_flow_ctrl(port, data);

		/* Start from advertising everything, then trim below. */
		data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
			PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;

		/* Check if manual configuration is specified by the user. */
		if (port->speed || port->duplex) {
			if (10 == port->speed)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_100BTX);
			else if (100 == port->speed)
				data &= ~(PORT_AUTO_NEG_10BT_FD |
					PORT_AUTO_NEG_10BT);
			if (1 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_10BT_FD);
			else if (2 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX |
					PORT_AUTO_NEG_10BT);
		}
		if (data != cfg) {
			data |= PORT_AUTO_NEG_RESTART;
			port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
		}
	}
}
3337
3338/**
3339 * port_force_link_speed - force port speed
3340 * @port:       The port instance.
3341 *
3342 * This routine forces the link speed of the switch ports.
3343 */
3344static void port_force_link_speed(struct ksz_port *port)
3345{
3346        struct ksz_hw *hw = port->hw;
3347        u16 data;
3348        int i;
3349        int phy;
3350        int p;
3351
3352        for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
3353                phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
3354                hw_r_phy_ctrl(hw, phy, &data);
3355
3356                data &= ~BMCR_ANENABLE;
3357
3358                if (10 == port->speed)
3359                        data &= ~BMCR_SPEED100;
3360                else if (100 == port->speed)
3361                        data |= BMCR_SPEED100;
3362                if (1 == port->duplex)
3363                        data &= ~BMCR_FULLDPLX;
3364                else if (2 == port->duplex)
3365                        data |= BMCR_FULLDPLX;
3366                hw_w_phy_ctrl(hw, phy, data);
3367        }
3368}
3369
3370static void port_set_power_saving(struct ksz_port *port, int enable)
3371{
3372        struct ksz_hw *hw = port->hw;
3373        int i;
3374        int p;
3375
3376        for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
3377                port_cfg(hw, p,
3378                        KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
3379}
3380
3381/*
3382 * KSZ8841 power management functions
3383 */
3384
3385/**
3386 * hw_chk_wol_pme_status - check PMEN pin
3387 * @hw:         The hardware instance.
3388 *
3389 * This function is used to check PMEN pin is asserted.
3390 *
3391 * Return 1 if PMEN pin is asserted; otherwise, 0.
3392 */
3393static int hw_chk_wol_pme_status(struct ksz_hw *hw)
3394{
3395        struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3396        struct pci_dev *pdev = hw_priv->pdev;
3397        u16 data;
3398
3399        if (!pdev->pm_cap)
3400                return 0;
3401        pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3402        return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
3403}
3404
3405/**
3406 * hw_clr_wol_pme_status - clear PMEN pin
3407 * @hw:         The hardware instance.
3408 *
3409 * This routine is used to clear PME_Status to deassert PMEN pin.
3410 */
3411static void hw_clr_wol_pme_status(struct ksz_hw *hw)
3412{
3413        struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3414        struct pci_dev *pdev = hw_priv->pdev;
3415        u16 data;
3416
3417        if (!pdev->pm_cap)
3418                return;
3419
3420        /* Clear PME_Status to deassert PMEN pin. */
3421        pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3422        data |= PCI_PM_CTRL_PME_STATUS;
3423        pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
3424}
3425
3426/**
3427 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
3428 * @hw:         The hardware instance.
3429 * @set:        The flag indicating whether to enable or disable.
3430 *
3431 * This routine is used to enable or disable Wake-on-LAN.
3432 */
3433static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
3434{
3435        struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3436        struct pci_dev *pdev = hw_priv->pdev;
3437        u16 data;
3438
3439        if (!pdev->pm_cap)
3440                return;
3441        pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
3442        data &= ~PCI_PM_CTRL_STATE_MASK;
3443        if (set)
3444                data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
3445        else
3446                data &= ~PCI_PM_CTRL_PME_ENABLE;
3447        pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
3448}
3449
3450/**
3451 * hw_cfg_wol - configure Wake-on-LAN features
3452 * @hw:         The hardware instance.
3453 * @frame:      The pattern frame bit.
3454 * @set:        The flag indicating whether to enable or disable.
3455 *
3456 * This routine is used to enable or disable certain Wake-on-LAN features.
3457 */
3458static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
3459{
3460        u16 data;
3461
3462        data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
3463        if (set)
3464                data |= frame;
3465        else
3466                data &= ~frame;
3467        writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
3468}
3469
3470/**
3471 * hw_set_wol_frame - program Wake-on-LAN pattern
3472 * @hw:         The hardware instance.
3473 * @i:          The frame index.
3474 * @mask_size:  The size of the mask.
3475 * @mask:       Mask to ignore certain bytes in the pattern.
3476 * @frame_size: The size of the frame.
3477 * @pattern:    The frame data.
3478 *
3479 * This routine is used to program Wake-on-LAN pattern.
3480 */
3481static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
3482        const u8 *mask, uint frame_size, const u8 *pattern)
3483{
3484        int bits;
3485        int from;
3486        int len;
3487        int to;
3488        u32 crc;
3489        u8 data[64];
3490        u8 val = 0;
3491
3492        if (frame_size > mask_size * 8)
3493                frame_size = mask_size * 8;
3494        if (frame_size > 64)
3495                frame_size = 64;
3496
3497        i *= 0x10;
3498        writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
3499        writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
3500
3501        bits = len = from = to = 0;
3502        do {
3503                if (bits) {
3504                        if ((val & 1))
3505                                data[to++] = pattern[from];
3506                        val >>= 1;
3507                        ++from;
3508                        --bits;
3509                } else {
3510                        val = mask[len];
3511                        writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
3512                                + len);
3513                        ++len;
3514                        if (val)
3515                                bits = 8;
3516                        else
3517                                from += 8;
3518                }
3519        } while (from < (int) frame_size);
3520        if (val) {
3521                bits = mask[len - 1];
3522                val <<= (from % 8);
3523                bits &= ~val;
3524                writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
3525                        1);
3526        }
3527        crc = ether_crc(to, data);
3528        writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
3529}
3530
3531/**
3532 * hw_add_wol_arp - add ARP pattern
3533 * @hw:         The hardware instance.
3534 * @ip_addr:    The IPv4 address assigned to the device.
3535 *
3536 * This routine is used to add ARP pattern for waking up the host.
3537 */
3538static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
3539{
3540        static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
3541        u8 pattern[42] = {
3542                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
3543                0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3544                0x08, 0x06,
3545                0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
3546                0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3547                0x00, 0x00, 0x00, 0x00,
3548                0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3549                0x00, 0x00, 0x00, 0x00 };
3550
3551        memcpy(&pattern[38], ip_addr, 4);
3552        hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
3553}
3554
3555/**
3556 * hw_add_wol_bcast - add broadcast pattern
3557 * @hw:         The hardware instance.
3558 *
3559 * This routine is used to add broadcast pattern for waking up the host.
3560 */
3561static void hw_add_wol_bcast(struct ksz_hw *hw)
3562{
3563        static const u8 mask[] = { 0x3F };
3564        static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3565
3566        hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
3567}
3568
3569/**
3570 * hw_add_wol_mcast - add multicast pattern
3571 * @hw:         The hardware instance.
3572 *
3573 * This routine is used to add multicast pattern for waking up the host.
3574 *
3575 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
3576 * by IPv6 ping command.  Note that multicast packets are filtred through the
3577 * multicast hash table, so not all multicast packets can wake up the host.
3578 */
3579static void hw_add_wol_mcast(struct ksz_hw *hw)
3580{
3581        static const u8 mask[] = { 0x3F };
3582        u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
3583
3584        memcpy(&pattern[3], &hw->override_addr[3], 3);
3585        hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
3586}
3587
3588/**
3589 * hw_add_wol_ucast - add unicast pattern
3590 * @hw:         The hardware instance.
3591 *
3592 * This routine is used to add unicast pattern to wakeup the host.
3593 *
3594 * It is assumed the unicast packet is directed to the device, as the hardware
3595 * can only receive them in normal case.
3596 */
3597static void hw_add_wol_ucast(struct ksz_hw *hw)
3598{
3599        static const u8 mask[] = { 0x3F };
3600
3601        hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
3602}
3603
3604/**
3605 * hw_enable_wol - enable Wake-on-LAN
3606 * @hw:         The hardware instance.
3607 * @wol_enable: The Wake-on-LAN settings.
3608 * @net_addr:   The IPv4 address assigned to the device.
3609 *
3610 * This routine is used to enable Wake-on-LAN depending on driver settings.
3611 */
3612static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
3613{
3614        hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
3615        hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
3616        hw_add_wol_ucast(hw);
3617        hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
3618        hw_add_wol_mcast(hw);
3619        hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
3620        hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
3621        hw_add_wol_arp(hw, net_addr);
3622}
3623
3624/**
3625 * hw_init - check driver is correct for the hardware
3626 * @hw:         The hardware instance.
3627 *
3628 * This function checks the hardware is correct for this driver and sets the
3629 * hardware up for proper initialization.
3630 *
3631 * Return number of ports or 0 if not right.
3632 */
3633static int hw_init(struct ksz_hw *hw)
3634{
3635        int rc = 0;
3636        u16 data;
3637        u16 revision;
3638
3639        /* Set bus speed to 125MHz. */
3640        writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
3641
3642        /* Check KSZ884x chip ID. */
3643        data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
3644
3645        revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
3646        data &= KS884X_CHIP_ID_MASK_41;
3647        if (REG_CHIP_ID_41 == data)
3648                rc = 1;
3649        else if (REG_CHIP_ID_42 == data)
3650                rc = 2;
3651        else
3652                return 0;
3653
3654        /* Setup hardware features or bug workarounds. */
3655        if (revision <= 1) {
3656                hw->features |= SMALL_PACKET_TX_BUG;
3657                if (1 == rc)
3658                        hw->features |= HALF_DUPLEX_SIGNAL_BUG;
3659        }
3660        return rc;
3661}
3662
3663/**
3664 * hw_reset - reset the hardware
3665 * @hw:         The hardware instance.
3666 *
3667 * This routine resets the hardware.
3668 */
static void hw_reset(struct ksz_hw *hw)
{
	writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);

	/* Wait for device to reset. */
	mdelay(10);

	/* Write 0 to clear device reset.  The reset bit is evidently not
	 * self-clearing; software must deassert it.
	 */
	writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
}
3679
3680/**
3681 * hw_setup - setup the hardware
3682 * @hw:         The hardware instance.
3683 *
3684 * This routine setup the hardware for proper operation.
3685 */
static void hw_setup(struct ksz_hw *hw)
{
#if SET_DEFAULT_LED
	u16 data;

	/* Change default LED mode. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
	data &= ~LED_MODE;
	data |= SET_DEFAULT_LED;
	writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
#endif

	/* Setup transmit control: pad short frames, append CRC, default
	 * DMA burst size, enable bit set (written to hardware later by
	 * hw_start_tx()).
	 */
	hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);

	/* Setup receive control. */
	hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
	hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;

	/* Hardware cannot handle UDP packet in IP fragments, so UDP
	 * checksum offload (DMA_RX_CSUM_UDP) is deliberately left off.
	 */
	hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	/* Preserve all-multicast/promiscuous modes across a re-setup. */
	if (hw->all_multi)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	if (hw->promiscuous)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
}
3715
3716/**
3717 * hw_setup_intr - setup interrupt mask
3718 * @hw:         The hardware instance.
3719 *
3720 * This routine setup the interrupt mask for proper operation.
3721 */
static void hw_setup_intr(struct ksz_hw *hw)
{
	/* Enable the normal interrupt sources plus receive overrun. */
	hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
}
3726
3727static void ksz_check_desc_num(struct ksz_desc_info *info)
3728{
3729#define MIN_DESC_SHIFT  2
3730
3731        int alloc = info->alloc;
3732        int shift;
3733
3734        shift = 0;
3735        while (!(alloc & 1)) {
3736                shift++;
3737                alloc >>= 1;
3738        }
3739        if (alloc != 1 || shift < MIN_DESC_SHIFT) {
3740                pr_alert("Hardware descriptor numbers not right!\n");
3741                while (alloc) {
3742                        shift++;
3743                        alloc >>= 1;
3744                }
3745                if (shift < MIN_DESC_SHIFT)
3746                        shift = MIN_DESC_SHIFT;
3747                alloc = 1 << shift;
3748                info->alloc = alloc;
3749        }
3750        info->mask = info->alloc - 1;
3751}
3752
/**
 * hw_init_desc - initialize a descriptor ring
 * @desc_info:  Descriptor information structure.
 * @transmit:   Indication that descriptors are for transmit (currently
 *              unused in this routine).
 *
 * This routine links every software descriptor to its hardware descriptor,
 * chains the hardware descriptors into a circular ring by physical address,
 * and resets the software bookkeeping fields.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	/* Point each hardware descriptor at the physical address of the
	 * next one.
	 */
	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		phys += desc_info->size;
		previous = cur++;
		previous->phw->next = cpu_to_le32(phys);
	}
	/* Close the ring: the last descriptor points back to the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	/* All descriptors are available; start at the head of the ring. */
	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;

	desc_info->cur = desc_info->ring;
}
3776
3777/**
3778 * hw_set_desc_base - set descriptor base addresses
3779 * @hw:         The hardware instance.
3780 * @tx_addr:    The transmit descriptor base.
3781 * @rx_addr:    The receive descriptor base.
3782 *
3783 * This routine programs the descriptor base addresses after reset.
3784 */
static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
{
	/* Set base address of Tx/Rx descriptors so the DMA engines know
	 * where their rings start.
	 */
	writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
	writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
}
3791
static void hw_reset_pkts(struct ksz_desc_info *info)
{
	/* Forget all outstanding packets: rewind to the head of the ring
	 * and make every descriptor available again.
	 */
	info->cur = info->ring;
	info->avail = info->alloc;
	info->last = info->next = 0;
}
3798
static inline void hw_resume_rx(struct ksz_hw *hw)
{
	/* Kick the receive DMA engine to resume processing descriptors. */
	writel(DMA_START, hw->io + KS_DMA_RX_START);
}
3803
3804/**
3805 * hw_start_rx - start receiving
3806 * @hw:         The hardware instance.
3807 *
3808 * This routine starts the receive function of the hardware.
3809 */
static void hw_start_rx(struct ksz_hw *hw)
{
	/* Write the receive configuration including DMA_RX_ENABLE. */
	writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	/* Notify when the receive stops. */
	hw->intr_mask |= KS884X_INT_RX_STOPPED;

	writel(DMA_START, hw->io + KS_DMA_RX_START);
	/* Clear any stale "receive stopped" interrupt from before the
	 * engine was (re)started.
	 */
	hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
	/* Non-zero rx_stop records that receive has been started. */
	hw->rx_stop++;

	/* Variable overflows. */
	if (0 == hw->rx_stop)
		hw->rx_stop = 2;
}
3825
3826/**
3827 * hw_stop_rx - stop receiving
3828 * @hw:         The hardware instance.
3829 *
3830 * This routine stops the receive function of the hardware.
3831 */
static void hw_stop_rx(struct ksz_hw *hw)
{
	/* Clear the running marker and quiesce the "receive stopped"
	 * interrupt before actually disabling the DMA engine.
	 */
	hw->rx_stop = 0;
	hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
	writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
}
3838
3839/**
3840 * hw_start_tx - start transmitting
3841 * @hw:         The hardware instance.
3842 *
3843 * This routine starts the transmit function of the hardware.
3844 */
static void hw_start_tx(struct ksz_hw *hw)
{
	/* tx_cfg already carries DMA_TX_ENABLE from hw_setup(). */
	writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
}
3849
3850/**
3851 * hw_stop_tx - stop transmitting
3852 * @hw:         The hardware instance.
3853 *
3854 * This routine stops the transmit function of the hardware.
3855 */
static void hw_stop_tx(struct ksz_hw *hw)
{
	/* Clear only the enable bit; keep the rest of the configuration. */
	writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
}
3860
3861/**
3862 * hw_disable - disable hardware
3863 * @hw:         The hardware instance.
3864 *
3865 * This routine disables the hardware.
3866 */
static void hw_disable(struct ksz_hw *hw)
{
	/* Stop receive before transmit, then mark the MAC as disabled. */
	hw_stop_rx(hw);
	hw_stop_tx(hw);
	hw->enabled = 0;
}
3873
3874/**
3875 * hw_enable - enable hardware
3876 * @hw:         The hardware instance.
3877 *
3878 * This routine enables the hardware.
3879 */
static void hw_enable(struct ksz_hw *hw)
{
	/* Start transmit before receive, then mark the MAC as running. */
	hw_start_tx(hw);
	hw_start_rx(hw);
	hw->enabled = 1;
}
3886
3887/**
3888 * hw_alloc_pkt - allocate enough descriptors for transmission
3889 * @hw:         The hardware instance.
3890 * @length:     The length of the packet.
3891 * @physical:   Number of descriptors required.
3892 *
3893 * This function allocates descriptors for transmission.
3894 *
3895 * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
3896 */
static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
{
	/* Always leave one descriptor free. */
	if (hw->tx_desc_info.avail <= 1)
		return 0;

	/* Allocate a descriptor for transmission and mark it current. */
	get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
	hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;

	/* Keep track of number of transmit descriptors used so far. */
	++hw->tx_int_cnt;
	hw->tx_size += length;

	/* Cannot hold on too much data.  Pushing the count past the mask
	 * makes hw_send_pkt() request a transmit-done interrupt.
	 */
	if (hw->tx_size >= MAX_TX_HELD_SIZE)
		hw->tx_int_cnt = hw->tx_int_mask + 1;

	/* Not enough descriptors for every fragment: tell the caller to
	 * copy the data into a single buffer instead (return value 1).
	 */
	if (physical > hw->tx_desc_info.avail)
		return 1;

	return hw->tx_desc_info.avail;
}
3920
3921/**
3922 * hw_send_pkt - mark packet for transmission
3923 * @hw:         The hardware instance.
3924 *
3925 * This routine marks the packet for transmission in PCI version.
3926 */
static void hw_send_pkt(struct ksz_hw *hw)
{
	struct ksz_desc *cur = hw->tx_desc_info.cur;

	/* Close the segment chain started in hw_alloc_pkt(). */
	cur->sw.buf.tx.last_seg = 1;

	/* Interrupt only after specified number of descriptors used. */
	if (hw->tx_int_cnt > hw->tx_int_mask) {
		cur->sw.buf.tx.intr = 1;
		hw->tx_int_cnt = 0;
		hw->tx_size = 0;
	}

	/* KSZ8842 supports port directed transmission. */
	cur->sw.buf.tx.dest_port = hw->dst_ports;

	/* Hand the descriptor over to the hardware. */
	release_desc(cur);

	/* NOTE(review): writing zero appears sufficient to trigger the
	 * transmit DMA engine -- confirm against the datasheet.
	 */
	writel(0, hw->io + KS_DMA_TX_START);
}
3947
3948static int empty_addr(u8 *addr)
3949{
3950        u32 *addr1 = (u32 *) addr;
3951        u16 *addr2 = (u16 *) &addr[4];
3952
3953        return 0 == *addr1 && 0 == *addr2;
3954}
3955
3956/**
3957 * hw_set_addr - set MAC address
3958 * @hw:         The hardware instance.
3959 *
3960 * This routine programs the MAC address of the hardware when the address is
3961 * overridden.
3962 */
static void hw_set_addr(struct ksz_hw *hw)
{
	int i;

	/* MAC_ADDR_ORDER() maps the byte index to the order in which the
	 * hardware expects the address registers to be written.
	 */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
			hw->io + KS884X_ADDR_0_OFFSET + i);

	/* Keep the switch's MAC address in sync with the host port's. */
	sw_set_addr(hw, hw->override_addr);
}
3973
3974/**
3975 * hw_read_addr - read MAC address
3976 * @hw:         The hardware instance.
3977 *
3978 * This routine retrieves the MAC address of the hardware.
3979 */
static void hw_read_addr(struct ksz_hw *hw)
{
	int i;

	/* Read the permanent address out of the hardware registers. */
	for (i = 0; i < ETH_ALEN; i++)
		hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
			KS884X_ADDR_0_OFFSET + i);

	/* Unless the user supplied an address, use the permanent one. */
	if (!hw->mac_override) {
		memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
		if (empty_addr(hw->override_addr)) {
			/* Nothing programmed in hardware: fall back to the
			 * driver default, made unique per device by adding
			 * the device id to the last byte.
			 */
			memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
			memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
			       ETH_ALEN);
			hw->override_addr[5] += hw->id;
			hw_set_addr(hw);
		}
	}
}
3999
4000static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
4001{
4002        int i;
4003        u32 mac_addr_lo;
4004        u32 mac_addr_hi;
4005
4006        mac_addr_hi = 0;
4007        for (i = 0; i < 2; i++) {
4008                mac_addr_hi <<= 8;
4009                mac_addr_hi |= mac_addr[i];
4010        }
4011        mac_addr_hi |= ADD_ADDR_ENABLE;
4012        mac_addr_lo = 0;
4013        for (i = 2; i < 6; i++) {
4014                mac_addr_lo <<= 8;
4015                mac_addr_lo |= mac_addr[i];
4016        }
4017        index *= ADD_ADDR_INCR;
4018
4019        writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
4020        writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
4021}
4022
/**
 * hw_set_add_addr - re-program all additional address entries
 * @hw:         The hardware instance.
 *
 * This routine writes every cached additional address to the hardware:
 * empty slots are disabled by clearing the high register (which holds the
 * enable bit), occupied slots are enabled.
 */
static void hw_set_add_addr(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
		if (empty_addr(hw->address[i]))
			writel(0, hw->io + ADD_ADDR_INCR * i +
				KS_ADD_ADDR_0_HI);
		else
			hw_ena_add_addr(hw, i, hw->address[i]);
	}
}
4035
4036static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr)
4037{
4038        int i;
4039        int j = ADDITIONAL_ENTRIES;
4040
4041        if (ether_addr_equal(hw->override_addr, mac_addr))
4042                return 0;
4043        for (i = 0; i < hw->addr_list_size; i++) {
4044                if (ether_addr_equal(hw->address[i], mac_addr))
4045                        return 0;
4046                if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
4047                        j = i;
4048        }
4049        if (j < ADDITIONAL_ENTRIES) {
4050                memcpy(hw->address[j], mac_addr, ETH_ALEN);
4051                hw_ena_add_addr(hw, j, hw->address[j]);
4052                return 0;
4053        }
4054        return -1;
4055}
4056
4057static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr)
4058{
4059        int i;
4060
4061        for (i = 0; i < hw->addr_list_size; i++) {
4062                if (ether_addr_equal(hw->address[i], mac_addr)) {
4063                        eth_zero_addr(hw->address[i]);
4064                        writel(0, hw->io + ADD_ADDR_INCR * i +
4065                                KS_ADD_ADDR_0_HI);
4066                        return 0;
4067                }
4068        }
4069        return -1;
4070}
4071
4072/**
4073 * hw_clr_multicast - clear multicast addresses
4074 * @hw:         The hardware instance.
4075 *
4076 * This routine removes all multicast addresses set in the hardware.
4077 */
static void hw_clr_multicast(struct ksz_hw *hw)
{
	int i;

	/* Clear both the cached hash bits and the hardware filter bytes. */
	for (i = 0; i < HW_MULTICAST_SIZE; i++) {
		hw->multi_bits[i] = 0;

		writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
	}
}
4088
4089/**
4090 * hw_set_grp_addr - set multicast addresses
4091 * @hw:         The hardware instance.
4092 *
4093 * This routine programs multicast addresses for the hardware to accept those
4094 * addresses.
4095 */
4096static void hw_set_grp_addr(struct ksz_hw *hw)
4097{
4098        int i;
4099        int index;
4100        int position;
4101        int value;
4102
4103        memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
4104
4105        for (i = 0; i < hw->multi_list_size; i++) {
4106                position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
4107                index = position >> 3;
4108                value = 1 << (position & 7);
4109                hw->multi_bits[index] |= (u8) value;
4110        }
4111
4112        for (i = 0; i < HW_MULTICAST_SIZE; i++)
4113                writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
4114                        i);
4115}
4116
4117/**
4118 * hw_set_multicast - enable or disable all multicast receiving
4119 * @hw:         The hardware instance.
4120 * @multicast:  To turn on or off the all multicast feature.
4121 *
4122 * This routine enables/disables the hardware to accept all multicast packets.
4123 */
4124static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
4125{
4126        /* Stop receiving for reconfiguration. */
4127        hw_stop_rx(hw);
4128
4129        if (multicast)
4130                hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
4131        else
4132                hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
4133
4134        if (hw->enabled)
4135                hw_start_rx(hw);
4136}
4137
4138/**
4139 * hw_set_promiscuous - enable or disable promiscuous receiving
4140 * @hw:         The hardware instance.
4141 * @prom:       To turn on or off the promiscuous feature.
4142 *
4143 * This routine enables/disables the hardware to accept all packets.
4144 */
4145static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
4146{
4147        /* Stop receiving for reconfiguration. */
4148        hw_stop_rx(hw);
4149
4150        if (prom)
4151                hw->rx_cfg |= DMA_RX_PROMISCUOUS;
4152        else
4153                hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
4154
4155        if (hw->enabled)
4156                hw_start_rx(hw);
4157}
4158
4159/**
4160 * sw_enable - enable the switch
4161 * @hw:         The hardware instance.
4162 * @enable:     The flag to enable or disable the switch
4163 *
4164 * This routine is used to enable/disable the switch in KSZ8842.
4165 */
4166static void sw_enable(struct ksz_hw *hw, int enable)
4167{
4168        int port;
4169
4170        for (port = 0; port < SWITCH_PORT_NUM; port++) {
4171                if (hw->dev_count > 1) {
4172                        /* Set port-base vlan membership with host port. */
4173                        sw_cfg_port_base_vlan(hw, port,
4174                                HOST_MASK | (1 << port));
4175                        port_set_stp_state(hw, port, STP_STATE_DISABLED);
4176                } else {
4177                        sw_cfg_port_base_vlan(hw, port, PORT_MASK);
4178                        port_set_stp_state(hw, port, STP_STATE_FORWARDING);
4179                }
4180        }
4181        if (hw->dev_count > 1)
4182                port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
4183        else
4184                port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
4185
4186        if (enable)
4187                enable = KS8842_START;
4188        writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
4189}
4190
4191/**
4192 * sw_setup - setup the switch
4193 * @hw:         The hardware instance.
4194 *
4195 * This routine setup the hardware switch engine for default operation.
4196 */
static void sw_setup(struct ksz_hw *hw)
{
	int port;

	sw_set_global_ctrl(hw);

	/* Enable switch broadcast storm protection at 10% percent rate. */
	sw_init_broad_storm(hw);
	hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
	for (port = 0; port < SWITCH_PORT_NUM; port++)
		sw_ena_broad_storm(hw, port);

	/* Initialize priority, mirroring, rate limiting and VLAN support. */
	sw_init_prio(hw);

	sw_init_mirror(hw);

	sw_init_prio_rate(hw);

	sw_init_vlan(hw);

	if (hw->features & STP_SUPPORT)
		sw_init_stp(hw);
	/* NOTE(review): sw_chk() presumably tests whether the switch
	 * flow-control bits are set; when they are not, remember that
	 * PAUSE flow control is forced -- confirm sw_chk() semantics.
	 */
	if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
			SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
		hw->overrides |= PAUSE_FLOW_CTRL;
	/* Finally start the switch engine. */
	sw_enable(hw, 1);
}
4224
4225/**
4226 * ksz_start_timer - start kernel timer
4227 * @info:       Kernel timer information.
4228 * @time:       The time tick.
4229 *
4230 * This routine starts the kernel timer after the specified time tick.
4231 */
4232static void ksz_start_timer(struct ksz_timer_info *info, int time)
4233{
4234        info->cnt = 0;
4235        info->timer.expires = jiffies + time;
4236        add_timer(&info->timer);
4237
4238        /* infinity */
4239        info->max = -1;
4240}
4241
4242/**
4243 * ksz_stop_timer - stop kernel timer
4244 * @info:       Kernel timer information.
4245 *
4246 * This routine stops the kernel timer.
4247 */
static void ksz_stop_timer(struct ksz_timer_info *info)
{
	/* info->max doubles as the "timer armed" flag (see
	 * ksz_update_timer()).  Clear it first so the callback will not
	 * re-arm the timer while it is being deleted.
	 */
	if (info->max) {
		info->max = 0;
		del_timer_sync(&info->timer);
	}
}
4255
/**
 * ksz_init_timer - initialize a driver kernel timer
 * @info:       Kernel timer information.
 * @period:     Re-arm interval in jiffies used by ksz_update_timer().
 * @function:   The timer callback.
 *
 * This routine prepares the timer without starting it (info->max == 0
 * means "not running").
 */
static void ksz_init_timer(struct ksz_timer_info *info, int period,
	void (*function)(struct timer_list *))
{
	info->max = 0;
	info->period = period;
	timer_setup(&info->timer, function, 0);
}
4263
4264static void ksz_update_timer(struct ksz_timer_info *info)
4265{
4266        ++info->cnt;
4267        if (info->max > 0) {
4268                if (info->cnt < info->max) {
4269                        info->timer.expires = jiffies + info->period;
4270                        add_timer(&info->timer);
4271                } else
4272                        info->max = 0;
4273        } else if (info->max < 0) {
4274                info->timer.expires = jiffies + info->period;
4275                add_timer(&info->timer);
4276        }
4277}
4278
4279/**
4280 * ksz_alloc_soft_desc - allocate software descriptors
4281 * @desc_info:  Descriptor information structure.
4282 * @transmit:   Indication that descriptors are for transmit.
4283 *
4284 * This local function allocates software descriptors for manipulation in
4285 * memory.
4286 *
4287 * Return 0 if successful.
4288 */
static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
{
	/* Note: returns 1 on allocation failure, not a -errno value. */
	desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc),
				  GFP_KERNEL);
	if (!desc_info->ring)
		return 1;
	/* Chain the new software descriptors to the hardware ring. */
	hw_init_desc(desc_info, transmit);
	return 0;
}
4298
4299/**
4300 * ksz_alloc_desc - allocate hardware descriptors
4301 * @adapter:    Adapter information structure.
4302 *
4303 * This local function allocates hardware descriptors for receiving and
4304 * transmitting.
4305 *
4306 * Return 0 if successful.
4307 */
static int ksz_alloc_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;
	int offset;

	/* Allocate memory for RX & TX descriptors.  The extra
	 * DESC_ALIGNMENT bytes leave room for the alignment fixup below.
	 */
	adapter->desc_pool.alloc_size =
		hw->rx_desc_info.size * hw->rx_desc_info.alloc +
		hw->tx_desc_info.size * hw->tx_desc_info.alloc +
		DESC_ALIGNMENT;

	adapter->desc_pool.alloc_virt =
		dma_alloc_coherent(&adapter->pdev->dev,
				   adapter->desc_pool.alloc_size,
				   &adapter->desc_pool.dma_addr, GFP_KERNEL);
	if (adapter->desc_pool.alloc_virt == NULL) {
		adapter->desc_pool.alloc_size = 0;
		return 1;
	}

	/* Align to the next cache line boundary.  The same offset is
	 * applied to the virtual and the DMA address so they stay paired.
	 */
	offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
		(DESC_ALIGNMENT -
		((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
	adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
	adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;

	/* Allocate receive/transmit descriptors.  The receive ring comes
	 * first in the pool, the transmit ring directly after it.
	 */
	hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
		adapter->desc_pool.virt;
	hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
	offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
	hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
		(adapter->desc_pool.virt + offset);
	hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;

	/* NOTE(review): on failure partial allocations remain; presumably
	 * the caller cleans up via ksz_free_mem() -- confirm.
	 */
	if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
		return 1;
	if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
		return 1;

	return 0;
}
4351
4352/**
4353 * free_dma_buf - release DMA buffer resources
4354 * @adapter:    Adapter information structure.
4355 * @dma_buf:    pointer to buf
4356 * @direction:  to or from device
4357 *
4358 * This routine is just a helper function to release the DMA buffer resources.
4359 */
static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
	int direction)
{
	dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len,
			 direction);
	dev_kfree_skb(dma_buf->skb);
	/* Reset so callers can tell the buffer is gone and unmapped. */
	dma_buf->skb = NULL;
	dma_buf->dma = 0;
}
4369
4370/**
4371 * ksz_init_rx_buffers - initialize receive descriptors
4372 * @adapter:    Adapter information structure.
4373 *
4374 * This routine initializes DMA buffers for receiving.
4375 */
static void ksz_init_rx_buffers(struct dev_info *adapter)
{
	int i;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct ksz_hw *hw = &adapter->hw;
	struct ksz_desc_info *info = &hw->rx_desc_info;

	for (i = 0; i < hw->rx_desc_info.alloc; i++) {
		get_rx_pkt(info, &desc);

		dma_buf = DMA_BUFFER(desc);
		/* Release and re-allocate the buffer if the MTU changed. */
		if (dma_buf->skb && dma_buf->len != adapter->mtu)
			free_dma_buf(adapter, dma_buf, DMA_FROM_DEVICE);
		dma_buf->len = adapter->mtu;
		if (!dma_buf->skb)
			dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
		/* Map the buffer for receive DMA if not already mapped. */
		if (dma_buf->skb && !dma_buf->dma)
			dma_buf->dma = dma_map_single(&adapter->pdev->dev,
						skb_tail_pointer(dma_buf->skb),
						dma_buf->len,
						DMA_FROM_DEVICE);

		/* Set descriptor.  NOTE(review): if alloc_skb() failed
		 * above, the descriptor is still released with a zero DMA
		 * address -- confirm the hardware tolerates this.
		 */
		set_rx_buf(desc, dma_buf->dma);
		set_rx_len(desc, dma_buf->len);
		release_desc(desc);
	}
}
4405
4406/**
4407 * ksz_alloc_mem - allocate memory for hardware descriptors
4408 * @adapter:    Adapter information structure.
4409 *
4410 * This function allocates memory for use by hardware descriptors for receiving
4411 * and transmitting.
4412 *
4413 * Return 0 if successful.
4414 */
static int ksz_alloc_mem(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Determine the number of receive and transmit descriptors. */
	hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
	hw->tx_desc_info.alloc = NUM_OF_TX_DESC;

	/* Determine how many descriptors to skip transmit interrupt:
	 * a quarter of the transmit ring, capped at 8.
	 */
	hw->tx_int_cnt = 0;
	hw->tx_int_mask = NUM_OF_TX_DESC / 4;
	if (hw->tx_int_mask > 8)
		hw->tx_int_mask = 8;
	/* Count the highest set bit position... */
	while (hw->tx_int_mask) {
		hw->tx_int_cnt++;
		hw->tx_int_mask >>= 1;
	}
	/* ...so the interval is rounded down to a power of two, stored
	 * minus one as the mask compared against tx_int_cnt in
	 * hw_send_pkt().
	 */
	if (hw->tx_int_cnt) {
		hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
		hw->tx_int_cnt = 0;
	}

	/* Determine the descriptor size: struct ksz_hw_desc rounded up to
	 * a multiple of DESC_ALIGNMENT.
	 */
	hw->rx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	hw->tx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
		pr_alert("Hardware descriptor size not right!\n");
	ksz_check_desc_num(&hw->rx_desc_info);
	ksz_check_desc_num(&hw->tx_desc_info);

	/* Allocate descriptors. */
	if (ksz_alloc_desc(adapter))
		return 1;

	return 0;
}
4455
4456/**
4457 * ksz_free_desc - free software and hardware descriptors
4458 * @adapter:    Adapter information structure.
4459 *
4460 * This local routine frees the software and hardware descriptors allocated by
4461 * ksz_alloc_desc().
4462 */
static void ksz_free_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Reset descriptor ring pointers before releasing the memory
	 * behind them.
	 */
	hw->rx_desc_info.ring_virt = NULL;
	hw->tx_desc_info.ring_virt = NULL;
	hw->rx_desc_info.ring_phys = 0;
	hw->tx_desc_info.ring_phys = 0;

	/* Free the DMA-coherent pool holding both hardware rings. */
	if (adapter->desc_pool.alloc_virt)
		dma_free_coherent(&adapter->pdev->dev,
				  adapter->desc_pool.alloc_size,
				  adapter->desc_pool.alloc_virt,
				  adapter->desc_pool.dma_addr);

	/* Reset resource pool. */
	adapter->desc_pool.alloc_size = 0;
	adapter->desc_pool.alloc_virt = NULL;

	/* Free the software descriptor arrays; kfree(NULL) is a no-op. */
	kfree(hw->rx_desc_info.ring);
	hw->rx_desc_info.ring = NULL;
	kfree(hw->tx_desc_info.ring);
	hw->tx_desc_info.ring = NULL;
}
4489
4490/**
4491 * ksz_free_buffers - free buffers used in the descriptors
4492 * @adapter:    Adapter information structure.
4493 * @desc_info:  Descriptor information structure.
4494 * @direction:  to or from device
4495 *
4496 * This local routine frees buffers used in the DMA buffers.
4497 */
4498static void ksz_free_buffers(struct dev_info *adapter,
4499        struct ksz_desc_info *desc_info, int direction)
4500{
4501        int i;
4502        struct ksz_dma_buf *dma_buf;
4503        struct ksz_desc *desc = desc_info->ring;
4504
4505        for (i = 0; i < desc_info->alloc; i++) {
4506                dma_buf = DMA_BUFFER(desc);
4507                if (dma_buf->skb)
4508                        free_dma_buf(adapter, dma_buf, direction);
4509                desc++;
4510        }
4511}
4512
4513/**
4514 * ksz_free_mem - free all resources used by descriptors
4515 * @adapter:    Adapter information structure.
4516 *
4517 * This local routine frees all the resources allocated by ksz_alloc_mem().
4518 */
static void ksz_free_mem(struct dev_info *adapter)
{
	/* Free transmit buffers. */
	ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE);

	/* Free receive buffers. */
	ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE);

	/* Free descriptors last, after no buffer references them. */
	ksz_free_desc(adapter);
}
4530
4531static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
4532        u64 *counter)
4533{
4534        int i;
4535        int mib;
4536        int port;
4537        struct ksz_port_mib *port_mib;
4538
4539        memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
4540        for (i = 0, port = first; i < cnt; i++, port++) {
4541                port_mib = &hw->port_mib[port];
4542                for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
4543                        counter[mib] += port_mib->counter[mib];
4544        }
4545}
4546
4547/**
4548 * send_packet - send packet
4549 * @skb:        Socket buffer.
4550 * @dev:        Network device.
4551 *
4552 * This routine is used to send a packet out to the network.
4553 */
4554static void send_packet(struct sk_buff *skb, struct net_device *dev)
4555{
4556        struct ksz_desc *desc;
4557        struct ksz_desc *first;
4558        struct dev_priv *priv = netdev_priv(dev);
4559        struct dev_info *hw_priv = priv->adapter;
4560        struct ksz_hw *hw = &hw_priv->hw;
4561        struct ksz_desc_info *info = &hw->tx_desc_info;
4562        struct ksz_dma_buf *dma_buf;
4563        int len;
4564        int last_frag = skb_shinfo(skb)->nr_frags;
4565
4566        /*
4567         * KSZ8842 with multiple device interfaces needs to be told which port
4568         * to send.
4569         */
4570        if (hw->dev_count > 1)
4571                hw->dst_ports = 1 << priv->port.first_port;
4572
4573        /* Hardware will pad the length to 60. */
4574        len = skb->len;
4575
4576        /* Remember the very first descriptor. */
4577        first = info->cur;
4578        desc = first;
4579
4580        dma_buf = DMA_BUFFER(desc);
4581        if (last_frag) {
4582                int frag;
4583                skb_frag_t *this_frag;
4584
4585                dma_buf->len = skb_headlen(skb);
4586
4587                dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
4588                                              dma_buf->len, DMA_TO_DEVICE);
4589                set_tx_buf(desc, dma_buf->dma);
4590                set_tx_len(desc, dma_buf->len);
4591
4592                frag = 0;
4593                do {
4594                        this_frag = &skb_shinfo(skb)->frags[frag];
4595
4596                        /* Get a new descriptor. */
4597                        get_tx_pkt(info, &desc);
4598
4599                        /* Keep track of descriptors used so far. */
4600                        ++hw->tx_int_cnt;
4601
4602                        dma_buf = DMA_BUFFER(desc);
4603                        dma_buf->len = skb_frag_size(this_frag);
4604
4605                        dma_buf->dma = dma_map_single(&hw_priv->pdev->dev,
4606                                                      skb_frag_address(this_frag),
4607                                                      dma_buf->len,
4608                                                      DMA_TO_DEVICE);
4609                        set_tx_buf(desc, dma_buf->dma);
4610                        set_tx_len(desc, dma_buf->len);
4611
4612                        frag++;
4613                        if (frag == last_frag)
4614                                break;
4615
4616                        /* Do not release the last descriptor here. */
4617                        release_desc(desc);
4618                } while (1);
4619
4620                /* current points to the last descriptor. */
4621                info->cur = desc;
4622
4623                /* Release the first descriptor. */
4624                release_desc(first);
4625        } else {
4626                dma_buf->len = len;
4627
4628                dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
4629                                              dma_buf->len, DMA_TO_DEVICE);
4630                set_tx_buf(desc, dma_buf->dma);
4631                set_tx_len(desc, dma_buf->len);
4632        }
4633
4634        if (skb->ip_summed == CHECKSUM_PARTIAL) {
4635                (desc)->sw.buf.tx.csum_gen_tcp = 1;
4636                (desc)->sw.buf.tx.csum_gen_udp = 1;
4637        }
4638
4639        /*
4640         * The last descriptor holds the packet so that it can be returned to
4641         * network subsystem after all descriptors are transmitted.
4642         */
4643        dma_buf->skb = skb;
4644
4645        hw_send_pkt(hw);
4646
4647        /* Update transmit statistics. */
4648        dev->stats.tx_packets++;
4649        dev->stats.tx_bytes += len;
4650}
4651
4652/**
4653 * transmit_cleanup - clean up transmit descriptors
4654 * @hw_priv:    Network device.
4655 * @normal:     break if owned
4656 *
4657 * This routine is called to clean up the transmitted buffers.
4658 */
4659static void transmit_cleanup(struct dev_info *hw_priv, int normal)
4660{
4661        int last;
4662        union desc_stat status;
4663        struct ksz_hw *hw = &hw_priv->hw;
4664        struct ksz_desc_info *info = &hw->tx_desc_info;
4665        struct ksz_desc *desc;
4666        struct ksz_dma_buf *dma_buf;
4667        struct net_device *dev = NULL;
4668
4669        spin_lock_irq(&hw_priv->hwlock);
4670        last = info->last;
4671
4672        while (info->avail < info->alloc) {
4673                /* Get next descriptor which is not hardware owned. */
4674                desc = &info->ring[last];
4675                status.data = le32_to_cpu(desc->phw->ctrl.data);
4676                if (status.tx.hw_owned) {
4677                        if (normal)
4678                                break;
4679                        else
4680                                reset_desc(desc, status);
4681                }
4682
4683                dma_buf = DMA_BUFFER(desc);
4684                dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma,
4685                                 dma_buf->len, DMA_TO_DEVICE);
4686
4687                /* This descriptor contains the last buffer in the packet. */
4688                if (dma_buf->skb) {
4689                        dev = dma_buf->skb->dev;
4690
4691                        /* Release the packet back to network subsystem. */
4692                        dev_kfree_skb_irq(dma_buf->skb);
4693                        dma_buf->skb = NULL;
4694                }
4695
4696                /* Free the transmitted descriptor. */
4697                last++;
4698                last &= info->mask;
4699                info->avail++;
4700        }
4701        info->last = last;
4702        spin_unlock_irq(&hw_priv->hwlock);
4703
4704        /* Notify the network subsystem that the packet has been sent. */
4705        if (dev)
4706                netif_trans_update(dev);
4707}
4708
4709/**
4710 * tx_done - transmit done processing
4711 * @hw_priv:    Network device.
4712 *
4713 * This routine is called when the transmit interrupt is triggered, indicating
4714 * either a packet is sent successfully or there are transmit errors.
4715 */
4716static void tx_done(struct dev_info *hw_priv)
4717{
4718        struct ksz_hw *hw = &hw_priv->hw;
4719        int port;
4720
4721        transmit_cleanup(hw_priv, 1);
4722
4723        for (port = 0; port < hw->dev_count; port++) {
4724                struct net_device *dev = hw->port_info[port].pdev;
4725
4726                if (netif_running(dev) && netif_queue_stopped(dev))
4727                        netif_wake_queue(dev);
4728        }
4729}
4730
/*
 * copy_old_skb - carry metadata from the original skb to its replacement
 * @old:	The buffer being replaced; consumed here.
 * @skb:	The replacement buffer that will actually be transmitted.
 */
static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
{
	skb->dev = old->dev;
	skb->protocol = old->protocol;
	skb->ip_summed = old->ip_summed;
	skb->csum = old->csum;
	skb_set_network_header(skb, ETH_HLEN);

	/* The caller keeps only @skb; release the original buffer. */
	dev_consume_skb_any(old);
}
4741
4742/**
4743 * netdev_tx - send out packet
4744 * @skb:        Socket buffer.
4745 * @dev:        Network device.
4746 *
4747 * This function is used by the upper network layer to send out a packet.
4748 *
4749 * Return 0 if successful; otherwise an error code indicating failure.
4750 */
4751static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
4752{
4753        struct dev_priv *priv = netdev_priv(dev);
4754        struct dev_info *hw_priv = priv->adapter;
4755        struct ksz_hw *hw = &hw_priv->hw;
4756        int left;
4757        int num = 1;
4758        int rc = 0;
4759
4760        if (hw->features & SMALL_PACKET_TX_BUG) {
4761                struct sk_buff *org_skb = skb;
4762
4763                if (skb->len <= 48) {
4764                        if (skb_end_pointer(skb) - skb->data >= 50) {
4765                                memset(&skb->data[skb->len], 0, 50 - skb->len);
4766                                skb->len = 50;
4767                        } else {
4768                                skb = netdev_alloc_skb(dev, 50);
4769                                if (!skb)
4770                                        return NETDEV_TX_BUSY;
4771                                memcpy(skb->data, org_skb->data, org_skb->len);
4772                                memset(&skb->data[org_skb->len], 0,
4773                                        50 - org_skb->len);
4774                                skb->len = 50;
4775                                copy_old_skb(org_skb, skb);
4776                        }
4777                }
4778        }
4779
4780        spin_lock_irq(&hw_priv->hwlock);
4781
4782        num = skb_shinfo(skb)->nr_frags + 1;
4783        left = hw_alloc_pkt(hw, skb->len, num);
4784        if (left) {
4785                if (left < num ||
4786                    (CHECKSUM_PARTIAL == skb->ip_summed &&
4787                     skb->protocol == htons(ETH_P_IPV6))) {
4788                        struct sk_buff *org_skb = skb;
4789
4790                        skb = netdev_alloc_skb(dev, org_skb->len);
4791                        if (!skb) {
4792                                rc = NETDEV_TX_BUSY;
4793                                goto unlock;
4794                        }
4795                        skb_copy_and_csum_dev(org_skb, skb->data);
4796                        org_skb->ip_summed = CHECKSUM_NONE;
4797                        skb->len = org_skb->len;
4798                        copy_old_skb(org_skb, skb);
4799                }
4800                send_packet(skb, dev);
4801                if (left <= num)
4802                        netif_stop_queue(dev);
4803        } else {
4804                /* Stop the transmit queue until packet is allocated. */
4805                netif_stop_queue(dev);
4806                rc = NETDEV_TX_BUSY;
4807        }
4808unlock:
4809        spin_unlock_irq(&hw_priv->hwlock);
4810
4811        return rc;
4812}
4813
4814/**
4815 * netdev_tx_timeout - transmit timeout processing
4816 * @dev:        Network device.
4817 * @txqueue:    index of hanging queue
4818 *
4819 * This routine is called when the transmit timer expires.  That indicates the
4820 * hardware is not running correctly because transmit interrupts are not
4821 * triggered to free up resources so that the transmit routine can continue
4822 * sending out packets.  The hardware is reset to correct the problem.
4823 */
4824static void netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
4825{
4826        static unsigned long last_reset;
4827
4828        struct dev_priv *priv = netdev_priv(dev);
4829        struct dev_info *hw_priv = priv->adapter;
4830        struct ksz_hw *hw = &hw_priv->hw;
4831        int port;
4832
4833        if (hw->dev_count > 1) {
4834                /*
4835                 * Only reset the hardware if time between calls is long
4836                 * enough.
4837                 */
4838                if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
4839                        hw_priv = NULL;
4840        }
4841
4842        last_reset = jiffies;
4843        if (hw_priv) {
4844                hw_dis_intr(hw);
4845                hw_disable(hw);
4846
4847                transmit_cleanup(hw_priv, 0);
4848                hw_reset_pkts(&hw->rx_desc_info);
4849                hw_reset_pkts(&hw->tx_desc_info);
4850                ksz_init_rx_buffers(hw_priv);
4851
4852                hw_reset(hw);
4853
4854                hw_set_desc_base(hw,
4855                        hw->tx_desc_info.ring_phys,
4856                        hw->rx_desc_info.ring_phys);
4857                hw_set_addr(hw);
4858                if (hw->all_multi)
4859                        hw_set_multicast(hw, hw->all_multi);
4860                else if (hw->multi_list_size)
4861                        hw_set_grp_addr(hw);
4862
4863                if (hw->dev_count > 1) {
4864                        hw_set_add_addr(hw);
4865                        for (port = 0; port < SWITCH_PORT_NUM; port++) {
4866                                struct net_device *port_dev;
4867
4868                                port_set_stp_state(hw, port,
4869                                        STP_STATE_DISABLED);
4870
4871                                port_dev = hw->port_info[port].pdev;
4872                                if (netif_running(port_dev))
4873                                        port_set_stp_state(hw, port,
4874                                                STP_STATE_SIMPLE);
4875                        }
4876                }
4877
4878                hw_enable(hw);
4879                hw_ena_intr(hw);
4880        }
4881
4882        netif_trans_update(dev);
4883        netif_wake_queue(dev);
4884}
4885
/*
 * Mark IPv4 TCP frames as checksum-verified, trusting the hardware's
 * receive checksum checking (only called when rx_cfg enables it).
 */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;

	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		/*
		 * NOTE(review): here iph overlays the VLAN header; tot_len
		 * (offset 2 in struct iphdr) appears to alias the tag's
		 * encapsulated-protocol field — confirm this is intentional.
		 */
		protocol = iph->tot_len;
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
4904
4905static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
4906        struct ksz_desc *desc, union desc_stat status)
4907{
4908        int packet_len;
4909        struct dev_priv *priv = netdev_priv(dev);
4910        struct dev_info *hw_priv = priv->adapter;
4911        struct ksz_dma_buf *dma_buf;
4912        struct sk_buff *skb;
4913
4914        /* Received length includes 4-byte CRC. */
4915        packet_len = status.rx.frame_len - 4;
4916
4917        dma_buf = DMA_BUFFER(desc);
4918        dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma,
4919                                packet_len + 4, DMA_FROM_DEVICE);
4920
4921        do {
4922                /* skb->data != skb->head */
4923                skb = netdev_alloc_skb(dev, packet_len + 2);
4924                if (!skb) {
4925                        dev->stats.rx_dropped++;
4926                        return -ENOMEM;
4927                }
4928
4929                /*
4930                 * Align socket buffer in 4-byte boundary for better
4931                 * performance.
4932                 */
4933                skb_reserve(skb, 2);
4934
4935                skb_put_data(skb, dma_buf->skb->data, packet_len);
4936        } while (0);
4937
4938        skb->protocol = eth_type_trans(skb, dev);
4939
4940        if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
4941                csum_verified(skb);
4942
4943        /* Update receive statistics. */
4944        dev->stats.rx_packets++;
4945        dev->stats.rx_bytes += packet_len;
4946
4947        /* Notify upper layer for received packet. */
4948        netif_rx(skb);
4949
4950        return 0;
4951}
4952
4953static int dev_rcv_packets(struct dev_info *hw_priv)
4954{
4955        int next;
4956        union desc_stat status;
4957        struct ksz_hw *hw = &hw_priv->hw;
4958        struct net_device *dev = hw->port_info[0].pdev;
4959        struct ksz_desc_info *info = &hw->rx_desc_info;
4960        int left = info->alloc;
4961        struct ksz_desc *desc;
4962        int received = 0;
4963
4964        next = info->next;
4965        while (left--) {
4966                /* Get next descriptor which is not hardware owned. */
4967                desc = &info->ring[next];
4968                status.data = le32_to_cpu(desc->phw->ctrl.data);
4969                if (status.rx.hw_owned)
4970                        break;
4971
4972                /* Status valid only when last descriptor bit is set. */
4973                if (status.rx.last_desc && status.rx.first_desc) {
4974                        if (rx_proc(dev, hw, desc, status))
4975                                goto release_packet;
4976                        received++;
4977                }
4978
4979release_packet:
4980                release_desc(desc);
4981                next++;
4982                next &= info->mask;
4983        }
4984        info->next = next;
4985
4986        return received;
4987}
4988
/*
 * port_rcv_packets - receive processing for multiple device interfaces
 *
 * Like dev_rcv_packets() but demultiplexes each frame to the net_device
 * of the switch port it arrived on, skipping ports that are not up.
 */
static int port_rcv_packets(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			/* Drop frames for ports that are not running. */
			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			if (rx_proc(dev, hw, desc, status))
				goto release_packet;
			received++;
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
5033
/*
 * dev_rcv_special - receive processing when huge frames are enabled
 *
 * Same flow as port_rcv_packets(), but error reporting is on (DMA_RX_ERROR
 * set in rx_cfg): frames whose only error condition is "too long" are still
 * accepted; any other receive error is counted and the frame dropped.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error.  With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);

				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
5092
5093static void rx_proc_task(struct tasklet_struct *t)
5094{
5095        struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
5096        struct ksz_hw *hw = &hw_priv->hw;
5097
5098        if (!hw->enabled)
5099                return;
5100        if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
5101
5102                /* In case receive process is suspended because of overrun. */
5103                hw_resume_rx(hw);
5104
5105                /* tasklets are interruptible. */
5106                spin_lock_irq(&hw_priv->hwlock);
5107                hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
5108                spin_unlock_irq(&hw_priv->hwlock);
5109        } else {
5110                hw_ack_intr(hw, KS884X_INT_RX);
5111                tasklet_schedule(&hw_priv->rx_tasklet);
5112        }
5113}
5114
/*
 * Transmit tasklet: acknowledge transmit interrupts, reclaim finished
 * descriptors, then re-enable the transmit interrupt.
 */
static void tx_proc_task(struct tasklet_struct *t)
{
	struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
	struct ksz_hw *hw = &hw_priv->hw;

	hw_ack_intr(hw, KS884X_INT_TX_MASK);

	/* Reclaim completed descriptors and wake stopped queues. */
	tx_done(hw_priv);

	/* tasklets are interruptible. */
	spin_lock_irq(&hw_priv->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&hw_priv->hwlock);
}
5129
/*
 * handle_rx_stop - react to the "receive stopped" interrupt
 *
 * rx_stop acts as a small state counter: 0 means a stop was not expected
 * (stop listening for this interrupt), 1 means receive was just started,
 * and >1 means receive should be running and is restarted if still enabled.
 */
static inline void handle_rx_stop(struct ksz_hw *hw)
{
	/* Receive just has been stopped. */
	if (0 == hw->rx_stop)
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
	else if (hw->rx_stop > 1) {
		if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
			/* Receive should be running; kick it again. */
			hw_start_rx(hw);
		} else {
			/* Not enabled; reset the state and stop listening. */
			hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
			hw->rx_stop = 0;
		}
	} else
		/* Receive just has been started. */
		hw->rx_stop++;
}
5146
5147/**
5148 * netdev_intr - interrupt handling
5149 * @irq:        Interrupt number.
5150 * @dev_id:     Network device.
5151 *
5152 * This function is called by upper network layer to signal interrupt.
5153 *
5154 * Return IRQ_HANDLED if interrupt is handled.
5155 */
5156static irqreturn_t netdev_intr(int irq, void *dev_id)
5157{
5158        uint int_enable = 0;
5159        struct net_device *dev = (struct net_device *) dev_id;
5160        struct dev_priv *priv = netdev_priv(dev);
5161        struct dev_info *hw_priv = priv->adapter;
5162        struct ksz_hw *hw = &hw_priv->hw;
5163
5164        spin_lock(&hw_priv->hwlock);
5165
5166        hw_read_intr(hw, &int_enable);
5167
5168        /* Not our interrupt! */
5169        if (!int_enable) {
5170                spin_unlock(&hw_priv->hwlock);
5171                return IRQ_NONE;
5172        }
5173
5174        do {
5175                hw_ack_intr(hw, int_enable);
5176                int_enable &= hw->intr_mask;
5177
5178                if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
5179                        hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
5180                        tasklet_schedule(&hw_priv->tx_tasklet);
5181                }
5182
5183                if (likely(int_enable & KS884X_INT_RX)) {
5184                        hw_dis_intr_bit(hw, KS884X_INT_RX);
5185                        tasklet_schedule(&hw_priv->rx_tasklet);
5186                }
5187
5188                if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
5189                        dev->stats.rx_fifo_errors++;
5190                        hw_resume_rx(hw);
5191                }
5192
5193                if (unlikely(int_enable & KS884X_INT_PHY)) {
5194                        struct ksz_port *port = &priv->port;
5195
5196                        hw->features |= LINK_INT_WORKING;
5197                        port_get_link_speed(port);
5198                }
5199
5200                if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
5201                        handle_rx_stop(hw);
5202                        break;
5203                }
5204
5205                if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
5206                        u32 data;
5207
5208                        hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
5209                        pr_info("Tx stopped\n");
5210                        data = readl(hw->io + KS_DMA_TX_CTRL);
5211                        if (!(data & DMA_TX_ENABLE))
5212                                pr_info("Tx disabled\n");
5213                        break;
5214                }
5215        } while (0);
5216
5217        hw_ena_intr(hw);
5218
5219        spin_unlock(&hw_priv->hwlock);
5220
5221        return IRQ_HANDLED;
5222}
5223
5224/*
5225 * Linux network device functions
5226 */
5227
5228
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt routine with chip interrupts masked. */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	/* Mask chip interrupts; netdev_intr() re-enables them on exit. */
	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
#endif
5239
5240static void bridge_change(struct ksz_hw *hw)
5241{
5242        int port;
5243        u8  member;
5244        struct ksz_switch *sw = hw->ksz_switch;
5245
5246        /* No ports in forwarding state. */
5247        if (!sw->member) {
5248                port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
5249                sw_block_addr(hw);
5250        }
5251        for (port = 0; port < SWITCH_PORT_NUM; port++) {
5252                if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
5253                        member = HOST_MASK | sw->member;
5254                else
5255                        member = HOST_MASK | (1 << port);
5256                if (member != sw->port_cfg[port].member)
5257                        sw_cfg_port_base_vlan(hw, port, member);
5258        }
5259}
5260
5261/**
5262 * netdev_close - close network device
5263 * @dev:        Network device.
5264 *
5265 * This function process the close operation of network device.  This is caused
5266 * by the user command "ifconfig ethX down."
5267 *
5268 * Return 0 if successful; otherwise an error code indicating failure.
5269 */
5270static int netdev_close(struct net_device *dev)
5271{
5272        struct dev_priv *priv = netdev_priv(dev);
5273        struct dev_info *hw_priv = priv->adapter;
5274        struct ksz_port *port = &priv->port;
5275        struct ksz_hw *hw = &hw_priv->hw;
5276        int pi;
5277
5278        netif_stop_queue(dev);
5279
5280        ksz_stop_timer(&priv->monitor_timer_info);
5281
5282        /* Need to shut the port manually in multiple device interfaces mode. */
5283        if (hw->dev_count > 1) {
5284                port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
5285
5286                /* Port is closed.  Need to change bridge setting. */
5287                if (hw->features & STP_SUPPORT) {
5288                        pi = 1 << port->first_port;
5289                        if (hw->ksz_switch->member & pi) {
5290                                hw->ksz_switch->member &= ~pi;
5291                                bridge_change(hw);
5292                        }
5293                }
5294        }
5295        if (port->first_port > 0)
5296                hw_del_addr(hw, dev->dev_addr);
5297        if (!hw_priv->wol_enable)
5298                port_set_power_saving(port, true);
5299
5300        if (priv->multicast)
5301                --hw->all_multi;
5302        if (priv->promiscuous)
5303                --hw->promiscuous;
5304
5305        hw_priv->opened--;
5306        if (!(hw_priv->opened)) {
5307                ksz_stop_timer(&hw_priv->mib_timer_info);
5308                flush_work(&hw_priv->mib_read);
5309
5310                hw_dis_intr(hw);
5311                hw_disable(hw);
5312                hw_clr_multicast(hw);
5313
5314                /* Delay for receive task to stop scheduling itself. */
5315                msleep(2000 / HZ);
5316
5317                tasklet_kill(&hw_priv->rx_tasklet);
5318                tasklet_kill(&hw_priv->tx_tasklet);
5319                free_irq(dev->irq, hw_priv->dev);
5320
5321                transmit_cleanup(hw_priv, 0);
5322                hw_reset_pkts(&hw->rx_desc_info);
5323                hw_reset_pkts(&hw->tx_desc_info);
5324
5325                /* Clean out static MAC table when the switch is shutdown. */
5326                if (hw->features & STP_SUPPORT)
5327                        sw_clr_sta_mac_table(hw);
5328        }
5329
5330        return 0;
5331}
5332
5333static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
5334{
5335        if (hw->ksz_switch) {
5336                u32 data;
5337
5338                data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
5339                if (hw->features & RX_HUGE_FRAME)
5340                        data |= SWITCH_HUGE_PACKET;
5341                else
5342                        data &= ~SWITCH_HUGE_PACKET;
5343                writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
5344        }
5345        if (hw->features & RX_HUGE_FRAME) {
5346                hw->rx_cfg |= DMA_RX_ERROR;
5347                hw_priv->dev_rcv = dev_rcv_special;
5348        } else {
5349                hw->rx_cfg &= ~DMA_RX_ERROR;
5350                if (hw->dev_count > 1)
5351                        hw_priv->dev_rcv = port_rcv_packets;
5352                else
5353                        hw_priv->dev_rcv = dev_rcv_packets;
5354        }
5355}
5356
5357static int prepare_hardware(struct net_device *dev)
5358{
5359        struct dev_priv *priv = netdev_priv(dev);
5360        struct dev_info *hw_priv = priv->adapter;
5361        struct ksz_hw *hw = &hw_priv->hw;
5362        int rc = 0;
5363
5364        /* Remember the network device that requests interrupts. */
5365        hw_priv->dev = dev;
5366        rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
5367        if (rc)
5368                return rc;
5369        tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
5370        tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);
5371
5372        hw->promiscuous = 0;
5373        hw->all_multi = 0;
5374        hw->multi_list_size = 0;
5375
5376        hw_reset(hw);
5377
5378        hw_set_desc_base(hw,
5379                hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
5380        hw_set_addr(hw);
5381        hw_cfg_huge_frame(hw_priv, hw);
5382        ksz_init_rx_buffers(hw_priv);
5383        return 0;
5384}
5385
5386static void set_media_state(struct net_device *dev, int media_state)
5387{
5388        struct dev_priv *priv = netdev_priv(dev);
5389
5390        if (media_state == priv->media_state)
5391                netif_carrier_on(dev);
5392        else
5393                netif_carrier_off(dev);
5394        netif_info(priv, link, dev, "link %s\n",
5395                   media_state == priv->media_state ? "on" : "off");
5396}
5397
5398/**
5399 * netdev_open - open network device
5400 * @dev:        Network device.
5401 *
5402 * This function process the open operation of network device.  This is caused
5403 * by the user command "ifconfig ethX up."
5404 *
5405 * Return 0 if successful; otherwise an error code indicating failure.
5406 */
5407static int netdev_open(struct net_device *dev)
5408{
5409        struct dev_priv *priv = netdev_priv(dev);
5410        struct dev_info *hw_priv = priv->adapter;
5411        struct ksz_hw *hw = &hw_priv->hw;
5412        struct ksz_port *port = &priv->port;
5413        unsigned long next_jiffies;
5414        int i;
5415        int p;
5416        int rc = 0;
5417
5418        next_jiffies = jiffies + HZ * 2;
5419        priv->multicast = 0;
5420        priv->promiscuous = 0;
5421
5422        /* Reset device statistics. */
5423        memset(&dev->stats, 0, sizeof(struct net_device_stats));
5424        memset((void *) port->counter, 0,
5425                (sizeof(u64) * OID_COUNTER_LAST));
5426
5427        if (!(hw_priv->opened)) {
5428                rc = prepare_hardware(dev);
5429                if (rc)
5430                        return rc;
5431                for (i = 0; i < hw->mib_port_cnt; i++) {
5432                        next_jiffies += HZ * 1;
5433                        hw_priv->counter[i].time = next_jiffies;
5434                        hw->port_mib[i].state = media_disconnected;
5435                        port_init_cnt(hw, i);
5436                }
5437                if (hw->ksz_switch)
5438                        hw->port_mib[HOST_PORT].state = media_connected;
5439                else {
5440                        hw_add_wol_bcast(hw);
5441                        hw_cfg_wol_pme(hw, 0);
5442                        hw_clr_wol_pme_status(&hw_priv->hw);
5443                }
5444        }
5445        port_set_power_saving(port, false);
5446
5447        for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
5448                /*
5449                 * Initialize to invalid value so that link detection
5450                 * is done.
5451                 */
5452                hw->port_info[p].partner = 0xFF;
5453                hw->port_info[p].state = media_disconnected;
5454        }
5455
5456        /* Need to open the port in multiple device interfaces mode. */
5457        if (hw->dev_count > 1) {
5458                port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
5459                if (port->first_port > 0)
5460                        hw_add_addr(hw, dev->dev_addr);
5461        }
5462
5463        port_get_link_speed(port);
5464        if (port->force_link)
5465                port_force_link_speed(port);
5466        else
5467                port_set_link_speed(port);
5468
5469        if (!(hw_priv->opened)) {
5470                hw_setup_intr(hw);
5471                hw_enable(hw);
5472                hw_ena_intr(hw);
5473
5474                if (hw->mib_port_cnt)
5475                        ksz_start_timer(&hw_priv->mib_timer_info,
5476                                hw_priv->mib_timer_info.period);
5477        }
5478
5479        hw_priv->opened++;
5480
5481        ksz_start_timer(&priv->monitor_timer_info,
5482                priv->monitor_timer_info.period);
5483
5484        priv->media_state = port->linked->state;
5485
5486        set_media_state(dev, media_connected);
5487        netif_start_queue(dev);
5488
5489        return 0;
5490}
5491
5492/* RX errors = rx_errors */
5493/* RX dropped = rx_dropped */
5494/* RX overruns = rx_fifo_errors */
5495/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
5496/* TX errors = tx_errors */
5497/* TX dropped = tx_dropped */
5498/* TX overruns = tx_fifo_errors */
5499/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
5500/* collisions = collisions */
5501
5502/**
5503 * netdev_query_statistics - query network device statistics
5504 * @dev:        Network device.
5505 *
5506 * This function returns the statistics of the network device.  The device
5507 * needs not be opened.
5508 *
5509 * Return network device statistics.
5510 */
5511static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
5512{
5513        struct dev_priv *priv = netdev_priv(dev);
5514        struct ksz_port *port = &priv->port;
5515        struct ksz_hw *hw = &priv->adapter->hw;
5516        struct ksz_port_mib *mib;
5517        int i;
5518        int p;
5519
5520        dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
5521        dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
5522
5523        /* Reset to zero to add count later. */
5524        dev->stats.multicast = 0;
5525        dev->stats.collisions = 0;
5526        dev->stats.rx_length_errors = 0;
5527        dev->stats.rx_crc_errors = 0;
5528        dev->stats.rx_frame_errors = 0;
5529        dev->stats.tx_window_errors = 0;
5530
5531        for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
5532                mib = &hw->port_mib[p];
5533
5534                dev->stats.multicast += (unsigned long)
5535                        mib->counter[MIB_COUNTER_RX_MULTICAST];
5536
5537                dev->stats.collisions += (unsigned long)
5538                        mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
5539
5540                dev->stats.rx_length_errors += (unsigned long)(
5541                        mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
5542                        mib->counter[MIB_COUNTER_RX_FRAGMENT] +
5543                        mib->counter[MIB_COUNTER_RX_OVERSIZE] +
5544                        mib->counter[MIB_COUNTER_RX_JABBER]);
5545                dev->stats.rx_crc_errors += (unsigned long)
5546                        mib->counter[MIB_COUNTER_RX_CRC_ERR];
5547                dev->stats.rx_frame_errors += (unsigned long)(
5548                        mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
5549                        mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
5550
5551                dev->stats.tx_window_errors += (unsigned long)
5552                        mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
5553        }
5554
5555        return &dev->stats;
5556}
5557
5558/**
5559 * netdev_set_mac_address - set network device MAC address
5560 * @dev:        Network device.
5561 * @addr:       Buffer of MAC address.
5562 *
5563 * This function is used to set the MAC address of the network device.
5564 *
5565 * Return 0 to indicate success.
5566 */
5567static int netdev_set_mac_address(struct net_device *dev, void *addr)
5568{
5569        struct dev_priv *priv = netdev_priv(dev);
5570        struct dev_info *hw_priv = priv->adapter;
5571        struct ksz_hw *hw = &hw_priv->hw;
5572        struct sockaddr *mac = addr;
5573        uint interrupt;
5574
5575        if (priv->port.first_port > 0)
5576                hw_del_addr(hw, dev->dev_addr);
5577        else {
5578                hw->mac_override = 1;
5579                memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
5580        }
5581
5582        eth_hw_addr_set(dev, mac->sa_data);
5583
5584        interrupt = hw_block_intr(hw);
5585
5586        if (priv->port.first_port > 0)
5587                hw_add_addr(hw, dev->dev_addr);
5588        else
5589                hw_set_addr(hw);
5590        hw_restore_intr(hw, interrupt);
5591
5592        return 0;
5593}
5594
5595static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
5596        struct ksz_hw *hw, int promiscuous)
5597{
5598        if (promiscuous != priv->promiscuous) {
5599                u8 prev_state = hw->promiscuous;
5600
5601                if (promiscuous)
5602                        ++hw->promiscuous;
5603                else
5604                        --hw->promiscuous;
5605                priv->promiscuous = promiscuous;
5606
5607                /* Turn on/off promiscuous mode. */
5608                if (hw->promiscuous <= 1 && prev_state <= 1)
5609                        hw_set_promiscuous(hw, hw->promiscuous);
5610
5611                /*
5612                 * Port is not in promiscuous mode, meaning it is released
5613                 * from the bridge.
5614                 */
5615                if ((hw->features & STP_SUPPORT) && !promiscuous &&
5616                    netif_is_bridge_port(dev)) {
5617                        struct ksz_switch *sw = hw->ksz_switch;
5618                        int port = priv->port.first_port;
5619
5620                        port_set_stp_state(hw, port, STP_STATE_DISABLED);
5621                        port = 1 << port;
5622                        if (sw->member & port) {
5623                                sw->member &= ~port;
5624                                bridge_change(hw);
5625                        }
5626                }
5627        }
5628}
5629
5630static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
5631        int multicast)
5632{
5633        if (multicast != priv->multicast) {
5634                u8 all_multi = hw->all_multi;
5635
5636                if (multicast)
5637                        ++hw->all_multi;
5638                else
5639                        --hw->all_multi;
5640                priv->multicast = multicast;
5641
5642                /* Turn on/off all multicast mode. */
5643                if (hw->all_multi <= 1 && all_multi <= 1)
5644                        hw_set_multicast(hw, hw->all_multi);
5645        }
5646}
5647
5648/**
5649 * netdev_set_rx_mode
5650 * @dev:        Network device.
5651 *
5652 * This routine is used to set multicast addresses or put the network device
5653 * into promiscuous mode.
5654 */
5655static void netdev_set_rx_mode(struct net_device *dev)
5656{
5657        struct dev_priv *priv = netdev_priv(dev);
5658        struct dev_info *hw_priv = priv->adapter;
5659        struct ksz_hw *hw = &hw_priv->hw;
5660        struct netdev_hw_addr *ha;
5661        int multicast = (dev->flags & IFF_ALLMULTI);
5662
5663        dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
5664
5665        if (hw_priv->hw.dev_count > 1)
5666                multicast |= (dev->flags & IFF_MULTICAST);
5667        dev_set_multicast(priv, hw, multicast);
5668
5669        /* Cannot use different hashes in multiple device interfaces mode. */
5670        if (hw_priv->hw.dev_count > 1)
5671                return;
5672
5673        if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
5674                int i = 0;
5675
5676                /* List too big to support so turn on all multicast mode. */
5677                if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
5678                        if (MAX_MULTICAST_LIST != hw->multi_list_size) {
5679                                hw->multi_list_size = MAX_MULTICAST_LIST;
5680                                ++hw->all_multi;
5681                                hw_set_multicast(hw, hw->all_multi);
5682                        }
5683                        return;
5684                }
5685
5686                netdev_for_each_mc_addr(ha, dev) {
5687                        if (i >= MAX_MULTICAST_LIST)
5688                                break;
5689                        memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
5690                }
5691                hw->multi_list_size = (u8) i;
5692                hw_set_grp_addr(hw);
5693        } else {
5694                if (MAX_MULTICAST_LIST == hw->multi_list_size) {
5695                        --hw->all_multi;
5696                        hw_set_multicast(hw, hw->all_multi);
5697                }
5698                hw->multi_list_size = 0;
5699                hw_clr_multicast(hw);
5700        }
5701}
5702
5703static int netdev_change_mtu(struct net_device *dev, int new_mtu)
5704{
5705        struct dev_priv *priv = netdev_priv(dev);
5706        struct dev_info *hw_priv = priv->adapter;
5707        struct ksz_hw *hw = &hw_priv->hw;
5708        int hw_mtu;
5709
5710        if (netif_running(dev))
5711                return -EBUSY;
5712
5713        /* Cannot use different MTU in multiple device interfaces mode. */
5714        if (hw->dev_count > 1)
5715                if (dev != hw_priv->dev)
5716                        return 0;
5717
5718        hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
5719        if (hw_mtu > REGULAR_RX_BUF_SIZE) {
5720                hw->features |= RX_HUGE_FRAME;
5721                hw_mtu = MAX_RX_BUF_SIZE;
5722        } else {
5723                hw->features &= ~RX_HUGE_FRAME;
5724                hw_mtu = REGULAR_RX_BUF_SIZE;
5725        }
5726        hw_mtu = (hw_mtu + 3) & ~3;
5727        hw_priv->mtu = hw_mtu;
5728        dev->mtu = new_mtu;
5729
5730        return 0;
5731}
5732
5733/**
5734 * netdev_ioctl - I/O control processing
5735 * @dev:        Network device.
5736 * @ifr:        Interface request structure.
5737 * @cmd:        I/O control code.
5738 *
5739 * This function is used to process I/O control calls.
5740 *
5741 * Return 0 to indicate success.
5742 */
5743static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5744{
5745        struct dev_priv *priv = netdev_priv(dev);
5746        struct dev_info *hw_priv = priv->adapter;
5747        struct ksz_hw *hw = &hw_priv->hw;
5748        struct ksz_port *port = &priv->port;
5749        int result = 0;
5750        struct mii_ioctl_data *data = if_mii(ifr);
5751
5752        if (down_interruptible(&priv->proc_sem))
5753                return -ERESTARTSYS;
5754
5755        switch (cmd) {
5756        /* Get address of MII PHY in use. */
5757        case SIOCGMIIPHY:
5758                data->phy_id = priv->id;
5759                fallthrough;
5760
5761        /* Read MII PHY register. */
5762        case SIOCGMIIREG:
5763                if (data->phy_id != priv->id || data->reg_num >= 6)
5764                        result = -EIO;
5765                else
5766                        hw_r_phy(hw, port->linked->port_id, data->reg_num,
5767                                &data->val_out);
5768                break;
5769
5770        /* Write MII PHY register. */
5771        case SIOCSMIIREG:
5772                if (!capable(CAP_NET_ADMIN))
5773                        result = -EPERM;
5774                else if (data->phy_id != priv->id || data->reg_num >= 6)
5775                        result = -EIO;
5776                else
5777                        hw_w_phy(hw, port->linked->port_id, data->reg_num,
5778                                data->val_in);
5779                break;
5780
5781        default:
5782                result = -EOPNOTSUPP;
5783        }
5784
5785        up(&priv->proc_sem);
5786
5787        return result;
5788}
5789
5790/*
5791 * MII support
5792 */
5793
5794/**
5795 * mdio_read - read PHY register
5796 * @dev:        Network device.
5797 * @phy_id:     The PHY id.
5798 * @reg_num:    The register number.
5799 *
5800 * This function returns the PHY register value.
5801 *
5802 * Return the register value.
5803 */
5804static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
5805{
5806        struct dev_priv *priv = netdev_priv(dev);
5807        struct ksz_port *port = &priv->port;
5808        struct ksz_hw *hw = port->hw;
5809        u16 val_out;
5810
5811        hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
5812        return val_out;
5813}
5814
5815/**
5816 * mdio_write - set PHY register
5817 * @dev:        Network device.
5818 * @phy_id:     The PHY id.
5819 * @reg_num:    The register number.
5820 * @val:        The register value.
5821 *
5822 * This procedure sets the PHY register value.
5823 */
5824static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
5825{
5826        struct dev_priv *priv = netdev_priv(dev);
5827        struct ksz_port *port = &priv->port;
5828        struct ksz_hw *hw = port->hw;
5829        int i;
5830        int pi;
5831
5832        for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
5833                hw_w_phy(hw, pi, reg_num << 1, val);
5834}
5835
5836/*
5837 * ethtool support
5838 */
5839
/* Number of 16-bit words in the on-board EEPROM. */
#define EEPROM_SIZE			0x40

/* Shadow copy of the EEPROM contents used by the ethtool EEPROM ops below. */
static u16 eeprom_data[EEPROM_SIZE] = { 0 };

/* All 10/100 half/full-duplex link modes this driver can advertise. */
#define ADVERTISED_ALL			\
	(ADVERTISED_10baseT_Half |	\
	ADVERTISED_10baseT_Full |	\
	ADVERTISED_100baseT_Half |	\
	ADVERTISED_100baseT_Full)
5849
5850/* These functions use the MII functions in mii.c. */
5851
5852/**
5853 * netdev_get_link_ksettings - get network device settings
5854 * @dev:        Network device.
5855 * @cmd:        Ethtool command.
5856 *
5857 * This function queries the PHY and returns its state in the ethtool command.
5858 *
5859 * Return 0 if successful; otherwise an error code.
5860 */
5861static int netdev_get_link_ksettings(struct net_device *dev,
5862                                     struct ethtool_link_ksettings *cmd)
5863{
5864        struct dev_priv *priv = netdev_priv(dev);
5865        struct dev_info *hw_priv = priv->adapter;
5866
5867        mutex_lock(&hw_priv->lock);
5868        mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
5869        ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
5870        mutex_unlock(&hw_priv->lock);
5871
5872        /* Save advertised settings for workaround in next function. */
5873        ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
5874                                                cmd->link_modes.advertising);
5875
5876        return 0;
5877}
5878
5879/**
5880 * netdev_set_link_ksettings - set network device settings
5881 * @dev:        Network device.
5882 * @cmd:        Ethtool command.
5883 *
5884 * This function sets the PHY according to the ethtool command.
5885 *
5886 * Return 0 if successful; otherwise an error code.
5887 */
5888static int netdev_set_link_ksettings(struct net_device *dev,
5889                                     const struct ethtool_link_ksettings *cmd)
5890{
5891        struct dev_priv *priv = netdev_priv(dev);
5892        struct dev_info *hw_priv = priv->adapter;
5893        struct ksz_port *port = &priv->port;
5894        struct ethtool_link_ksettings copy_cmd;
5895        u32 speed = cmd->base.speed;
5896        u32 advertising;
5897        int rc;
5898
5899        ethtool_convert_link_mode_to_legacy_u32(&advertising,
5900                                                cmd->link_modes.advertising);
5901
5902        /*
5903         * ethtool utility does not change advertised setting if auto
5904         * negotiation is not specified explicitly.
5905         */
5906        if (cmd->base.autoneg && priv->advertising == advertising) {
5907                advertising |= ADVERTISED_ALL;
5908                if (10 == speed)
5909                        advertising &=
5910                                ~(ADVERTISED_100baseT_Full |
5911                                ADVERTISED_100baseT_Half);
5912                else if (100 == speed)
5913                        advertising &=
5914                                ~(ADVERTISED_10baseT_Full |
5915                                ADVERTISED_10baseT_Half);
5916                if (0 == cmd->base.duplex)
5917                        advertising &=
5918                                ~(ADVERTISED_100baseT_Full |
5919                                ADVERTISED_10baseT_Full);
5920                else if (1 == cmd->base.duplex)
5921                        advertising &=
5922                                ~(ADVERTISED_100baseT_Half |
5923                                ADVERTISED_10baseT_Half);
5924        }
5925        mutex_lock(&hw_priv->lock);
5926        if (cmd->base.autoneg &&
5927            (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) {
5928                port->duplex = 0;
5929                port->speed = 0;
5930                port->force_link = 0;
5931        } else {
5932                port->duplex = cmd->base.duplex + 1;
5933                if (1000 != speed)
5934                        port->speed = speed;
5935                if (cmd->base.autoneg)
5936                        port->force_link = 0;
5937                else
5938                        port->force_link = 1;
5939        }
5940
5941        memcpy(&copy_cmd, cmd, sizeof(copy_cmd));
5942        ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising,
5943                                                advertising);
5944        rc = mii_ethtool_set_link_ksettings(
5945                &priv->mii_if,
5946                (const struct ethtool_link_ksettings *)&copy_cmd);
5947        mutex_unlock(&hw_priv->lock);
5948        return rc;
5949}
5950
5951/**
5952 * netdev_nway_reset - restart auto-negotiation
5953 * @dev:        Network device.
5954 *
5955 * This function restarts the PHY for auto-negotiation.
5956 *
5957 * Return 0 if successful; otherwise an error code.
5958 */
5959static int netdev_nway_reset(struct net_device *dev)
5960{
5961        struct dev_priv *priv = netdev_priv(dev);
5962        struct dev_info *hw_priv = priv->adapter;
5963        int rc;
5964
5965        mutex_lock(&hw_priv->lock);
5966        rc = mii_nway_restart(&priv->mii_if);
5967        mutex_unlock(&hw_priv->lock);
5968        return rc;
5969}
5970
5971/**
5972 * netdev_get_link - get network device link status
5973 * @dev:        Network device.
5974 *
5975 * This function gets the link status from the PHY.
5976 *
5977 * Return true if PHY is linked and false otherwise.
5978 */
5979static u32 netdev_get_link(struct net_device *dev)
5980{
5981        struct dev_priv *priv = netdev_priv(dev);
5982        int rc;
5983
5984        rc = mii_link_ok(&priv->mii_if);
5985        return rc;
5986}
5987
5988/**
5989 * netdev_get_drvinfo - get network driver information
5990 * @dev:        Network device.
5991 * @info:       Ethtool driver info data structure.
5992 *
5993 * This procedure returns the driver information.
5994 */
5995static void netdev_get_drvinfo(struct net_device *dev,
5996        struct ethtool_drvinfo *info)
5997{
5998        struct dev_priv *priv = netdev_priv(dev);
5999        struct dev_info *hw_priv = priv->adapter;
6000
6001        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
6002        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
6003        strlcpy(info->bus_info, pci_name(hw_priv->pdev),
6004                sizeof(info->bus_info));
6005}
6006
/* Chip register ranges dumped by the ethtool get_regs operations below. */
static struct hw_regs {
	int start;	/* first register offset, inclusive */
	int end;	/* last register offset, exclusive */
} hw_regs_range[] = {
	{ KS_DMA_TX_CTRL,	KS884X_INTERRUPTS_STATUS },
	{ KS_ADD_ADDR_0_LO,	KS_ADD_ADDR_F_HI },
	{ KS884X_ADDR_0_OFFSET,	KS8841_WOL_FRAME_BYTE2_OFFSET },
	{ KS884X_SIDER_P,	KS8842_SGCR7_P },
	{ KS8842_MACAR1_P,	KS8842_TOSR8_P },
	{ KS884X_P1MBCR_P,	KS8842_P3ERCR_P },
	{ 0, 0 }		/* sentinel: end <= start stops the scan */
};
6019
6020/**
6021 * netdev_get_regs_len - get length of register dump
6022 * @dev:        Network device.
6023 *
6024 * This function returns the length of the register dump.
6025 *
6026 * Return length of the register dump.
6027 */
6028static int netdev_get_regs_len(struct net_device *dev)
6029{
6030        struct hw_regs *range = hw_regs_range;
6031        int regs_len = 0x10 * sizeof(u32);
6032
6033        while (range->end > range->start) {
6034                regs_len += (range->end - range->start + 3) / 4 * 4;
6035                range++;
6036        }
6037        return regs_len;
6038}
6039
6040/**
6041 * netdev_get_regs - get register dump
6042 * @dev:        Network device.
6043 * @regs:       Ethtool registers data structure.
6044 * @ptr:        Buffer to store the register values.
6045 *
6046 * This procedure dumps the register values in the provided buffer.
6047 */
6048static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
6049        void *ptr)
6050{
6051        struct dev_priv *priv = netdev_priv(dev);
6052        struct dev_info *hw_priv = priv->adapter;
6053        struct ksz_hw *hw = &hw_priv->hw;
6054        int *buf = (int *) ptr;
6055        struct hw_regs *range = hw_regs_range;
6056        int len;
6057
6058        mutex_lock(&hw_priv->lock);
6059        regs->version = 0;
6060        for (len = 0; len < 0x40; len += 4) {
6061                pci_read_config_dword(hw_priv->pdev, len, buf);
6062                buf++;
6063        }
6064        while (range->end > range->start) {
6065                for (len = range->start; len < range->end; len += 4) {
6066                        *buf = readl(hw->io + len);
6067                        buf++;
6068                }
6069                range++;
6070        }
6071        mutex_unlock(&hw_priv->lock);
6072}
6073
/* Wake-on-LAN event mask built from the standard WAKE_* flags. */
#define WOL_SUPPORT			\
	(WAKE_PHY | WAKE_MAGIC |	\
	WAKE_UCAST | WAKE_MCAST |	\
	WAKE_BCAST | WAKE_ARP)
6078
6079/**
6080 * netdev_get_wol - get Wake-on-LAN support
6081 * @dev:        Network device.
6082 * @wol:        Ethtool Wake-on-LAN data structure.
6083 *
6084 * This procedure returns Wake-on-LAN support.
6085 */
6086static void netdev_get_wol(struct net_device *dev,
6087        struct ethtool_wolinfo *wol)
6088{
6089        struct dev_priv *priv = netdev_priv(dev);
6090        struct dev_info *hw_priv = priv->adapter;
6091
6092        wol->supported = hw_priv->wol_support;
6093        wol->wolopts = hw_priv->wol_enable;
6094        memset(&wol->sopass, 0, sizeof(wol->sopass));
6095}
6096
6097/**
6098 * netdev_set_wol - set Wake-on-LAN support
6099 * @dev:        Network device.
6100 * @wol:        Ethtool Wake-on-LAN data structure.
6101 *
6102 * This function sets Wake-on-LAN support.
6103 *
6104 * Return 0 if successful; otherwise an error code.
6105 */
6106static int netdev_set_wol(struct net_device *dev,
6107        struct ethtool_wolinfo *wol)
6108{
6109        struct dev_priv *priv = netdev_priv(dev);
6110        struct dev_info *hw_priv = priv->adapter;
6111
6112        /* Need to find a way to retrieve the device IP address. */
6113        static const u8 net_addr[] = { 192, 168, 1, 1 };
6114
6115        if (wol->wolopts & ~hw_priv->wol_support)
6116                return -EINVAL;
6117
6118        hw_priv->wol_enable = wol->wolopts;
6119
6120        /* Link wakeup cannot really be disabled. */
6121        if (wol->wolopts)
6122                hw_priv->wol_enable |= WAKE_PHY;
6123        hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
6124        return 0;
6125}
6126
6127/**
6128 * netdev_get_msglevel - get debug message level
6129 * @dev:        Network device.
6130 *
6131 * This function returns current debug message level.
6132 *
6133 * Return current debug message flags.
6134 */
6135static u32 netdev_get_msglevel(struct net_device *dev)
6136{
6137        struct dev_priv *priv = netdev_priv(dev);
6138
6139        return priv->msg_enable;
6140}
6141
6142/**
6143 * netdev_set_msglevel - set debug message level
6144 * @dev:        Network device.
6145 * @value:      Debug message flags.
6146 *
6147 * This procedure sets debug message level.
6148 */
6149static void netdev_set_msglevel(struct net_device *dev, u32 value)
6150{
6151        struct dev_priv *priv = netdev_priv(dev);
6152
6153        priv->msg_enable = value;
6154}
6155
6156/**
6157 * netdev_get_eeprom_len - get EEPROM length
6158 * @dev:        Network device.
6159 *
6160 * This function returns the length of the EEPROM.
6161 *
6162 * Return length of the EEPROM.
6163 */
6164static int netdev_get_eeprom_len(struct net_device *dev)
6165{
6166        return EEPROM_SIZE * 2;
6167}
6168
/* Magic value ethtool callers must supply before EEPROM writes are accepted. */
#define EEPROM_MAGIC			0x10A18842
6170
6171/**
6172 * netdev_get_eeprom - get EEPROM data
6173 * @dev:        Network device.
6174 * @eeprom:     Ethtool EEPROM data structure.
6175 * @data:       Buffer to store the EEPROM data.
6176 *
6177 * This function dumps the EEPROM data in the provided buffer.
6178 *
6179 * Return 0 if successful; otherwise an error code.
6180 */
6181static int netdev_get_eeprom(struct net_device *dev,
6182        struct ethtool_eeprom *eeprom, u8 *data)
6183{
6184        struct dev_priv *priv = netdev_priv(dev);
6185        struct dev_info *hw_priv = priv->adapter;
6186        u8 *eeprom_byte = (u8 *) eeprom_data;
6187        int i;
6188        int len;
6189
6190        len = (eeprom->offset + eeprom->len + 1) / 2;
6191        for (i = eeprom->offset / 2; i < len; i++)
6192                eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
6193        eeprom->magic = EEPROM_MAGIC;
6194        memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
6195
6196        return 0;
6197}
6198
6199/**
6200 * netdev_set_eeprom - write EEPROM data
6201 * @dev:        Network device.
6202 * @eeprom:     Ethtool EEPROM data structure.
6203 * @data:       Data buffer.
6204 *
6205 * This function modifies the EEPROM data one byte at a time.
6206 *
6207 * Return 0 if successful; otherwise an error code.
6208 */
6209static int netdev_set_eeprom(struct net_device *dev,
6210        struct ethtool_eeprom *eeprom, u8 *data)
6211{
6212        struct dev_priv *priv = netdev_priv(dev);
6213        struct dev_info *hw_priv = priv->adapter;
6214        u16 eeprom_word[EEPROM_SIZE];
6215        u8 *eeprom_byte = (u8 *) eeprom_word;
6216        int i;
6217        int len;
6218
6219        if (eeprom->magic != EEPROM_MAGIC)
6220                return -EINVAL;
6221
6222        len = (eeprom->offset + eeprom->len + 1) / 2;
6223        for (i = eeprom->offset / 2; i < len; i++)
6224                eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
6225        memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
6226        memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
6227        for (i = 0; i < EEPROM_SIZE; i++)
6228                if (eeprom_word[i] != eeprom_data[i]) {
6229                        eeprom_data[i] = eeprom_word[i];
6230                        eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
6231        }
6232
6233        return 0;
6234}
6235
6236/**
6237 * netdev_get_pauseparam - get flow control parameters
6238 * @dev:        Network device.
6239 * @pause:      Ethtool PAUSE settings data structure.
6240 *
6241 * This procedure returns the PAUSE control flow settings.
6242 */
6243static void netdev_get_pauseparam(struct net_device *dev,
6244        struct ethtool_pauseparam *pause)
6245{
6246        struct dev_priv *priv = netdev_priv(dev);
6247        struct dev_info *hw_priv = priv->adapter;
6248        struct ksz_hw *hw = &hw_priv->hw;
6249
6250        pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
6251        if (!hw->ksz_switch) {
6252                pause->rx_pause =
6253                        (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
6254                pause->tx_pause =
6255                        (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
6256        } else {
6257                pause->rx_pause =
6258                        (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
6259                                SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
6260                pause->tx_pause =
6261                        (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
6262                                SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
6263        }
6264}
6265
/**
 * netdev_set_pauseparam - set flow control parameters
 * @dev:	Network device.
 * @pause:	Ethtool PAUSE settings data structure.
 *
 * This function sets the PAUSE control flow settings.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	/* hw_priv->lock serializes access to the shared switch registers. */
	mutex_lock(&hw_priv->lock);
	if (pause->autoneg) {
		/* Let auto-negotiation decide; rx/tx only select whether
		 * pause capability is advertised at all.
		 */
		if (!pause->rx_pause && !pause->tx_pause)
			port->flow_ctrl = PHY_NO_FLOW_CTRL;
		else
			port->flow_ctrl = PHY_FLOW_CTRL;
		hw->overrides &= ~PAUSE_FLOW_CTRL;
		port->force_link = 0;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, 1);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, 1);
		}
		/* Restart link setup so the new advertisement takes effect. */
		port_set_link_speed(port);
	} else {
		/* Force the requested pause configuration on the hardware. */
		hw->overrides |= PAUSE_FLOW_CTRL;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, pause->rx_pause);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, pause->tx_pause);
		} else
			set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
	}
	mutex_unlock(&hw_priv->lock);

	return 0;
}
6313
6314/**
6315 * netdev_get_ringparam - get tx/rx ring parameters
6316 * @dev:        Network device.
6317 * @ring:       Ethtool RING settings data structure.
6318 * @kernel_ring:        Ethtool external RING settings data structure.
6319 * @extack:     Netlink handle.
6320 *
6321 * This procedure returns the TX/RX ring settings.
6322 */
6323static void netdev_get_ringparam(struct net_device *dev,
6324                                 struct ethtool_ringparam *ring,
6325                                 struct kernel_ethtool_ringparam *kernel_ring,
6326                                 struct netlink_ext_ack *extack)
6327{
6328        struct dev_priv *priv = netdev_priv(dev);
6329        struct dev_info *hw_priv = priv->adapter;
6330        struct ksz_hw *hw = &hw_priv->hw;
6331
6332        ring->tx_max_pending = (1 << 9);
6333        ring->tx_pending = hw->tx_desc_info.alloc;
6334        ring->rx_max_pending = (1 << 9);
6335        ring->rx_pending = hw->rx_desc_info.alloc;
6336}
6337
#define STATS_LEN			(TOTAL_PORT_COUNTER_NUM)

/* Statistic names reported for ETH_SS_STATS, in the same order as the
 * hardware MIB counters copied out by get_mib_counters().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },

	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },

	{ "rx_discards" },
	{ "tx_discards" },
};
6380
6381/**
6382 * netdev_get_strings - get statistics identity strings
6383 * @dev:        Network device.
6384 * @stringset:  String set identifier.
6385 * @buf:        Buffer to store the strings.
6386 *
6387 * This procedure returns the strings used to identify the statistics.
6388 */
6389static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6390{
6391        struct dev_priv *priv = netdev_priv(dev);
6392        struct dev_info *hw_priv = priv->adapter;
6393        struct ksz_hw *hw = &hw_priv->hw;
6394
6395        if (ETH_SS_STATS == stringset)
6396                memcpy(buf, &ethtool_stats_keys,
6397                        ETH_GSTRING_LEN * hw->mib_cnt);
6398}
6399
6400/**
6401 * netdev_get_sset_count - get statistics size
6402 * @dev:        Network device.
6403 * @sset:       The statistics set number.
6404 *
6405 * This function returns the size of the statistics to be reported.
6406 *
6407 * Return size of the statistics to be reported.
6408 */
6409static int netdev_get_sset_count(struct net_device *dev, int sset)
6410{
6411        struct dev_priv *priv = netdev_priv(dev);
6412        struct dev_info *hw_priv = priv->adapter;
6413        struct ksz_hw *hw = &hw_priv->hw;
6414
6415        switch (sset) {
6416        case ETH_SS_STATS:
6417                return hw->mib_cnt;
6418        default:
6419                return -EOPNOTSUPP;
6420        }
6421}
6422
/**
 * netdev_get_ethtool_stats - get network device statistics
 * @dev:	Network device.
 * @stats:	Ethtool statistics data structure.
 * @data:	Buffer to store the statistics.
 *
 * This procedure returns the statistics.  Counter reads are performed by
 * mib_read_work(); this function requests reads, waits for completion and
 * then copies the accumulated counters out.
 */
static void netdev_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int n_stats = stats->n_stats;
	int i;
	int n;
	int p;
	u64 counter[TOTAL_PORT_COUNTER_NUM];

	/* Request a counter read on every connected port of this device. */
	mutex_lock(&hw_priv->lock);
	n = SWITCH_PORT_NUM;
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		if (media_connected == hw->port_mib[p].state) {
			hw_priv->counter[p].read = 1;

			/* Remember first port that requests read. */
			if (n == SWITCH_PORT_NUM)
				n = p;
		}
	}
	mutex_unlock(&hw_priv->lock);

	/* n < SWITCH_PORT_NUM means at least one port needs a read. */
	if (n < SWITCH_PORT_NUM)
		schedule_work(&hw_priv->mib_read);

	/* mib_read_work() sets .read to 2 and wakes us when a port is done. */
	if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
		p = n;
		wait_event_interruptible_timeout(
			hw_priv->counter[p].counter,
			2 == hw_priv->counter[p].read,
			HZ * 1);
	} else
		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
			if (0 == i) {
				/* Allow extra time for the first port. */
				wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 2);
			} else if (hw->port_mib[p].cnt_ptr) {
				/* Read of this port is still in progress. */
				wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 1);
			}
		}

	/* Copy out at most the number of counters the caller asked for. */
	get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
	n = hw->mib_cnt;
	if (n > n_stats)
		n = n_stats;
	n_stats -= n;
	for (i = 0; i < n; i++)
		*data++ = counter[i];
}
6489
6490/**
6491 * netdev_set_features - set receive checksum support
6492 * @dev:        Network device.
6493 * @features:   New device features (offloads).
6494 *
6495 * This function sets receive checksum support setting.
6496 *
6497 * Return 0 if successful; otherwise an error code.
6498 */
6499static int netdev_set_features(struct net_device *dev,
6500        netdev_features_t features)
6501{
6502        struct dev_priv *priv = netdev_priv(dev);
6503        struct dev_info *hw_priv = priv->adapter;
6504        struct ksz_hw *hw = &hw_priv->hw;
6505
6506        mutex_lock(&hw_priv->lock);
6507
6508        /* see note in hw_setup() */
6509        if (features & NETIF_F_RXCSUM)
6510                hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
6511        else
6512                hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
6513
6514        if (hw->enabled)
6515                writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
6516
6517        mutex_unlock(&hw_priv->lock);
6518
6519        return 0;
6520}
6521
/* Ethtool operations supported by the driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_drvinfo		= netdev_get_drvinfo,
	.get_regs_len		= netdev_get_regs_len,
	.get_regs		= netdev_get_regs,
	.get_wol		= netdev_get_wol,
	.set_wol		= netdev_set_wol,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_eeprom_len		= netdev_get_eeprom_len,
	.get_eeprom		= netdev_get_eeprom,
	.set_eeprom		= netdev_set_eeprom,
	.get_pauseparam		= netdev_get_pauseparam,
	.set_pauseparam		= netdev_set_pauseparam,
	.get_ringparam		= netdev_get_ringparam,
	.get_strings		= netdev_get_strings,
	.get_sset_count		= netdev_get_sset_count,
	.get_ethtool_stats	= netdev_get_ethtool_stats,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
6544
6545/*
6546 * Hardware monitoring
6547 */
6548
6549static void update_link(struct net_device *dev, struct dev_priv *priv,
6550        struct ksz_port *port)
6551{
6552        if (priv->media_state != port->linked->state) {
6553                priv->media_state = port->linked->state;
6554                if (netif_running(dev))
6555                        set_media_state(dev, media_connected);
6556        }
6557}
6558
/**
 * mib_read_work - read MIB counters of all ports
 * @work:	Work structure embedded in &struct dev_info.
 *
 * Walks every MIB-capable port and services outstanding counter reads.
 * A read is requested by setting counter[i].read to 1; once the complete
 * counter set of a port has been captured this sets it to 2 and wakes any
 * waiters (see netdev_get_ethtool_stats()).
 */
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	unsigned long next_jiffies;
	struct ksz_port_mib *mib;
	int i;

	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;

			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			/* Stagger the next periodic read across all ports. */
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}
6602
/**
 * mib_monitor - periodic MIB counter and Wake-on-LAN monitoring
 * @t:	Timer embedded in the mib_timer_info of &struct dev_info.
 *
 * Runs mib_read_work() and polls the Wake-on-LAN PME status, then
 * re-arms itself via ksz_update_timer().
 */
static void mib_monitor(struct timer_list *t)
{
	struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);

	mib_read_work(&hw_priv->mib_read);

	/* This is used to verify Wake-on-LAN is working. */
	if (hw_priv->pme_wait) {
		if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
			hw_clr_wol_pme_status(&hw_priv->hw);
			hw_priv->pme_wait = 0;
		}
	} else if (hw_chk_wol_pme_status(&hw_priv->hw)) {

		/* PME is asserted.  Wait 2 seconds to clear it. */
		hw_priv->pme_wait = jiffies + HZ * 2;
	}

	ksz_update_timer(&hw_priv->mib_timer_info);
}
6623
6624/**
6625 * dev_monitor - periodic monitoring
6626 * @t:  timer list containing a network device pointer.
6627 *
6628 * This routine is run in a kernel timer to monitor the network device.
6629 */
6630static void dev_monitor(struct timer_list *t)
6631{
6632        struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
6633        struct net_device *dev = priv->mii_if.dev;
6634        struct dev_info *hw_priv = priv->adapter;
6635        struct ksz_hw *hw = &hw_priv->hw;
6636        struct ksz_port *port = &priv->port;
6637
6638        if (!(hw->features & LINK_INT_WORKING))
6639                port_get_link_speed(port);
6640        update_link(dev, priv, port);
6641
6642        ksz_update_timer(&priv->monitor_timer_info);
6643}
6644
6645/*
6646 * Linux network device interface functions
6647 */
6648
6649/* Driver exported variables */
6650
/* Bitmask controlling driver message verbosity; fed to netif_msg_init(). */
static int msg_enable;

/* MAC address module parameters; the default ":" means "not supplied". */
static char *macaddr = ":";
static char *mac1addr = ":";

/*
 * This enables multiple network device mode for KSZ8842, which contains a
 * switch with two physical ports.  Some users like to take control of the
 * ports for running Spanning Tree Protocol.  The driver will create an
 * additional eth? device for the other port.
 *
 * Some limitations are the network devices cannot have different MTU and
 * multicast hash tables.
 */
static int multi_dev;

/*
 * As most users select multiple network device mode to use Spanning Tree
 * Protocol, this enables a feature in which most unicast and multicast packets
 * are forwarded inside the switch and not passed to the host.  Only packets
 * that need the host's attention are passed to it.  This prevents the host
 * wasting CPU time to examine each and every incoming packets and do the
 * forwarding itself.
 *
 * As the hack requires the private bridge header, the driver cannot compile
 * with just the kernel headers.
 *
 * Enabling STP support also turns on multiple network device mode.
 */
static int stp;

/*
 * This enables fast aging in the KSZ8842 switch.  Not sure what situation
 * needs that.  However, fast aging is used to flush the dynamic MAC table when
 * STP support is enabled.
 */
static int fast_aging;
6688
6689/**
6690 * netdev_init - initialize network device.
6691 * @dev:        Network device.
6692 *
6693 * This function initializes the network device.
6694 *
6695 * Return 0 if successful; otherwise an error code indicating failure.
6696 */
6697static int __init netdev_init(struct net_device *dev)
6698{
6699        struct dev_priv *priv = netdev_priv(dev);
6700
6701        /* 500 ms timeout */
6702        ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
6703                dev_monitor);
6704
6705        /* 500 ms timeout */
6706        dev->watchdog_timeo = HZ / 2;
6707
6708        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
6709
6710        /*
6711         * Hardware does not really support IPv6 checksum generation, but
6712         * driver actually runs faster with this on.
6713         */
6714        dev->hw_features |= NETIF_F_IPV6_CSUM;
6715
6716        dev->features |= dev->hw_features;
6717
6718        sema_init(&priv->proc_sem, 1);
6719
6720        priv->mii_if.phy_id_mask = 0x1;
6721        priv->mii_if.reg_num_mask = 0x7;
6722        priv->mii_if.dev = dev;
6723        priv->mii_if.mdio_read = mdio_read;
6724        priv->mii_if.mdio_write = mdio_write;
6725        priv->mii_if.phy_id = priv->port.first_port + 1;
6726
6727        priv->msg_enable = netif_msg_init(msg_enable,
6728                (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
6729
6730        return 0;
6731}
6732
/* Network device operations; netdev_init runs as ->ndo_init during
 * register_netdev().
 */
static const struct net_device_ops netdev_ops = {
	.ndo_init		= netdev_init,
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_get_stats		= netdev_query_statistics,
	.ndo_start_xmit		= netdev_tx,
	.ndo_tx_timeout		= netdev_tx_timeout,
	.ndo_change_mtu		= netdev_change_mtu,
	.ndo_set_features	= netdev_set_features,
	.ndo_set_mac_address	= netdev_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_set_rx_mode	= netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= netdev_netpoll,
#endif
};
6750
/* Free a network device, unregistering it first if it was registered.
 * watchdog_timeo is set by netdev_init() (->ndo_init), which only runs on
 * a successful register_netdev(), so it doubles as a "was registered"
 * flag here.
 */
static void netdev_free(struct net_device *dev)
{
	if (dev->watchdog_timeo)
		unregister_netdev(dev);

	free_netdev(dev);
}
6758
/* Per-PCI-device state: shared adapter info plus one netdev per port. */
struct platform_info {
	struct dev_info dev_info;
	struct net_device *netdev[SWITCH_PORT_NUM];
};

/* Number of network devices created so far; also used as the adapter id. */
static int net_device_present;
6765
/**
 * get_mac_addr - parse a MAC address module parameter
 * @hw_priv:	Network device information.
 * @macaddr:	String of hex digits, each byte optionally separated by ':'.
 * @port:	MAIN_PORT for the primary address; otherwise the switch's
 *		second address is filled.
 *
 * Parses up to ETH_ALEN bytes from the string; a ':' or the end of the
 * string completes a byte.  For MAIN_PORT a complete 6-byte parse marks
 * the parsed address as an override of the EEPROM address.
 *
 * NOTE(review): the "[5] += hw.id" adjustment runs once per parsed byte;
 * the earlier additions are clobbered when byte 5 itself is stored, so the
 * net effect is a single "+ id" on the last byte - confirm before
 * restructuring.
 */
static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
	int i;
	int j;
	int got_num;
	int num;

	i = j = num = got_num = 0;
	while (j < ETH_ALEN) {
		if (macaddr[i]) {
			int digit;

			got_num = 1;
			digit = hex_to_bin(macaddr[i]);
			if (digit >= 0)
				num = num * 16 + digit;
			else if (':' == macaddr[i])
				got_num = 2;	/* separator completes a byte */
			else
				break;		/* invalid character */
		} else if (got_num)
			got_num = 2;	/* end of string completes a byte */
		else
			break;
		if (2 == got_num) {
			if (MAIN_PORT == port) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
			num = got_num = 0;
		}
		i++;
	}
	if (ETH_ALEN == j) {
		if (MAIN_PORT == port)
			hw_priv->hw.mac_override = 1;
	}
}
6810
/* NOTE(review): appears unused in this file - confirm before removing. */
#define KS884X_DMA_MASK			(~0x0UL)
6812
6813static void read_other_addr(struct ksz_hw *hw)
6814{
6815        int i;
6816        u16 data[3];
6817        struct ksz_switch *sw = hw->ksz_switch;
6818
6819        for (i = 0; i < 3; i++)
6820                data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
6821        if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
6822                sw->other_addr[5] = (u8) data[0];
6823                sw->other_addr[4] = (u8)(data[0] >> 8);
6824                sw->other_addr[3] = (u8) data[1];
6825                sw->other_addr[2] = (u8)(data[1] >> 8);
6826                sw->other_addr[1] = (u8) data[2];
6827                sw->other_addr[0] = (u8)(data[2] >> 8);
6828        }
6829}
6830
/* Fallback for kernels whose pci_ids.h lacks the Micrel vendor id. */
#ifndef PCI_VENDOR_ID_MICREL_KS
#define PCI_VENDOR_ID_MICREL_KS		0x16c6
#endif
6834
/**
 * pcidev_init - PCI probe
 * @pdev:	The PCI device.
 * @id:	Matched entry of the PCI device id table.
 *
 * Detects the chip (KSZ8841 or KSZ8842), initializes the hardware and
 * registers one network device per interface (two on a KSZ8842 when
 * multi_dev is set).
 *
 * Return 0 if successful; otherwise a negative error code.
 *
 * NOTE(review): pci_enable_device() is not undone on the failure paths or
 * in pcidev_exit() - confirm whether pci_disable_device() should be added.
 */
static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct dev_priv *priv;
	struct dev_info *hw_priv;
	struct ksz_hw *hw;
	struct platform_info *info;
	struct ksz_port *port;
	unsigned long reg_base;
	unsigned long reg_len;
	int cnt;
	int i;
	int mib_port_count;
	int pi;
	int port_count;
	int result;
	char banner[sizeof(version)];
	struct ksz_switch *sw = NULL;

	result = pci_enable_device(pdev);
	if (result)
		return result;

	result = -ENODEV;

	/* The DMA engine only supports 32-bit addressing. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return result;

	/* Only a memory-mapped BAR 0 is supported. */
	reg_base = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
		return result;

	if (!request_mem_region(reg_base, reg_len, DRV_NAME))
		return result;
	pci_set_master(pdev);

	result = -ENOMEM;

	info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
	if (!info)
		goto pcidev_init_dev_err;

	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;

	hw = &hw_priv->hw;

	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		goto pcidev_init_io_err;

	/* hw_init() returns 1 for KSZ8841, 2 for KSZ8842, 0 if unknown. */
	cnt = hw_init(hw);
	if (!cnt) {
		if (msg_enable & NETIF_MSG_PROBE)
			pr_alert("chip not detected\n");
		result = -ENODEV;
		goto pcidev_init_alloc_err;
	}

	snprintf(banner, sizeof(banner), "%s", version);
	banner[13] = cnt + '0';		/* Replace x in "Micrel KSZ884x" */
	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

	/* Assume device is KSZ8841. */
	hw->dev_count = 1;
	port_count = 1;
	mib_port_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;

	/* KSZ8842 has a switch with multiple ports. */
	if (2 == cnt) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;

		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

		/* Multiple network device interfaces are required. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}

		/* Single network device has multiple ports. */
		if (1 == hw->dev_count) {
			port_count = SWITCH_PORT_NUM;
			mib_port_count = SWITCH_PORT_NUM;
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
		if (!hw->ksz_switch)
			goto pcidev_init_alloc_err;

		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;

	hw->parent = hw_priv;

	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

	if (ksz_alloc_mem(hw_priv))
		goto pcidev_init_mem_err;

	hw_priv->hw.id = net_device_present;

	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);

	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);

	/* ':' means the macaddr module parameter was not supplied. */
	if (macaddr[0] != ':')
		get_mac_addr(hw_priv, macaddr, MAIN_PORT);

	/* Read MAC address and initialize override address if not overridden. */
	hw_read_addr(hw);

	/* Multiple device interfaces mode requires a second MAC address. */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
		read_other_addr(hw);
		if (mac1addr[0] != ':')
			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
	}

	hw_setup(hw);
	if (hw->ksz_switch)
		sw_setup(hw);
	else {
		/* Wake-on-LAN is only offered on the switch-less KSZ8841. */
		hw_priv->wol_support = WOL_SUPPORT;
		hw_priv->wol_enable = 0;
	}

	INIT_WORK(&hw_priv->mib_read, mib_read_work);

	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor);

	/* Allocate and register one network device per interface. */
	for (i = 0; i < hw->dev_count; i++) {
		dev = alloc_etherdev(sizeof(struct dev_priv));
		if (!dev)
			goto pcidev_init_reg_err;
		SET_NETDEV_DEV(dev, &pdev->dev);
		info->netdev[i] = dev;

		priv = netdev_priv(dev);
		priv->adapter = hw_priv;
		priv->id = net_device_present++;

		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;

		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];

		for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}

		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;
		if (MAIN_PORT == i)
			eth_hw_addr_set(dev, hw_priv->hw.override_addr);
		else {
			u8 addr[ETH_ALEN];

			ether_addr_copy(addr, sw->other_addr);
			/* Derive a distinct address when none was provided. */
			if (ether_addr_equal(sw->other_addr, hw->override_addr))
				addr[5] += port->first_port;
			eth_hw_addr_set(dev, addr);
		}

		dev->netdev_ops = &netdev_ops;
		dev->ethtool_ops = &netdev_ethtool_ops;

		/* MTU range: 60 - 1894 */
		dev->min_mtu = ETH_ZLEN;
		dev->max_mtu = MAX_RX_BUF_SIZE -
			       (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

		if (register_netdev(dev))
			goto pcidev_init_reg_err;
		port_set_power_saving(port, true);
	}

	pci_dev_get(hw_priv->pdev);
	pci_set_drvdata(pdev, info);
	return 0;

pcidev_init_reg_err:
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}

pcidev_init_mem_err:
	ksz_free_mem(hw_priv);
	kfree(hw->ksz_switch);

pcidev_init_alloc_err:
	iounmap(hw->io);

pcidev_init_io_err:
	kfree(info);

pcidev_init_dev_err:
	release_mem_region(reg_base, reg_len);

	return result;
}
7061
/**
 * pcidev_exit - PCI remove
 * @pdev:	The PCI device.
 *
 * Unregisters and frees the network devices and releases every resource
 * acquired by pcidev_init().
 */
static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;

	release_mem_region(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	kfree(hw_priv->hw.ksz_switch);
	/* Drop the reference taken by pci_dev_get() in pcidev_init(). */
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}
7081
/**
 * pcidev_resume - resume from system sleep
 * @dev_d:	Device being resumed.
 *
 * Disables Wake-on-LAN and re-opens any network devices that were running
 * when the system was suspended.
 *
 * NOTE(review): the netdev_open() return value is ignored; a failed
 * re-open leaves the interface attached but not operational - confirm
 * whether the error should be propagated.
 */
static int __maybe_unused pcidev_resume(struct device *dev_d)
{
	int i;
	struct platform_info *info = dev_get_drvdata(dev_d);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	device_wakeup_disable(dev_d);

	if (hw_priv->wol_enable)
		hw_cfg_wol_pme(hw, 0);
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			if (netif_running(dev)) {
				netdev_open(dev);
				netif_device_attach(dev);
			}
		}
	}
	return 0;
}
7105
/**
 * pcidev_suspend - prepare for system sleep
 * @dev_d:	Device being suspended.
 *
 * Detaches and closes every running network device, then arms
 * Wake-on-LAN when it is enabled.
 */
static int __maybe_unused pcidev_suspend(struct device *dev_d)
{
	int i;
	struct platform_info *info = dev_get_drvdata(dev_d);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Need to find a way to retrieve the device IP address. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];

			if (netif_running(dev)) {
				netif_device_detach(dev);
				netdev_close(dev);
			}
		}
	}
	if (hw_priv->wol_enable) {
		hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
		hw_cfg_wol_pme(hw, 1);
	}

	device_wakeup_enable(dev_d);
	return 0;
}
7134
static char pcidev_name[] = "ksz884xp";

/* Matches the KSZ8841 (single MAC) and KSZ8842 (2-port switch) devices. */
static const struct pci_device_id pcidev_table[] = {
	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pcidev_table);

static SIMPLE_DEV_PM_OPS(pcidev_pm_ops, pcidev_suspend, pcidev_resume);

static struct pci_driver pci_device_driver = {
	.driver.pm	= &pcidev_pm_ops,
	.name		= pcidev_name,
	.id_table	= pcidev_table,
	.probe		= pcidev_init,
	.remove		= pcidev_exit
};

module_pci_driver(pci_device_driver);

MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");

module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");
7176