linux/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"

#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"

#define HCLGEVF_MISC_VECTOR_NUM		0

#define HCLGEVF_INVALID_VPORT		0xffff

/* The actual number depends on the total number of VFs created by the
 * physical function, but the maximum number of vectors per VF is
 * {VFn(1-32), VECTn(32 + 1)}.
 */
#define HCLGEVF_MAX_VF_VECTOR_NUM	(32 + 1)

#define HCLGEVF_VECTOR_REG_BASE		0x20000
#define HCLGEVF_MISC_VECTOR_REG_BASE	0x20400
#define HCLGEVF_VECTOR_REG_OFFSET	0x4
#define HCLGEVF_VECTOR_VF_OFFSET	0x100000
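
/* A minimal sketch of how a per-vector control register address is typically
 * derived from the definitions above (illustrative only; the mapped BAR
 * pointer io_base lives in struct hclgevf_hw further below):
 *
 *	addr = hw->io_base + HCLGEVF_VECTOR_REG_BASE +
 *	       vector_id * HCLGEVF_VECTOR_REG_OFFSET;
 */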

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event (=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1

#define HCLGEVF_TQP_RESET_TRY_TIMES	10
/* Reset related Registers */
#define HCLGEVF_FUN_RST_ING		0x20C00
#define HCLGEVF_FUN_RST_ING_B		0

#define HCLGEVF_RSS_IND_TBL_SIZE	512
#define HCLGEVF_RSS_SET_BITMAP_MSK	0xffff
#define HCLGEVF_RSS_KEY_SIZE		40
#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE	1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGEVF_RSS_HASH_ALGO_MASK	0xf
#define HCLGEVF_RSS_CFG_TBL_NUM \
	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
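
/* For reference: assuming HCLGEVF_RSS_CFG_TBL_SIZE is 16 entries per command
 * (its value comes from hclgevf_cmd.h and is only assumed here), programming
 * the 512-entry indirection table takes 512 / 16 = 32 configuration commands.
 */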

#define HCLGEVF_MTA_TBL_SIZE		4096
#define HCLGEVF_MTA_TYPE_SEL_MAX	4

/* states of hclgevf device & tasks */
enum hclgevf_states {
	/* device states */
	HCLGEVF_STATE_DOWN,
	HCLGEVF_STATE_DISABLED,
	/* task states */
	HCLGEVF_STATE_SERVICE_SCHED,
	HCLGEVF_STATE_RST_SERVICE_SCHED,
	HCLGEVF_STATE_RST_HANDLING,
	HCLGEVF_STATE_MBX_SERVICE_SCHED,
	HCLGEVF_STATE_MBX_HANDLING,
};
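
/* A minimal sketch of how the task-state bits above are typically used to
 * avoid queueing the same work twice (hdev->state is the per-device state
 * word in struct hclgevf_dev below; illustrative only):
 *
 *	if (!test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
 *		schedule_work(&hdev->service_task);
 */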

#define HCLGEVF_MPF_ENBALE 1

struct hclgevf_mac {
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	int link;
	u8 duplex;
	u32 speed;
};

struct hclgevf_hw {
	void __iomem *io_base;
	int num_vec;
	struct hclgevf_cmq cmq;
	struct hclgevf_mac mac;
	void *hdev; /* hclgevf device it is part of */
};
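
/* A minimal sketch of MMIO access through the BAR mapped above (shown purely
 * for illustration; the driver's real accessors live in the command-queue
 * layer):
 *
 *	writel(val, hw->io_base + reg);
 *	val = readl(hw->io_base + reg);
 */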

/* TQP stats */
struct hlcgevf_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclgevf_tqp {
	struct device *dev;	/* device for DMA mapping */
	struct hnae3_queue q;
	struct hlcgevf_tqp_stats tqp_stats;
	u16 index;		/* global index in a NIC controller */

	bool alloced;
};
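
/* The hnae3 layer hands back the embedded hnae3_queue; a minimal sketch of
 * recovering the enclosing TQP from it (hypothetical helper name, assuming
 * container_of() is visible through the includes above):
 */
static inline struct hclgevf_tqp *hclgevf_queue_to_tqp(struct hnae3_queue *q)
{
	return container_of(q, struct hclgevf_tqp, q);
}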

struct hclgevf_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u32 numa_node_map;
};

struct hclgevf_rss_cfg {
	u8  rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
	u32 hash_algo;
	u32 rss_size;
	u8 hw_tc_map;
	u8  rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
};

struct hclgevf_misc_vector {
	u8 __iomem *addr;
	int vector_irq;
};

struct hclgevf_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclgevf_hw hw;
	struct hclgevf_misc_vector misc_vector;
	struct hclgevf_rss_cfg rss_cfg;
	unsigned long state;

#define HCLGEVF_RESET_REQUESTED		0
#define HCLGEVF_RESET_PENDING		1
	unsigned long reset_state;	/* requested, pending */
	u32 reset_attempts;

	u32 fw_version;
	u16 num_tqps;		/* num task queue pairs of this VF */

	u16 alloc_rss_size;	/* allocated RSS task queue */
	u16 rss_size_max;	/* HW defined max RSS task queue */

	u16 num_alloc_vport;	/* num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 num_roce_msix;	/* Num of roce vectors for this VF */
	u16 roce_base_msix_offset;
	int roce_base_vector;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;

	bool accept_mta_mc; /* whether to accept mta filter multicast */
	u8 mta_mac_sel_type;
	bool mbx_event_pending;
	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */

	struct timer_list service_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	struct hclgevf_tqp *htqp;

	struct hnae3_handle nic;
	struct hnae3_handle roce;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;
	u32 flag;
};
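
/* A minimal sketch of how a reset is typically requested (illustrative only;
 * it follows the requested/pending split of reset_state above):
 *
 *	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
 *	hclgevf_reset_task_schedule(hdev);
 */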

static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
{
	return (hdev &&
		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
		(hdev->nic.reset_level == HNAE3_VF_RESET));
}

static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
{
	return (hdev &&
		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
		(hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
}
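
/* The helpers above tell callers whether a plain VF reset or a full VF reset
 * is currently being handled, so re-initialization code can pick the right
 * path; a minimal sketch (illustrative only, hclgevf_restore_all() is a
 * hypothetical helper):
 *
 *	if (hclgevf_dev_ongoing_full_reset(hdev))
 *		return hclgevf_restore_all(hdev);
 */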

int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
			 const u8 *msg_data, u8 msg_len, bool need_resp,
			 u8 *resp_data, u16 resp_len);
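
/* A minimal sketch of sending a mailbox request to the PF (the message code
 * is shown purely as an example; the opcodes come from hclge_mbx.h):
 *
 *	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, 0,
 *				   false, NULL, 0);
 */
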
void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
#endif