1
2
3#include <adf_accel_devices.h>
4#include <adf_pf2vf_msg.h>
5#include <adf_common_drv.h>
6#include <adf_gen2_hw_data.h>
7#include "adf_dh895xcc_hw_data.h"
8#include "icp_qat_hw.h"
9
10
/*
 * Thread-to-arbiter mapping for the smaller SKUs: only the first eight
 * entries carry a mapping, the trailing four are zero (presumably those
 * engines are fused off on this SKU - TODO confirm against the EAS).
 * Selected by adf_get_arbiter_mapping() for DEV_SKU_1.
 */
static const u32 thrd_to_arb_map_sku4[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
16
/*
 * Thread-to-arbiter mapping for the larger SKUs: all twelve entries are
 * populated.  Selected by adf_get_arbiter_mapping() for DEV_SKU_2 and
 * DEV_SKU_4.
 */
static const u32 thrd_to_arb_map_sku6[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};
22
/*
 * Device class descriptor shared by all DH895xCC devices.  The instance
 * counter is incremented in adf_init_hw_data_dh895xcc() and decremented
 * in adf_clean_hw_data_dh895xcc().
 */
static struct adf_hw_device_class dh895xcc_class = {
	.name = ADF_DH895XCC_DEVICE_NAME,
	.type = DEV_DH895XCC,
	.instances = 0
};
28
29static u32 get_accel_mask(struct adf_hw_device_data *self)
30{
31 u32 fuses = self->fuses;
32
33 return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
34 ADF_DH895XCC_ACCELERATORS_MASK;
35}
36
37static u32 get_ae_mask(struct adf_hw_device_data *self)
38{
39 u32 fuses = self->fuses;
40
41 return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
42}
43
44static u32 get_num_accels(struct adf_hw_device_data *self)
45{
46 u32 i, ctr = 0;
47
48 if (!self || !self->accel_mask)
49 return 0;
50
51 for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
52 if (self->accel_mask & (1 << i))
53 ctr++;
54 }
55 return ctr;
56}
57
58static u32 get_num_aes(struct adf_hw_device_data *self)
59{
60 u32 i, ctr = 0;
61
62 if (!self || !self->ae_mask)
63 return 0;
64
65 for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
66 if (self->ae_mask & (1 << i))
67 ctr++;
68 }
69 return ctr;
70}
71
/* PCI BAR index of the PMISC (miscellaneous CSR) region. */
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_PMISC_BAR;
}
76
/* PCI BAR index of the ETR (transport ring) region. */
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_ETR_BAR;
}
81
/* PCI BAR index of the SRAM region. */
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_SRAM_BAR;
}
86
87static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
88{
89 struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
90 u32 capabilities;
91 u32 legfuses;
92
93 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
94 ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
95 ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
96
97
98 pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
99
100 if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
101 capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
102 if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
103 capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
104 if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
105 capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
106
107 return capabilities;
108}
109
110static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
111{
112 int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
113 >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
114
115 switch (sku) {
116 case ADF_DH895XCC_FUSECTL_SKU_1:
117 return DEV_SKU_1;
118 case ADF_DH895XCC_FUSECTL_SKU_2:
119 return DEV_SKU_2;
120 case ADF_DH895XCC_FUSECTL_SKU_3:
121 return DEV_SKU_3;
122 case ADF_DH895XCC_FUSECTL_SKU_4:
123 return DEV_SKU_4;
124 default:
125 return DEV_SKU_UNKNOWN;
126 }
127 return DEV_SKU_UNKNOWN;
128}
129
130static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
131 u32 const **arb_map_config)
132{
133 switch (accel_dev->accel_pci_dev.sku) {
134 case DEV_SKU_1:
135 *arb_map_config = thrd_to_arb_map_sku4;
136 break;
137
138 case DEV_SKU_2:
139 case DEV_SKU_4:
140 *arb_map_config = thrd_to_arb_map_sku6;
141 break;
142 default:
143 dev_err(&GET_DEV(accel_dev),
144 "The configuration doesn't match any SKU");
145 *arb_map_config = NULL;
146 }
147}
148
/* CSR offset of the PF-to-VF doorbell register for VF number @i. */
static u32 get_pf2vf_offset(u32 i)
{
	return ADF_DH895XCC_PF2VF_OFFSET(i);
}
153
/* CSR offset of the VF interrupt mask register for VF number @i. */
static u32 get_vintmsk_offset(u32 i)
{
	return ADF_DH895XCC_VINTMSK_OFFSET(i);
}
158
159static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
160{
161 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
162 struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
163 unsigned long accel_mask = hw_device->accel_mask;
164 unsigned long ae_mask = hw_device->ae_mask;
165 void __iomem *csr = misc_bar->virt_addr;
166 unsigned int val, i;
167
168
169 for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
170 val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
171 val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
172 ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
173 val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
174 val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
175 ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
176 }
177
178
179 for_each_set_bit(i, &accel_mask, ADF_DH895XCC_MAX_ACCELERATORS) {
180 val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
181 val |= ADF_DH895XCC_ERRSSMSH_EN;
182 ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
183 val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
184 val |= ADF_DH895XCC_ERRSSMSH_EN;
185 ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
186 }
187}
188
189static void adf_enable_ints(struct adf_accel_dev *accel_dev)
190{
191 void __iomem *addr;
192
193 addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
194
195
196 ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
197 accel_dev->pf.vf_info ? 0 :
198 BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1);
199 ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
200 ADF_DH895XCC_SMIA1_MASK);
201}
202
/*
 * PF-side hook to enable VF-to-PF communications.  No device-specific
 * setup is needed here; returning 0 lets the common init path proceed.
 */
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return 0;
}
207
/*
 * Enable/disable SR-IOV thread configuration via the common gen2 helper,
 * passing this device's AE-to-function map register counts for groups
 * A and B.
 */
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
	adf_gen2_cfg_iov_thds(accel_dev, enable,
			      ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
			      ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
}
214
/*
 * Populate @hw_data with the DH895xCC device constants and operation
 * callbacks.  Bumps the class instance counter; the matching decrement
 * is in adf_clean_hw_data_dh895xcc().
 */
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	/* Device identity and topology constants. */
	hw_data->dev_class = &dh895xcc_class;
	hw_data->instance_id = dh895xcc_class.instances++;
	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;

	/* IRQ and error-correction hooks. */
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;

	/* Device-query callbacks (masks, counts, BARs, SKU, caps). */
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->get_vintmsk_offset = get_vintmsk_offset;
	hw_data->get_admin_info = adf_gen2_get_admin_info;
	hw_data->get_arb_info = adf_gen2_get_arb_info;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_sku = get_sku;

	/* Firmware image names. */
	hw_data->fw_name = ADF_DH895XCC_FW;
	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;

	/* Admin, SR-IOV, arbiter and interrupt management hooks. */
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->configure_iov_threads = configure_iov_threads;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
	hw_data->reset_device = adf_reset_sbr;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;

	/* Generation-2 CSR accessors. */
	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
258
/*
 * Release one reference on the DH895xCC device class; pairs with the
 * increment done in adf_init_hw_data_dh895xcc().
 */
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}
263