#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pf2vf_msg.h"

static struct workqueue_struct *pf2vf_resp_wq;

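/*
 * ME Thread to PCIe Function mapping tables in the PMISC BAR.
 * Each 32-bit entry carries a valid bit (bit 7) that gates whether
 * the mapping is applied.
 */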
#define ME2FUNCTION_MAP_A_OFFSET   (0x3A400 + 0x190)
#define ME2FUNCTION_MAP_A_NUM_REGS 96

#define ME2FUNCTION_MAP_B_OFFSET   (0x3A400 + 0x310)
#define ME2FUNCTION_MAP_B_NUM_REGS 12

#define ME2FUNCTION_MAP_REG_SIZE   4
#define ME2FUNCTION_MAP_VALID      BIT(7)

#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index) \
        ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
                   ME2FUNCTION_MAP_REG_SIZE * (index))

#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
        ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
                   ME2FUNCTION_MAP_REG_SIZE * (index), value)

#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index) \
        ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
                   ME2FUNCTION_MAP_REG_SIZE * (index))

#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
        ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
                   ME2FUNCTION_MAP_REG_SIZE * (index), value)
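
/* Work item carrying one deferred VF2PF request from tasklet to workqueue */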
struct adf_pf2vf_resp {
        struct work_struct pf2vf_resp_work;
        struct adf_accel_vf_info *vf_info;
};
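
/*
 * Workqueue callback: runs in process context, so request handling that
 * may sleep (e.g. taking the per-VF pf2vf_lock mutex) is safe here.
 */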
static void adf_iov_send_resp(struct work_struct *work)
{
        struct adf_pf2vf_resp *pf2vf_resp =
                container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);

        adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
        kfree(pf2vf_resp);
}
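
/*
 * Tasklet handler for a VF2PF interrupt.  Runs in softirq (atomic)
 * context, so it only queues a work item; the allocation below must use
 * GFP_ATOMIC for the same reason.
 */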
static void adf_vf2pf_bh_handler(unsigned long data)
{
        struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
        struct adf_pf2vf_resp *pf2vf_resp;

        pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
        if (!pf2vf_resp)
                return;

        pf2vf_resp->vf_info = vf_info;
        INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
        queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}

static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
        struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
        int totalvfs = pci_sriov_get_totalvfs(pdev);
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
                &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        struct adf_accel_vf_info *vf_info;
        int i;
        u32 reg;

        /* Init a tasklet, lock and rate limiter for every possible VF */
        for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
             i++, vf_info++) {
                vf_info->accel_dev = accel_dev;
                vf_info->vf_nr = i;

                tasklet_init(&vf_info->vf2pf_bh_tasklet,
                             adf_vf2pf_bh_handler,
                             (unsigned long)vf_info);
                mutex_init(&vf_info->pf2vf_lock);
                ratelimit_state_init(&vf_info->vf2pf_ratelimit,
                                     DEFAULT_RATELIMIT_INTERVAL,
                                     DEFAULT_RATELIMIT_BURST);
        }
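        /* Set Valid bits in ME Thread to PCIe Function Mapping Group A */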
        for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
                reg |= ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
        }
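        /* Set Valid bits in ME Thread to PCIe Function Mapping Group B */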
        for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
                reg |= ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
        }
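        /* Enable VF to PF interrupts for all VFs */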
        adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
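        /* Hand off to the PCI core to actually create the VFs */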
        return pci_enable_sriov(pdev, totalvfs);
}
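/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev: Pointer to accel device.
 *
 * Function disables SRIOV for the accel device.
 */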
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
                &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
        struct adf_accel_vf_info *vf;
        u32 reg;
        int i;

        if (!accel_dev->pf.vf_info)
                return;

        /* Tell the VFs that the PF is going away, then remove them */
        adf_pf2vf_notify_restarting(accel_dev);

        pci_disable_sriov(accel_to_pci_dev(accel_dev));

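        /* Disable VF to PF interrupts */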
        adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);

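        /* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */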
        for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
                reg &= ~ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
        }
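        /* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */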
        for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
                reg &= ~ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
        }

        /* Tear down the per-VF tasklets and locks */
        for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
                tasklet_disable(&vf->vf2pf_bh_tasklet);
                tasklet_kill(&vf->vf2pf_bh_tasklet);
                mutex_destroy(&vf->pf2vf_lock);
        }

        kfree(accel_dev->pf.vf_info);
        accel_dev->pf.vf_info = NULL;
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
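/**
 * adf_sriov_configure() - Enable SRIOV for the device
 * @pdev: Pointer to PCI device.
 * @numvfs: Number of virtual functions requested.
 *
 * Function enables SRIOV for the PCI device.
 *
 * Return: number of VFs enabled on success, error code otherwise.
 */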
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
        struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
        int totalvfs = pci_sriov_get_totalvfs(pdev);
        unsigned long val;
        int ret;

        if (!accel_dev) {
                dev_err(&pdev->dev, "Failed to find accel_dev\n");
                return -EFAULT;
        }

        if (!iommu_present(&pci_bus_type))
                dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");

        if (accel_dev->pf.vf_info) {
                dev_info(&pdev->dev, "Already enabled for this device\n");
                return -EINVAL;
        }

        if (adf_dev_started(accel_dev)) {
                if (adf_devmgr_in_reset(accel_dev) ||
                    adf_dev_in_use(accel_dev)) {
                        dev_err(&GET_DEV(accel_dev), "Device busy\n");
                        return -EBUSY;
                }

                adf_dev_stop(accel_dev);
                adf_dev_shutdown(accel_dev);
        }

        if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
                return -EFAULT;

        /* Configure the PF to expose zero kernel crypto instances */
        val = 0;
        if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                        ADF_NUM_CY, (void *)&val, ADF_DEC))
                return -EFAULT;

        set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
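        /* Allocate memory for VF info structs */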
        accel_dev->pf.vf_info = kcalloc(totalvfs,
                                        sizeof(struct adf_accel_vf_info),
                                        GFP_KERNEL);
        if (!accel_dev->pf.vf_info)
                return -ENOMEM;

        if (adf_dev_init(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
                        accel_dev->accel_id);
                return -EFAULT;
        }

        if (adf_dev_start(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
                        accel_dev->accel_id);
                return -EFAULT;
        }

        ret = adf_enable_sriov(accel_dev);
        if (ret)
                return ret;

        return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);

int __init adf_init_pf_wq(void)
{
        /* Workqueue for PF2VF responses */
        pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);

        return !pf2vf_resp_wq ? -ENOMEM : 0;
}

void adf_exit_pf_wq(void)
{
        if (pf2vf_resp_wq) {
                destroy_workqueue(pf2vf_resp_wq);
                pf2vf_resp_wq = NULL;
        }
}