#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;

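/* Drop a reference to a crypto instance and to its owning accel device. */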
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

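/*
 * Tear down every crypto instance on a device: release any outstanding
 * references, remove the transport rings and free the instances.
 */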
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst, *tmp;
	int i;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(&inst->list);
		kfree(inst);
	}
	return 0;
}

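/*
 * Select the least referenced started device on the requested NUMA node
 * (falling back to any started device with crypto instances), pick its
 * least referenced crypto instance and take a reference on both the
 * instance and the device. Returns NULL if no suitable instance is found.
 */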
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		unsigned long ctr;

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_info("QAT: Could not find a device on node %d\n", node);

		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		unsigned long ctr;

		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}

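/**
 * qat_crypto_dev_config() - create the device configuration used to create
 * crypto instances.
 * @accel_dev: Pointer to acceleration device.
 *
 * Function adds the ring bank, core affinity, ring size, ring number and
 * interrupt coalescing entries that are later used to create crypto
 * instances.
 *
 * Return: 0 on success, error code otherwise.
 */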
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_crypto(accel_dev))
		instances = min(cpus, banks);
	else
		instances = 0;

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	if (ret)
		goto err;

	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
	if (ret)
		goto err;

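	/*
	 * Add the per-instance ring bank, core affinity, ring size and ring
	 * number entries, plus the coalescing timer for each bank.
	 */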
	for (i = 0; i < instances; i++) {
		val = i;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 8;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 10;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
	return ret;
}
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);

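/*
 * Create the crypto instances described by the device configuration and
 * set up their transport rings.
 */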
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long sym_bank, asym_bank;
	struct qat_crypto_instance *inst;
	int msg_size;
	int ret;
	int i;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
	if (ret)
		return ret;

	ret = kstrtoul(val, 0, &num_inst);
	if (ret)
		return ret;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst) {
			ret = -ENOMEM;
			goto err;
		}

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &sym_bank);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &asym_bank);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &num_msg_sym);
		if (ret)
			goto err;

		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &num_msg_asym);
		if (ret)
			goto err;
		num_msg_asym = num_msg_asym >> 1;

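		/*
		 * Create the TX/RX ring pairs for symmetric and asymmetric
		 * requests; the response rings are serviced by the qat_alg
		 * callbacks.
		 */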
		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
				      msg_size, key, NULL, 0, &inst->sym_tx);
		if (ret)
			goto err;

		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
				      msg_size, key, NULL, 0, &inst->pke_tx);
		if (ret)
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
				      msg_size, key, qat_alg_callback, 0,
				      &inst->sym_rx);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
				      msg_size, key, qat_alg_asym_callback, 0,
				      &inst->pke_rx);
		if (ret)
			goto err;
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return ret;
}

static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

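/* Dispatch ADF device lifecycle events to the crypto service handlers. */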
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

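/* Register the crypto service with ADF so it receives device events. */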
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}