// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
#include "otx_cptpf.h"

#define CSR_DELAY 30

#define TAR_MAGIC "ustar"
#define TAR_MAGIC_LEN 6
#define TAR_BLOCK_LEN 512
#define REGTYPE '0'
#define AREGTYPE '\0'

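/*
 * POSIX ustar archive header, one per 512-byte block. All numeric
 * fields (mode, uid, gid, size, mtime, chksum) are NUL/space-terminated
 * octal ASCII strings; "size" is parsed below with kstrtouint(..., 8, ...).
 */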
struct tar_hdr_t {
	char name[100];
	char mode[8];
	char uid[8];
	char gid[8];
	char size[12];
	char mtime[12];
	char chksum[8];
	char typeflag;
	char linkname[100];
	char magic[6];
	char version[2];
	char uname[32];
	char gname[32];
	char devmajor[8];
	char devminor[8];
	char prefix[155];
};

struct tar_blk_t {
	union {
		struct tar_hdr_t hdr;
		char block[TAR_BLOCK_LEN];
	};
};

struct tar_arch_info_t {
	struct list_head ucodes;
	const struct firmware *fw;
};

static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
					    struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
				 int eng_type)
{
	return is_eng_type(eng_grps->eng_types_supported, eng_type);
}

static void set_ucode_filename(struct otx_cpt_ucode *ucode,
			       const char *filename)
{
	strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX_CPT_AE_TYPES):
		str = "AE";
		break;
	}
	return str;
}

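/*
 * Derive the engine type mask for a microcode image from its header:
 * the lower-cased version string must contain "se-" or "ae" and the
 * ver_num.nn field must match a known microcode type. Exactly one of
 * SE/AE may be set, otherwise the image is rejected.
 */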
static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
{
	char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
	u32 i, val = 0;
	u8 nn;

	strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
	     nn == OTX_CPT_SE_UC_TYPE3))
		val |= 1 << OTX_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX_CPT_AE_UC_TYPE)
		val |= 1 << OTX_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;
	if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
	    is_eng_type(val, OTX_CPT_SE_TYPES))
		return -EINVAL;
	return 0;
}

static int is_mem_zero(const char *ptr, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		if (ptr[i])
			return 0;
	}
	return 1;
}

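/*
 * Program the microcode base address for the engines this group owns.
 * Mirrored groups reuse the DMA address of the source group's image.
 */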
static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	dma_addr_t dma_addr;
	struct otx_cpt_bitmap bmap;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (eng_grp->mirror.is_ena)
		dma_addr =
		       eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
	else
		dma_addr = eng_grp->ucode[0].align_dma;

	/*
	 * Set UCODE_BASE only for the cores which are not used,
	 * other cores should already have a valid UCODE_BASE set.
	 */
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			writeq((u64) dma_addr, cpt->reg_base +
				OTX_CPT_PF_ENGX_UCODE_BASE(i));
	return 0;
}

static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap = { {0} };
	int timeout = 10;
	int i, busy;
	u64 reg;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Detach the cores from this group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (reg & (1ull << i)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << i);
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		for_each_set_bit(i, bmap.bits, bmap.size)
			if (reg & (1ull << i)) {
				busy = 1;
				break;
			}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			reg &= ~(1ull << i);
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}

static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap;
	u64 reg;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Attach the cores to the group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!(reg & (1ull << i))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << i;
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Enable the cores */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		reg |= 1ull << i;
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}

static int process_tar_file(struct device *dev,
			    struct tar_arch_info_t *tar_arch, char *filename,
			    const u8 *data, u32 size)
{
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_ucode_hdr *ucode_hdr;
	int ucode_type, ucode_size;

	/*
	 * If size is less than microcode header size then don't report
	 * this as a microcode file in the tar archive
	 */
	if (size < sizeof(struct otx_cpt_ucode_hdr))
		return 0;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
	/*
	 * If microcode version can't be found don't report
	 * this as a microcode file in the tar archive
	 */
	if (get_ucode_type(ucode_hdr, &ucode_type))
		return 0;

	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size || (size < round_up(ucode_size, 16) +
	    sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		return -EINVAL;
	}

	tar_info = kzalloc(sizeof(struct tar_ucode_info_t), GFP_KERNEL);
	if (!tar_info)
		return -ENOMEM;

	tar_info->ucode_ptr = data;
	set_ucode_filename(&tar_info->ucode, filename);
	memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX_CPT_UCODE_VER_STR_SZ);
	tar_info->ucode.ver_num = ucode_hdr->ver_num;
	tar_info->ucode.type = ucode_type;
	tar_info->ucode.size = ucode_size;
	list_add_tail(&tar_info->list, &tar_arch->ucodes);

	return 0;
}

static void release_tar_archive(struct tar_arch_info_t *tar_arch)
{
	struct tar_ucode_info_t *curr, *temp;

	if (!tar_arch)
		return;

	list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
		list_del(&curr->list);
		kfree(curr);
	}

	if (tar_arch->fw)
		release_firmware(tar_arch->fw);
	kfree(tar_arch);
}

static struct tar_ucode_info_t *get_uc_from_tar_archive(
					struct tar_arch_info_t *tar_arch,
					int ucode_type)
{
	struct tar_ucode_info_t *curr, *uc_found = NULL;

	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		if (!uc_found) {
			uc_found = curr;
			continue;
		}

		/*
		 * For SE microcode prefer TYPE1 over TYPE3 and TYPE3
		 * over TYPE2; for AE keep the first match.
		 */
		switch (ucode_type) {
		case OTX_CPT_AE_TYPES:
			break;

		case OTX_CPT_SE_TYPES:
			if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
			    (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
			     && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
				uc_found = curr;
			break;
		}
	}

	return uc_found;
}

static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
			       char *tar_filename)
{
	struct tar_ucode_info_t *curr;

	pr_debug("Tar archive filename %s\n", tar_filename);
	pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
		 tar_arch->fw->size);
	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
	}
}

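/*
 * Walk a tar archive fetched via request_firmware(): each file starts
 * with a 512-byte ustar header whose "size" field is octal ASCII, file
 * data is padded to a 512-byte boundary, and the archive ends with two
 * all-zero blocks. Every regular file that looks like microcode is
 * collected on tar_arch->ucodes.
 */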
static struct tar_arch_info_t *load_tar_archive(struct device *dev,
						char *tar_filename)
{
	struct tar_arch_info_t *tar_arch = NULL;
	struct tar_blk_t *tar_blk;
	unsigned int cur_size;
	size_t tar_offs = 0;
	size_t tar_size;
	int ret;

	tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
	if (!tar_arch)
		return NULL;

	INIT_LIST_HEAD(&tar_arch->ucodes);

	/* Load tar archive */
	ret = request_firmware(&tar_arch->fw, tar_filename, dev);
	if (ret)
		goto release_tar_arch;

	if (tar_arch->fw->size < TAR_BLOCK_LEN) {
		dev_err(dev, "Invalid tar archive %s\n", tar_filename);
		goto release_tar_arch;
	}

	tar_size = tar_arch->fw->size;
	tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
	if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
		dev_err(dev, "Unsupported format of tar archive %s\n",
			tar_filename);
		goto release_tar_arch;
	}

	while (1) {
		/* Read current file size (octal ASCII) */
		ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
		if (ret)
			goto release_tar_arch;

		if (tar_offs + cur_size > tar_size ||
		    tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		tar_offs += TAR_BLOCK_LEN;
		if (tar_blk->hdr.typeflag == REGTYPE ||
		    tar_blk->hdr.typeflag == AREGTYPE) {
			ret = process_tar_file(dev, tar_arch,
					       tar_blk->hdr.name,
					       &tar_arch->fw->data[tar_offs],
					       cur_size);
			if (ret)
				goto release_tar_arch;
		}

		tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
		if (cur_size % TAR_BLOCK_LEN)
			tar_offs += TAR_BLOCK_LEN;

		/* Check for the end of the archive */
		if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		if (is_mem_zero(&tar_arch->fw->data[tar_offs],
		    2*TAR_BLOCK_LEN))
			break;

		/* Read next block from tar archive */
		tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
	}

	print_tar_dbg_info(tar_arch, tar_filename);
	return tar_arch;
release_tar_arch:
	release_tar_archive(tar_arch);
	return NULL;
}

static struct otx_cpt_engs_rsvd *find_engines_by_type(
					struct otx_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
{
	return is_eng_type(ucode->type, eng_type);
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);

int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
				 int eng_type)
{
	struct otx_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}
EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);

static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
			     char *buf, int size)
{
	if (eng_grp->mirror.is_ena) {
		scnprintf(buf, size, "%s (shared with engine_group%d)",
			  eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
			  eng_grp->mirror.idx);
	} else {
		scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
	}
}

static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
			    char *buf, int size, int idx)
{
	struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
	struct otx_cpt_engs_rsvd *engs;
	int len, i;

	buf[0] = '\0';
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (idx != -1 && idx != i)
			continue;

		if (eng_grp->mirror.is_ena)
			mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		if (i > 0 && idx == -1) {
			len = strlen(buf);
			scnprintf(buf+len, size-len, ", ");
		}

		len = strlen(buf);
		scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
			  engs->count + mirrored_engs->count : engs->count,
			  get_eng_type_str(engs->type));
		if (mirrored_engs) {
			len = strlen(buf);
			scnprintf(buf+len, size-len,
				  "(%d shared with engine_group%d) ",
				  engs->count <= 0 ? engs->count +
				  mirrored_engs->count : mirrored_engs->count,
				  eng_grp->mirror.idx);
		}
	}
}

static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
	pr_debug("Ucode info\n");
	pr_debug("Ucode version string %s\n", ucode->ver_str);
	pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
		 ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
	pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
	pr_debug("Ucode size %d\n", ucode->size);
	pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
	pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}

static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
				   struct device *dev, char *buf, int size)
{
	struct otx_cpt_bitmap bmap;
	u32 mask[2];

	bmap = get_cores_bmap(dev, eng_grp);
	if (!bmap.size) {
		scnprintf(buf, size, "unknown");
		return;
	}
	bitmap_to_arr32(mask, bmap.bits, bmap.size);
	scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
}

static void print_dbg_info(struct device *dev,
			   struct otx_cpt_eng_grps *eng_grps)
{
	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *mirrored_grp;
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *grp;
	struct otx_cpt_engs_rsvd *engs;
	u32 mask[4];
	int i, j;

	pr_debug("Engine groups global info\n");
	pr_debug("max SE %d, max AE %d\n",
		 eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
	pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
	pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
			 "enabled" : "disabled");
		if (grp->is_enabled) {
			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
			pr_debug("Ucode0 filename %s, version %s\n",
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].filename :
				 grp->ucode[0].filename,
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].ver_str :
				 grp->ucode[0].ver_str);
		}

		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			engs = &grp->engs[j];
			if (engs->type) {
				print_engs_info(grp, engs_info,
						2*OTX_CPT_UCODE_NAME_LENGTH, j);
				pr_debug("Slot%d: %s\n", j, engs_info);
				bitmap_to_arr32(mask, engs->bmap,
						eng_grps->engs_num);
				pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
					 mask[3], mask[2], mask[1], mask[0]);
			} else
				pr_debug("Slot%d not used\n", j);
		}
		if (grp->is_enabled) {
			cpt_print_engines_mask(grp, dev, engs_mask,
					       OTX_CPT_UCODE_NAME_LENGTH);
			pr_debug("Cmask: %s\n", engs_mask);
		}
	}
}

static int update_engines_avail_count(struct device *dev,
				      struct otx_cpt_engs_available *avail,
				      struct otx_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}

	return 0;
}

static int update_engines_offset(struct device *dev,
				 struct otx_cpt_engs_available *avail,
				 struct otx_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}

	return 0;
}

static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}

	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx_cpt_eng_grp_info *grp,
			      struct otx_cpt_engines *req_engs)
{
	struct otx_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx_cpt_eng_grp_info *grp,
				      struct otx_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error available %s engines %d less than requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}

	return 0;
}

static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
			   struct otx_cpt_engines *req_engs, int req_cnt)
{
	int i, ret;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < req_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < req_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t eng_grp_info_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *eng_grp;
	int ret;

	eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
	mutex_lock(&eng_grp->g->lock);

	print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
	print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
	cpt_print_engines_mask(eng_grp, dev, engs_mask,
			       OTX_CPT_UCODE_NAME_LENGTH);
	ret = scnprintf(buf, PAGE_SIZE,
			"Microcode : %s\nEngines: %s\nEngines mask: %s\n",
			ucode_info, engs_info, engs_mask);

	mutex_unlock(&eng_grp->g->lock);
	return ret;
}

static int create_sysfs_eng_grps_info(struct device *dev,
				      struct otx_cpt_eng_grp_info *eng_grp)
{
	int ret;

	eng_grp->info_attr.show = eng_grp_info_show;
	eng_grp->info_attr.store = NULL;
	eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
	eng_grp->info_attr.attr.mode = 0440;
	sysfs_attr_init(&eng_grp->info_attr.attr);
	ret = device_create_file(dev, &eng_grp->info_attr);
	if (ret)
		return ret;

	return 0;
}

static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
				  ucode->va, ucode->dma);
		ucode->va = NULL;
		ucode->align_va = NULL;
		ucode->dma = 0;
		ucode->align_dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

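/*
 * Copy the microcode body into coherent DMA memory, over-allocating by
 * OTX_CPT_UCODE_ALIGNMENT so the aligned address can be programmed as
 * UCODE_BASE, then byte-swap the image as a sequence of 64-bit words
 * followed by a 16-bit swap.
 */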
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, ucode->size +
				       OTX_CPT_UCODE_ALIGNMENT,
				       &ucode->dma, GFP_KERNEL);
	if (!ucode->va) {
		dev_err(dev, "Unable to allocate space for microcode\n");
		return -ENOMEM;
	}
	ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
	ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);

	memcpy((void *) ucode->align_va, (void *) ucode_data +
	       sizeof(struct otx_cpt_ucode_hdr), ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		((u64 *)ucode->align_va)[i] =
				cpu_to_be64(((u64 *)ucode->align_va)[i]);

	/* Ucode needs 16-bit swap as well */
	for (i = 0; i < (ucode->size / 2); i++)
		((u16 *)ucode->align_va)[i] =
				cpu_to_be16(((u16 *)ucode->align_va)[i]);
	return 0;
}

static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
		      const char *ucode_filename)
{
	struct otx_cpt_ucode_hdr *ucode_hdr;
	const struct firmware *fw;
	int ret;

	set_ucode_filename(ucode, ucode_filename);
	ret = request_firmware(&fw, ucode->filename, dev);
	if (ret)
		return ret;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
	memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	ucode->ver_num = ucode_hdr->ver_num;
	ucode->size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode->size || (fw->size < round_up(ucode->size, 16)
	    + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
		ret = -EINVAL;
		goto release_fw;
	}

	ret = get_ucode_type(ucode_hdr, &ucode->type);
	if (ret) {
		dev_err(dev, "Microcode %s unknown type 0x%x\n",
			ucode->filename, ucode->type);
		goto release_fw;
	}

	ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
	if (ret)
		goto release_fw;

	print_ucode_dbg_info(ucode);
release_fw:
	release_firmware(fw);
	return ret;
}

static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	ret = cpt_attach_and_enable_cores(eng_grp, obj);
	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Clear UCODE_BASE register for each engine used by this group */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
				    struct otx_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for the engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for the mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
{
	struct otx_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

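/*
 * Adjust a mirroring group's engine request by the number of engines it
 * can share with the source group; a negative count afterwards means the
 * group needs fewer engines than the mirrored group already owns.
 */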
static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
				  struct otx_cpt_engines *engs, int engs_cnt)
{
	struct otx_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirrored_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached
		 * there are 3 scenarios:
		 * 1) mirrored_engs.count == engs[i].count, then all engines
		 *    from the mirrored engine group will be shared with this
		 *    engine group
		 * 2) mirrored_engs.count > engs[i].count, then only a subset
		 *    of engines from the mirrored engine group will be shared
		 *    with this engine group
		 * 3) mirrored_engs.count < engs[i].count, then all engines
		 *    from the mirrored engine group will be shared with this
		 *    group and additional engines will be reserved for
		 *    exclusive use by this engine group
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx_cpt_eng_grp_info *grp)
{
	struct otx_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

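/*
 * Build the engine bitmap for each engine type in the group: claim up
 * to "count" unused engines from the type's slice of the engine space,
 * then, for mirrored groups, fold in the source group's bitmap (minus
 * any engines a smaller request does not need).
 */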
static int eng_grp_update_masks(struct device *dev,
				struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx_cpt_eng_grp_info *eng_grp)
{
	int i, ret;

	if (!eng_grp->is_enabled)
		return -EINVAL;

	if (eng_grp->mirror.ref_count) {
		dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
			eng_grp->idx);
		for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
			if (eng_grp->g->grp[i].mirror.is_ena &&
			    eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
				pr_cont(" %d", i);
		}
		pr_cont("\n");
		return -EINVAL;
	}

	/* Remove engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	device_remove_file(dev, &eng_grp->info_attr);
	eng_grp->is_enabled = false;

	return 0;
}

static int validate_1_ucode_scenario(struct device *dev,
				     struct otx_cpt_eng_grp_info *eng_grp,
				     struct otx_cpt_engines *engs, int engs_cnt)
{
	int i;

	/* Verify that the loaded ucode supports the requested engine types */
	for (i = 0; i < engs_cnt; i++) {
		if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
						  engs[i].type)) {
			dev_err(dev,
				"Microcode %s does not support %s engines\n",
				eng_grp->ucode[0].filename,
				get_eng_type_str(engs[i].type));
			return -EINVAL;
		}
	}
	return 0;
}

static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;
}

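/*
 * Create and enable a new engine group: load microcode (from a tar
 * archive or from individual firmware files), mirror an existing group
 * with identical microcode if possible, reserve engines, program the
 * engine masks and ucode base, expose the sysfs info file, and finally
 * attach and enable the cores.
 */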
static int create_engine_group(struct device *dev,
			       struct otx_cpt_eng_grps *eng_grps,
			       struct otx_cpt_engines *engs, int engs_cnt,
			       void *ucode_data[], int ucodes_cnt,
			       bool use_uc_from_tar_arch)
{
	struct otx_cpt_eng_grp_info *mirrored_eng_grp;
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_eng_grp_info *eng_grp;
	int i, ret = 0;

	if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
		return -EINVAL;

	/* Validate if requested engine types are supported by this device */
	for (i = 0; i < engs_cnt; i++)
		if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
			dev_err(dev, "Device does not support %s engines\n",
				get_eng_type_str(engs[i].type));
			return -EPERM;
		}

	/* Find an engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}

	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		if (use_uc_from_tar_arch) {
			tar_info = (struct tar_ucode_info_t *) ucode_data[i];
			eng_grp->ucode[i] = tar_info->ucode;
			ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
						    tar_info->ucode_ptr);
		} else
			ret = ucode_load(dev, &eng_grp->ucode[i],
					 (char *) ucode_data[i]);
		if (ret)
			goto err_ucode_unload;
	}

	/* Validate scenario where 1 ucode is used */
	ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with the mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
	}

	/* Reserve engines */
	ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Create sysfs entry for engine group info */
	ret = create_sysfs_eng_grps_info(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto err_release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload the ucode as we will use the
	 * ucode from the mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;
	if (eng_grp->mirror.is_ena)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);

	return 0;

err_release_engs:
	release_engines(dev, eng_grp);
err_ucode_unload:
	ucode_unload(dev, &eng_grp->ucode[0]);
	return ret;
}

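/*
 * Parser for writes to the "ucode_load" sysfs file. The write is a
 * ';'-separated list: "se:<count>" and/or "ae:<count>" select engine
 * counts, any other token is taken as a microcode filename, and
 * "engine_group<idx>:null" on its own deletes an existing group,
 * e.g. "se:48;<ucode filename>" or "engine_group1:null".
 */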
static ssize_t ucode_load_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
	char *start, *val, *err_msg, *tmp;
	struct otx_cpt_eng_grps *eng_grps;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ie, has_ae;
	int del_grp_idx = -1;
	int ucode_idx = 0;

	if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
		return -EINVAL;

	eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
	err_msg = "Invalid engine group format";
	strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
	start = tmp_buf;

	has_se = has_ie = has_ae = false;

	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "engine_group", 12)) {
			if (del_grp_idx != -1)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 13)
				goto err_print;
			if (kstrtoint((tmp + 12), 10, &del_grp_idx))
				goto err_print;
			val = strim(val);
			if (strncasecmp(val, "null", 4))
				goto err_print;
			if (strlen(val) != 4)
				goto err_print;
		} else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_AE_TYPES;
			has_ae = true;
		} else {
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (del_grp_idx == -1) {
		if (!(grp_idx && ucode_idx))
			goto err_print;

		if (ucode_idx > 1 && grp_idx < 2)
			goto err_print;

		if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
			err_msg = "Error max 2 engine types can be attached";
			goto err_print;
		}

	} else {
		if (del_grp_idx < 0 ||
		    del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
			dev_err(dev, "Invalid engine group index %d\n",
				del_grp_idx);
			ret = -EINVAL;
			return ret;
		}

		if (!eng_grps->grp[del_grp_idx].is_enabled) {
			dev_err(dev, "Error engine_group%d is not configured\n",
				del_grp_idx);
			ret = -EINVAL;
			return ret;
		}

		if (grp_idx || ucode_idx)
			goto err_print;
	}

	mutex_lock(&eng_grps->lock);

	if (eng_grps->is_rdonly) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}

	if (del_grp_idx == -1)
		/* Create engine group */
		ret = create_engine_group(dev, eng_grps, engs, grp_idx,
					  (void **) ucode_filename,
					  ucode_idx, false);
	else
		/* Delete engine group */
		ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
	if (ret)
		goto err_unlock;

	print_dbg_info(dev, eng_grps);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret ? ret : count;
err_print:
	dev_err(dev, "%s\n", err_msg);

	return ret;
}

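/*
 * On first probe, create the default engine groups from the microcode
 * tar archive: one group with all SE engines and one with all AE
 * engines, provided the PF supports that engine type and a matching
 * image is found in the archive.
 */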
int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
					struct otx_cpt_eng_grps *eng_grps,
					int pf_type)
{
	struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = { 0 };
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct tar_arch_info_t *tar_arch = NULL;
	char *tar_filename;
	int i, ret = 0;

	mutex_lock(&eng_grps->lock);

	/*
	 * We don't create engine groups if a previous attempt was
	 * already made (when the user enabled VFs for the first time)
	 */
	if (eng_grps->is_first_try)
		goto unlock_mutex;
	eng_grps->is_first_try = true;

	/* We create groups for kernel crypto only if none are configured */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].is_enabled)
			goto unlock_mutex;

	switch (pf_type) {
	case OTX_CPT_AE:
	case OTX_CPT_SE:
		tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	tar_arch = load_tar_archive(&pdev->dev, tar_filename);
	if (!tar_arch)
		goto unlock_mutex;

	/*
	 * If the device supports SE engines and there is SE microcode in
	 * the tar archive try to create an engine group with SE engines
	 * for kernel crypto functionality (symmetric crypto)
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {

		engs[0].type = OTX_CPT_SE_TYPES;
		engs[0].count = eng_grps->avail.max_se_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}

	/*
	 * If the device supports AE engines and there is AE microcode in
	 * the tar archive try to create an engine group with AE engines
	 * for asymmetric crypto functionality
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {

		engs[0].type = OTX_CPT_AE_TYPES;
		engs[0].count = eng_grps->avail.max_ae_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}

	print_dbg_info(&pdev->dev, eng_grps);
release_tar_arch:
	release_tar_archive(tar_arch);
unlock_mutex:
	mutex_unlock(&eng_grps->lock);
	return ret;
}

void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
				    bool is_rdonly)
{
	mutex_lock(&eng_grps->lock);

	eng_grps->is_rdonly = is_rdonly;

	mutex_unlock(&eng_grps->lock);
}

void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
{
	int grp, timeout = 100;
	u64 reg;

	/* Disengage the cores from groups */
	for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
		writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
		udelay(CSR_DELAY);
	}

	/* Wait for cores to become idle, give up after ~100 polls */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
	while (reg) {
		udelay(CSR_DELAY);
		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		if (!timeout--) {
			dev_warn(&cpt->pdev->dev, "Cores still busy\n");
			break;
		}
	}

	/* Disable the cores */
	writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
}

void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			      struct otx_cpt_eng_grps *eng_grps)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j;

	mutex_lock(&eng_grps->lock);
	if (eng_grps->is_ucode_load_created) {
		device_remove_file(&pdev->dev,
				   &eng_grps->ucode_load_attr);
		eng_grps->is_ucode_load_created = false;
	}

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Release memory */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}

	mutex_unlock(&eng_grps->lock);
}

int otx_cpt_init_eng_grps(struct pci_dev *pdev,
			  struct otx_cpt_eng_grps *eng_grps, int pf_type)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j, ret = 0;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d exceeds max supported %d\n",
			eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
			 "engine_group%d", i);
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto err;
			}
		}
	}

	switch (pf_type) {
	case OTX_CPT_SE:
		/* OcteonTX SE CPT PF has only SE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
		break;

	case OTX_CPT_AE:
		/* OcteonTX AE CPT PF has only AE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto err;
	}

	eng_grps->ucode_load_attr.show = NULL;
	eng_grps->ucode_load_attr.store = ucode_load_store;
	eng_grps->ucode_load_attr.attr.name = "ucode_load";
	eng_grps->ucode_load_attr.attr.mode = 0220;
	sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
	ret = device_create_file(&pdev->dev,
				 &eng_grps->ucode_load_attr);
	if (ret)
		goto err;
	eng_grps->is_ucode_load_created = true;

	print_dbg_info(&pdev->dev, eng_grps);
	return ret;
err:
	otx_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}