linux/drivers/crypto/qat/qat_common/qat_uclo.c
   1// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
   2/* Copyright(c) 2014 - 2020 Intel Corporation */
   3#include <linux/slab.h>
   4#include <linux/ctype.h>
   5#include <linux/kernel.h>
   6#include <linux/delay.h>
   7#include <linux/pci_ids.h>
   8#include "adf_accel_devices.h"
   9#include "adf_common_drv.h"
  10#include "icp_qat_uclo.h"
  11#include "icp_qat_hal.h"
  12#include "icp_qat_fw_loader_handle.h"
  13
  14#define UWORD_CPYBUF_SIZE 1024
  15#define INVLD_UWORD 0xffffffffffull
  16#define PID_MINOR_REV 0xf
  17#define PID_MAJOR_REV (0xf << 4)
  18
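     /*
      * Bind the encapsulated image 'image_num' to a new slice of AE 'ae' and
      * allocate the region/page bookkeeping for it. On success the slice
      * count of the AE is incremented; on allocation failure -ENOMEM is
      * returned and the partially allocated slice is cleaned up.
      */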
  19static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
  20                                 unsigned int ae, unsigned int image_num)
  21{
  22        struct icp_qat_uclo_aedata *ae_data;
  23        struct icp_qat_uclo_encapme *encap_image;
  24        struct icp_qat_uclo_page *page = NULL;
  25        struct icp_qat_uclo_aeslice *ae_slice = NULL;
  26
  27        ae_data = &obj_handle->ae_data[ae];
  28        encap_image = &obj_handle->ae_uimage[image_num];
  29        ae_slice = &ae_data->ae_slices[ae_data->slice_num];
  30        ae_slice->encap_image = encap_image;
  31
  32        if (encap_image->img_ptr) {
  33                ae_slice->ctx_mask_assigned =
  34                                        encap_image->img_ptr->ctx_assigned;
  35                ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
  36        } else {
  37                ae_slice->ctx_mask_assigned = 0;
  38        }
  39        ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
  40        if (!ae_slice->region)
  41                return -ENOMEM;
  42        ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
  43        if (!ae_slice->page)
  44                goto out_err;
  45        page = ae_slice->page;
  46        page->encap_page = encap_image->page;
  47        ae_slice->page->region = ae_slice->region;
  48        ae_data->slice_num++;
  49        return 0;
  50out_err:
  51        kfree(ae_slice->region);
  52        ae_slice->region = NULL;
  53        return -ENOMEM;
  54}
  55
  56static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
  57{
  58        unsigned int i;
  59
  60        if (!ae_data) {
   61                pr_err("QAT: bad argument, ae_data is NULL\n");
  62                return -EINVAL;
  63        }
  64
  65        for (i = 0; i < ae_data->slice_num; i++) {
  66                kfree(ae_data->ae_slices[i].region);
  67                ae_data->ae_slices[i].region = NULL;
  68                kfree(ae_data->ae_slices[i].page);
  69                ae_data->ae_slices[i].page = NULL;
  70        }
  71        return 0;
  72}
  73
  74static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
  75                                 unsigned int str_offset)
  76{
  77        if (!str_table->table_len || str_offset > str_table->table_len)
  78                return NULL;
  79        return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
  80}
  81
  82static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
  83{
  84        int maj = hdr->maj_ver & 0xff;
  85        int min = hdr->min_ver & 0xff;
  86
  87        if (hdr->file_id != ICP_QAT_UOF_FID) {
  88                pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
  89                return -EINVAL;
  90        }
  91        if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
  92                pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
  93                       maj, min);
  94                return -EINVAL;
  95        }
  96        return 0;
  97}
  98
  99static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
 100{
 101        int maj = suof_hdr->maj_ver & 0xff;
 102        int min = suof_hdr->min_ver & 0xff;
 103
 104        if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
 105                pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
 106                return -EINVAL;
 107        }
 108        if (suof_hdr->fw_type != 0) {
 109                pr_err("QAT: unsupported firmware type\n");
 110                return -EINVAL;
 111        }
 112        if (suof_hdr->num_chunks <= 0x1) {
  113                pr_err("QAT: SUOF chunk count is incorrect\n");
 114                return -EINVAL;
 115        }
 116        if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
 117                pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
 118                       maj, min);
 119                return -EINVAL;
 120        }
 121        return 0;
 122}
 123
 124static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
 125                                      unsigned int addr, unsigned int *val,
 126                                      unsigned int num_in_bytes)
 127{
 128        unsigned int outval;
 129        unsigned char *ptr = (unsigned char *)val;
 130
 131        while (num_in_bytes) {
 132                memcpy(&outval, ptr, 4);
 133                SRAM_WRITE(handle, addr, outval);
 134                num_in_bytes -= 4;
 135                ptr += 4;
 136                addr += 4;
 137        }
 138}
 139
 140static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
 141                                      unsigned char ae, unsigned int addr,
 142                                      unsigned int *val,
 143                                      unsigned int num_in_bytes)
 144{
 145        unsigned int outval;
 146        unsigned char *ptr = (unsigned char *)val;
 147
 148        addr >>= 0x2; /* convert to uword address */
 149
 150        while (num_in_bytes) {
 151                memcpy(&outval, ptr, 4);
 152                qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
 153                num_in_bytes -= 4;
 154                ptr += 4;
 155        }
 156}
 157
 158static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
 159                                   unsigned char ae,
 160                                   struct icp_qat_uof_batch_init
 161                                   *umem_init_header)
 162{
 163        struct icp_qat_uof_batch_init *umem_init;
 164
 165        if (!umem_init_header)
 166                return;
 167        umem_init = umem_init_header->next;
 168        while (umem_init) {
 169                unsigned int addr, *value, size;
 170
 171                ae = umem_init->ae;
 172                addr = umem_init->addr;
 173                value = umem_init->value;
 174                size = umem_init->size;
 175                qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
 176                umem_init = umem_init->next;
 177        }
 178}
 179
 180static void
 181qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
 182                                 struct icp_qat_uof_batch_init **base)
 183{
 184        struct icp_qat_uof_batch_init *umem_init;
 185
 186        umem_init = *base;
 187        while (umem_init) {
 188                struct icp_qat_uof_batch_init *pre;
 189
 190                pre = umem_init;
 191                umem_init = umem_init->next;
 192                kfree(pre);
 193        }
 194        *base = NULL;
 195}
 196
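     /*
      * Parse the leading decimal digits of 'str' (an AE symbol name taken
      * from the UOF string table) into a number. The first non-digit
      * character terminates the string before conversion.
      */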
 197static int qat_uclo_parse_num(char *str, unsigned int *num)
 198{
 199        char buf[16] = {0};
 200        unsigned long ae = 0;
 201        int i;
 202
 203        strncpy(buf, str, 15);
 204        for (i = 0; i < 16; i++) {
 205                if (!isdigit(buf[i])) {
 206                        buf[i] = '\0';
 207                        break;
 208                }
 209        }
 210        if ((kstrtoul(buf, 10, &ae)))
 211                return -EFAULT;
 212
 213        *num = (unsigned int)ae;
 214        return 0;
 215}
 216
 217static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
 218                                     struct icp_qat_uof_initmem *init_mem,
 219                                     unsigned int size_range, unsigned int *ae)
 220{
 221        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 222        char *str;
 223
 224        if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
  225                pr_err("QAT: initmem is out of range\n");
 226                return -EINVAL;
 227        }
 228        if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
 229                pr_err("QAT: Memory scope for init_mem error\n");
 230                return -EINVAL;
 231        }
 232        str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
 233        if (!str) {
 234                pr_err("QAT: AE name assigned in UOF init table is NULL\n");
 235                return -EINVAL;
 236        }
 237        if (qat_uclo_parse_num(str, ae)) {
 238                pr_err("QAT: Parse num for AE number failed\n");
 239                return -EINVAL;
 240        }
 241        if (*ae >= ICP_QAT_UCLO_MAX_AE) {
 242                pr_err("QAT: ae %d out of range\n", *ae);
 243                return -EINVAL;
 244        }
 245        return 0;
 246}
 247
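     /*
      * Append one batch-init node per memory value attribute of 'init_mem'
      * to the per-AE list at *init_tab_base. The list head is allocated on
      * first use ('flag' records that) and is only freed on error if it was
      * allocated here.
      */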
 248static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
 249                                           *handle, struct icp_qat_uof_initmem
 250                                           *init_mem, unsigned int ae,
 251                                           struct icp_qat_uof_batch_init
 252                                           **init_tab_base)
 253{
 254        struct icp_qat_uof_batch_init *init_header, *tail;
 255        struct icp_qat_uof_batch_init *mem_init, *tail_old;
 256        struct icp_qat_uof_memvar_attr *mem_val_attr;
 257        unsigned int i, flag = 0;
 258
 259        mem_val_attr =
 260                (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
 261                sizeof(struct icp_qat_uof_initmem));
 262
 263        init_header = *init_tab_base;
 264        if (!init_header) {
 265                init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
 266                if (!init_header)
 267                        return -ENOMEM;
 268                init_header->size = 1;
 269                *init_tab_base = init_header;
 270                flag = 1;
 271        }
 272        tail_old = init_header;
 273        while (tail_old->next)
 274                tail_old = tail_old->next;
 275        tail = tail_old;
 276        for (i = 0; i < init_mem->val_attr_num; i++) {
 277                mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
 278                if (!mem_init)
 279                        goto out_err;
 280                mem_init->ae = ae;
 281                mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
 282                mem_init->value = &mem_val_attr->value;
 283                mem_init->size = 4;
 284                mem_init->next = NULL;
 285                tail->next = mem_init;
 286                tail = mem_init;
 287                init_header->size += qat_hal_get_ins_num();
 288                mem_val_attr++;
 289        }
 290        return 0;
 291out_err:
 292        /* Do not free the list head unless we allocated it. */
 293        tail_old = tail_old->next;
 294        if (flag) {
 295                kfree(*init_tab_base);
 296                *init_tab_base = NULL;
 297        }
 298
 299        while (tail_old) {
 300                mem_init = tail_old->next;
 301                kfree(tail_old);
 302                tail_old = mem_init;
 303        }
 304        return -ENOMEM;
 305}
 306
 307static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
 308                                  struct icp_qat_uof_initmem *init_mem)
 309{
 310        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 311        unsigned int ae;
 312
 313        if (qat_uclo_fetch_initmem_ae(handle, init_mem,
 314                                      handle->chip_info->lm_size, &ae))
 315                return -EINVAL;
 316        if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
 317                                            &obj_handle->lm_init_tab[ae]))
 318                return -EINVAL;
 319        return 0;
 320}
 321
 322static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
 323                                  struct icp_qat_uof_initmem *init_mem)
 324{
 325        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 326        unsigned int ae, ustore_size, uaddr, i;
 327        struct icp_qat_uclo_aedata *aed;
 328
 329        ustore_size = obj_handle->ustore_phy_size;
 330        if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
 331                return -EINVAL;
 332        if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
 333                                            &obj_handle->umem_init_tab[ae]))
 334                return -EINVAL;
 335        /* set the highest ustore address referenced */
 336        uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
 337        aed = &obj_handle->ae_data[ae];
 338        for (i = 0; i < aed->slice_num; i++) {
 339                if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
 340                        aed->ae_slices[i].encap_image->uwords_num = uaddr;
 341        }
 342        return 0;
 343}
 344
 345static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
 346                                   struct icp_qat_uof_initmem *init_mem)
 347{
 348        switch (init_mem->region) {
 349        case ICP_QAT_UOF_LMEM_REGION:
 350                if (qat_uclo_init_lmem_seg(handle, init_mem))
 351                        return -EINVAL;
 352                break;
 353        case ICP_QAT_UOF_UMEM_REGION:
 354                if (qat_uclo_init_umem_seg(handle, init_mem))
 355                        return -EINVAL;
 356                break;
 357        default:
 358                pr_err("QAT: initmem region error. region type=0x%x\n",
 359                       init_mem->region);
 360                return -EINVAL;
 361        }
 362        return 0;
 363}
 364
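     /*
      * Fill the parts of the AE control store that the image does not cover
      * (before the code page and after its last microword) with the 64-bit
      * fill pattern taken from the UOF image header.
      */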
 365static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
 366                                struct icp_qat_uclo_encapme *image)
 367{
 368        unsigned int i;
 369        struct icp_qat_uclo_encap_page *page;
 370        struct icp_qat_uof_image *uof_image;
 371        unsigned char ae;
 372        unsigned int ustore_size;
 373        unsigned int patt_pos;
 374        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 375        unsigned long ae_mask = handle->hal_handle->ae_mask;
 376        unsigned long cfg_ae_mask = handle->cfg_ae_mask;
 377        u64 *fill_data;
 378
 379        uof_image = image->img_ptr;
 380        fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
 381                            GFP_KERNEL);
 382        if (!fill_data)
 383                return -ENOMEM;
 384        for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
 385                memcpy(&fill_data[i], &uof_image->fill_pattern,
 386                       sizeof(u64));
 387        page = image->page;
 388
 389        for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
 390                if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
 391                        continue;
 392
 393                if (!test_bit(ae, &cfg_ae_mask))
 394                        continue;
 395
 396                ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
 397                patt_pos = page->beg_addr_p + page->micro_words_num;
 398
 399                qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
 400                                  page->beg_addr_p, &fill_data[0]);
 401                qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
 402                                  ustore_size - patt_pos + 1,
 403                                  &fill_data[page->beg_addr_p]);
 404        }
 405        kfree(fill_data);
 406        return 0;
 407}
 408
 409static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
 410{
 411        int i, ae;
 412        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 413        struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
 414        unsigned long ae_mask = handle->hal_handle->ae_mask;
 415
 416        for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
 417                if (initmem->num_in_bytes) {
 418                        if (qat_uclo_init_ae_memory(handle, initmem))
 419                                return -EINVAL;
 420                }
 421                initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
 422                        (uintptr_t)initmem +
 423                        sizeof(struct icp_qat_uof_initmem)) +
 424                        (sizeof(struct icp_qat_uof_memvar_attr) *
 425                        initmem->val_attr_num));
 426        }
 427
 428        for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
 429                if (qat_hal_batch_wr_lm(handle, ae,
 430                                        obj_handle->lm_init_tab[ae])) {
  431                        pr_err("QAT: failed to batch init lmem for AE %d\n", ae);
 432                        return -EINVAL;
 433                }
 434                qat_uclo_cleanup_batch_init_list(handle,
 435                                                 &obj_handle->lm_init_tab[ae]);
 436                qat_uclo_batch_wr_umem(handle, ae,
 437                                       obj_handle->umem_init_tab[ae]);
 438                qat_uclo_cleanup_batch_init_list(handle,
 439                                                 &obj_handle->
 440                                                 umem_init_tab[ae]);
 441        }
 442        return 0;
 443}
 444
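     /*
      * Return the next chunk header in 'obj_hdr' whose ID matches 'chunk_id',
      * starting the search after 'cur' (pass NULL to start from the first
      * chunk), or NULL if no further match exists.
      */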
 445static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
 446                                 char *chunk_id, void *cur)
 447{
 448        int i;
 449        struct icp_qat_uof_chunkhdr *chunk_hdr =
 450            (struct icp_qat_uof_chunkhdr *)
 451            ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
 452
 453        for (i = 0; i < obj_hdr->num_chunks; i++) {
 454                if ((cur < (void *)&chunk_hdr[i]) &&
 455                    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
 456                             ICP_QAT_UOF_OBJID_LEN)) {
 457                        return &chunk_hdr[i];
 458                }
 459        }
 460        return NULL;
 461}
 462
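     /*
      * Feed one byte into a running CRC-16 value using the CCITT polynomial
      * 0x1021; qat_uclo_calc_str_checksum() below applies it over a buffer.
      */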
 463static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
 464{
 465        int i;
 466        unsigned int topbit = 1 << 0xF;
 467        unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
 468
 469        reg ^= inbyte << 0x8;
 470        for (i = 0; i < 0x8; i++) {
 471                if (reg & topbit)
 472                        reg = (reg << 1) ^ 0x1021;
 473                else
 474                        reg <<= 1;
 475        }
 476        return reg & 0xFFFF;
 477}
 478
 479static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
 480{
 481        unsigned int chksum = 0;
 482
 483        if (ptr)
 484                while (num--)
 485                        chksum = qat_uclo_calc_checksum(chksum, *ptr++);
 486        return chksum;
 487}
 488
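     /*
      * Look up the file chunk named 'chunk_id', verify its contents against
      * the checksum recorded in the file chunk header, and return a freshly
      * allocated icp_qat_uclo_objhdr describing it (NULL on mismatch or
      * allocation failure).
      */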
 489static struct icp_qat_uclo_objhdr *
 490qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
 491                   char *chunk_id)
 492{
 493        struct icp_qat_uof_filechunkhdr *file_chunk;
 494        struct icp_qat_uclo_objhdr *obj_hdr;
 495        char *chunk;
 496        int i;
 497
 498        file_chunk = (struct icp_qat_uof_filechunkhdr *)
 499                (buf + sizeof(struct icp_qat_uof_filehdr));
 500        for (i = 0; i < file_hdr->num_chunks; i++) {
 501                if (!strncmp(file_chunk->chunk_id, chunk_id,
 502                             ICP_QAT_UOF_OBJID_LEN)) {
 503                        chunk = buf + file_chunk->offset;
 504                        if (file_chunk->checksum != qat_uclo_calc_str_checksum(
 505                                chunk, file_chunk->size))
 506                                break;
 507                        obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
 508                        if (!obj_hdr)
 509                                break;
 510                        obj_hdr->file_buff = chunk;
 511                        obj_hdr->checksum = file_chunk->checksum;
 512                        obj_hdr->size = file_chunk->size;
 513                        return obj_hdr;
 514                }
 515                file_chunk++;
 516        }
 517        return NULL;
 518}
 519
  520static int
 521qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
 522                            struct icp_qat_uof_image *image)
 523{
 524        struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
 525        struct icp_qat_uof_objtable *neigh_reg_tab;
 526        struct icp_qat_uof_code_page *code_page;
 527
 528        code_page = (struct icp_qat_uof_code_page *)
 529                        ((char *)image + sizeof(struct icp_qat_uof_image));
 530        uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
 531                     code_page->uc_var_tab_offset);
 532        imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
 533                      code_page->imp_var_tab_offset);
 534        imp_expr_tab = (struct icp_qat_uof_objtable *)
 535                       (encap_uof_obj->beg_uof +
 536                       code_page->imp_expr_tab_offset);
 537        if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
 538            imp_expr_tab->entry_num) {
  539                pr_err("QAT: UOF can't contain imported variables\n");
 540                return -EINVAL;
 541        }
 542        neigh_reg_tab = (struct icp_qat_uof_objtable *)
 543                        (encap_uof_obj->beg_uof +
 544                        code_page->neigh_reg_tab_offset);
 545        if (neigh_reg_tab->entry_num) {
 546                pr_err("QAT: UOF can't contain neighbor register table\n");
 547                return -EINVAL;
 548        }
 549        if (image->numpages > 1) {
 550                pr_err("QAT: UOF can't contain multiple pages\n");
 551                return -EINVAL;
 552        }
 553        if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
 554                pr_err("QAT: UOF can't use shared control store feature\n");
 555                return -EFAULT;
 556        }
 557        if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
 558                pr_err("QAT: UOF can't use reloadable feature\n");
 559                return -EFAULT;
 560        }
 561        return 0;
 562}
 563
 564static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
 565                                     *encap_uof_obj,
 566                                     struct icp_qat_uof_image *img,
 567                                     struct icp_qat_uclo_encap_page *page)
 568{
 569        struct icp_qat_uof_code_page *code_page;
 570        struct icp_qat_uof_code_area *code_area;
 571        struct icp_qat_uof_objtable *uword_block_tab;
 572        struct icp_qat_uof_uword_block *uwblock;
 573        int i;
 574
 575        code_page = (struct icp_qat_uof_code_page *)
 576                        ((char *)img + sizeof(struct icp_qat_uof_image));
 577        page->def_page = code_page->def_page;
 578        page->page_region = code_page->page_region;
 579        page->beg_addr_v = code_page->beg_addr_v;
 580        page->beg_addr_p = code_page->beg_addr_p;
 581        code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
 582                                                code_page->code_area_offset);
 583        page->micro_words_num = code_area->micro_words_num;
 584        uword_block_tab = (struct icp_qat_uof_objtable *)
 585                          (encap_uof_obj->beg_uof +
 586                          code_area->uword_block_tab);
 587        page->uwblock_num = uword_block_tab->entry_num;
 588        uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
 589                        sizeof(struct icp_qat_uof_objtable));
 590        page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
 591        for (i = 0; i < uword_block_tab->entry_num; i++)
 592                page->uwblock[i].micro_words =
 593                (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
 594}
 595
 596static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
 597                               struct icp_qat_uclo_encapme *ae_uimage,
 598                               int max_image)
 599{
 600        int i, j;
 601        struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
 602        struct icp_qat_uof_image *image;
 603        struct icp_qat_uof_objtable *ae_regtab;
 604        struct icp_qat_uof_objtable *init_reg_sym_tab;
 605        struct icp_qat_uof_objtable *sbreak_tab;
 606        struct icp_qat_uof_encap_obj *encap_uof_obj =
 607                                        &obj_handle->encap_uof_obj;
 608
 609        for (j = 0; j < max_image; j++) {
 610                chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
 611                                                ICP_QAT_UOF_IMAG, chunk_hdr);
 612                if (!chunk_hdr)
 613                        break;
 614                image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
 615                                                     chunk_hdr->offset);
 616                ae_regtab = (struct icp_qat_uof_objtable *)
 617                           (image->reg_tab_offset +
 618                           obj_handle->obj_hdr->file_buff);
 619                ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
 620                ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
 621                        (((char *)ae_regtab) +
 622                        sizeof(struct icp_qat_uof_objtable));
 623                init_reg_sym_tab = (struct icp_qat_uof_objtable *)
 624                                   (image->init_reg_sym_tab +
 625                                   obj_handle->obj_hdr->file_buff);
 626                ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
 627                ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
 628                        (((char *)init_reg_sym_tab) +
 629                        sizeof(struct icp_qat_uof_objtable));
 630                sbreak_tab = (struct icp_qat_uof_objtable *)
 631                        (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
 632                ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
 633                ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
 634                                      (((char *)sbreak_tab) +
 635                                      sizeof(struct icp_qat_uof_objtable));
 636                ae_uimage[j].img_ptr = image;
 637                if (qat_uclo_check_image_compat(encap_uof_obj, image))
 638                        goto out_err;
 639                ae_uimage[j].page =
 640                        kzalloc(sizeof(struct icp_qat_uclo_encap_page),
 641                                GFP_KERNEL);
 642                if (!ae_uimage[j].page)
 643                        goto out_err;
 644                qat_uclo_map_image_page(encap_uof_obj, image,
 645                                        ae_uimage[j].page);
 646        }
 647        return j;
 648out_err:
 649        for (i = 0; i < j; i++)
 650                kfree(ae_uimage[i].page);
 651        return 0;
 652}
 653
 654static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
 655{
 656        int i, ae;
 657        int mflag = 0;
 658        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 659        unsigned long ae_mask = handle->hal_handle->ae_mask;
 660        unsigned long cfg_ae_mask = handle->cfg_ae_mask;
 661
 662        for_each_set_bit(ae, &ae_mask, max_ae) {
 663                if (!test_bit(ae, &cfg_ae_mask))
 664                        continue;
 665
 666                for (i = 0; i < obj_handle->uimage_num; i++) {
 667                        if (!test_bit(ae, (unsigned long *)
 668                        &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
 669                                continue;
 670                        mflag = 1;
 671                        if (qat_uclo_init_ae_data(obj_handle, ae, i))
 672                                return -EINVAL;
 673                }
 674        }
 675        if (!mflag) {
  676                pr_err("QAT: no UOF image maps to an enabled AE\n");
 677                return -EINVAL;
 678        }
 679        return 0;
 680}
 681
 682static struct icp_qat_uof_strtable *
 683qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
 684                       char *tab_name, struct icp_qat_uof_strtable *str_table)
 685{
 686        struct icp_qat_uof_chunkhdr *chunk_hdr;
 687
 688        chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
 689                                        obj_hdr->file_buff, tab_name, NULL);
 690        if (chunk_hdr) {
 691                int hdr_size;
 692
 693                memcpy(&str_table->table_len, obj_hdr->file_buff +
 694                       chunk_hdr->offset, sizeof(str_table->table_len));
 695                hdr_size = (char *)&str_table->strings - (char *)str_table;
 696                str_table->strings = (uintptr_t)obj_hdr->file_buff +
 697                                        chunk_hdr->offset + hdr_size;
 698                return str_table;
 699        }
 700        return NULL;
 701}
 702
 703static void
 704qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
 705                           struct icp_qat_uclo_init_mem_table *init_mem_tab)
 706{
 707        struct icp_qat_uof_chunkhdr *chunk_hdr;
 708
 709        chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
 710                                        ICP_QAT_UOF_IMEM, NULL);
 711        if (chunk_hdr) {
 712                memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
 713                        chunk_hdr->offset, sizeof(unsigned int));
 714                init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
 715                (encap_uof_obj->beg_uof + chunk_hdr->offset +
 716                sizeof(unsigned int));
 717        }
 718}
 719
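     /*
      * Translate the PCI device ID of the accelerator into the device-type
      * value that UOF/SUOF images carry in their headers, so the two can be
      * matched during the compatibility checks below.
      */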
 720static unsigned int
 721qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
 722{
 723        switch (handle->pci_dev->device) {
 724        case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
 725                return ICP_QAT_AC_895XCC_DEV_TYPE;
 726        case PCI_DEVICE_ID_INTEL_QAT_C62X:
 727                return ICP_QAT_AC_C62X_DEV_TYPE;
 728        case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
 729                return ICP_QAT_AC_C3XXX_DEV_TYPE;
 730        case ADF_4XXX_PCI_DEVICE_ID:
 731                return ICP_QAT_AC_4XXX_A_DEV_TYPE;
 732        default:
 733                pr_err("QAT: unsupported device 0x%x\n",
 734                       handle->pci_dev->device);
 735                return 0;
 736        }
 737}
 738
 739static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
 740{
 741        unsigned int maj_ver, prod_type = obj_handle->prod_type;
 742
 743        if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
  744                pr_err("QAT: UOF type 0x%x doesn't match platform 0x%x\n",
 745                       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
 746                       prod_type);
 747                return -EINVAL;
 748        }
 749        maj_ver = obj_handle->prod_rev & 0xff;
 750        if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
 751            obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
 752                pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
 753                return -EINVAL;
 754        }
 755        return 0;
 756}
 757
 758static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
 759                             unsigned char ae, unsigned char ctx_mask,
 760                             enum icp_qat_uof_regtype reg_type,
 761                             unsigned short reg_addr, unsigned int value)
 762{
 763        switch (reg_type) {
 764        case ICP_GPA_ABS:
 765        case ICP_GPB_ABS:
 766                ctx_mask = 0;
 767                fallthrough;
 768        case ICP_GPA_REL:
 769        case ICP_GPB_REL:
 770                return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
 771                                        reg_addr, value);
 772        case ICP_SR_ABS:
 773        case ICP_DR_ABS:
 774        case ICP_SR_RD_ABS:
 775        case ICP_DR_RD_ABS:
 776                ctx_mask = 0;
 777                fallthrough;
 778        case ICP_SR_REL:
 779        case ICP_DR_REL:
 780        case ICP_SR_RD_REL:
 781        case ICP_DR_RD_REL:
 782                return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
 783                                            reg_addr, value);
 784        case ICP_SR_WR_ABS:
 785        case ICP_DR_WR_ABS:
 786                ctx_mask = 0;
 787                fallthrough;
 788        case ICP_SR_WR_REL:
 789        case ICP_DR_WR_REL:
 790                return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
 791                                            reg_addr, value);
 792        case ICP_NEIGH_REL:
 793                return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
 794        default:
  795                pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
 796                return -EFAULT;
 797        }
 798        return 0;
 799}
 800
 801static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
 802                                 unsigned int ae,
 803                                 struct icp_qat_uclo_encapme *encap_ae)
 804{
 805        unsigned int i;
 806        unsigned char ctx_mask;
 807        struct icp_qat_uof_init_regsym *init_regsym;
 808
 809        if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
 810            ICP_QAT_UCLO_MAX_CTX)
 811                ctx_mask = 0xff;
 812        else
 813                ctx_mask = 0x55;
 814
 815        for (i = 0; i < encap_ae->init_regsym_num; i++) {
 816                unsigned int exp_res;
 817
 818                init_regsym = &encap_ae->init_regsym[i];
 819                exp_res = init_regsym->value;
 820                switch (init_regsym->init_type) {
 821                case ICP_QAT_UOF_INIT_REG:
 822                        qat_uclo_init_reg(handle, ae, ctx_mask,
 823                                          (enum icp_qat_uof_regtype)
 824                                          init_regsym->reg_type,
 825                                          (unsigned short)init_regsym->reg_addr,
 826                                          exp_res);
 827                        break;
 828                case ICP_QAT_UOF_INIT_REG_CTX:
 829                        /* check if ctx is appropriate for the ctxMode */
 830                        if (!((1 << init_regsym->ctx) & ctx_mask)) {
 831                                pr_err("QAT: invalid ctx num = 0x%x\n",
 832                                       init_regsym->ctx);
 833                                return -EINVAL;
 834                        }
 835                        qat_uclo_init_reg(handle, ae,
 836                                          (unsigned char)
 837                                          (1 << init_regsym->ctx),
 838                                          (enum icp_qat_uof_regtype)
 839                                          init_regsym->reg_type,
 840                                          (unsigned short)init_regsym->reg_addr,
 841                                          exp_res);
 842                        break;
 843                case ICP_QAT_UOF_INIT_EXPR:
 844                        pr_err("QAT: INIT_EXPR feature not supported\n");
 845                        return -EINVAL;
 846                case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
 847                        pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
 848                        return -EINVAL;
 849                default:
 850                        break;
 851                }
 852        }
 853        return 0;
 854}
 855
 856static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
 857{
 858        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 859        unsigned long ae_mask = handle->hal_handle->ae_mask;
 860        struct icp_qat_uclo_aedata *aed;
 861        unsigned int s, ae;
 862
 863        if (obj_handle->global_inited)
 864                return 0;
 865        if (obj_handle->init_mem_tab.entry_num) {
 866                if (qat_uclo_init_memory(handle)) {
  867                        pr_err("QAT: memory initialization failed\n");
 868                        return -EINVAL;
 869                }
 870        }
 871
 872        for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
 873                aed = &obj_handle->ae_data[ae];
 874                for (s = 0; s < aed->slice_num; s++) {
 875                        if (!aed->ae_slices[s].encap_image)
 876                                continue;
 877                        if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
 878                                return -EINVAL;
 879                }
 880        }
 881        obj_handle->global_inited = 1;
 882        return 0;
 883}
 884
 885static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
 886                             struct icp_qat_uclo_objhandle *obj_handle,
 887                             unsigned char ae,
 888                             struct icp_qat_uof_image *uof_image)
 889{
 890        unsigned char mode;
 891        int ret;
 892
 893        mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
 894        ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
 895        if (ret) {
 896                pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
 897                return ret;
 898        }
 899        if (handle->chip_info->nn) {
 900                mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
 901                ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
 902                if (ret) {
 903                        pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
 904                        return ret;
 905                }
 906        }
 907        mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
 908        ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
 909        if (ret) {
 910                pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
 911                return ret;
 912        }
 913        mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
 914        ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
 915        if (ret) {
 916                pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
 917                return ret;
 918        }
 919        if (handle->chip_info->lm2lm3) {
 920                mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
 921                ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
 922                if (ret) {
 923                        pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
 924                        return ret;
 925                }
 926                mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
 927                ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
 928                if (ret) {
 929                        pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
 930                        return ret;
 931                }
 932                mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
 933                qat_hal_set_ae_tindex_mode(handle, ae, mode);
 934        }
 935        return 0;
 936}
 937
 938static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
 939{
 940        struct icp_qat_uof_image *uof_image;
 941        struct icp_qat_uclo_aedata *ae_data;
 942        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 943        unsigned long ae_mask = handle->hal_handle->ae_mask;
 944        unsigned long cfg_ae_mask = handle->cfg_ae_mask;
 945        unsigned char ae, s;
 946        int error;
 947
 948        for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
 949                if (!test_bit(ae, &cfg_ae_mask))
 950                        continue;
 951
 952                ae_data = &obj_handle->ae_data[ae];
 953                for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
 954                                      ICP_QAT_UCLO_MAX_CTX); s++) {
 955                        if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
 956                                continue;
 957                        uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
 958                        error = qat_hal_set_modes(handle, obj_handle, ae,
 959                                                  uof_image);
 960                        if (error)
 961                                return error;
 962                }
 963        }
 964        return 0;
 965}
 966
 967static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
 968{
 969        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 970        struct icp_qat_uclo_encapme *image;
 971        int a;
 972
 973        for (a = 0; a < obj_handle->uimage_num; a++) {
 974                image = &obj_handle->ae_uimage[a];
 975                image->uwords_num = image->page->beg_addr_p +
 976                                        image->page->micro_words_num;
 977        }
 978}
 979
 980static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
 981{
 982        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 983        unsigned int ae;
 984
 985        obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
 986        obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
 987                                             obj_handle->obj_hdr->file_buff;
 988        obj_handle->uword_in_bytes = 6;
 989        obj_handle->prod_type = qat_uclo_get_dev_type(handle);
 990        obj_handle->prod_rev = PID_MAJOR_REV |
 991                        (PID_MINOR_REV & handle->hal_handle->revision_id);
 992        if (qat_uclo_check_uof_compat(obj_handle)) {
 993                pr_err("QAT: UOF incompatible\n");
 994                return -EINVAL;
 995        }
 996        obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
 997                                        GFP_KERNEL);
 998        if (!obj_handle->uword_buf)
 999                return -ENOMEM;
1000        obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
1001        if (!obj_handle->obj_hdr->file_buff ||
1002            !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
1003                                    &obj_handle->str_table)) {
1004                pr_err("QAT: UOF doesn't have effective images\n");
1005                goto out_err;
1006        }
1007        obj_handle->uimage_num =
1008                qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
1009                                    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
1010        if (!obj_handle->uimage_num)
1011                goto out_err;
1012        if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
1013                pr_err("QAT: Bad object\n");
1014                goto out_check_uof_aemask_err;
1015        }
1016        qat_uclo_init_uword_num(handle);
1017        qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
1018                                   &obj_handle->init_mem_tab);
1019        if (qat_uclo_set_ae_mode(handle))
1020                goto out_check_uof_aemask_err;
1021        return 0;
1022out_check_uof_aemask_err:
1023        for (ae = 0; ae < obj_handle->uimage_num; ae++)
1024                kfree(obj_handle->ae_uimage[ae].page);
1025out_err:
1026        kfree(obj_handle->uword_buf);
1027        return -EFAULT;
1028}
1029
1030static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1031                                      struct icp_qat_suof_filehdr *suof_ptr,
1032                                      int suof_size)
1033{
1034        unsigned int check_sum = 0;
1035        unsigned int min_ver_offset = 0;
1036        struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1037
1038        suof_handle->file_id = ICP_QAT_SUOF_FID;
1039        suof_handle->suof_buf = (char *)suof_ptr;
1040        suof_handle->suof_size = suof_size;
1041        min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
1042                                              min_ver);
1043        check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
1044                                               min_ver_offset);
1045        if (check_sum != suof_ptr->check_sum) {
1046                pr_err("QAT: incorrect SUOF checksum\n");
1047                return -EINVAL;
1048        }
1049        suof_handle->check_sum = suof_ptr->check_sum;
1050        suof_handle->min_ver = suof_ptr->min_ver;
1051        suof_handle->maj_ver = suof_ptr->maj_ver;
1052        suof_handle->fw_type = suof_ptr->fw_type;
1053        return 0;
1054}
1055
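     /*
      * Populate a SUOF image header with pointers into the mapped SUOF
      * buffer: the CSS header, the FWSK public key, the signature and the
      * signed image itself, plus the AE mask, image name and metadata read
      * from the embedded icp_qat_simg_ae_mode structure.
      */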
1056static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
1057                              struct icp_qat_suof_img_hdr *suof_img_hdr,
1058                              struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1059{
1060        struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1061        struct icp_qat_simg_ae_mode *ae_mode;
1062        struct icp_qat_suof_objhdr *suof_objhdr;
1063
1064        suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
1065                                   suof_chunk_hdr->offset +
1066                                   sizeof(*suof_objhdr));
1067        suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
1068                                  (suof_handle->suof_buf +
1069                                   suof_chunk_hdr->offset))->img_length;
1070
1071        suof_img_hdr->css_header = suof_img_hdr->simg_buf;
1072        suof_img_hdr->css_key = (suof_img_hdr->css_header +
1073                                 sizeof(struct icp_qat_css_hdr));
1074        suof_img_hdr->css_signature = suof_img_hdr->css_key +
1075                                      ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
1076                                      ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
1077        suof_img_hdr->css_simg = suof_img_hdr->css_signature +
1078                                 ICP_QAT_CSS_SIGNATURE_LEN(handle);
1079
1080        ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
1081        suof_img_hdr->ae_mask = ae_mode->ae_mask;
1082        suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
1083        suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
1084        suof_img_hdr->fw_type = ae_mode->fw_type;
1085}
1086
1087static void
1088qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
1089                          struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1090{
1091        char **sym_str = (char **)&suof_handle->sym_str;
1092        unsigned int *sym_size = &suof_handle->sym_size;
1093        struct icp_qat_suof_strtable *str_table_obj;
1094
1095        *sym_size = *(unsigned int *)(uintptr_t)
1096                   (suof_chunk_hdr->offset + suof_handle->suof_buf);
1097        *sym_str = (char *)(uintptr_t)
1098                   (suof_handle->suof_buf + suof_chunk_hdr->offset +
1099                   sizeof(str_table_obj->tab_length));
1100}
1101
1102static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
1103                                      struct icp_qat_suof_img_hdr *img_hdr)
1104{
1105        struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
1106        unsigned int prod_rev, maj_ver, prod_type;
1107
1108        prod_type = qat_uclo_get_dev_type(handle);
1109        img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
1110        prod_rev = PID_MAJOR_REV |
1111                         (PID_MINOR_REV & handle->hal_handle->revision_id);
1112        if (img_ae_mode->dev_type != prod_type) {
1113                pr_err("QAT: incompatible product type %x\n",
1114                       img_ae_mode->dev_type);
1115                return -EINVAL;
1116        }
1117        maj_ver = prod_rev & 0xff;
1118        if (maj_ver > img_ae_mode->devmax_ver ||
1119            maj_ver < img_ae_mode->devmin_ver) {
1120                pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
1121                return -EINVAL;
1122        }
1123        return 0;
1124}
1125
1126static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
1127{
1128        struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1129
1130        kfree(sobj_handle->img_table.simg_hdr);
1131        sobj_handle->img_table.simg_hdr = NULL;
1132        kfree(handle->sobj_handle);
1133        handle->sobj_handle = NULL;
1134}
1135
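     /*
      * Swap the image at index 'img_id' with the last entry of the image
      * table; the caller uses this to move the image that targets AE0 to
      * the tail, presumably so that it is loaded last.
      */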
1136static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
1137                              unsigned int img_id, unsigned int num_simgs)
1138{
1139        struct icp_qat_suof_img_hdr img_header;
1140
1141        if (img_id != num_simgs - 1) {
1142                memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
1143                       sizeof(*suof_img_hdr));
1144                memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
1145                       sizeof(*suof_img_hdr));
1146                memcpy(&suof_img_hdr[img_id], &img_header,
1147                       sizeof(*suof_img_hdr));
1148        }
1149}
1150
1151static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
1152                             struct icp_qat_suof_filehdr *suof_ptr,
1153                             int suof_size)
1154{
1155        struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1156        struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
1157        struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
1158        int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
1159        unsigned int i = 0;
1160        struct icp_qat_suof_img_hdr img_header;
1161
1162        if (!suof_ptr || suof_size == 0) {
 1163                pr_err("QAT: invalid SUOF pointer or size\n");
1164                return -EINVAL;
1165        }
1166        if (qat_uclo_check_suof_format(suof_ptr))
1167                return -EINVAL;
1168        ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
1169        if (ret)
1170                return ret;
1171        suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
1172                         ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
1173
1174        qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
1175        suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
1176
1177        if (suof_handle->img_table.num_simgs != 0) {
1178                suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
1179                                       sizeof(img_header),
1180                                       GFP_KERNEL);
1181                if (!suof_img_hdr)
1182                        return -ENOMEM;
1183                suof_handle->img_table.simg_hdr = suof_img_hdr;
1184
1185                for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1186                        qat_uclo_map_simg(handle, &suof_img_hdr[i],
1187                                          &suof_chunk_hdr[1 + i]);
1188                        ret = qat_uclo_check_simg_compat(handle,
1189                                                         &suof_img_hdr[i]);
1190                        if (ret)
1191                                return ret;
1192                        suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
1193                        if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
1194                                ae0_img = i;
1195                }
1196
1197                if (!handle->chip_info->tgroup_share_ustore) {
1198                        qat_uclo_tail_img(suof_img_hdr, ae0_img,
1199                                          suof_handle->img_table.num_simgs);
1200                }
1201        }
1202        return 0;
1203}
1204
1205#define ADD_ADDR(high, low)  ((((u64)high) << 32) + low)
1206#define BITS_IN_DWORD 32
1207
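     /*
      * Point the FCU at the authentication descriptor in device-visible
      * DRAM, issue the AUTH command and poll the FCU status CSR until
      * verification completes, fails, or FW_AUTH_MAX_RETRY polls have
      * elapsed.
      */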
1208static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
1209                            struct icp_qat_fw_auth_desc *desc)
1210{
1211        u32 fcu_sts, retry = 0;
1212        u32 fcu_ctl_csr, fcu_sts_csr;
1213        u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
1214        u64 bus_addr;
1215
1216        bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
1217                           - sizeof(struct icp_qat_auth_chunk);
1218
1219        fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
1220        fcu_sts_csr = handle->chip_info->fcu_sts_csr;
1221        fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
1222        fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
1223
1224        SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
1225        SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
1226        SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
1227
1228        do {
1229                msleep(FW_AUTH_WAIT_PERIOD);
1230                fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
1231                if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
1232                        goto auth_fail;
1233                if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
1234                        if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
1235                                return 0;
1236        } while (retry++ < FW_AUTH_MAX_RETRY);
1237auth_fail:
 1238        pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
1239               fcu_sts & FCU_AUTH_STS_MASK, retry);
1240        return -EINVAL;
1241}
1242
1243static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
1244                                  int imgid)
1245{
1246        struct icp_qat_suof_handle *sobj_handle;
1247
1248        if (!handle->chip_info->tgroup_share_ustore)
1249                return false;
1250
1251        sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
1252        if (handle->hal_handle->admin_ae_mask &
1253            sobj_handle->img_table.simg_hdr[imgid].ae_mask)
1254                return false;
1255
1256        return true;
1257}
1258
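     /*
      * On parts whose target group shares the ustore, broadcast-load the
      * authenticated image to every AE set in desc->ae_mask: program the
      * broadcast mask, issue the LOAD command and poll until the FCU reports
      * all of those AEs as loaded or the retry budget runs out.
      */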
1259static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
1260                                      struct icp_qat_fw_auth_desc *desc)
1261{
1262        unsigned long ae_mask = handle->hal_handle->ae_mask;
1263        unsigned long desc_ae_mask = desc->ae_mask;
1264        u32 fcu_sts, ae_broadcast_mask = 0;
1265        u32 fcu_loaded_csr, ae_loaded;
1266        u32 fcu_sts_csr, fcu_ctl_csr;
1267        unsigned int ae, retry = 0;
1268
1269        if (handle->chip_info->tgroup_share_ustore) {
1270                fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
1271                fcu_sts_csr = handle->chip_info->fcu_sts_csr;
1272                fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
1273        } else {
1274                pr_err("Chip 0x%x doesn't support broadcast load\n",
1275                       handle->pci_dev->device);
1276                return -EINVAL;
1277        }
1278
1279        for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
1280                if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
1281                        pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
1282                        return -EINVAL;
1283                }
1284
1285                if (test_bit(ae, &desc_ae_mask))
1286                        ae_broadcast_mask |= 1 << ae;
1287        }
1288
1289        if (ae_broadcast_mask) {
1290                SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
1291                            ae_broadcast_mask);
1292
1293                SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);
1294
1295                do {
1296                        msleep(FW_AUTH_WAIT_PERIOD);
1297                        fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
1298                        fcu_sts &= FCU_AUTH_STS_MASK;
1299
1300                        if (fcu_sts == FCU_STS_LOAD_FAIL) {
1301                                pr_err("Broadcast load failed: 0x%x\n", fcu_sts);
1302                                return -EINVAL;
1303                        } else if (fcu_sts == FCU_STS_LOAD_DONE) {
1304                                ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
1305                                ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;
1306
1307                                if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
1308                                        break;
1309                        }
1310                } while (retry++ < FW_AUTH_MAX_RETRY);
1311
1312                if (retry > FW_AUTH_MAX_RETRY) {
1313                        pr_err("QAT: broadcast load timed out, retry = %d\n", retry);
1314                        return -EINVAL;
1315                }
1316        }
1317        return 0;
1318}
1319
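/*
 * Allocate a DMA-coherent buffer for a signed image and record its
 * virtual address, bus address and size in the DRAM descriptor.
 */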
1320static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
1321                               struct icp_firml_dram_desc *dram_desc,
1322                               unsigned int size)
1323{
1324        void *vptr;
1325        dma_addr_t ptr;
1326
1327        vptr = dma_alloc_coherent(&handle->pci_dev->dev,
1328                                  size, &ptr, GFP_KERNEL);
1329        if (!vptr)
1330                return -ENOMEM;
1331        dram_desc->dram_base_addr_v = vptr;
1332        dram_desc->dram_bus_addr = ptr;
1333        dram_desc->dram_size = size;
1334        return 0;
1335}
1336
1337static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
1338                               struct icp_firml_dram_desc *dram_desc)
1339{
1340        if (handle && dram_desc && dram_desc->dram_base_addr_v) {
1341                dma_free_coherent(&handle->pci_dev->dev,
1342                                  (size_t)(dram_desc->dram_size),
1343                                  dram_desc->dram_base_addr_v,
1344                                  dram_desc->dram_bus_addr);
1345        }
1346
1347        if (dram_desc)
1348                memset(dram_desc, 0, sizeof(*dram_desc));
1349}
1350
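/*
 * Undo qat_uclo_map_auth_fw(): rebuild the DRAM descriptor from the
 * auth chunk header stored at the start of the buffer and free it.
 */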
1351static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
1352                                   struct icp_qat_fw_auth_desc **desc)
1353{
1354        struct icp_firml_dram_desc dram_desc;
1355
1356        if (*desc) {
1357                dram_desc.dram_base_addr_v = *desc;
1358                dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
1359                                           (*desc))->chunk_bus_addr;
1360                dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
1361                                       (*desc))->chunk_size;
1362                qat_uclo_simg_free(handle, &dram_desc);
1363        }
1364}
1365
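/*
 * Copy a signed image into a DMA-coherent buffer and fill in the
 * authentication descriptor with the bus addresses of the CSS header,
 * public key (modulus, padding, exponent), signature and image body.
 * For AE firmware the AE mode data, init sequence and instruction
 * areas are described as well, and the AE mask is taken from the image.
 */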
1366static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
1367                                char *image, unsigned int size,
1368                                struct icp_qat_fw_auth_desc **desc)
1369{
1370        struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
1371        struct icp_qat_fw_auth_desc *auth_desc;
1372        struct icp_qat_auth_chunk *auth_chunk;
1373        u64 virt_addr, bus_addr, virt_base;
1374        unsigned int length, simg_offset = sizeof(*auth_chunk);
1375        struct icp_qat_simg_ae_mode *simg_ae_mode;
1376        struct icp_firml_dram_desc img_desc;
1377
1378        if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
1379                pr_err("QAT: error, input image size %d exceeds maximum\n", size);
1380                return -EINVAL;
1381        }
1382        length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
1383                 ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
1384                 size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
1385        if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
1386                pr_err("QAT: error, failed to allocate contiguous DRAM\n");
1387                return -ENOMEM;
1388        }
1389
1390        auth_chunk = img_desc.dram_base_addr_v;
1391        auth_chunk->chunk_size = img_desc.dram_size;
1392        auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
1393        virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
1394        bus_addr  = img_desc.dram_bus_addr + simg_offset;
1395        auth_desc = img_desc.dram_base_addr_v;
1396        auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1397        auth_desc->css_hdr_low = (unsigned int)bus_addr;
1398        virt_addr = virt_base;
1399
1400        memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
1401        /* pub key */
1402        bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
1403                           sizeof(*css_hdr);
1404        virt_addr = virt_addr + sizeof(*css_hdr);
1405
1406        auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1407        auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
1408
1409        memcpy((void *)(uintptr_t)virt_addr,
1410               (void *)(image + sizeof(*css_hdr)),
1411               ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
1412        /* padding */
1413        memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
1414               0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
1415
1416        /* exponent */
1417        memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
1418               ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
1419               (void *)(image + sizeof(*css_hdr) +
1420                        ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
1421               sizeof(unsigned int));
1422
1423        /* signature */
1424        bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
1425                            auth_desc->fwsk_pub_low) +
1426                   ICP_QAT_CSS_FWSK_PUB_LEN(handle);
1427        virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
1428        auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1429        auth_desc->signature_low = (unsigned int)bus_addr;
1430
1431        memcpy((void *)(uintptr_t)virt_addr,
1432               (void *)(image + sizeof(*css_hdr) +
1433               ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
1434               ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
1435               ICP_QAT_CSS_SIGNATURE_LEN(handle));
1436
1437        bus_addr = ADD_ADDR(auth_desc->signature_high,
1438                            auth_desc->signature_low) +
1439                   ICP_QAT_CSS_SIGNATURE_LEN(handle);
1440        virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
1441
1442        auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1443        auth_desc->img_low = (unsigned int)bus_addr;
1444        auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
1445        memcpy((void *)(uintptr_t)virt_addr,
1446               (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
1447               auth_desc->img_len);
1448        virt_addr = virt_base;
1449        /* AE firmware */
1450        if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
1451            CSS_AE_FIRMWARE) {
1452                auth_desc->img_ae_mode_data_high = auth_desc->img_high;
1453                auth_desc->img_ae_mode_data_low = auth_desc->img_low;
1454                bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
1455                                    auth_desc->img_ae_mode_data_low) +
1456                           sizeof(struct icp_qat_simg_ae_mode);
1457
1458                auth_desc->img_ae_init_data_high = (unsigned int)
1459                                                 (bus_addr >> BITS_IN_DWORD);
1460                auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
1461                bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
1462                auth_desc->img_ae_insts_high = (unsigned int)
1463                                             (bus_addr >> BITS_IN_DWORD);
1464                auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
1465                virt_addr += sizeof(struct icp_qat_css_hdr);
1466                virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
1467                virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
1468                simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
1469                auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
1470        } else {
1471                auth_desc->img_ae_insts_high = auth_desc->img_high;
1472                auth_desc->img_ae_insts_low = auth_desc->img_low;
1473        }
1474        *desc = auth_desc;
1475        return 0;
1476}
1477
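/*
 * Load an authenticated image onto each AE in the descriptor mask, one
 * AE at a time, polling the FCU status CSR for LOAD_DONE per AE.
 */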
1478static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
1479                            struct icp_qat_fw_auth_desc *desc)
1480{
1481        unsigned long ae_mask = handle->hal_handle->ae_mask;
1482        u32 fcu_sts_csr, fcu_ctl_csr;
1483        u32 loaded_aes, loaded_csr;
1484        unsigned int i;
1485        u32 fcu_sts;
1486
1487        fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
1488        fcu_sts_csr = handle->chip_info->fcu_sts_csr;
1489        loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
1490
1491        for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
1492                int retry = 0;
1493
1494                if (!((desc->ae_mask >> i) & 0x1))
1495                        continue;
1496                if (qat_hal_check_ae_active(handle, i)) {
1497                        pr_err("QAT: AE %d is active\n", i);
1498                        return -EINVAL;
1499                }
1500                SET_CAP_CSR(handle, fcu_ctl_csr,
1501                            (FCU_CTRL_CMD_LOAD |
1502                            (1 << FCU_CTRL_BROADCAST_POS) |
1503                            (i << FCU_CTRL_AE_POS)));
1504
1505                do {
1506                        msleep(FW_AUTH_WAIT_PERIOD);
1507                        fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
1508                        if ((fcu_sts & FCU_AUTH_STS_MASK) ==
1509                            FCU_STS_LOAD_DONE) {
1510                                loaded_aes = GET_CAP_CSR(handle, loaded_csr);
1511                                loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
1512                                if (loaded_aes & (1 << i))
1513                                        break;
1514                        }
1515                } while (retry++ < FW_AUTH_MAX_RETRY);
1516                if (retry > FW_AUTH_MAX_RETRY) {
1517                        pr_err("QAT: firmware load timed out, retry = %d\n", retry);
1518                        return -EINVAL;
1519                }
1520        }
1521        return 0;
1522}
1523
1524static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
1525                                 void *addr_ptr, int mem_size)
1526{
1527        struct icp_qat_suof_handle *suof_handle;
1528
1529        suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
1530        if (!suof_handle)
1531                return -ENOMEM;
1532        handle->sobj_handle = suof_handle;
1533        if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
1534                qat_uclo_del_suof(handle);
1535                pr_err("QAT: map SUOF failed\n");
1536                return -EINVAL;
1537        }
1538        return 0;
1539}
1540
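/*
 * Write the MMP image: on parts with firmware authentication it is
 * mapped and authenticated through the FCU, otherwise it is written
 * straight into SRAM word by word.
 */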
1541int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
1542                       void *addr_ptr, int mem_size)
1543{
1544        struct icp_qat_fw_auth_desc *desc = NULL;
1545        int status = 0;
1546
1547        if (handle->chip_info->fw_auth) {
1548                status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
1549                if (!status)
1550                        status = qat_uclo_auth_fw(handle, desc);
1551                qat_uclo_ummap_auth_fw(handle, &desc);
1552        } else {
1553                if (handle->chip_info->mmp_sram_size < mem_size) {
1554                        pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
1555                        return -EFBIG;
1556                }
1557                qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
1558        }
1559        return status;
1560}
1561
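/*
 * Duplicate a UOF image into kernel memory, validate the file header,
 * locate the object chunk and parse it into the object handle.
 */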
1562static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1563                                void *addr_ptr, int mem_size)
1564{
1565        struct icp_qat_uof_filehdr *filehdr;
1566        struct icp_qat_uclo_objhandle *objhdl;
1567
1568        objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
1569        if (!objhdl)
1570                return -ENOMEM;
1571        objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
1572        if (!objhdl->obj_buf)
1573                goto out_objbuf_err;
1574        filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1575        if (qat_uclo_check_uof_format(filehdr))
1576                goto out_objhdr_err;
1577        objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1578                                             ICP_QAT_UOF_OBJS);
1579        if (!objhdl->obj_hdr) {
1580                pr_err("QAT: object file chunk is null\n");
1581                goto out_objhdr_err;
1582        }
1583        handle->obj_handle = objhdl;
1584        if (qat_uclo_parse_uof_obj(handle))
1585                goto out_overlay_obj_err;
1586        return 0;
1587
1588out_overlay_obj_err:
1589        handle->obj_handle = NULL;
1590        kfree(objhdl->obj_hdr);
1591out_objhdr_err:
1592        kfree(objhdl->obj_buf);
1593out_objbuf_err:
1594        kfree(objhdl);
1595        return -ENOMEM;
1596}
1597
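/*
 * Record the MOF buffer in the handle and verify the header checksum,
 * which covers everything from the min_ver field to the end of file.
 */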
1598static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1599                                     struct icp_qat_mof_file_hdr *mof_ptr,
1600                                     u32 mof_size)
1601{
1602        struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
1603        unsigned int min_ver_offset;
1604        unsigned int checksum;
1605
1606        mobj_handle->file_id = ICP_QAT_MOF_FID;
1607        mobj_handle->mof_buf = (char *)mof_ptr;
1608        mobj_handle->mof_size = mof_size;
1609
1610        min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
1611                                             min_ver);
1612        checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
1613                                              min_ver_offset);
1614        if (checksum != mof_ptr->checksum) {
1615                pr_err("QAT: incorrect MOF checksum\n");
1616                return -EINVAL;
1617        }
1618
1619        mobj_handle->checksum = mof_ptr->checksum;
1620        mobj_handle->min_ver = mof_ptr->min_ver;
1621        mobj_handle->maj_ver = mof_ptr->maj_ver;
1622        return 0;
1623}
1624
1625static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
1626{
1627        struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
1628
1629        kfree(mobj_handle->obj_table.obj_hdr);
1630        mobj_handle->obj_table.obj_hdr = NULL;
1631        kfree(handle->mobj_handle);
1632        handle->mobj_handle = NULL;
1633}
1634
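/*
 * Look up an object by name in the MOF object table and return its
 * buffer and size.
 */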
1635static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
1636                                        char *obj_name, char **obj_ptr,
1637                                        unsigned int *obj_size)
1638{
1639        struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
1640        unsigned int i;
1641
1642        for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
1643                if (!strncmp(obj_hdr[i].obj_name, obj_name,
1644                             ICP_QAT_SUOF_OBJ_NAME_LEN)) {
1645                        *obj_ptr  = obj_hdr[i].obj_buf;
1646                        *obj_size = obj_hdr[i].obj_size;
1647                        return 0;
1648                }
1649        }
1650
1651        pr_err("QAT: object %s not found inside MOF\n", obj_name);
1652        return -EINVAL;
1653}
1654
1655static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
1656                                     struct icp_qat_mof_objhdr *mobj_hdr,
1657                                     struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
1658{
1659        u8 *obj;
1660
1661        if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
1662                     ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
1663                obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
1664        } else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
1665                            ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
1666                obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
1667        } else {
1668                pr_err("QAT: unsupported chunk id\n");
1669                return -EINVAL;
1670        }
1671        mobj_hdr->obj_buf = obj;
1672        mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
1673        mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
1674        return 0;
1675}
1676
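/*
 * Walk the UOF and SUOF chunk tables of a MOF and build a flat object
 * table (name, buffer, size) for later lookup by name.
 */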
1677static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
1678{
1679        struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
1680        struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
1681        struct icp_qat_mof_obj_hdr *uobj_hdr;
1682        struct icp_qat_mof_obj_hdr *sobj_hdr;
1683        struct icp_qat_mof_objhdr *mobj_hdr;
1684        unsigned int uobj_chunk_num = 0;
1685        unsigned int sobj_chunk_num = 0;
1686        unsigned int *valid_chunk;
1687        int ret, i;
1688
1689        uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
1690        sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
1691        if (uobj_hdr)
1692                uobj_chunk_num = uobj_hdr->num_chunks;
1693        if (sobj_hdr)
1694                sobj_chunk_num = sobj_hdr->num_chunks;
1695
1696        mobj_hdr = kcalloc(uobj_chunk_num + sobj_chunk_num,
1697                           sizeof(*mobj_hdr), GFP_KERNEL);
1698        if (!mobj_hdr)
1699                return -ENOMEM;
1700
1701        mobj_handle->obj_table.obj_hdr = mobj_hdr;
1702        valid_chunk = &mobj_handle->obj_table.num_objs;
1703        uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
1704                         ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
1705        sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
1706                        ((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
1707
1708        /* map uof objects */
1709        for (i = 0; i < uobj_chunk_num; i++) {
1710                ret = qat_uclo_map_obj_from_mof(mobj_handle,
1711                                                &mobj_hdr[*valid_chunk],
1712                                                &uobj_chunkhdr[i]);
1713                if (ret)
1714                        return ret;
1715                (*valid_chunk)++;
1716        }
1717
1718        /* map suof objects */
1719        for (i = 0; i < sobj_chunk_num; i++) {
1720                ret = qat_uclo_map_obj_from_mof(mobj_handle,
1721                                                &mobj_hdr[*valid_chunk],
1722                                                &sobj_chunkhdr[i]);
1723                if (ret)
1724                        return ret;
1725                (*valid_chunk)++;
1726        }
1727
1728        if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
1729                pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
1730                return -EINVAL;
1731        }
1732        return 0;
1733}
1734
1735static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
1736                                     struct icp_qat_mof_chunkhdr *mof_chunkhdr)
1737{
1738        char **sym_str = (char **)&mobj_handle->sym_str;
1739        unsigned int *sym_size = &mobj_handle->sym_size;
1740        struct icp_qat_mof_str_table *str_table_obj;
1741
1742        *sym_size = *(unsigned int *)(uintptr_t)
1743                    (mof_chunkhdr->offset + mobj_handle->mof_buf);
1744        *sym_str = (char *)(uintptr_t)
1745                   (mobj_handle->mof_buf + mof_chunkhdr->offset +
1746                    sizeof(str_table_obj->tab_len));
1747}
1748
1749static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
1750                                   struct icp_qat_mof_chunkhdr *mof_chunkhdr)
1751{
1752        char *chunk_id = mof_chunkhdr->chunk_id;
1753
1754        if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1755                qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
1756        else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1757                mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
1758                                         mof_chunkhdr->offset;
1759        else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
1760                mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
1761                                         mof_chunkhdr->offset;
1762}
1763
1764static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
1765{
1766        int maj = mof_hdr->maj_ver & 0xff;
1767        int min = mof_hdr->min_ver & 0xff;
1768
1769        if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
1770                pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
1771                return -EINVAL;
1772        }
1773
1774        if (mof_hdr->num_chunks <= 0x1) {
1775                pr_err("QAT: MOF chunk amount is incorrect\n");
1776                return -EINVAL;
1777        }
1778        if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
1779                pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
1780                       maj, min);
1781                return -EINVAL;
1782        }
1783        return 0;
1784}
1785
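/*
 * Map a MOF and locate the named object inside it. A plain UOF or SUOF
 * buffer is passed through unchanged; otherwise the MOF header, symbol
 * table and object chunks are parsed before the lookup.
 */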
1786static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
1787                                struct icp_qat_mof_file_hdr *mof_ptr,
1788                                u32 mof_size, char *obj_name, char **obj_ptr,
1789                                unsigned int *obj_size)
1790{
1791        struct icp_qat_mof_chunkhdr *mof_chunkhdr;
1792        unsigned int file_id = mof_ptr->file_id;
1793        struct icp_qat_mof_handle *mobj_handle;
1794        unsigned short chunks_num;
1795        unsigned int i;
1796        int ret;
1797
1798        if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
1799                if (obj_ptr)
1800                        *obj_ptr = (char *)mof_ptr;
1801                if (obj_size)
1802                        *obj_size = mof_size;
1803                return 0;
1804        }
1805        if (qat_uclo_check_mof_format(mof_ptr))
1806                return -EINVAL;
1807
1808        mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
1809        if (!mobj_handle)
1810                return -ENOMEM;
1811
1812        handle->mobj_handle = mobj_handle;
1813        ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
1814        if (ret)
1815                return ret;
1816
1817        mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
1818        chunks_num = mof_ptr->num_chunks;
1819
1820        /* Parse MOF file chunks */
1821        for (i = 0; i < chunks_num; i++)
1822                qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
1823
1824        /* sym_objs and at least one of uobjs/sobjs must be present */
1825        if (!mobj_handle->sym_str ||
1826            (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
1827                return -EINVAL;
1828
1829        ret = qat_uclo_map_objs_from_mof(mobj_handle);
1830        if (ret)
1831                return ret;
1832
1833        /* Seek specified uof object in MOF */
1834        return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
1835                                            obj_ptr, obj_size);
1836}
1837
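/*
 * Entry point for mapping a firmware object: optionally extract the
 * named object from a MOF, then map it as SUOF on parts with firmware
 * authentication or as UOF otherwise.
 */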
1838int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
1839                     void *addr_ptr, u32 mem_size, char *obj_name)
1840{
1841        char *obj_addr;
1842        u32 obj_size;
1843        int ret;
1844
1845        BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1846                     (sizeof(handle->hal_handle->ae_mask) * 8));
1847
1848        if (!handle || !addr_ptr || mem_size < 24)
1849                return -EINVAL;
1850
1851        if (obj_name) {
1852                ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
1853                                           &obj_addr, &obj_size);
1854                if (ret)
1855                        return ret;
1856        } else {
1857                obj_addr = addr_ptr;
1858                obj_size = mem_size;
1859        }
1860
1861        return (handle->chip_info->fw_auth) ?
1862                        qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
1863                        qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
1864}
1865
1866void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
1867{
1868        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1869        unsigned int a;
1870
1871        if (handle->mobj_handle)
1872                qat_uclo_del_mof(handle);
1873        if (handle->sobj_handle)
1874                qat_uclo_del_suof(handle);
1875        if (!obj_handle)
1876                return;
1877
1878        kfree(obj_handle->uword_buf);
1879        for (a = 0; a < obj_handle->uimage_num; a++)
1880                kfree(obj_handle->ae_uimage[a].page);
1881
1882        for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1883                qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1884
1885        kfree(obj_handle->obj_hdr);
1886        kfree(obj_handle->obj_buf);
1887        kfree(obj_handle);
1888        handle->obj_handle = NULL;
1889}
1890
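/*
 * Fetch one microword for a ustore address from the encapsulated page
 * (using the relative address for relocatable regions) and mask it to
 * 44 bits; invalid words are replaced by the image fill pattern.
 */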
1891static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1892                                 struct icp_qat_uclo_encap_page *encap_page,
1893                                 u64 *uword, unsigned int addr_p,
1894                                 unsigned int raddr, u64 fill)
1895{
1896        unsigned int i, addr;
1897        u64 uwrd = 0;
1898
1899        if (!encap_page) {
1900                *uword = fill;
1901                return;
1902        }
1903        addr = (encap_page->page_region) ? raddr : addr_p;
1904        for (i = 0; i < encap_page->uwblock_num; i++) {
1905                if (addr >= encap_page->uwblock[i].start_addr &&
1906                    addr <= encap_page->uwblock[i].start_addr +
1907                    encap_page->uwblock[i].words_num - 1) {
1908                        addr -= encap_page->uwblock[i].start_addr;
1909                        addr *= obj_handle->uword_in_bytes;
1910                        memcpy(&uwrd, (void *)(((uintptr_t)
1911                               encap_page->uwblock[i].micro_words) + addr),
1912                               obj_handle->uword_in_bytes);
1913                        uwrd = uwrd & GENMASK_ULL(43, 0);
1914                }
1915        }
1916        *uword = uwrd;
1917        if (*uword == INVLD_UWORD)
1918                *uword = fill;
1919}
1920
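/*
 * Write one encapsulated page into an AE's ustore, copying up to
 * UWORD_CPYBUF_SIZE microwords at a time through the uword buffer.
 */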
1921static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1922                                        struct icp_qat_uclo_encap_page
1923                                        *encap_page, unsigned int ae)
1924{
1925        unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1926        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1927        u64 fill_pat;
1928
1929        /* load the page starting at appropriate ustore address */
1930        /* get fill-pattern from an image -- they are all the same */
1931        memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1932               sizeof(u64));
1933        uw_physical_addr = encap_page->beg_addr_p;
1934        uw_relative_addr = 0;
1935        words_num = encap_page->micro_words_num;
1936        while (words_num) {
1937                if (words_num < UWORD_CPYBUF_SIZE)
1938                        cpylen = words_num;
1939                else
1940                        cpylen = UWORD_CPYBUF_SIZE;
1941
1942                /* load the buffer */
1943                for (i = 0; i < cpylen; i++)
1944                        qat_uclo_fill_uwords(obj_handle, encap_page,
1945                                             &obj_handle->uword_buf[i],
1946                                             uw_physical_addr + i,
1947                                             uw_relative_addr + i, fill_pat);
1948
1949                /* copy the buffer to ustore */
1950                qat_hal_wr_uwords(handle, (unsigned char)ae,
1951                                  uw_physical_addr, cpylen,
1952                                  obj_handle->uword_buf);
1953
1954                uw_physical_addr += cpylen;
1955                uw_relative_addr += cpylen;
1956                words_num -= cpylen;
1957        }
1958}
1959
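/*
 * Write a UOF image to every AE it is assigned to: pick the matching
 * slice, program its default page into ustore, then set the live
 * contexts and their PCs to the image entry point.
 */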
1960static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
1961                                    struct icp_qat_uof_image *image)
1962{
1963        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1964        unsigned long ae_mask = handle->hal_handle->ae_mask;
1965        unsigned long cfg_ae_mask = handle->cfg_ae_mask;
1966        unsigned long ae_assigned = image->ae_assigned;
1967        struct icp_qat_uclo_aedata *aed;
1968        unsigned int ctx_mask, s;
1969        struct icp_qat_uclo_page *page;
1970        unsigned char ae;
1971        int ctx;
1972
1973        if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
1974                ctx_mask = 0xff;
1975        else
1976                ctx_mask = 0x55;
1977        /* load the default page and set assigned CTX PC
1978         * to the entrypoint address */
1979        for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
1980                if (!test_bit(ae, &cfg_ae_mask))
1981                        continue;
1982
1983                if (!test_bit(ae, &ae_assigned))
1984                        continue;
1985
1986                aed = &obj_handle->ae_data[ae];
1987                /* find the slice to which this image is assigned */
1988                for (s = 0; s < aed->slice_num; s++) {
1989                        if (image->ctx_assigned &
1990                            aed->ae_slices[s].ctx_mask_assigned)
1991                                break;
1992                }
1993                if (s >= aed->slice_num)
1994                        continue;
1995                page = aed->ae_slices[s].page;
1996                if (!page->encap_page->def_page)
1997                        continue;
1998                qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
1999
2000                page = aed->ae_slices[s].page;
2001                for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
2002                        aed->ae_slices[s].cur_page[ctx] =
2003                                        (ctx_mask & (1 << ctx)) ? page : NULL;
2004                qat_hal_set_live_ctx(handle, (unsigned char)ae,
2005                                     image->ctx_assigned);
2006                qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
2007                               image->entry_address);
2008        }
2009}
2010
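/*
 * Load all images of a SUOF: each image is mapped and authenticated,
 * then loaded either via broadcast or per AE, and the authentication
 * buffer is released afterwards.
 */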
2011static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
2012{
2013        unsigned int i;
2014        struct icp_qat_fw_auth_desc *desc = NULL;
2015        struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
2016        struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
2017
2018        for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
2019                if (qat_uclo_map_auth_fw(handle,
2020                                         (char *)simg_hdr[i].simg_buf,
2021                                         (unsigned int)
2022                                         simg_hdr[i].simg_len,
2023                                         &desc))
2024                        goto wr_err;
2025                if (qat_uclo_auth_fw(handle, desc))
2026                        goto wr_err;
2027                if (qat_uclo_is_broadcast(handle, i)) {
2028                        if (qat_uclo_broadcast_load_fw(handle, desc))
2029                                goto wr_err;
2030                } else {
2031                        if (qat_uclo_load_fw(handle, desc))
2032                                goto wr_err;
2033                }
2034                qat_uclo_ummap_auth_fw(handle, &desc);
2035        }
2036        return 0;
2037wr_err:
2038        qat_uclo_ummap_auth_fw(handle, &desc);
2039        return -EINVAL;
2040}
2041
2042static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
2043{
2044        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
2045        unsigned int i;
2046
2047        if (qat_uclo_init_globals(handle))
2048                return -EINVAL;
2049        for (i = 0; i < obj_handle->uimage_num; i++) {
2050                if (!obj_handle->ae_uimage[i].img_ptr)
2051                        return -EINVAL;
2052                if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
2053                        return -EINVAL;
2054                qat_uclo_wr_uimage_page(handle,
2055                                        obj_handle->ae_uimage[i].img_ptr);
2056        }
2057        return 0;
2058}
2059
2060int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
2061{
2062        return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
2063                                   qat_uclo_wr_uof_img(handle);
2064}
2065
2066int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
2067                             unsigned int cfg_ae_mask)
2068{
2069        if (!cfg_ae_mask)
2070                return -EINVAL;
2071
2072        handle->cfg_ae_mask = cfg_ae_mask;
2073        return 0;
2074}
2075