linux/drivers/misc/cxl/sysfs.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/pci_regs.h>

#include "cxl.h"

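/* The AFU master chardev stores its struct cxl_afu in the device's drvdata */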
#define to_afu_chardev_m(d) dev_get_drvdata(d)

/*********  Adapter attributes  **********************************************/

static ssize_t caia_version_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
                         adapter->caia_minor);
}

static ssize_t psl_revision_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}

static ssize_t base_image_show(struct device *device,
                               struct device_attribute *attr,
                               char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}

static ssize_t image_loaded_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        if (adapter->user_image_loaded)
                return scnprintf(buf, PAGE_SIZE, "user\n");
        return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t psl_timebase_synced_show(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);
        u64 psl_tb, delta;

        /* Recompute the status only in native mode */
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                psl_tb = adapter->native->sl_ops->timebase_read(adapter);
                delta = abs(mftb() - psl_tb);

                /* CORE TB and PSL TB difference <= 16usecs ? */
                adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
                pr_devel("PSL timebase %s - delta: 0x%016llx\n",
                         (tb_to_ns(delta) < 16000) ? "synchronized" :
                         "not synchronized", tb_to_ns(delta));
        }
        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}

static ssize_t tunneled_ops_supported_show(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
}

static ssize_t reset_adapter_store(struct device *device,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;
        int val;

        rc = sscanf(buf, "%i", &val);
        if ((rc != 1) || (val != 1 && val != -1))
                return -EINVAL;

        /*
         * Try to lock the context mapping; this is only allowed when there
         * are no contexts attached to the adapter. Once taken, the lock
         * also prevents any new context from being activated.
         */
        if (val == 1) {
                rc = cxl_adapter_context_lock(adapter);
                if (rc)
                        goto out;

                rc = cxl_ops->adapter_reset(adapter);
                /* In case the reset failed, release the context lock */
                if (rc)
                        cxl_adapter_context_unlock(adapter);

        } else if (val == -1) {
                /* Perform a forced adapter reset */
                rc = cxl_ops->adapter_reset(adapter);
        }

out:
        return rc ? rc : count;
}

static ssize_t load_image_on_perst_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        if (!adapter->perst_loads_image)
                return scnprintf(buf, PAGE_SIZE, "none\n");

        if (adapter->perst_select_user)
                return scnprintf(buf, PAGE_SIZE, "user\n");
        return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static ssize_t load_image_on_perst_store(struct device *device,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;

        if (!strncmp(buf, "none", 4))
                adapter->perst_loads_image = false;
        else if (!strncmp(buf, "user", 4)) {
                adapter->perst_select_user = true;
                adapter->perst_loads_image = true;
        } else if (!strncmp(buf, "factory", 7)) {
                adapter->perst_select_user = false;
                adapter->perst_loads_image = true;
        } else
                return -EINVAL;

        if ((rc = cxl_update_image_control(adapter)))
                return rc;

        return count;
}

static ssize_t perst_reloads_same_image_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cxl *adapter = to_cxl_adapter(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
}

static ssize_t perst_reloads_same_image_store(struct device *device,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct cxl *adapter = to_cxl_adapter(device);
        int rc;
        int val;

        rc = sscanf(buf, "%i", &val);
        if ((rc != 1) || !(val == 1 || val == 0))
                return -EINVAL;

        adapter->perst_same_image = (val == 1);
        return count;
}

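/*
 * Adapter-level attributes; each one is only created if
 * cxl_ops->support_attributes() reports it for CXL_ADAPTER_ATTRS.
 */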
static struct device_attribute adapter_attrs[] = {
        __ATTR_RO(caia_version),
        __ATTR_RO(psl_revision),
        __ATTR_RO(base_image),
        __ATTR_RO(image_loaded),
        __ATTR_RO(psl_timebase_synced),
        __ATTR_RO(tunneled_ops_supported),
        __ATTR_RW(load_image_on_perst),
        __ATTR_RW(perst_reloads_same_image),
        __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};


/*********  AFU master specific attributes  **********************************/

static ssize_t mmio_size_show_master(struct device *device,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t pp_mmio_off_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}

static ssize_t pp_mmio_len_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        struct cxl_afu *afu = to_afu_chardev_m(device);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}

static struct device_attribute afu_master_attrs[] = {
        __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
        __ATTR_RO(pp_mmio_off),
        __ATTR_RO(pp_mmio_len),
};


/*********  AFU attributes  **************************************************/

static ssize_t mmio_size_show(struct device *device,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        if (afu->pp_size)
                return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t reset_store_afu(struct device *device,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        int rc;

        /* Not safe to reset if it is currently in use */
        mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr)) {
                rc = -EBUSY;
                goto err;
        }

        if ((rc = cxl_ops->afu_reset(afu)))
                goto err;

        rc = count;
err:
        mutex_unlock(&afu->contexts_lock);
        return rc;
}

static ssize_t irqs_min_show(struct device *device,
                             struct device_attribute *attr,
                             char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}

static ssize_t irqs_max_show(struct device *device,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}

static ssize_t irqs_max_store(struct device *device,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        ssize_t ret;
        int irqs_max;

        ret = sscanf(buf, "%i", &irqs_max);
        if (ret != 1)
                return -EINVAL;

        if (irqs_max < afu->pp_irqs)
                return -EINVAL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (irqs_max > afu->adapter->user_irqs)
                        return -EINVAL;
        } else {
                /* pHyp sets a per-AFU limit */
                if (irqs_max > afu->guest->max_ints)
                        return -EINVAL;
        }

        afu->irqs_max = irqs_max;
        return count;
}

static ssize_t modes_supported_show(struct device *device,
                                    struct device_attribute *attr, char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        char *p = buf, *end = buf + PAGE_SIZE;

        if (afu->modes_supported & CXL_MODE_DEDICATED)
                p += scnprintf(p, end - p, "dedicated_process\n");
        if (afu->modes_supported & CXL_MODE_DIRECTED)
                p += scnprintf(p, end - p, "afu_directed\n");
        return (p - buf);
}

static ssize_t prefault_mode_show(struct device *device,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        switch (afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
        case CXL_PREFAULT_ALL:
                return scnprintf(buf, PAGE_SIZE, "all\n");
        default:
                return scnprintf(buf, PAGE_SIZE, "none\n");
        }
}

static ssize_t prefault_mode_store(struct device *device,
                          struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        enum prefault_modes mode = -1;

        if (!strncmp(buf, "none", 4))
                mode = CXL_PREFAULT_NONE;
        else {
                if (!radix_enabled()) {

                        /* only allowed when not in radix mode */
                        if (!strncmp(buf, "work_element_descriptor", 23))
                                mode = CXL_PREFAULT_WED;
                        if (!strncmp(buf, "all", 3))
                                mode = CXL_PREFAULT_ALL;
                } else {
                        dev_err(device, "Cannot prefault with radix enabled\n");
                }
        }

        if (mode == -1)
                return -EINVAL;

        afu->prefault_mode = mode;
        return count;
}

static ssize_t mode_show(struct device *device,
                         struct device_attribute *attr,
                         char *buf)
{
        struct cxl_afu *afu = to_cxl_afu(device);

        if (afu->current_mode == CXL_MODE_DEDICATED)
                return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
        if (afu->current_mode == CXL_MODE_DIRECTED)
                return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
        return scnprintf(buf, PAGE_SIZE, "none\n");
}

static ssize_t mode_store(struct device *device, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(device);
        int old_mode, mode = -1;
        int rc = -EBUSY;

        /* can't change this if we have a user */
        mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr))
                goto err;

        if (!strncmp(buf, "dedicated_process", 17))
                mode = CXL_MODE_DEDICATED;
        if (!strncmp(buf, "afu_directed", 12))
                mode = CXL_MODE_DIRECTED;
        if (!strncmp(buf, "none", 4))
                mode = 0;

        if (mode == -1) {
                rc = -EINVAL;
                goto err;
        }

        /*
         * afu_deactivate_mode needs to be done outside the lock; prevent
         * other contexts from coming in before we are ready:
         */
        old_mode = afu->current_mode;
        afu->current_mode = 0;
        afu->num_procs = 0;

        mutex_unlock(&afu->contexts_lock);

        if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
                return rc;
        if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
                return rc;

        return count;
err:
        mutex_unlock(&afu->contexts_lock);
        return rc;
}

static ssize_t api_version_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}

static ssize_t api_version_compatible_show(struct device *device,
                                           struct device_attribute *attr,
                                           char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}

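/* Read handler for the AFU error buffer ("afu_err_buff") binary attribute */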
static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
                               struct bin_attribute *bin_attr, char *buf,
                               loff_t off, size_t count)
{
        struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));

        return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}

static struct device_attribute afu_attrs[] = {
        __ATTR_RO(mmio_size),
        __ATTR_RO(irqs_min),
        __ATTR_RW(irqs_max),
        __ATTR_RO(modes_supported),
        __ATTR_RW(mode),
        __ATTR_RW(prefault_mode),
        __ATTR_RO(api_version),
        __ATTR_RO(api_version_compatible),
        __ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};

int cxl_sysfs_adapter_add(struct cxl *adapter)
{
        struct device_attribute *dev_attr;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS)) {
                        if ((rc = device_create_file(&adapter->dev, dev_attr)))
                                goto err;
                }
        }
        return 0;
err:
        for (i--; i >= 0; i--) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS))
                        device_remove_file(&adapter->dev, dev_attr);
        }
        return rc;
}

void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
        struct device_attribute *dev_attr;
        int i;

        for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
                dev_attr = &adapter_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_ADAPTER_ATTRS))
                        device_remove_file(&adapter->dev, dev_attr);
        }
}

struct afu_config_record {
        struct kobject kobj;
        struct bin_attribute config_attr;
        struct list_head list;
        int cr;
        u16 device;
        u16 vendor;
        u32 class;
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)

static ssize_t vendor_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
}

static ssize_t device_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
}

static ssize_t class_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct afu_config_record *cr = to_cr(kobj);

        return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
}

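/*
 * Copy the raw config record out 8 bytes at a time, handling offsets that
 * are not 8-byte aligned; words that fail to read are filled with 0xff.
 */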
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
                               struct bin_attribute *bin_attr, char *buf,
                               loff_t off, size_t count)
{
        struct afu_config_record *cr = to_cr(kobj);
        struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));

        u64 i, j, val, rc;

        for (i = 0; i < count;) {
                rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
                if (rc)
                        val = ~0ULL;
                for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
                        buf[i] = (val >> (j * 8)) & 0xff;
        }

        return count;
}

static struct kobj_attribute vendor_attribute =
        __ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
        __ATTR_RO(device);
static struct kobj_attribute class_attribute =
        __ATTR_RO(class);

static struct attribute *afu_cr_attrs[] = {
        &vendor_attribute.attr,
        &device_attribute.attr,
        &class_attribute.attr,
        NULL,
};

static void release_afu_config_record(struct kobject *kobj)
{
        struct afu_config_record *cr = to_cr(kobj);

        kfree(cr);
}

static struct kobj_type afu_config_record_type = {
        .sysfs_ops = &kobj_sysfs_ops,
        .release = release_afu_config_record,
        .default_attrs = afu_cr_attrs,
};

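/*
 * Create the "cr%i" kobject for one AFU configuration record, along with
 * its vendor/device/class attributes and the raw, root-readable "config"
 * binary file.
 */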
static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
{
        struct afu_config_record *cr;
        int rc;

        cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
        if (!cr)
                return ERR_PTR(-ENOMEM);

        cr->cr = cr_idx;

        rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
        if (rc)
                goto err;
        rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
        if (rc)
                goto err;
        rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
        if (rc)
                goto err;
        cr->class >>= 8;

        /*
         * Export raw AFU PCIe like config record. For now this is read only by
         * root - we can expand that later to be readable by non-root and maybe
         * even writable provided we have a good use-case. Once we support
         * exposing AFUs through a virtual PHB they will get that for free from
         * Linux' PCI infrastructure, but until then it's not clear that we
         * need it for anything since the main use case is just identifying
         * AFUs, which can be done via the vendor, device and class attributes.
         */
        sysfs_bin_attr_init(&cr->config_attr);
        cr->config_attr.attr.name = "config";
        cr->config_attr.attr.mode = S_IRUSR;
        cr->config_attr.size = afu->crs_len;
        cr->config_attr.read = afu_read_config;

        rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
                                  &afu->dev.kobj, "cr%i", cr->cr);
        if (rc)
                goto err1;

        rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
        if (rc)
                goto err1;

        rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
        if (rc)
                goto err2;

        return cr;
err2:
        sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
err1:
        kobject_put(&cr->kobj);
        return ERR_PTR(rc);
err:
        kfree(cr);
        return ERR_PTR(rc);
}

void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        struct afu_config_record *cr, *tmp;
        int i;

        /* remove the err buffer bin attribute */
        if (afu->eb_len)
                device_remove_bin_file(&afu->dev, &afu->attr_eb);

        for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS))
                        device_remove_file(&afu->dev, &afu_attrs[i]);
        }

        list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
                sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
                kobject_put(&cr->kobj);
        }
}

int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        struct afu_config_record *cr;
        int i, rc;

        INIT_LIST_HEAD(&afu->crs);

        for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS)) {
                        if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
                                goto err;
                }
        }

        /* conditionally create the binary file for the error info buffer */
        if (afu->eb_len) {
                sysfs_attr_init(&afu->attr_eb.attr);

                afu->attr_eb.attr.name = "afu_err_buff";
                afu->attr_eb.attr.mode = S_IRUGO;
                afu->attr_eb.size = afu->eb_len;
                afu->attr_eb.read = afu_eb_read;

                rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
                if (rc) {
                        dev_err(&afu->dev,
                                "Unable to create eb attr for the afu. Err(%d)\n",
                                rc);
                        goto err;
                }
        }

        for (i = 0; i < afu->crs_num; i++) {
                cr = cxl_sysfs_afu_new_cr(afu, i);
                if (IS_ERR(cr)) {
                        rc = PTR_ERR(cr);
                        goto err1;
                }
                list_add(&cr->list, &afu->crs);
        }

        return 0;

err1:
        cxl_sysfs_afu_remove(afu);
        return rc;
err:
        /* reset eb_len as we haven't created the bin attr */
        afu->eb_len = 0;

        for (i--; i >= 0; i--) {
                dev_attr = &afu_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_ATTRS))
                        device_remove_file(&afu->dev, &afu_attrs[i]);
        }
        return rc;
}

int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        int i, rc;

        for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS)) {
                        if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
                                goto err;
                }
        }

        return 0;

err:
        for (i--; i >= 0; i--) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS))
                        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
        }
        return rc;
}

void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
        struct device_attribute *dev_attr;
        int i;

        for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
                dev_attr = &afu_master_attrs[i];
                if (cxl_ops->support_attributes(dev_attr->attr.name,
                                                CXL_AFU_MASTER_ATTRS))
                        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
        }
}