linux/drivers/cxl/core/bus.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL-aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL-aware drivers must register
 * with the CXL core via these interfaces in order to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */
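
/*
 * For orientation only: an illustrative (not exhaustive) sketch of the
 * object hierarchy this file instantiates, assuming a single host bridge
 * with one decoder. Device names follow the dev_set_name() calls below;
 * the exact topology depends on the registering drivers.
 *
 *   root0                     top-level cxl_port ("root%d", no parent port)
 *     uport -> ...            link to the "physical" upstream device
 *     dport0 -> ...           link(s) to downstream port devices
 *     decoder0.0/             root decoder ("decoder%d.%d" = port id, cxld id)
 *       start size locked target_list
 *       cap_pmem cap_ram cap_type2 cap_type3
 *     port1/                  switch-level cxl_port ("port%d")
 *       decoder1.0/           switch decoder
 *         start size locked target_list target_type
 */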

static DEFINE_IDA(cxl_port_ida);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *cxl_base_attributes[] = {
        &dev_attr_devtype.attr,
        NULL,
};

struct attribute_group cxl_base_attribute_group = {
        .attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", cxld->range.start);
}
static DEVICE_ATTR_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
                           struct device_attribute *attr, char *buf) \
{                                                                    \
        struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
        return sysfs_emit(buf, "%s\n",                               \
                          (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

static ssize_t target_type_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        switch (cxld->target_type) {
        case CXL_DECODER_ACCELERATOR:
                return sysfs_emit(buf, "accelerator\n");
        case CXL_DECODER_EXPANDER:
                return sysfs_emit(buf, "expander\n");
        }
        return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t target_list_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);
        ssize_t offset = 0;
        int i, rc = 0;

        device_lock(dev);
        for (i = 0; i < cxld->interleave_ways; i++) {
                struct cxl_dport *dport = cxld->target[i];
                struct cxl_dport *next = NULL;

                if (!dport)
                        break;

                if (i + 1 < cxld->interleave_ways)
                        next = cxld->target[i + 1];
                rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
                                   next ? "," : "");
                if (rc < 0)
                        break;
                offset += rc;
        }
        device_unlock(dev);

        if (rc < 0)
                return rc;

        rc = sysfs_emit_at(buf, offset, "\n");
        if (rc < 0)
                return rc;

        return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static struct attribute *cxl_decoder_base_attrs[] = {
        &dev_attr_start.attr,
        &dev_attr_size.attr,
        &dev_attr_locked.attr,
        &dev_attr_target_list.attr,
        NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
        .attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
        &dev_attr_cap_pmem.attr,
        &dev_attr_cap_ram.attr,
        &dev_attr_cap_type2.attr,
        &dev_attr_cap_type3.attr,
        NULL,
};

static struct attribute_group cxl_decoder_root_attribute_group = {
        .attrs = cxl_decoder_root_attrs,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
        &cxl_decoder_root_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
        &dev_attr_target_type.attr,
        NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
        .attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
        &cxl_decoder_switch_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static void cxl_decoder_release(struct device *dev)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);
        struct cxl_port *port = to_cxl_port(dev->parent);

        ida_free(&port->decoder_ida, cxld->id);
        kfree(cxld);
}

static const struct device_type cxl_decoder_switch_type = {
        .name = "cxl_decoder_switch",
        .release = cxl_decoder_release,
        .groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
        .name = "cxl_decoder_root",
        .release = cxl_decoder_release,
        .groups = cxl_decoder_root_attribute_groups,
};

bool is_root_decoder(struct device *dev)
{
        return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_GPL(is_root_decoder);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
                          "not a cxl_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_decoder);

static void cxl_dport_release(struct cxl_dport *dport)
{
        list_del(&dport->list);
        put_device(dport->dport);
        kfree(dport);
}

static void cxl_port_release(struct device *dev)
{
        struct cxl_port *port = to_cxl_port(dev);
        struct cxl_dport *dport, *_d;

        device_lock(dev);
        list_for_each_entry_safe(dport, _d, &port->dports, list)
                cxl_dport_release(dport);
        device_unlock(dev);
        ida_free(&cxl_port_ida, port->id);
        kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
        &cxl_base_attribute_group,
        NULL,
};

static const struct device_type cxl_port_type = {
        .name = "cxl_port",
        .release = cxl_port_release,
        .groups = cxl_port_attribute_groups,
};

struct cxl_port *to_cxl_port(struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
                          "not a cxl_port device\n"))
                return NULL;
        return container_of(dev, struct cxl_port, dev);
}

static void unregister_port(void *_port)
{
        struct cxl_port *port = _port;
        struct cxl_dport *dport;

        device_lock(&port->dev);
        list_for_each_entry(dport, &port->dports, list) {
                char link_name[CXL_TARGET_STRLEN];

                if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
                             dport->port_id) >= CXL_TARGET_STRLEN)
                        continue;
                sysfs_remove_link(&port->dev.kobj, link_name);
        }
        device_unlock(&port->dev);
        device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
        struct cxl_port *port = _port;

        sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
        int rc;

        rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
        if (rc)
                return rc;
        return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static struct cxl_port *cxl_port_alloc(struct device *uport,
                                       resource_size_t component_reg_phys,
                                       struct cxl_port *parent_port)
{
        struct cxl_port *port;
        struct device *dev;
        int rc;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
        if (rc < 0)
                goto err;
        port->id = rc;

        /*
         * The top-level cxl_port "cxl_root" does not have a cxl_port as
         * its parent and it does not have any corresponding component
         * registers as its decode is described by a fixed platform
         * description.
         */
        dev = &port->dev;
        if (parent_port)
                dev->parent = &parent_port->dev;
        else
                dev->parent = uport;

        port->uport = uport;
        port->component_reg_phys = component_reg_phys;
        ida_init(&port->decoder_ida);
        INIT_LIST_HEAD(&port->dports);

        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_port_type;

        return port;

err:
        kfree(port);
        return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_port: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
                                   resource_size_t component_reg_phys,
                                   struct cxl_port *parent_port)
{
        struct cxl_port *port;
        struct device *dev;
        int rc;

        port = cxl_port_alloc(uport, component_reg_phys, parent_port);
        if (IS_ERR(port))
                return port;

        dev = &port->dev;
        if (parent_port)
                rc = dev_set_name(dev, "port%d", port->id);
        else
                rc = dev_set_name(dev, "root%d", port->id);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_port, port);
        if (rc)
                return ERR_PTR(rc);

        rc = devm_cxl_link_uport(host, port);
        if (rc)
                return ERR_PTR(rc);

        return port;

err:
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_port);
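
/*
 * Illustrative (hypothetical) usage from a host driver's probe path: the
 * root port has no parent and no component registers, while a host bridge
 * port hangs off of it. The bridge device and register address below are
 * assumptions for the sketch, not requirements of this API.
 *
 *	struct cxl_port *root, *hb;
 *
 *	root = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *
 *	hb = devm_cxl_add_port(host, &bridge_dev->dev, component_reg_phys, root);
 *	if (IS_ERR(hb))
 *		return PTR_ERR(hb);
 */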

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
        struct cxl_dport *dport;

        device_lock_assert(&port->dev);
        list_for_each_entry (dport, &port->dports, list)
                if (dport->port_id == id)
                        return dport;
        return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
        struct cxl_dport *dup;

        device_lock(&port->dev);
        dup = find_dport(port, new->port_id);
        if (dup)
                dev_err(&port->dev,
                        "unable to add dport%d-%s non-unique port id (%s)\n",
                        new->port_id, dev_name(new->dport),
                        dev_name(dup->dport));
        else
                list_add_tail(&new->list, &port->dports);
        device_unlock(&port->dev);

        return dup ? -EEXIST : 0;
}

/**
 * cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that all allocations and links are undone by cxl_port deletion
 * and release.
 */
int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
                  resource_size_t component_reg_phys)
{
        char link_name[CXL_TARGET_STRLEN];
        struct cxl_dport *dport;
        int rc;

        if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
            CXL_TARGET_STRLEN)
                return -EINVAL;

        dport = kzalloc(sizeof(*dport), GFP_KERNEL);
        if (!dport)
                return -ENOMEM;

        INIT_LIST_HEAD(&dport->list);
        dport->dport = get_device(dport_dev);
        dport->port_id = port_id;
        dport->component_reg_phys = component_reg_phys;
        dport->port = port;

        rc = add_dport(port, dport);
        if (rc)
                goto err;

        rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
        if (rc)
                goto err;

        return 0;
err:
        cxl_dport_release(dport);
        return rc;
}
EXPORT_SYMBOL_GPL(cxl_add_dport);
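
/*
 * Illustrative usage: enumerate downstream ports discovered by the caller
 * (e.g. from firmware tables or PCI topology) and attach them to @port
 * before any decoders are allocated for it. The arrays and loop variables
 * below are hypothetical.
 *
 *	for (i = 0; i < nr_dports; i++) {
 *		rc = cxl_add_dport(port, dport_dev[i], ids[i], reg_phys[i]);
 *		if (rc)
 *			return rc;
 *	}
 */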

static struct cxl_decoder *
cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
                  resource_size_t len, int interleave_ways,
                  int interleave_granularity, enum cxl_decoder_type type,
                  unsigned long flags)
{
        struct cxl_decoder *cxld;
        struct device *dev;
        int rc = 0;

        if (interleave_ways < 1)
                return ERR_PTR(-EINVAL);

        device_lock(&port->dev);
        if (list_empty(&port->dports))
                rc = -EINVAL;
        device_unlock(&port->dev);
        if (rc)
                return ERR_PTR(rc);

        cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
        if (!cxld)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
        if (rc < 0)
                goto err;

        *cxld = (struct cxl_decoder) {
                .id = rc,
                .range = {
                        .start = base,
                        .end = base + len - 1,
                },
                .flags = flags,
                .interleave_ways = interleave_ways,
                .interleave_granularity = interleave_granularity,
                .target_type = type,
        };

        /* handle implied target_list */
        if (interleave_ways == 1)
                cxld->target[0] =
                        list_first_entry(&port->dports, struct cxl_dport, list);
        dev = &cxld->dev;
        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->parent = &port->dev;
        dev->bus = &cxl_bus_type;

        /* root ports do not have a cxl_port_type parent */
        if (port->dev.parent->type == &cxl_port_type)
                dev->type = &cxl_decoder_switch_type;
        else
                dev->type = &cxl_decoder_root_type;

        return cxld;
err:
        kfree(cxld);
        return ERR_PTR(rc);
}

struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
                     resource_size_t base, resource_size_t len,
                     int interleave_ways, int interleave_granularity,
                     enum cxl_decoder_type type, unsigned long flags)
{
        struct cxl_decoder *cxld;
        struct device *dev;
        int rc;

        cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
                                 interleave_granularity, type, flags);
        if (IS_ERR(cxld))
                return cxld;

        dev = &cxld->dev;
        rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
        if (rc)
                return ERR_PTR(rc);
        return cxld;

err:
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
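
/*
 * Illustrative usage: publish a single-way, persistent-memory-capable root
 * decoder for a fixed platform window. The window_base / window_size values
 * and the 256-byte granularity are made up for the sketch; the type and
 * flags come from the enums and flag bits used above.
 *
 *	cxld = devm_cxl_add_decoder(host, root_port, 1, window_base,
 *				    window_size, 1, 256, CXL_DECODER_EXPANDER,
 *				    CXL_DECODER_F_PMEM);
 *	if (IS_ERR(cxld))
 *		return PTR_ERR(cxld);
 */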

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
                          const char *modname)
{
        if (!cxl_drv->probe) {
                pr_debug("%s ->probe() must be specified\n", modname);
                return -EINVAL;
        }

        if (!cxl_drv->name) {
                pr_debug("%s ->name must be specified\n", modname);
                return -EINVAL;
        }

        if (!cxl_drv->id) {
                pr_debug("%s ->id must be specified\n", modname);
                return -EINVAL;
        }

        cxl_drv->drv.bus = &cxl_bus_type;
        cxl_drv->drv.owner = owner;
        cxl_drv->drv.mod_name = modname;
        cxl_drv->drv.name = cxl_drv->name;

        return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(__cxl_driver_register);

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
        driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(cxl_driver_unregister);
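
/*
 * Illustrative registration of a cxl_driver. This assumes the
 * cxl_driver_register() / module_cxl_driver() convenience wrappers in cxl.h
 * that pass THIS_MODULE and KBUILD_MODNAME to __cxl_driver_register(); the
 * probe callback below is hypothetical, and the id matches what
 * cxl_bus_match() compares against.
 *
 *	static struct cxl_driver example_nvdimm_driver = {
 *		.name = "cxl_nvdimm",
 *		.probe = example_nvdimm_probe,
 *		.id = CXL_DEVICE_NVDIMM,
 *	};
 *	module_cxl_driver(example_nvdimm_driver);
 */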

static int cxl_device_id(struct device *dev)
{
        if (dev->type == &cxl_nvdimm_bridge_type)
                return CXL_DEVICE_NVDIMM_BRIDGE;
        if (dev->type == &cxl_nvdimm_type)
                return CXL_DEVICE_NVDIMM;
        return 0;
}

static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
                              cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
        return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
        return to_cxl_drv(dev->driver)->probe(dev);
}

static void cxl_bus_remove(struct device *dev)
{
        struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

        if (cxl_drv->remove)
                cxl_drv->remove(dev);
}

struct bus_type cxl_bus_type = {
        .name = "cxl",
        .uevent = cxl_bus_uevent,
        .match = cxl_bus_match,
        .probe = cxl_bus_probe,
        .remove = cxl_bus_remove,
};
EXPORT_SYMBOL_GPL(cxl_bus_type);

static __init int cxl_core_init(void)
{
        int rc;

        rc = cxl_memdev_init();
        if (rc)
                return rc;

        rc = bus_register(&cxl_bus_type);
        if (rc)
                goto err;
        return 0;

err:
        cxl_memdev_exit();
        return rc;
}

static void cxl_core_exit(void)
{
        bus_unregister(&cxl_bus_type);
        cxl_memdev_exit();
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");