linux/drivers/base/platform.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * platform.c - platform 'pseudo' bus for legacy devices
   4 *
   5 * Copyright (c) 2002-3 Patrick Mochel
   6 * Copyright (c) 2002-3 Open Source Development Labs
   7 *
   8 * Please see Documentation/driver-api/driver-model/platform.rst for more
   9 * information.
  10 */
  11
  12#include <linux/string.h>
  13#include <linux/platform_device.h>
  14#include <linux/of_device.h>
  15#include <linux/of_irq.h>
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/interrupt.h>
  19#include <linux/ioport.h>
  20#include <linux/dma-mapping.h>
  21#include <linux/memblock.h>
  22#include <linux/err.h>
  23#include <linux/slab.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/pm_domain.h>
  26#include <linux/idr.h>
  27#include <linux/acpi.h>
  28#include <linux/clk/clk-conf.h>
  29#include <linux/limits.h>
  30#include <linux/property.h>
  31#include <linux/kmemleak.h>
  32#include <linux/types.h>
  33
  34#include "base.h"
  35#include "power/power.h"
  36
  37/* For automatically allocated device IDs */
  38static DEFINE_IDA(platform_devid_ida);
  39
  40struct device platform_bus = {
  41        .init_name      = "platform",
  42};
  43EXPORT_SYMBOL_GPL(platform_bus);
  44
  45/**
  46 * platform_get_resource - get a resource for a device
  47 * @dev: platform device
  48 * @type: resource type
  49 * @num: resource index
  50 *
  51 * Return: a pointer to the resource or NULL on failure.
  52 */
  53struct resource *platform_get_resource(struct platform_device *dev,
  54                                       unsigned int type, unsigned int num)
  55{
  56        u32 i;
  57
  58        for (i = 0; i < dev->num_resources; i++) {
  59                struct resource *r = &dev->resource[i];
  60
  61                if (type == resource_type(r) && num-- == 0)
  62                        return r;
  63        }
  64        return NULL;
  65}
  66EXPORT_SYMBOL_GPL(platform_get_resource);
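
/*
 * Illustrative sketch (not part of this file): a driver's probe() routine
 * might look up its first MMIO window with platform_get_resource().  The
 * "foo" naming and the use of index 0 are assumptions for the example.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *regs;
 *
 *		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!regs)
 *			return -ENODEV;
 *		dev_dbg(&pdev->dev, "registers at %pR\n", regs);
 *		return 0;
 *	}
 */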
  67
  68struct resource *platform_get_mem_or_io(struct platform_device *dev,
  69                                        unsigned int num)
  70{
  71        u32 i;
  72
  73        for (i = 0; i < dev->num_resources; i++) {
  74                struct resource *r = &dev->resource[i];
  75
  76                if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
  77                        return r;
  78        }
  79        return NULL;
  80}
  81EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
  82
  83#ifdef CONFIG_HAS_IOMEM
  84/**
  85 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
  86 *                                          platform device and get resource
  87 *
  88 * @pdev: platform device to use both for memory resource lookup as well as
  89 *        resource management
  90 * @index: resource index
  91 * @res: optional output parameter to store a pointer to the obtained resource.
  92 *
  93 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
  94 * on failure.
  95 */
  96void __iomem *
  97devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
  98                                unsigned int index, struct resource **res)
  99{
 100        struct resource *r;
 101
 102        r = platform_get_resource(pdev, IORESOURCE_MEM, index);
 103        if (res)
 104                *res = r;
 105        return devm_ioremap_resource(&pdev->dev, r);
 106}
 107EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
 108
 109/**
 110 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 111 *                                  device
 112 *
 113 * @pdev: platform device to use both for memory resource lookup as well as
 114 *        resource management
 115 * @index: resource index
 116 *
 117 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 118 * on failure.
 119 */
 120void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
 121                                             unsigned int index)
 122{
 123        return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
 124}
 125EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
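
/*
 * Illustrative sketch (not part of this file): the common probe() pattern of
 * mapping the first memory resource via devm_platform_ioremap_resource().
 * The "foo" name is an assumption for the example.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *
 *		base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *		return 0;
 *	}
 */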
 126
 127/**
 128 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
 129 *                                         a platform device, retrieve the
 130 *                                         resource by name
 131 *
 132 * @pdev: platform device to use both for memory resource lookup as well as
 133 *        resource management
 134 * @name: name of the resource
 135 *
 136 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 137 * on failure.
 138 */
 139void __iomem *
 140devm_platform_ioremap_resource_byname(struct platform_device *pdev,
 141                                      const char *name)
 142{
 143        struct resource *res;
 144
 145        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
 146        return devm_ioremap_resource(&pdev->dev, res);
 147}
 148EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
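
/*
 * Illustrative sketch (not part of this file): mapping a named memory
 * resource, e.g. one described by a DT "reg-names" entry.  The "ctrl" name
 * is an assumption for the example.
 *
 *	void __iomem *ctrl;
 *
 *	ctrl = devm_platform_ioremap_resource_byname(pdev, "ctrl");
 *	if (IS_ERR(ctrl))
 *		return PTR_ERR(ctrl);
 */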
 149#endif /* CONFIG_HAS_IOMEM */
 150
 151/**
 152 * platform_get_irq_optional - get an optional IRQ for a device
 153 * @dev: platform device
 154 * @num: IRQ number index
 155 *
  156 * Gets an IRQ for a platform device. Device drivers should check the return
  157 * value for errors so as not to pass a negative integer value to the
  158 * request_irq() APIs. This is the same as platform_get_irq(), except that it
  159 * does not print an error message if an IRQ cannot be obtained.
 160 *
 161 * For example::
 162 *
 163 *              int irq = platform_get_irq_optional(pdev, 0);
 164 *              if (irq < 0)
 165 *                      return irq;
 166 *
 167 * Return: non-zero IRQ number on success, negative error number on failure.
 168 */
 169int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
 170{
 171        int ret;
 172#ifdef CONFIG_SPARC
 173        /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
 174        if (!dev || num >= dev->archdata.num_irqs)
 175                goto out_not_found;
 176        ret = dev->archdata.irqs[num];
 177        goto out;
 178#else
 179        struct resource *r;
 180
 181        if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
 182                ret = of_irq_get(dev->dev.of_node, num);
 183                if (ret > 0 || ret == -EPROBE_DEFER)
 184                        goto out;
 185        }
 186
 187        r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 188        if (has_acpi_companion(&dev->dev)) {
 189                if (r && r->flags & IORESOURCE_DISABLED) {
 190                        ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
 191                        if (ret)
 192                                goto out;
 193                }
 194        }
 195
 196        /*
 197         * The resources may pass trigger flags to the irqs that need
 198         * to be set up. It so happens that the trigger flags for
 199         * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
 200         * settings.
 201         */
 202        if (r && r->flags & IORESOURCE_BITS) {
 203                struct irq_data *irqd;
 204
 205                irqd = irq_get_irq_data(r->start);
 206                if (!irqd)
 207                        goto out_not_found;
 208                irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
 209        }
 210
 211        if (r) {
 212                ret = r->start;
 213                goto out;
 214        }
 215
 216        /*
 217         * For the index 0 interrupt, allow falling back to GpioInt
 218         * resources. While a device could have both Interrupt and GpioInt
 219         * resources, making this fallback ambiguous, in many common cases
 220         * the device will only expose one IRQ, and this fallback
 221         * allows a common code path across either kind of resource.
 222         */
 223        if (num == 0 && has_acpi_companion(&dev->dev)) {
 224                ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
 225                /* Our callers expect -ENXIO for missing IRQs. */
 226                if (ret >= 0 || ret == -EPROBE_DEFER)
 227                        goto out;
 228        }
 229
 230#endif
 231out_not_found:
 232        ret = -ENXIO;
 233out:
 234        WARN(ret == 0, "0 is an invalid IRQ number\n");
 235        return ret;
 236}
 237EXPORT_SYMBOL_GPL(platform_get_irq_optional);
 238
 239/**
 240 * platform_get_irq - get an IRQ for a device
 241 * @dev: platform device
 242 * @num: IRQ number index
 243 *
 244 * Gets an IRQ for a platform device and prints an error message if finding the
 245 * IRQ fails. Device drivers should check the return value for errors so as to
 246 * not pass a negative integer value to the request_irq() APIs.
 247 *
 248 * For example::
 249 *
 250 *              int irq = platform_get_irq(pdev, 0);
 251 *              if (irq < 0)
 252 *                      return irq;
 253 *
 254 * Return: non-zero IRQ number on success, negative error number on failure.
 255 */
 256int platform_get_irq(struct platform_device *dev, unsigned int num)
 257{
 258        int ret;
 259
 260        ret = platform_get_irq_optional(dev, num);
 261        if (ret < 0)
 262                return dev_err_probe(&dev->dev, ret,
 263                                     "IRQ index %u not found\n", num);
 264
 265        return ret;
 266}
 267EXPORT_SYMBOL_GPL(platform_get_irq);
 268
 269/**
 270 * platform_irq_count - Count the number of IRQs a platform device uses
 271 * @dev: platform device
 272 *
  273 * Return: Number of IRQs a platform device uses or -EPROBE_DEFER
 274 */
 275int platform_irq_count(struct platform_device *dev)
 276{
 277        int ret, nr = 0;
 278
 279        while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
 280                nr++;
 281
 282        if (ret == -EPROBE_DEFER)
 283                return ret;
 284
 285        return nr;
 286}
 287EXPORT_SYMBOL_GPL(platform_irq_count);
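
/*
 * Illustrative sketch (not part of this file): sizing per-IRQ bookkeeping
 * from the number of IRQs the device describes.  The "foo" names are
 * assumptions for the example.
 *
 *	int nr = platform_irq_count(pdev);
 *
 *	if (nr < 0)
 *		return nr;
 *	foo->irqs = devm_kcalloc(&pdev->dev, nr, sizeof(*foo->irqs),
 *				 GFP_KERNEL);
 *	if (!foo->irqs)
 *		return -ENOMEM;
 */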
 288
 289struct irq_affinity_devres {
 290        unsigned int count;
 291        unsigned int irq[];
 292};
 293
 294static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
 295{
 296        struct resource *r;
 297
 298        r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
 299        if (r)
 300                irqresource_disabled(r, 0);
 301}
 302
 303static void devm_platform_get_irqs_affinity_release(struct device *dev,
 304                                                    void *res)
 305{
 306        struct irq_affinity_devres *ptr = res;
 307        int i;
 308
 309        for (i = 0; i < ptr->count; i++) {
 310                irq_dispose_mapping(ptr->irq[i]);
 311
 312                if (has_acpi_companion(dev))
 313                        platform_disable_acpi_irq(to_platform_device(dev), i);
 314        }
 315}
 316
 317/**
 318 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
 319 *                              device using an interrupt affinity descriptor
 320 * @dev: platform device pointer
 321 * @affd: affinity descriptor
 322 * @minvec: minimum count of interrupt vectors
 323 * @maxvec: maximum count of interrupt vectors
 324 * @irqs: pointer holder for IRQ numbers
 325 *
  326 * Gets a set of IRQs for a platform device, and updates the IRQ affinity
  327 * according to the passed affinity descriptor.
 328 *
 329 * Return: Number of vectors on success, negative error number on failure.
 330 */
 331int devm_platform_get_irqs_affinity(struct platform_device *dev,
 332                                    struct irq_affinity *affd,
 333                                    unsigned int minvec,
 334                                    unsigned int maxvec,
 335                                    int **irqs)
 336{
 337        struct irq_affinity_devres *ptr;
 338        struct irq_affinity_desc *desc;
 339        size_t size;
 340        int i, ret, nvec;
 341
 342        if (!affd)
 343                return -EPERM;
 344
 345        if (maxvec < minvec)
 346                return -ERANGE;
 347
 348        nvec = platform_irq_count(dev);
 349        if (nvec < 0)
 350                return nvec;
 351
 352        if (nvec < minvec)
 353                return -ENOSPC;
 354
 355        nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
 356        if (nvec < minvec)
 357                return -ENOSPC;
 358
 359        if (nvec > maxvec)
 360                nvec = maxvec;
 361
 362        size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
 363        ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
 364                           GFP_KERNEL);
 365        if (!ptr)
 366                return -ENOMEM;
 367
 368        ptr->count = nvec;
 369
 370        for (i = 0; i < nvec; i++) {
 371                int irq = platform_get_irq(dev, i);
 372                if (irq < 0) {
 373                        ret = irq;
 374                        goto err_free_devres;
 375                }
 376                ptr->irq[i] = irq;
 377        }
 378
 379        desc = irq_create_affinity_masks(nvec, affd);
 380        if (!desc) {
 381                ret = -ENOMEM;
 382                goto err_free_devres;
 383        }
 384
 385        for (i = 0; i < nvec; i++) {
 386                ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
 387                if (ret) {
 388                        dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
 389                                ptr->irq[i], ret);
 390                        goto err_free_desc;
 391                }
 392        }
 393
 394        devres_add(&dev->dev, ptr);
 395
 396        kfree(desc);
 397
 398        *irqs = ptr->irq;
 399
 400        return nvec;
 401
 402err_free_desc:
 403        kfree(desc);
 404err_free_devres:
 405        devres_free(ptr);
 406        return ret;
 407}
 408EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
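
/*
 * Illustrative sketch (not part of this file): requesting a spread set of
 * IRQ vectors.  The empty affinity descriptor and the 1..4 vector range are
 * assumptions for the example.
 *
 *	struct irq_affinity affd = { };
 *	int *irqs;
 *	int nvec;
 *
 *	nvec = devm_platform_get_irqs_affinity(pdev, &affd, 1, 4, &irqs);
 *	if (nvec < 0)
 *		return nvec;
 *
 * irqs[0] .. irqs[nvec - 1] may then be passed to request_irq().
 */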
 409
 410/**
 411 * platform_get_resource_byname - get a resource for a device by name
 412 * @dev: platform device
 413 * @type: resource type
 414 * @name: resource name
 415 */
 416struct resource *platform_get_resource_byname(struct platform_device *dev,
 417                                              unsigned int type,
 418                                              const char *name)
 419{
 420        u32 i;
 421
 422        for (i = 0; i < dev->num_resources; i++) {
 423                struct resource *r = &dev->resource[i];
 424
 425                if (unlikely(!r->name))
 426                        continue;
 427
 428                if (type == resource_type(r) && !strcmp(r->name, name))
 429                        return r;
 430        }
 431        return NULL;
 432}
 433EXPORT_SYMBOL_GPL(platform_get_resource_byname);
 434
 435static int __platform_get_irq_byname(struct platform_device *dev,
 436                                     const char *name)
 437{
 438        struct resource *r;
 439        int ret;
 440
 441        if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
 442                ret = of_irq_get_byname(dev->dev.of_node, name);
 443                if (ret > 0 || ret == -EPROBE_DEFER)
 444                        return ret;
 445        }
 446
 447        r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
 448        if (r) {
 449                WARN(r->start == 0, "0 is an invalid IRQ number\n");
 450                return r->start;
 451        }
 452
 453        return -ENXIO;
 454}
 455
 456/**
 457 * platform_get_irq_byname - get an IRQ for a device by name
 458 * @dev: platform device
 459 * @name: IRQ name
 460 *
  461 * Get an IRQ like platform_get_irq(), but by name rather than by index.
 462 *
 463 * Return: non-zero IRQ number on success, negative error number on failure.
 464 */
 465int platform_get_irq_byname(struct platform_device *dev, const char *name)
 466{
 467        int ret;
 468
 469        ret = __platform_get_irq_byname(dev, name);
 470        if (ret < 0)
 471                return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
 472                                     name);
 473        return ret;
 474}
 475EXPORT_SYMBOL_GPL(platform_get_irq_byname);
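
/*
 * Illustrative sketch (not part of this file): fetching an interrupt by the
 * name it carries in e.g. a DT "interrupt-names" property.  The "tx" name
 * is an assumption for the example.
 *
 *	int irq = platform_get_irq_byname(pdev, "tx");
 *
 *	if (irq < 0)
 *		return irq;
 */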
 476
 477/**
 478 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 479 * @dev: platform device
 480 * @name: IRQ name
 481 *
  482 * Get an optional IRQ by name like platform_get_irq_byname(), except that it
  483 * does not print an error message if an IRQ cannot be obtained.
 484 *
 485 * Return: non-zero IRQ number on success, negative error number on failure.
 486 */
 487int platform_get_irq_byname_optional(struct platform_device *dev,
 488                                     const char *name)
 489{
 490        return __platform_get_irq_byname(dev, name);
 491}
 492EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
 493
 494/**
  495 * platform_add_devices - add a number of platform devices
 496 * @devs: array of platform devices to add
 497 * @num: number of platform devices in array
 498 */
 499int platform_add_devices(struct platform_device **devs, int num)
 500{
 501        int i, ret = 0;
 502
 503        for (i = 0; i < num; i++) {
 504                ret = platform_device_register(devs[i]);
 505                if (ret) {
 506                        while (--i >= 0)
 507                                platform_device_unregister(devs[i]);
 508                        break;
 509                }
 510        }
 511
 512        return ret;
 513}
 514EXPORT_SYMBOL_GPL(platform_add_devices);
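
/*
 * Illustrative sketch (not part of this file): board code registering a
 * small batch of statically defined devices in one call.  The device
 * definitions are assumptions for the example.
 *
 *	static struct platform_device foo_device = {
 *		.name	= "foo",
 *		.id	= 0,
 *	};
 *
 *	static struct platform_device *board_devs[] __initdata = {
 *		&foo_device,
 *	};
 *
 *	platform_add_devices(board_devs, ARRAY_SIZE(board_devs));
 */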
 515
 516struct platform_object {
 517        struct platform_device pdev;
 518        char name[];
 519};
 520
 521/*
  522 * Set up default DMA mask for platform devices if they weren't
 523 * previously set by the architecture / DT.
 524 */
 525static void setup_pdev_dma_masks(struct platform_device *pdev)
 526{
 527        pdev->dev.dma_parms = &pdev->dma_parms;
 528
 529        if (!pdev->dev.coherent_dma_mask)
 530                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 531        if (!pdev->dev.dma_mask) {
 532                pdev->platform_dma_mask = DMA_BIT_MASK(32);
 533                pdev->dev.dma_mask = &pdev->platform_dma_mask;
 534        }
  535}
 536
 537/**
 538 * platform_device_put - destroy a platform device
 539 * @pdev: platform device to free
 540 *
 541 * Free all memory associated with a platform device.  This function must
 542 * _only_ be externally called in error cases.  All other usage is a bug.
 543 */
 544void platform_device_put(struct platform_device *pdev)
 545{
 546        if (!IS_ERR_OR_NULL(pdev))
 547                put_device(&pdev->dev);
 548}
 549EXPORT_SYMBOL_GPL(platform_device_put);
 550
 551static void platform_device_release(struct device *dev)
 552{
 553        struct platform_object *pa = container_of(dev, struct platform_object,
 554                                                  pdev.dev);
 555
 556        of_node_put(pa->pdev.dev.of_node);
 557        kfree(pa->pdev.dev.platform_data);
 558        kfree(pa->pdev.mfd_cell);
 559        kfree(pa->pdev.resource);
 560        kfree(pa->pdev.driver_override);
 561        kfree(pa);
 562}
 563
 564/**
 565 * platform_device_alloc - create a platform device
 566 * @name: base name of the device we're adding
 567 * @id: instance id
 568 *
 569 * Create a platform device object which can have other objects attached
 570 * to it, and which will have attached objects freed when it is released.
 571 */
 572struct platform_device *platform_device_alloc(const char *name, int id)
 573{
 574        struct platform_object *pa;
 575
 576        pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
 577        if (pa) {
 578                strcpy(pa->name, name);
 579                pa->pdev.name = pa->name;
 580                pa->pdev.id = id;
 581                device_initialize(&pa->pdev.dev);
 582                pa->pdev.dev.release = platform_device_release;
 583                setup_pdev_dma_masks(&pa->pdev);
 584        }
 585
 586        return pa ? &pa->pdev : NULL;
 587}
 588EXPORT_SYMBOL_GPL(platform_device_alloc);
 589
 590/**
 591 * platform_device_add_resources - add resources to a platform device
 592 * @pdev: platform device allocated by platform_device_alloc to add resources to
 593 * @res: set of resources that needs to be allocated for the device
 594 * @num: number of resources
 595 *
 596 * Add a copy of the resources to the platform device.  The memory
 597 * associated with the resources will be freed when the platform device is
 598 * released.
 599 */
 600int platform_device_add_resources(struct platform_device *pdev,
 601                                  const struct resource *res, unsigned int num)
 602{
 603        struct resource *r = NULL;
 604
 605        if (res) {
 606                r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
 607                if (!r)
 608                        return -ENOMEM;
 609        }
 610
 611        kfree(pdev->resource);
 612        pdev->resource = r;
 613        pdev->num_resources = num;
 614        return 0;
 615}
 616EXPORT_SYMBOL_GPL(platform_device_add_resources);
 617
 618/**
 619 * platform_device_add_data - add platform-specific data to a platform device
 620 * @pdev: platform device allocated by platform_device_alloc to add resources to
 621 * @data: platform specific data for this platform device
 622 * @size: size of platform specific data
 623 *
 624 * Add a copy of platform specific data to the platform device's
 625 * platform_data pointer.  The memory associated with the platform data
 626 * will be freed when the platform device is released.
 627 */
 628int platform_device_add_data(struct platform_device *pdev, const void *data,
 629                             size_t size)
 630{
 631        void *d = NULL;
 632
 633        if (data) {
 634                d = kmemdup(data, size, GFP_KERNEL);
 635                if (!d)
 636                        return -ENOMEM;
 637        }
 638
 639        kfree(pdev->dev.platform_data);
 640        pdev->dev.platform_data = d;
 641        return 0;
 642}
 643EXPORT_SYMBOL_GPL(platform_device_add_data);
 644
 645/**
 646 * platform_device_add - add a platform device to device hierarchy
 647 * @pdev: platform device we're adding
 648 *
 649 * This is part 2 of platform_device_register(), though may be called
 650 * separately _iff_ pdev was allocated by platform_device_alloc().
 651 */
 652int platform_device_add(struct platform_device *pdev)
 653{
 654        u32 i;
 655        int ret;
 656
 657        if (!pdev)
 658                return -EINVAL;
 659
 660        if (!pdev->dev.parent)
 661                pdev->dev.parent = &platform_bus;
 662
 663        pdev->dev.bus = &platform_bus_type;
 664
 665        switch (pdev->id) {
 666        default:
 667                dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
 668                break;
 669        case PLATFORM_DEVID_NONE:
 670                dev_set_name(&pdev->dev, "%s", pdev->name);
 671                break;
 672        case PLATFORM_DEVID_AUTO:
 673                /*
 674                 * Automatically allocated device ID. We mark it as such so
 675                 * that we remember it must be freed, and we append a suffix
 676                 * to avoid namespace collision with explicit IDs.
 677                 */
 678                ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
 679                if (ret < 0)
 680                        goto err_out;
 681                pdev->id = ret;
 682                pdev->id_auto = true;
 683                dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
 684                break;
 685        }
 686
 687        for (i = 0; i < pdev->num_resources; i++) {
 688                struct resource *p, *r = &pdev->resource[i];
 689
 690                if (r->name == NULL)
 691                        r->name = dev_name(&pdev->dev);
 692
 693                p = r->parent;
 694                if (!p) {
 695                        if (resource_type(r) == IORESOURCE_MEM)
 696                                p = &iomem_resource;
 697                        else if (resource_type(r) == IORESOURCE_IO)
 698                                p = &ioport_resource;
 699                }
 700
 701                if (p) {
 702                        ret = insert_resource(p, r);
 703                        if (ret) {
 704                                dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
 705                                goto failed;
 706                        }
 707                }
 708        }
 709
 710        pr_debug("Registering platform device '%s'. Parent at %s\n",
 711                 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
 712
 713        ret = device_add(&pdev->dev);
 714        if (ret == 0)
 715                return ret;
 716
 717 failed:
 718        if (pdev->id_auto) {
 719                ida_free(&platform_devid_ida, pdev->id);
 720                pdev->id = PLATFORM_DEVID_AUTO;
 721        }
 722
 723        while (i--) {
 724                struct resource *r = &pdev->resource[i];
 725                if (r->parent)
 726                        release_resource(r);
 727        }
 728
 729 err_out:
 730        return ret;
 731}
 732EXPORT_SYMBOL_GPL(platform_device_add);
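
/*
 * Illustrative sketch (not part of this file): the two-step creation flow
 * that platform_device_add() completes.  The "foo" names, resources and
 * platform data are assumptions for the example.
 *
 *	struct platform_device *pdev;
 *	int ret;
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_AUTO);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	ret = platform_device_add_resources(pdev, foo_resources,
 *					    ARRAY_SIZE(foo_resources));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add(pdev);
 *	if (ret)
 *		goto err_put;
 *	return 0;
 *
 * err_put:
 *	platform_device_put(pdev);
 *	return ret;
 */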
 733
 734/**
 735 * platform_device_del - remove a platform-level device
 736 * @pdev: platform device we're removing
 737 *
 738 * Note that this function will also release all memory- and port-based
 739 * resources owned by the device (@dev->resource).  This function must
 740 * _only_ be externally called in error cases.  All other usage is a bug.
 741 */
 742void platform_device_del(struct platform_device *pdev)
 743{
 744        u32 i;
 745
 746        if (!IS_ERR_OR_NULL(pdev)) {
 747                device_del(&pdev->dev);
 748
 749                if (pdev->id_auto) {
 750                        ida_free(&platform_devid_ida, pdev->id);
 751                        pdev->id = PLATFORM_DEVID_AUTO;
 752                }
 753
 754                for (i = 0; i < pdev->num_resources; i++) {
 755                        struct resource *r = &pdev->resource[i];
 756                        if (r->parent)
 757                                release_resource(r);
 758                }
 759        }
 760}
 761EXPORT_SYMBOL_GPL(platform_device_del);
 762
 763/**
 764 * platform_device_register - add a platform-level device
 765 * @pdev: platform device we're adding
 766 *
 767 * NOTE: _Never_ directly free @pdev after calling this function, even if it
 768 * returned an error! Always use platform_device_put() to give up the
 769 * reference initialised in this function instead.
 770 */
 771int platform_device_register(struct platform_device *pdev)
 772{
 773        device_initialize(&pdev->dev);
 774        setup_pdev_dma_masks(pdev);
 775        return platform_device_add(pdev);
 776}
 777EXPORT_SYMBOL_GPL(platform_device_register);
 778
 779/**
 780 * platform_device_unregister - unregister a platform-level device
 781 * @pdev: platform device we're unregistering
 782 *
 783 * Unregistration is done in 2 steps. First we release all resources
 784 * and remove it from the subsystem, then we drop reference count by
 785 * calling platform_device_put().
 786 */
 787void platform_device_unregister(struct platform_device *pdev)
 788{
 789        platform_device_del(pdev);
 790        platform_device_put(pdev);
 791}
 792EXPORT_SYMBOL_GPL(platform_device_unregister);
 793
 794/**
 795 * platform_device_register_full - add a platform-level device with
 796 * resources and platform-specific data
 797 *
 798 * @pdevinfo: data used to create device
 799 *
 800 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 801 */
 802struct platform_device *platform_device_register_full(
 803                const struct platform_device_info *pdevinfo)
 804{
 805        int ret;
 806        struct platform_device *pdev;
 807
 808        pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
 809        if (!pdev)
 810                return ERR_PTR(-ENOMEM);
 811
 812        pdev->dev.parent = pdevinfo->parent;
 813        pdev->dev.fwnode = pdevinfo->fwnode;
 814        pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
 815        pdev->dev.of_node_reused = pdevinfo->of_node_reused;
 816
 817        if (pdevinfo->dma_mask) {
 818                pdev->platform_dma_mask = pdevinfo->dma_mask;
 819                pdev->dev.dma_mask = &pdev->platform_dma_mask;
 820                pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
 821        }
 822
 823        ret = platform_device_add_resources(pdev,
 824                        pdevinfo->res, pdevinfo->num_res);
 825        if (ret)
 826                goto err;
 827
 828        ret = platform_device_add_data(pdev,
 829                        pdevinfo->data, pdevinfo->size_data);
 830        if (ret)
 831                goto err;
 832
 833        if (pdevinfo->properties) {
 834                ret = device_create_managed_software_node(&pdev->dev,
 835                                                          pdevinfo->properties, NULL);
 836                if (ret)
 837                        goto err;
 838        }
 839
 840        ret = platform_device_add(pdev);
 841        if (ret) {
 842err:
 843                ACPI_COMPANION_SET(&pdev->dev, NULL);
 844                platform_device_put(pdev);
 845                return ERR_PTR(ret);
 846        }
 847
 848        return pdev;
 849}
 850EXPORT_SYMBOL_GPL(platform_device_register_full);
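
/*
 * Illustrative sketch (not part of this file): creating a device from a
 * platform_device_info descriptor in one call.  The field values are
 * assumptions for the example.
 *
 *	struct platform_device_info pdevinfo = {
 *		.name		= "foo",
 *		.id		= PLATFORM_DEVID_AUTO,
 *		.res		= foo_resources,
 *		.num_res	= ARRAY_SIZE(foo_resources),
 *		.data		= &foo_pdata,
 *		.size_data	= sizeof(foo_pdata),
 *	};
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_register_full(&pdevinfo);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */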
 851
 852/**
 853 * __platform_driver_register - register a driver for platform-level devices
 854 * @drv: platform driver structure
 855 * @owner: owning module/driver
 856 */
 857int __platform_driver_register(struct platform_driver *drv,
 858                                struct module *owner)
 859{
 860        drv->driver.owner = owner;
 861        drv->driver.bus = &platform_bus_type;
 862
 863        return driver_register(&drv->driver);
 864}
 865EXPORT_SYMBOL_GPL(__platform_driver_register);
 866
 867/**
 868 * platform_driver_unregister - unregister a driver for platform-level devices
 869 * @drv: platform driver structure
 870 */
 871void platform_driver_unregister(struct platform_driver *drv)
 872{
 873        driver_unregister(&drv->driver);
 874}
 875EXPORT_SYMBOL_GPL(platform_driver_unregister);
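
/*
 * Illustrative sketch (not part of this file): the usual way drivers hook
 * into the registration helpers above is a platform_driver plus
 * module_platform_driver(), which registers on module init and unregisters
 * on module exit.  The "foo" names and the compatible string are assumptions
 * for the example.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */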
 876
 877static int platform_probe_fail(struct platform_device *pdev)
 878{
 879        return -ENXIO;
 880}
 881
 882/**
 883 * __platform_driver_probe - register driver for non-hotpluggable device
 884 * @drv: platform driver structure
 885 * @probe: the driver probe routine, probably from an __init section
 886 * @module: module which will be the owner of the driver
 887 *
 888 * Use this instead of platform_driver_register() when you know the device
 889 * is not hotpluggable and has already been registered, and you want to
 890 * remove its run-once probe() infrastructure from memory after the driver
 891 * has bound to the device.
 892 *
 893 * One typical use for this would be with drivers for controllers integrated
 894 * into system-on-chip processors, where the controller devices have been
 895 * configured as part of board setup.
 896 *
 897 * Note that this is incompatible with deferred probing.
 898 *
 899 * Returns zero if the driver registered and bound to a device, else returns
 900 * a negative error code and with the driver not registered.
 901 */
 902int __init_or_module __platform_driver_probe(struct platform_driver *drv,
 903                int (*probe)(struct platform_device *), struct module *module)
 904{
 905        int retval, code;
 906
 907        if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
 908                pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
 909                         drv->driver.name, __func__);
 910                return -EINVAL;
 911        }
 912
 913        /*
 914         * We have to run our probes synchronously because we check if
 915         * we find any devices to bind to and exit with error if there
 916         * are any.
 917         */
 918        drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
 919
 920        /*
 921         * Prevent driver from requesting probe deferral to avoid further
 922         * futile probe attempts.
 923         */
 924        drv->prevent_deferred_probe = true;
 925
 926        /* make sure driver won't have bind/unbind attributes */
 927        drv->driver.suppress_bind_attrs = true;
 928
 929        /* temporary section violation during probe() */
 930        drv->probe = probe;
 931        retval = code = __platform_driver_register(drv, module);
 932        if (retval)
 933                return retval;
 934
 935        /*
 936         * Fixup that section violation, being paranoid about code scanning
 937         * the list of drivers in order to probe new devices.  Check to see
 938         * if the probe was successful, and make sure any forced probes of
 939         * new devices fail.
 940         */
 941        spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
 942        drv->probe = platform_probe_fail;
 943        if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
 944                retval = -ENODEV;
 945        spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
 946
 947        if (code != retval)
 948                platform_driver_unregister(drv);
 949        return retval;
 950}
 951EXPORT_SYMBOL_GPL(__platform_driver_probe);
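
/*
 * Illustrative sketch (not part of this file): the platform_driver_probe()
 * wrapper is the usual entry point for this helper, with the probe routine
 * typically living in __init.  The "foo" names are assumptions for the
 * example.
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_driver_probe(&foo_driver, foo_probe);
 *	}
 *	module_init(foo_init);
 */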
 952
 953/**
 954 * __platform_create_bundle - register driver and create corresponding device
 955 * @driver: platform driver structure
 956 * @probe: the driver probe routine, probably from an __init section
 957 * @res: set of resources that needs to be allocated for the device
 958 * @n_res: number of resources
 959 * @data: platform specific data for this platform device
 960 * @size: size of platform specific data
 961 * @module: module which will be the owner of the driver
 962 *
 963 * Use this in legacy-style modules that probe hardware directly and
 964 * register a single platform device and corresponding platform driver.
 965 *
 966 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 967 */
 968struct platform_device * __init_or_module __platform_create_bundle(
 969                        struct platform_driver *driver,
 970                        int (*probe)(struct platform_device *),
 971                        struct resource *res, unsigned int n_res,
 972                        const void *data, size_t size, struct module *module)
 973{
 974        struct platform_device *pdev;
 975        int error;
 976
 977        pdev = platform_device_alloc(driver->driver.name, -1);
 978        if (!pdev) {
 979                error = -ENOMEM;
 980                goto err_out;
 981        }
 982
 983        error = platform_device_add_resources(pdev, res, n_res);
 984        if (error)
 985                goto err_pdev_put;
 986
 987        error = platform_device_add_data(pdev, data, size);
 988        if (error)
 989                goto err_pdev_put;
 990
 991        error = platform_device_add(pdev);
 992        if (error)
 993                goto err_pdev_put;
 994
 995        error = __platform_driver_probe(driver, probe, module);
 996        if (error)
 997                goto err_pdev_del;
 998
 999        return pdev;
1000
1001err_pdev_del:
1002        platform_device_del(pdev);
1003err_pdev_put:
1004        platform_device_put(pdev);
1005err_out:
1006        return ERR_PTR(error);
1007}
1008EXPORT_SYMBOL_GPL(__platform_create_bundle);
1009
1010/**
1011 * __platform_register_drivers - register an array of platform drivers
1012 * @drivers: an array of drivers to register
1013 * @count: the number of drivers to register
1014 * @owner: module owning the drivers
1015 *
1016 * Registers platform drivers specified by an array. On failure to register a
1017 * driver, all previously registered drivers will be unregistered. Callers of
1018 * this API should use platform_unregister_drivers() to unregister drivers in
1019 * the reverse order.
1020 *
1021 * Returns: 0 on success or a negative error code on failure.
1022 */
1023int __platform_register_drivers(struct platform_driver * const *drivers,
1024                                unsigned int count, struct module *owner)
1025{
1026        unsigned int i;
1027        int err;
1028
1029        for (i = 0; i < count; i++) {
1030                pr_debug("registering platform driver %ps\n", drivers[i]);
1031
1032                err = __platform_driver_register(drivers[i], owner);
1033                if (err < 0) {
1034                        pr_err("failed to register platform driver %ps: %d\n",
1035                               drivers[i], err);
1036                        goto error;
1037                }
1038        }
1039
1040        return 0;
1041
1042error:
1043        while (i--) {
1044                pr_debug("unregistering platform driver %ps\n", drivers[i]);
1045                platform_driver_unregister(drivers[i]);
1046        }
1047
1048        return err;
1049}
1050EXPORT_SYMBOL_GPL(__platform_register_drivers);
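
/*
 * Illustrative sketch (not part of this file): registering several related
 * drivers through the platform_register_drivers() wrapper and undoing it on
 * module exit.  The driver names are assumptions for the example.
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_aux_driver,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_register_drivers(foo_drivers,
 *						 ARRAY_SIZE(foo_drivers));
 *	}
 *	module_init(foo_init);
 *
 *	static void __exit foo_exit(void)
 *	{
 *		platform_unregister_drivers(foo_drivers, ARRAY_SIZE(foo_drivers));
 *	}
 *	module_exit(foo_exit);
 */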
1051
1052/**
1053 * platform_unregister_drivers - unregister an array of platform drivers
1054 * @drivers: an array of drivers to unregister
1055 * @count: the number of drivers to unregister
1056 *
1057 * Unregisters platform drivers specified by an array. This is typically used
1058 * to complement an earlier call to platform_register_drivers(). Drivers are
1059 * unregistered in the reverse order in which they were registered.
1060 */
1061void platform_unregister_drivers(struct platform_driver * const *drivers,
1062                                 unsigned int count)
1063{
1064        while (count--) {
1065                pr_debug("unregistering platform driver %ps\n", drivers[count]);
1066                platform_driver_unregister(drivers[count]);
1067        }
1068}
1069EXPORT_SYMBOL_GPL(platform_unregister_drivers);
1070
1071static const struct platform_device_id *platform_match_id(
1072                        const struct platform_device_id *id,
1073                        struct platform_device *pdev)
1074{
1075        while (id->name[0]) {
1076                if (strcmp(pdev->name, id->name) == 0) {
1077                        pdev->id_entry = id;
1078                        return id;
1079                }
1080                id++;
1081        }
1082        return NULL;
1083}
1084
1085#ifdef CONFIG_PM_SLEEP
1086
1087static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1088{
1089        struct platform_driver *pdrv = to_platform_driver(dev->driver);
1090        struct platform_device *pdev = to_platform_device(dev);
1091        int ret = 0;
1092
1093        if (dev->driver && pdrv->suspend)
1094                ret = pdrv->suspend(pdev, mesg);
1095
1096        return ret;
1097}
1098
1099static int platform_legacy_resume(struct device *dev)
1100{
1101        struct platform_driver *pdrv = to_platform_driver(dev->driver);
1102        struct platform_device *pdev = to_platform_device(dev);
1103        int ret = 0;
1104
1105        if (dev->driver && pdrv->resume)
1106                ret = pdrv->resume(pdev);
1107
1108        return ret;
1109}
1110
1111#endif /* CONFIG_PM_SLEEP */
1112
1113#ifdef CONFIG_SUSPEND
1114
1115int platform_pm_suspend(struct device *dev)
1116{
1117        struct device_driver *drv = dev->driver;
1118        int ret = 0;
1119
1120        if (!drv)
1121                return 0;
1122
1123        if (drv->pm) {
1124                if (drv->pm->suspend)
1125                        ret = drv->pm->suspend(dev);
1126        } else {
1127                ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1128        }
1129
1130        return ret;
1131}
1132
1133int platform_pm_resume(struct device *dev)
1134{
1135        struct device_driver *drv = dev->driver;
1136        int ret = 0;
1137
1138        if (!drv)
1139                return 0;
1140
1141        if (drv->pm) {
1142                if (drv->pm->resume)
1143                        ret = drv->pm->resume(dev);
1144        } else {
1145                ret = platform_legacy_resume(dev);
1146        }
1147
1148        return ret;
1149}
1150
1151#endif /* CONFIG_SUSPEND */
1152
1153#ifdef CONFIG_HIBERNATE_CALLBACKS
1154
1155int platform_pm_freeze(struct device *dev)
1156{
1157        struct device_driver *drv = dev->driver;
1158        int ret = 0;
1159
1160        if (!drv)
1161                return 0;
1162
1163        if (drv->pm) {
1164                if (drv->pm->freeze)
1165                        ret = drv->pm->freeze(dev);
1166        } else {
1167                ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1168        }
1169
1170        return ret;
1171}
1172
1173int platform_pm_thaw(struct device *dev)
1174{
1175        struct device_driver *drv = dev->driver;
1176        int ret = 0;
1177
1178        if (!drv)
1179                return 0;
1180
1181        if (drv->pm) {
1182                if (drv->pm->thaw)
1183                        ret = drv->pm->thaw(dev);
1184        } else {
1185                ret = platform_legacy_resume(dev);
1186        }
1187
1188        return ret;
1189}
1190
1191int platform_pm_poweroff(struct device *dev)
1192{
1193        struct device_driver *drv = dev->driver;
1194        int ret = 0;
1195
1196        if (!drv)
1197                return 0;
1198
1199        if (drv->pm) {
1200                if (drv->pm->poweroff)
1201                        ret = drv->pm->poweroff(dev);
1202        } else {
1203                ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1204        }
1205
1206        return ret;
1207}
1208
1209int platform_pm_restore(struct device *dev)
1210{
1211        struct device_driver *drv = dev->driver;
1212        int ret = 0;
1213
1214        if (!drv)
1215                return 0;
1216
1217        if (drv->pm) {
1218                if (drv->pm->restore)
1219                        ret = drv->pm->restore(dev);
1220        } else {
1221                ret = platform_legacy_resume(dev);
1222        }
1223
1224        return ret;
1225}
1226
1227#endif /* CONFIG_HIBERNATE_CALLBACKS */
1228
1229/* modalias support enables more hands-off userspace setup:
1230 * (a) environment variable lets new-style hotplug events work once system is
1231 *     fully running:  "modprobe $MODALIAS"
1232 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
1233 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
1234 */
1235static ssize_t modalias_show(struct device *dev,
1236                             struct device_attribute *attr, char *buf)
1237{
1238        struct platform_device *pdev = to_platform_device(dev);
1239        int len;
1240
1241        len = of_device_modalias(dev, buf, PAGE_SIZE);
1242        if (len != -ENODEV)
1243                return len;
1244
1245        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
1246        if (len != -ENODEV)
1247                return len;
1248
1249        return sysfs_emit(buf, "platform:%s\n", pdev->name);
1250}
1251static DEVICE_ATTR_RO(modalias);
1252
1253static ssize_t numa_node_show(struct device *dev,
1254                              struct device_attribute *attr, char *buf)
1255{
1256        return sysfs_emit(buf, "%d\n", dev_to_node(dev));
1257}
1258static DEVICE_ATTR_RO(numa_node);
1259
1260static ssize_t driver_override_show(struct device *dev,
1261                                    struct device_attribute *attr, char *buf)
1262{
1263        struct platform_device *pdev = to_platform_device(dev);
1264        ssize_t len;
1265
1266        device_lock(dev);
1267        len = sysfs_emit(buf, "%s\n", pdev->driver_override);
1268        device_unlock(dev);
1269
1270        return len;
1271}
1272
1273static ssize_t driver_override_store(struct device *dev,
1274                                     struct device_attribute *attr,
1275                                     const char *buf, size_t count)
1276{
1277        struct platform_device *pdev = to_platform_device(dev);
1278        char *driver_override, *old, *cp;
1279
1280        /* We need to keep extra room for a newline */
1281        if (count >= (PAGE_SIZE - 1))
1282                return -EINVAL;
1283
1284        driver_override = kstrndup(buf, count, GFP_KERNEL);
1285        if (!driver_override)
1286                return -ENOMEM;
1287
1288        cp = strchr(driver_override, '\n');
1289        if (cp)
1290                *cp = '\0';
1291
1292        device_lock(dev);
1293        old = pdev->driver_override;
1294        if (strlen(driver_override)) {
1295                pdev->driver_override = driver_override;
1296        } else {
1297                kfree(driver_override);
1298                pdev->driver_override = NULL;
1299        }
1300        device_unlock(dev);
1301
1302        kfree(old);
1303
1304        return count;
1305}
1306static DEVICE_ATTR_RW(driver_override);
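
/*
 * Illustrative sketch (not part of this file): from userspace, the
 * driver_override attribute can steer a device towards one specific driver;
 * the device and driver names below are assumptions for the example.
 *
 *	echo foo_driver > /sys/bus/platform/devices/foo.0/driver_override
 *	echo foo.0 > /sys/bus/platform/drivers_probe
 */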
1307
1308static struct attribute *platform_dev_attrs[] = {
1309        &dev_attr_modalias.attr,
1310        &dev_attr_numa_node.attr,
1311        &dev_attr_driver_override.attr,
1312        NULL,
1313};
1314
1315static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
1316                int n)
1317{
1318        struct device *dev = container_of(kobj, typeof(*dev), kobj);
1319
1320        if (a == &dev_attr_numa_node.attr &&
1321                        dev_to_node(dev) == NUMA_NO_NODE)
1322                return 0;
1323
1324        return a->mode;
1325}
1326
1327static const struct attribute_group platform_dev_group = {
1328        .attrs = platform_dev_attrs,
1329        .is_visible = platform_dev_attrs_visible,
1330};
1331__ATTRIBUTE_GROUPS(platform_dev);
1332
1333
1334/**
1335 * platform_match - bind platform device to platform driver.
1336 * @dev: device.
1337 * @drv: driver.
1338 *
1339 * Platform device IDs are assumed to be encoded like this:
1340 * "<name><instance>", where <name> is a short description of the type of
1341 * device, like "pci" or "floppy", and <instance> is the enumerated
1342 * instance of the device, like '0' or '42'.  Driver IDs are simply
1343 * "<name>".  So, extract the <name> from the platform_device structure,
1344 * and compare it against the name of the driver. Return whether they match
1345 * or not.
1346 */
1347static int platform_match(struct device *dev, struct device_driver *drv)
1348{
1349        struct platform_device *pdev = to_platform_device(dev);
1350        struct platform_driver *pdrv = to_platform_driver(drv);
1351
1352        /* When driver_override is set, only bind to the matching driver */
1353        if (pdev->driver_override)
1354                return !strcmp(pdev->driver_override, drv->name);
1355
1356        /* Attempt an OF style match first */
1357        if (of_driver_match_device(dev, drv))
1358                return 1;
1359
1360        /* Then try ACPI style match */
1361        if (acpi_driver_match_device(dev, drv))
1362                return 1;
1363
1364        /* Then try to match against the id table */
1365        if (pdrv->id_table)
1366                return platform_match_id(pdrv->id_table, pdev) != NULL;
1367
1368        /* fall-back to driver name match */
1369        return (strcmp(pdev->name, drv->name) == 0);
1370}
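
/*
 * Illustrative sketch (not part of this file): the id-table leg of the match
 * above corresponds to a driver-side table like the one below.  The entry
 * names are assumptions for the example.
 *
 *	static const struct platform_device_id foo_id_table[] = {
 *		{ "foo-v1", 0 },
 *		{ "foo-v2", 1 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, foo_id_table);
 *
 * A bound driver can then inspect pdev->id_entry->driver_data to tell the
 * variants apart.
 */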
1371
1372static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
1373{
1374        struct platform_device  *pdev = to_platform_device(dev);
1375        int rc;
1376
1377        /* Some devices have extra OF data and an OF-style MODALIAS */
1378        rc = of_device_uevent_modalias(dev, env);
1379        if (rc != -ENODEV)
1380                return rc;
1381
1382        rc = acpi_device_uevent_modalias(dev, env);
1383        if (rc != -ENODEV)
1384                return rc;
1385
1386        add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
1387                        pdev->name);
1388        return 0;
1389}
1390
1391static int platform_probe(struct device *_dev)
1392{
1393        struct platform_driver *drv = to_platform_driver(_dev->driver);
1394        struct platform_device *dev = to_platform_device(_dev);
1395        int ret;
1396
1397        /*
1398         * A driver registered using platform_driver_probe() cannot be bound
1399         * again later because the probe function usually lives in __init code
1400         * and so is gone. For these drivers .probe is set to
1401         * platform_probe_fail in __platform_driver_probe(). Don't even prepare
1402         * clocks and PM domains for these to match the traditional behaviour.
1403         */
1404        if (unlikely(drv->probe == platform_probe_fail))
1405                return -ENXIO;
1406
1407        ret = of_clk_set_defaults(_dev->of_node, false);
1408        if (ret < 0)
1409                return ret;
1410
1411        ret = dev_pm_domain_attach(_dev, true);
1412        if (ret)
1413                goto out;
1414
1415        if (drv->probe) {
1416                ret = drv->probe(dev);
1417                if (ret)
1418                        dev_pm_domain_detach(_dev, true);
1419        }
1420
1421out:
1422        if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
1423                dev_warn(_dev, "probe deferral not supported\n");
1424                ret = -ENXIO;
1425        }
1426
1427        return ret;
1428}
1429
1430static void platform_remove(struct device *_dev)
1431{
1432        struct platform_driver *drv = to_platform_driver(_dev->driver);
1433        struct platform_device *dev = to_platform_device(_dev);
1434
1435        if (drv->remove) {
1436                int ret = drv->remove(dev);
1437
1438                if (ret)
1439                        dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
1440        }
1441        dev_pm_domain_detach(_dev, true);
1442}
1443
1444static void platform_shutdown(struct device *_dev)
1445{
1446        struct platform_device *dev = to_platform_device(_dev);
1447        struct platform_driver *drv;
1448
1449        if (!_dev->driver)
1450                return;
1451
1452        drv = to_platform_driver(_dev->driver);
1453        if (drv->shutdown)
1454                drv->shutdown(dev);
1455}
1456
1457
1458int platform_dma_configure(struct device *dev)
1459{
1460        enum dev_dma_attr attr;
1461        int ret = 0;
1462
1463        if (dev->of_node) {
1464                ret = of_dma_configure(dev, dev->of_node, true);
1465        } else if (has_acpi_companion(dev)) {
1466                attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
1467                ret = acpi_dma_configure(dev, attr);
1468        }
1469
1470        return ret;
1471}
1472
1473static const struct dev_pm_ops platform_dev_pm_ops = {
1474        SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
1475        USE_PLATFORM_PM_SLEEP_OPS
1476};
1477
1478struct bus_type platform_bus_type = {
1479        .name           = "platform",
1480        .dev_groups     = platform_dev_groups,
1481        .match          = platform_match,
1482        .uevent         = platform_uevent,
1483        .probe          = platform_probe,
1484        .remove         = platform_remove,
1485        .shutdown       = platform_shutdown,
1486        .dma_configure  = platform_dma_configure,
1487        .pm             = &platform_dev_pm_ops,
1488};
1489EXPORT_SYMBOL_GPL(platform_bus_type);
1490
1491static inline int __platform_match(struct device *dev, const void *drv)
1492{
1493        return platform_match(dev, (struct device_driver *)drv);
1494}
1495
1496/**
1497 * platform_find_device_by_driver - Find a platform device with a given
1498 * driver.
1499 * @start: The device to start the search from.
1500 * @drv: The device driver to look for.
1501 */
1502struct device *platform_find_device_by_driver(struct device *start,
1503                                              const struct device_driver *drv)
1504{
1505        return bus_find_device(&platform_bus_type, start, drv,
1506                               __platform_match);
1507}
1508EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1509
1510void __weak __init early_platform_cleanup(void) { }
1511
1512int __init platform_bus_init(void)
1513{
1514        int error;
1515
1516        early_platform_cleanup();
1517
1518        error = device_register(&platform_bus);
1519        if (error) {
1520                put_device(&platform_bus);
1521                return error;
1522        }
1523        error =  bus_register(&platform_bus_type);
1524        if (error)
1525                device_unregister(&platform_bus);
1526        of_platform_register_reconfig_notifier();
1527        return error;
1528}
1529