linux/drivers/base/platform.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * platform.c - platform 'pseudo' bus for legacy devices
   4 *
   5 * Copyright (c) 2002-3 Patrick Mochel
   6 * Copyright (c) 2002-3 Open Source Development Labs
   7 *
   8 * Please see Documentation/driver-api/driver-model/platform.rst for more
   9 * information.
  10 */
  11
  12#include <linux/string.h>
  13#include <linux/platform_device.h>
  14#include <linux/of_device.h>
  15#include <linux/of_irq.h>
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/interrupt.h>
  19#include <linux/ioport.h>
  20#include <linux/dma-mapping.h>
  21#include <linux/memblock.h>
  22#include <linux/err.h>
  23#include <linux/slab.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/pm_domain.h>
  26#include <linux/idr.h>
  27#include <linux/acpi.h>
  28#include <linux/clk/clk-conf.h>
  29#include <linux/limits.h>
  30#include <linux/property.h>
  31#include <linux/kmemleak.h>
  32#include <linux/types.h>
  33
  34#include "base.h"
  35#include "power/power.h"
  36
  37/* For automatically allocated device IDs */
  38static DEFINE_IDA(platform_devid_ida);
  39
  40struct device platform_bus = {
  41        .init_name      = "platform",
  42};
  43EXPORT_SYMBOL_GPL(platform_bus);
  44
  45/**
  46 * platform_get_resource - get a resource for a device
  47 * @dev: platform device
  48 * @type: resource type
  49 * @num: resource index
  50 *
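 * For example, a minimal sketch from a driver's probe() (here "pdev" is
 * assumed to be the probe's platform device)::
 *
 *		struct resource *res;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *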
  51 * Return: a pointer to the resource or NULL on failure.
  52 */
  53struct resource *platform_get_resource(struct platform_device *dev,
  54                                       unsigned int type, unsigned int num)
  55{
  56        u32 i;
  57
  58        for (i = 0; i < dev->num_resources; i++) {
  59                struct resource *r = &dev->resource[i];
  60
  61                if (type == resource_type(r) && num-- == 0)
  62                        return r;
  63        }
  64        return NULL;
  65}
  66EXPORT_SYMBOL_GPL(platform_get_resource);
  67
  68struct resource *platform_get_mem_or_io(struct platform_device *dev,
  69                                        unsigned int num)
  70{
  71        u32 i;
  72
  73        for (i = 0; i < dev->num_resources; i++) {
  74                struct resource *r = &dev->resource[i];
  75
  76                if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
  77                        return r;
  78        }
  79        return NULL;
  80}
  81EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
  82
  83#ifdef CONFIG_HAS_IOMEM
  84/**
  85 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
  86 *                                          platform device and get resource
  87 *
  88 * @pdev: platform device to use both for memory resource lookup as well as
  89 *        resource management
  90 * @index: resource index
  91 * @res: optional output parameter to store a pointer to the obtained resource.
  92 *
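 * For example, a minimal probe() sketch that also needs the underlying
 * resource, e.g. for resource_size() (all names other than the function
 * itself are illustrative)::
 *
 *		struct resource *res;
 *		void __iomem *base;
 *
 *		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *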
  93 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
  94 * on failure.
  95 */
  96void __iomem *
  97devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
  98                                unsigned int index, struct resource **res)
  99{
 100        struct resource *r;
 101
 102        r = platform_get_resource(pdev, IORESOURCE_MEM, index);
 103        if (res)
 104                *res = r;
 105        return devm_ioremap_resource(&pdev->dev, r);
 106}
 107EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
 108
 109/**
 110 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 111 *                                  device
 112 *
 113 * @pdev: platform device to use both for memory resource lookup as well as
 114 *        resource management
 115 * @index: resource index
 116 *
 117 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 118 * on failure.
 119 */
 120void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
 121                                             unsigned int index)
 122{
 123        return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
 124}
 125EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
 126
 127/**
 128 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
 129 *                                         a platform device, retrieve the
 130 *                                         resource by name
 131 *
 132 * @pdev: platform device to use both for memory resource lookup as well as
 133 *        resource management
 134 * @name: name of the resource
 135 *
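 * For example, assuming the device describes a memory region named
 * "ctrl" (the name is purely illustrative)::
 *
 *		void __iomem *base;
 *
 *		base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *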
 136 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 137 * on failure.
 138 */
 139void __iomem *
 140devm_platform_ioremap_resource_byname(struct platform_device *pdev,
 141                                      const char *name)
 142{
 143        struct resource *res;
 144
 145        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
 146        return devm_ioremap_resource(&pdev->dev, res);
 147}
 148EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
 149#endif /* CONFIG_HAS_IOMEM */
 150
 151/**
 152 * platform_get_irq_optional - get an optional IRQ for a device
 153 * @dev: platform device
 154 * @num: IRQ number index
 155 *
 156 * Gets an IRQ for a platform device. Device drivers should check the return
 157 * value for errors so as to not pass a negative integer value to the
 158 * request_irq() APIs. This is the same as platform_get_irq(), except that it
  159 * does not print an error message if an IRQ cannot be obtained.
 160 *
 161 * For example::
 162 *
 163 *              int irq = platform_get_irq_optional(pdev, 0);
 164 *              if (irq < 0)
 165 *                      return irq;
 166 *
 167 * Return: non-zero IRQ number on success, negative error number on failure.
 168 */
 169int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
 170{
 171        int ret;
 172#ifdef CONFIG_SPARC
 173        /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
 174        if (!dev || num >= dev->archdata.num_irqs)
 175                goto out_not_found;
 176        ret = dev->archdata.irqs[num];
 177        goto out;
 178#else
 179        struct resource *r;
 180
 181        if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
 182                ret = of_irq_get(dev->dev.of_node, num);
 183                if (ret > 0 || ret == -EPROBE_DEFER)
 184                        goto out;
 185        }
 186
 187        r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 188        if (has_acpi_companion(&dev->dev)) {
 189                if (r && r->flags & IORESOURCE_DISABLED) {
 190                        ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
 191                        if (ret)
 192                                goto out;
 193                }
 194        }
 195
 196        /*
 197         * The resources may pass trigger flags to the irqs that need
 198         * to be set up. It so happens that the trigger flags for
 199         * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
 200         * settings.
 201         */
 202        if (r && r->flags & IORESOURCE_BITS) {
 203                struct irq_data *irqd;
 204
 205                irqd = irq_get_irq_data(r->start);
 206                if (!irqd)
 207                        goto out_not_found;
 208                irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
 209        }
 210
 211        if (r) {
 212                ret = r->start;
 213                goto out;
 214        }
 215
 216        /*
 217         * For the index 0 interrupt, allow falling back to GpioInt
 218         * resources. While a device could have both Interrupt and GpioInt
 219         * resources, making this fallback ambiguous, in many common cases
 220         * the device will only expose one IRQ, and this fallback
 221         * allows a common code path across either kind of resource.
 222         */
 223        if (num == 0 && has_acpi_companion(&dev->dev)) {
 224                ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
 225                /* Our callers expect -ENXIO for missing IRQs. */
 226                if (ret >= 0 || ret == -EPROBE_DEFER)
 227                        goto out;
 228        }
 229
 230#endif
 231out_not_found:
 232        ret = -ENXIO;
 233out:
 234        WARN(ret == 0, "0 is an invalid IRQ number\n");
 235        return ret;
 236}
 237EXPORT_SYMBOL_GPL(platform_get_irq_optional);
 238
 239/**
 240 * platform_get_irq - get an IRQ for a device
 241 * @dev: platform device
 242 * @num: IRQ number index
 243 *
 244 * Gets an IRQ for a platform device and prints an error message if finding the
 245 * IRQ fails. Device drivers should check the return value for errors so as to
 246 * not pass a negative integer value to the request_irq() APIs.
 247 *
 248 * For example::
 249 *
 250 *              int irq = platform_get_irq(pdev, 0);
 251 *              if (irq < 0)
 252 *                      return irq;
 253 *
 254 * Return: non-zero IRQ number on success, negative error number on failure.
 255 */
 256int platform_get_irq(struct platform_device *dev, unsigned int num)
 257{
 258        int ret;
 259
 260        ret = platform_get_irq_optional(dev, num);
 261        if (ret < 0 && ret != -EPROBE_DEFER)
 262                dev_err(&dev->dev, "IRQ index %u not found\n", num);
 263
 264        return ret;
 265}
 266EXPORT_SYMBOL_GPL(platform_get_irq);
 267
 268/**
 269 * platform_irq_count - Count the number of IRQs a platform device uses
 270 * @dev: platform device
 271 *
  272 * Return: Number of IRQs a platform device uses or -EPROBE_DEFER
 273 */
 274int platform_irq_count(struct platform_device *dev)
 275{
 276        int ret, nr = 0;
 277
 278        while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
 279                nr++;
 280
 281        if (ret == -EPROBE_DEFER)
 282                return ret;
 283
 284        return nr;
 285}
 286EXPORT_SYMBOL_GPL(platform_irq_count);
 287
 288struct irq_affinity_devres {
 289        unsigned int count;
 290        unsigned int irq[];
 291};
 292
 293static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
 294{
 295        struct resource *r;
 296
 297        r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
 298        if (r)
 299                irqresource_disabled(r, 0);
 300}
 301
 302static void devm_platform_get_irqs_affinity_release(struct device *dev,
 303                                                    void *res)
 304{
 305        struct irq_affinity_devres *ptr = res;
 306        int i;
 307
 308        for (i = 0; i < ptr->count; i++) {
 309                irq_dispose_mapping(ptr->irq[i]);
 310
 311                if (has_acpi_companion(dev))
 312                        platform_disable_acpi_irq(to_platform_device(dev), i);
 313        }
 314}
 315
 316/**
 317 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
 318 *                              device using an interrupt affinity descriptor
 319 * @dev: platform device pointer
 320 * @affd: affinity descriptor
 321 * @minvec: minimum count of interrupt vectors
 322 * @maxvec: maximum count of interrupt vectors
 323 * @irqs: pointer holder for IRQ numbers
 324 *
  325 * Gets a set of IRQs for a platform device, and updates IRQ affinity according
  326 * to the passed affinity descriptor.
 327 *
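 * For example, a minimal sketch requesting between 2 and 8 vectors with a
 * default affinity descriptor (the bounds are illustrative)::
 *
 *		struct irq_affinity affd = { };
 *		int *irqs;
 *		int nvec;
 *
 *		nvec = devm_platform_get_irqs_affinity(pdev, &affd, 2, 8, &irqs);
 *		if (nvec < 0)
 *			return nvec;
 *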
 328 * Return: Number of vectors on success, negative error number on failure.
 329 */
 330int devm_platform_get_irqs_affinity(struct platform_device *dev,
 331                                    struct irq_affinity *affd,
 332                                    unsigned int minvec,
 333                                    unsigned int maxvec,
 334                                    int **irqs)
 335{
 336        struct irq_affinity_devres *ptr;
 337        struct irq_affinity_desc *desc;
 338        size_t size;
 339        int i, ret, nvec;
 340
 341        if (!affd)
 342                return -EPERM;
 343
 344        if (maxvec < minvec)
 345                return -ERANGE;
 346
 347        nvec = platform_irq_count(dev);
 348        if (nvec < 0)
 349                return nvec;
 350
 351        if (nvec < minvec)
 352                return -ENOSPC;
 353
 354        nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
 355        if (nvec < minvec)
 356                return -ENOSPC;
 357
 358        if (nvec > maxvec)
 359                nvec = maxvec;
 360
 361        size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
 362        ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
 363                           GFP_KERNEL);
 364        if (!ptr)
 365                return -ENOMEM;
 366
 367        ptr->count = nvec;
 368
 369        for (i = 0; i < nvec; i++) {
 370                int irq = platform_get_irq(dev, i);
 371                if (irq < 0) {
 372                        ret = irq;
 373                        goto err_free_devres;
 374                }
 375                ptr->irq[i] = irq;
 376        }
 377
 378        desc = irq_create_affinity_masks(nvec, affd);
 379        if (!desc) {
 380                ret = -ENOMEM;
 381                goto err_free_devres;
 382        }
 383
 384        for (i = 0; i < nvec; i++) {
 385                ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
 386                if (ret) {
 387                        dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
 388                                ptr->irq[i], ret);
 389                        goto err_free_desc;
 390                }
 391        }
 392
 393        devres_add(&dev->dev, ptr);
 394
 395        kfree(desc);
 396
 397        *irqs = ptr->irq;
 398
 399        return nvec;
 400
 401err_free_desc:
 402        kfree(desc);
 403err_free_devres:
 404        devres_free(ptr);
 405        return ret;
 406}
 407EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
 408
 409/**
 410 * platform_get_resource_byname - get a resource for a device by name
 411 * @dev: platform device
 412 * @type: resource type
 413 * @name: resource name
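 *
 * For example (the "mmio" name is purely illustrative)::
 *
 *		struct resource *res;
 *
 *		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
 *		if (!res)
 *			return -ENODEV;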
 414 */
 415struct resource *platform_get_resource_byname(struct platform_device *dev,
 416                                              unsigned int type,
 417                                              const char *name)
 418{
 419        u32 i;
 420
 421        for (i = 0; i < dev->num_resources; i++) {
 422                struct resource *r = &dev->resource[i];
 423
 424                if (unlikely(!r->name))
 425                        continue;
 426
 427                if (type == resource_type(r) && !strcmp(r->name, name))
 428                        return r;
 429        }
 430        return NULL;
 431}
 432EXPORT_SYMBOL_GPL(platform_get_resource_byname);
 433
 434static int __platform_get_irq_byname(struct platform_device *dev,
 435                                     const char *name)
 436{
 437        struct resource *r;
 438        int ret;
 439
 440        if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
 441                ret = of_irq_get_byname(dev->dev.of_node, name);
 442                if (ret > 0 || ret == -EPROBE_DEFER)
 443                        return ret;
 444        }
 445
 446        r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
 447        if (r) {
 448                WARN(r->start == 0, "0 is an invalid IRQ number\n");
 449                return r->start;
 450        }
 451
 452        return -ENXIO;
 453}
 454
 455/**
 456 * platform_get_irq_byname - get an IRQ for a device by name
 457 * @dev: platform device
 458 * @name: IRQ name
 459 *
  460 * Get an IRQ like platform_get_irq(), but by name rather than by index.
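 *
 * For example (the "wakeup" name is purely illustrative)::
 *
 *		int irq = platform_get_irq_byname(pdev, "wakeup");
 *		if (irq < 0)
 *			return irq;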
 461 *
 462 * Return: non-zero IRQ number on success, negative error number on failure.
 463 */
 464int platform_get_irq_byname(struct platform_device *dev, const char *name)
 465{
 466        int ret;
 467
 468        ret = __platform_get_irq_byname(dev, name);
 469        if (ret < 0 && ret != -EPROBE_DEFER)
 470                dev_err(&dev->dev, "IRQ %s not found\n", name);
 471
 472        return ret;
 473}
 474EXPORT_SYMBOL_GPL(platform_get_irq_byname);
 475
 476/**
 477 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 478 * @dev: platform device
 479 * @name: IRQ name
 480 *
  481 * Get an optional IRQ by name like platform_get_irq_byname(), except that it
  482 * does not print an error message if an IRQ cannot be obtained.
 483 *
 484 * Return: non-zero IRQ number on success, negative error number on failure.
 485 */
 486int platform_get_irq_byname_optional(struct platform_device *dev,
 487                                     const char *name)
 488{
 489        return __platform_get_irq_byname(dev, name);
 490}
 491EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
 492
 493/**
  494 * platform_add_devices - add a number of platform devices
 495 * @devs: array of platform devices to add
 496 * @num: number of platform devices in array
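 *
 * For example, board code traditionally registers a static array of devices
 * in one call (all "foo" names are illustrative)::
 *
 *		static struct platform_device *foo_devices[] __initdata = {
 *			&foo_device0,
 *			&foo_device1,
 *		};
 *
 *		platform_add_devices(foo_devices, ARRAY_SIZE(foo_devices));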
 497 */
 498int platform_add_devices(struct platform_device **devs, int num)
 499{
 500        int i, ret = 0;
 501
 502        for (i = 0; i < num; i++) {
 503                ret = platform_device_register(devs[i]);
 504                if (ret) {
 505                        while (--i >= 0)
 506                                platform_device_unregister(devs[i]);
 507                        break;
 508                }
 509        }
 510
 511        return ret;
 512}
 513EXPORT_SYMBOL_GPL(platform_add_devices);
 514
 515struct platform_object {
 516        struct platform_device pdev;
 517        char name[];
 518};
 519
 520/*
  521 * Set up default DMA mask for platform devices if they weren't
 522 * previously set by the architecture / DT.
 523 */
 524static void setup_pdev_dma_masks(struct platform_device *pdev)
 525{
 526        pdev->dev.dma_parms = &pdev->dma_parms;
 527
 528        if (!pdev->dev.coherent_dma_mask)
 529                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 530        if (!pdev->dev.dma_mask) {
 531                pdev->platform_dma_mask = DMA_BIT_MASK(32);
 532                pdev->dev.dma_mask = &pdev->platform_dma_mask;
 533        }
  534}
 535
 536/**
 537 * platform_device_put - destroy a platform device
 538 * @pdev: platform device to free
 539 *
 540 * Free all memory associated with a platform device.  This function must
 541 * _only_ be externally called in error cases.  All other usage is a bug.
 542 */
 543void platform_device_put(struct platform_device *pdev)
 544{
 545        if (!IS_ERR_OR_NULL(pdev))
 546                put_device(&pdev->dev);
 547}
 548EXPORT_SYMBOL_GPL(platform_device_put);
 549
 550static void platform_device_release(struct device *dev)
 551{
 552        struct platform_object *pa = container_of(dev, struct platform_object,
 553                                                  pdev.dev);
 554
 555        of_node_put(pa->pdev.dev.of_node);
 556        kfree(pa->pdev.dev.platform_data);
 557        kfree(pa->pdev.mfd_cell);
 558        kfree(pa->pdev.resource);
 559        kfree(pa->pdev.driver_override);
 560        kfree(pa);
 561}
 562
 563/**
 564 * platform_device_alloc - create a platform device
 565 * @name: base name of the device we're adding
 566 * @id: instance id
 567 *
 568 * Create a platform device object which can have other objects attached
 569 * to it, and which will have attached objects freed when it is released.
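 *
 * For example, a minimal sketch pairs this with platform_device_add() and
 * drops the reference on error (the "foo" name is illustrative)::
 *
 *		struct platform_device *pdev;
 *		int ret;
 *
 *		pdev = platform_device_alloc("foo", PLATFORM_DEVID_NONE);
 *		if (!pdev)
 *			return -ENOMEM;
 *
 *		ret = platform_device_add(pdev);
 *		if (ret) {
 *			platform_device_put(pdev);
 *			return ret;
 *		}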
 570 */
 571struct platform_device *platform_device_alloc(const char *name, int id)
 572{
 573        struct platform_object *pa;
 574
 575        pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
 576        if (pa) {
 577                strcpy(pa->name, name);
 578                pa->pdev.name = pa->name;
 579                pa->pdev.id = id;
 580                device_initialize(&pa->pdev.dev);
 581                pa->pdev.dev.release = platform_device_release;
 582                setup_pdev_dma_masks(&pa->pdev);
 583        }
 584
 585        return pa ? &pa->pdev : NULL;
 586}
 587EXPORT_SYMBOL_GPL(platform_device_alloc);
 588
 589/**
 590 * platform_device_add_resources - add resources to a platform device
 591 * @pdev: platform device allocated by platform_device_alloc to add resources to
 592 * @res: set of resources that needs to be allocated for the device
 593 * @num: number of resources
 594 *
 595 * Add a copy of the resources to the platform device.  The memory
 596 * associated with the resources will be freed when the platform device is
 597 * released.
 598 */
 599int platform_device_add_resources(struct platform_device *pdev,
 600                                  const struct resource *res, unsigned int num)
 601{
 602        struct resource *r = NULL;
 603
 604        if (res) {
 605                r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
 606                if (!r)
 607                        return -ENOMEM;
 608        }
 609
 610        kfree(pdev->resource);
 611        pdev->resource = r;
 612        pdev->num_resources = num;
 613        return 0;
 614}
 615EXPORT_SYMBOL_GPL(platform_device_add_resources);
 616
 617/**
 618 * platform_device_add_data - add platform-specific data to a platform device
  619 * @pdev: platform device allocated by platform_device_alloc to add the data to
 620 * @data: platform specific data for this platform device
 621 * @size: size of platform specific data
 622 *
 623 * Add a copy of platform specific data to the platform device's
 624 * platform_data pointer.  The memory associated with the platform data
 625 * will be freed when the platform device is released.
 626 */
 627int platform_device_add_data(struct platform_device *pdev, const void *data,
 628                             size_t size)
 629{
 630        void *d = NULL;
 631
 632        if (data) {
 633                d = kmemdup(data, size, GFP_KERNEL);
 634                if (!d)
 635                        return -ENOMEM;
 636        }
 637
 638        kfree(pdev->dev.platform_data);
 639        pdev->dev.platform_data = d;
 640        return 0;
 641}
 642EXPORT_SYMBOL_GPL(platform_device_add_data);
 643
 644/**
 645 * platform_device_add - add a platform device to device hierarchy
 646 * @pdev: platform device we're adding
 647 *
 648 * This is part 2 of platform_device_register(), though may be called
 649 * separately _iff_ pdev was allocated by platform_device_alloc().
 650 */
 651int platform_device_add(struct platform_device *pdev)
 652{
 653        u32 i;
 654        int ret;
 655
 656        if (!pdev)
 657                return -EINVAL;
 658
 659        if (!pdev->dev.parent)
 660                pdev->dev.parent = &platform_bus;
 661
 662        pdev->dev.bus = &platform_bus_type;
 663
 664        switch (pdev->id) {
 665        default:
 666                dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
 667                break;
 668        case PLATFORM_DEVID_NONE:
 669                dev_set_name(&pdev->dev, "%s", pdev->name);
 670                break;
 671        case PLATFORM_DEVID_AUTO:
 672                /*
 673                 * Automatically allocated device ID. We mark it as such so
 674                 * that we remember it must be freed, and we append a suffix
 675                 * to avoid namespace collision with explicit IDs.
 676                 */
 677                ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
 678                if (ret < 0)
 679                        goto err_out;
 680                pdev->id = ret;
 681                pdev->id_auto = true;
 682                dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
 683                break;
 684        }
 685
 686        for (i = 0; i < pdev->num_resources; i++) {
 687                struct resource *p, *r = &pdev->resource[i];
 688
 689                if (r->name == NULL)
 690                        r->name = dev_name(&pdev->dev);
 691
 692                p = r->parent;
 693                if (!p) {
 694                        if (resource_type(r) == IORESOURCE_MEM)
 695                                p = &iomem_resource;
 696                        else if (resource_type(r) == IORESOURCE_IO)
 697                                p = &ioport_resource;
 698                }
 699
 700                if (p) {
 701                        ret = insert_resource(p, r);
 702                        if (ret) {
 703                                dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
 704                                goto failed;
 705                        }
 706                }
 707        }
 708
 709        pr_debug("Registering platform device '%s'. Parent at %s\n",
 710                 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
 711
 712        ret = device_add(&pdev->dev);
 713        if (ret == 0)
 714                return ret;
 715
 716 failed:
 717        if (pdev->id_auto) {
 718                ida_free(&platform_devid_ida, pdev->id);
 719                pdev->id = PLATFORM_DEVID_AUTO;
 720        }
 721
 722        while (i--) {
 723                struct resource *r = &pdev->resource[i];
 724                if (r->parent)
 725                        release_resource(r);
 726        }
 727
 728 err_out:
 729        return ret;
 730}
 731EXPORT_SYMBOL_GPL(platform_device_add);
 732
 733/**
 734 * platform_device_del - remove a platform-level device
 735 * @pdev: platform device we're removing
 736 *
 737 * Note that this function will also release all memory- and port-based
 738 * resources owned by the device (@dev->resource).  This function must
 739 * _only_ be externally called in error cases.  All other usage is a bug.
 740 */
 741void platform_device_del(struct platform_device *pdev)
 742{
 743        u32 i;
 744
 745        if (!IS_ERR_OR_NULL(pdev)) {
 746                device_del(&pdev->dev);
 747
 748                if (pdev->id_auto) {
 749                        ida_free(&platform_devid_ida, pdev->id);
 750                        pdev->id = PLATFORM_DEVID_AUTO;
 751                }
 752
 753                for (i = 0; i < pdev->num_resources; i++) {
 754                        struct resource *r = &pdev->resource[i];
 755                        if (r->parent)
 756                                release_resource(r);
 757                }
 758        }
 759}
 760EXPORT_SYMBOL_GPL(platform_device_del);
 761
 762/**
 763 * platform_device_register - add a platform-level device
 764 * @pdev: platform device we're adding
 765 */
 766int platform_device_register(struct platform_device *pdev)
 767{
 768        device_initialize(&pdev->dev);
 769        setup_pdev_dma_masks(pdev);
 770        return platform_device_add(pdev);
 771}
 772EXPORT_SYMBOL_GPL(platform_device_register);
 773
 774/**
 775 * platform_device_unregister - unregister a platform-level device
 776 * @pdev: platform device we're unregistering
 777 *
 778 * Unregistration is done in 2 steps. First we release all resources
  779 * and remove it from the subsystem, then we drop the reference count by
 780 * calling platform_device_put().
 781 */
 782void platform_device_unregister(struct platform_device *pdev)
 783{
 784        platform_device_del(pdev);
 785        platform_device_put(pdev);
 786}
 787EXPORT_SYMBOL_GPL(platform_device_unregister);
 788
 789/**
 790 * platform_device_register_full - add a platform-level device with
 791 * resources and platform-specific data
 792 *
 793 * @pdevinfo: data used to create device
 794 *
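 * For example, a minimal sketch (all "foo" names and values are
 * illustrative)::
 *
 *		struct platform_device_info pdevinfo = {
 *			.name = "foo",
 *			.id = PLATFORM_DEVID_AUTO,
 *			.res = foo_resources,
 *			.num_res = ARRAY_SIZE(foo_resources),
 *		};
 *		struct platform_device *pdev;
 *
 *		pdev = platform_device_register_full(&pdevinfo);
 *		if (IS_ERR(pdev))
 *			return PTR_ERR(pdev);
 *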
 795 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 796 */
 797struct platform_device *platform_device_register_full(
 798                const struct platform_device_info *pdevinfo)
 799{
 800        int ret;
 801        struct platform_device *pdev;
 802
 803        pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
 804        if (!pdev)
 805                return ERR_PTR(-ENOMEM);
 806
 807        pdev->dev.parent = pdevinfo->parent;
 808        pdev->dev.fwnode = pdevinfo->fwnode;
 809        pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
 810        pdev->dev.of_node_reused = pdevinfo->of_node_reused;
 811
 812        if (pdevinfo->dma_mask) {
 813                pdev->platform_dma_mask = pdevinfo->dma_mask;
 814                pdev->dev.dma_mask = &pdev->platform_dma_mask;
 815                pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
 816        }
 817
 818        ret = platform_device_add_resources(pdev,
 819                        pdevinfo->res, pdevinfo->num_res);
 820        if (ret)
 821                goto err;
 822
 823        ret = platform_device_add_data(pdev,
 824                        pdevinfo->data, pdevinfo->size_data);
 825        if (ret)
 826                goto err;
 827
 828        if (pdevinfo->properties) {
 829                ret = device_create_managed_software_node(&pdev->dev,
 830                                                          pdevinfo->properties, NULL);
 831                if (ret)
 832                        goto err;
 833        }
 834
 835        ret = platform_device_add(pdev);
 836        if (ret) {
 837err:
 838                ACPI_COMPANION_SET(&pdev->dev, NULL);
 839                platform_device_put(pdev);
 840                return ERR_PTR(ret);
 841        }
 842
 843        return pdev;
 844}
 845EXPORT_SYMBOL_GPL(platform_device_register_full);
 846
 847/**
 848 * __platform_driver_register - register a driver for platform-level devices
 849 * @drv: platform driver structure
 850 * @owner: owning module/driver
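 *
 * Most drivers do not call this directly; the platform_driver_register()
 * wrapper (or the module_platform_driver() helper) fills in @owner. A
 * minimal sketch (the "foo" names are illustrative)::
 *
 *		static struct platform_driver foo_driver = {
 *			.probe = foo_probe,
 *			.driver = {
 *				.name = "foo",
 *			},
 *		};
 *		module_platform_driver(foo_driver);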
 851 */
 852int __platform_driver_register(struct platform_driver *drv,
 853                                struct module *owner)
 854{
 855        drv->driver.owner = owner;
 856        drv->driver.bus = &platform_bus_type;
 857
 858        return driver_register(&drv->driver);
 859}
 860EXPORT_SYMBOL_GPL(__platform_driver_register);
 861
 862/**
 863 * platform_driver_unregister - unregister a driver for platform-level devices
 864 * @drv: platform driver structure
 865 */
 866void platform_driver_unregister(struct platform_driver *drv)
 867{
 868        driver_unregister(&drv->driver);
 869}
 870EXPORT_SYMBOL_GPL(platform_driver_unregister);
 871
 872static int platform_probe_fail(struct platform_device *pdev)
 873{
 874        return -ENXIO;
 875}
 876
 877/**
 878 * __platform_driver_probe - register driver for non-hotpluggable device
 879 * @drv: platform driver structure
 880 * @probe: the driver probe routine, probably from an __init section
 881 * @module: module which will be the owner of the driver
 882 *
 883 * Use this instead of platform_driver_register() when you know the device
 884 * is not hotpluggable and has already been registered, and you want to
 885 * remove its run-once probe() infrastructure from memory after the driver
 886 * has bound to the device.
 887 *
 888 * One typical use for this would be with drivers for controllers integrated
 889 * into system-on-chip processors, where the controller devices have been
 890 * configured as part of board setup.
 891 *
 892 * Note that this is incompatible with deferred probing.
 893 *
 894 * Returns zero if the driver registered and bound to a device, else returns
 895 * a negative error code and with the driver not registered.
 896 */
 897int __init_or_module __platform_driver_probe(struct platform_driver *drv,
 898                int (*probe)(struct platform_device *), struct module *module)
 899{
 900        int retval, code;
 901
 902        if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
 903                pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
 904                         drv->driver.name, __func__);
 905                return -EINVAL;
 906        }
 907
 908        /*
  909         * We have to run our probes synchronously because we check whether
  910         * we found any devices to bind to and exit with an error if there
  911         * are none.
 912         */
 913        drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
 914
 915        /*
 916         * Prevent driver from requesting probe deferral to avoid further
 917         * futile probe attempts.
 918         */
 919        drv->prevent_deferred_probe = true;
 920
 921        /* make sure driver won't have bind/unbind attributes */
 922        drv->driver.suppress_bind_attrs = true;
 923
 924        /* temporary section violation during probe() */
 925        drv->probe = probe;
 926        retval = code = __platform_driver_register(drv, module);
 927        if (retval)
 928                return retval;
 929
 930        /*
 931         * Fixup that section violation, being paranoid about code scanning
 932         * the list of drivers in order to probe new devices.  Check to see
 933         * if the probe was successful, and make sure any forced probes of
 934         * new devices fail.
 935         */
 936        spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
 937        drv->probe = platform_probe_fail;
 938        if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
 939                retval = -ENODEV;
 940        spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
 941
 942        if (code != retval)
 943                platform_driver_unregister(drv);
 944        return retval;
 945}
 946EXPORT_SYMBOL_GPL(__platform_driver_probe);
 947
 948/**
 949 * __platform_create_bundle - register driver and create corresponding device
 950 * @driver: platform driver structure
 951 * @probe: the driver probe routine, probably from an __init section
 952 * @res: set of resources that needs to be allocated for the device
 953 * @n_res: number of resources
 954 * @data: platform specific data for this platform device
 955 * @size: size of platform specific data
 956 * @module: module which will be the owner of the driver
 957 *
 958 * Use this in legacy-style modules that probe hardware directly and
 959 * register a single platform device and corresponding platform driver.
 960 *
 961 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 962 */
 963struct platform_device * __init_or_module __platform_create_bundle(
 964                        struct platform_driver *driver,
 965                        int (*probe)(struct platform_device *),
 966                        struct resource *res, unsigned int n_res,
 967                        const void *data, size_t size, struct module *module)
 968{
 969        struct platform_device *pdev;
 970        int error;
 971
 972        pdev = platform_device_alloc(driver->driver.name, -1);
 973        if (!pdev) {
 974                error = -ENOMEM;
 975                goto err_out;
 976        }
 977
 978        error = platform_device_add_resources(pdev, res, n_res);
 979        if (error)
 980                goto err_pdev_put;
 981
 982        error = platform_device_add_data(pdev, data, size);
 983        if (error)
 984                goto err_pdev_put;
 985
 986        error = platform_device_add(pdev);
 987        if (error)
 988                goto err_pdev_put;
 989
 990        error = __platform_driver_probe(driver, probe, module);
 991        if (error)
 992                goto err_pdev_del;
 993
 994        return pdev;
 995
 996err_pdev_del:
 997        platform_device_del(pdev);
 998err_pdev_put:
 999        platform_device_put(pdev);
1000err_out:
1001        return ERR_PTR(error);
1002}
1003EXPORT_SYMBOL_GPL(__platform_create_bundle);
1004
1005/**
1006 * __platform_register_drivers - register an array of platform drivers
1007 * @drivers: an array of drivers to register
1008 * @count: the number of drivers to register
1009 * @owner: module owning the drivers
1010 *
1011 * Registers platform drivers specified by an array. On failure to register a
1012 * driver, all previously registered drivers will be unregistered. Callers of
1013 * this API should use platform_unregister_drivers() to unregister drivers in
1014 * the reverse order.
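 *
 * For example, most callers use the platform_register_drivers() wrapper,
 * which fills in the owning module (the "foo" names are illustrative)::
 *
 *		static struct platform_driver * const foo_drivers[] = {
 *			&foo_pmu_driver,
 *			&foo_clk_driver,
 *		};
 *
 *		return platform_register_drivers(foo_drivers, ARRAY_SIZE(foo_drivers));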
1015 *
1016 * Returns: 0 on success or a negative error code on failure.
1017 */
1018int __platform_register_drivers(struct platform_driver * const *drivers,
1019                                unsigned int count, struct module *owner)
1020{
1021        unsigned int i;
1022        int err;
1023
1024        for (i = 0; i < count; i++) {
1025                pr_debug("registering platform driver %ps\n", drivers[i]);
1026
1027                err = __platform_driver_register(drivers[i], owner);
1028                if (err < 0) {
1029                        pr_err("failed to register platform driver %ps: %d\n",
1030                               drivers[i], err);
1031                        goto error;
1032                }
1033        }
1034
1035        return 0;
1036
1037error:
1038        while (i--) {
1039                pr_debug("unregistering platform driver %ps\n", drivers[i]);
1040                platform_driver_unregister(drivers[i]);
1041        }
1042
1043        return err;
1044}
1045EXPORT_SYMBOL_GPL(__platform_register_drivers);
1046
1047/**
1048 * platform_unregister_drivers - unregister an array of platform drivers
1049 * @drivers: an array of drivers to unregister
1050 * @count: the number of drivers to unregister
1051 *
1052 * Unregisters platform drivers specified by an array. This is typically used
1053 * to complement an earlier call to platform_register_drivers(). Drivers are
1054 * unregistered in the reverse order in which they were registered.
1055 */
1056void platform_unregister_drivers(struct platform_driver * const *drivers,
1057                                 unsigned int count)
1058{
1059        while (count--) {
1060                pr_debug("unregistering platform driver %ps\n", drivers[count]);
1061                platform_driver_unregister(drivers[count]);
1062        }
1063}
1064EXPORT_SYMBOL_GPL(platform_unregister_drivers);
1065
1066static const struct platform_device_id *platform_match_id(
1067                        const struct platform_device_id *id,
1068                        struct platform_device *pdev)
1069{
1070        while (id->name[0]) {
1071                if (strcmp(pdev->name, id->name) == 0) {
1072                        pdev->id_entry = id;
1073                        return id;
1074                }
1075                id++;
1076        }
1077        return NULL;
1078}
1079
1080#ifdef CONFIG_PM_SLEEP
1081
1082static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1083{
1084        struct platform_driver *pdrv = to_platform_driver(dev->driver);
1085        struct platform_device *pdev = to_platform_device(dev);
1086        int ret = 0;
1087
1088        if (dev->driver && pdrv->suspend)
1089                ret = pdrv->suspend(pdev, mesg);
1090
1091        return ret;
1092}
1093
1094static int platform_legacy_resume(struct device *dev)
1095{
1096        struct platform_driver *pdrv = to_platform_driver(dev->driver);
1097        struct platform_device *pdev = to_platform_device(dev);
1098        int ret = 0;
1099
1100        if (dev->driver && pdrv->resume)
1101                ret = pdrv->resume(pdev);
1102
1103        return ret;
1104}
1105
1106#endif /* CONFIG_PM_SLEEP */
1107
1108#ifdef CONFIG_SUSPEND
1109
1110int platform_pm_suspend(struct device *dev)
1111{
1112        struct device_driver *drv = dev->driver;
1113        int ret = 0;
1114
1115        if (!drv)
1116                return 0;
1117
1118        if (drv->pm) {
1119                if (drv->pm->suspend)
1120                        ret = drv->pm->suspend(dev);
1121        } else {
1122                ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1123        }
1124
1125        return ret;
1126}
1127
1128int platform_pm_resume(struct device *dev)
1129{
1130        struct device_driver *drv = dev->driver;
1131        int ret = 0;
1132
1133        if (!drv)
1134                return 0;
1135
1136        if (drv->pm) {
1137                if (drv->pm->resume)
1138                        ret = drv->pm->resume(dev);
1139        } else {
1140                ret = platform_legacy_resume(dev);
1141        }
1142
1143        return ret;
1144}
1145
1146#endif /* CONFIG_SUSPEND */
1147
1148#ifdef CONFIG_HIBERNATE_CALLBACKS
1149
1150int platform_pm_freeze(struct device *dev)
1151{
1152        struct device_driver *drv = dev->driver;
1153        int ret = 0;
1154
1155        if (!drv)
1156                return 0;
1157
1158        if (drv->pm) {
1159                if (drv->pm->freeze)
1160                        ret = drv->pm->freeze(dev);
1161        } else {
1162                ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1163        }
1164
1165        return ret;
1166}
1167
1168int platform_pm_thaw(struct device *dev)
1169{
1170        struct device_driver *drv = dev->driver;
1171        int ret = 0;
1172
1173        if (!drv)
1174                return 0;
1175
1176        if (drv->pm) {
1177                if (drv->pm->thaw)
1178                        ret = drv->pm->thaw(dev);
1179        } else {
1180                ret = platform_legacy_resume(dev);
1181        }
1182
1183        return ret;
1184}
1185
1186int platform_pm_poweroff(struct device *dev)
1187{
1188        struct device_driver *drv = dev->driver;
1189        int ret = 0;
1190
1191        if (!drv)
1192                return 0;
1193
1194        if (drv->pm) {
1195                if (drv->pm->poweroff)
1196                        ret = drv->pm->poweroff(dev);
1197        } else {
1198                ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1199        }
1200
1201        return ret;
1202}
1203
1204int platform_pm_restore(struct device *dev)
1205{
1206        struct device_driver *drv = dev->driver;
1207        int ret = 0;
1208
1209        if (!drv)
1210                return 0;
1211
1212        if (drv->pm) {
1213                if (drv->pm->restore)
1214                        ret = drv->pm->restore(dev);
1215        } else {
1216                ret = platform_legacy_resume(dev);
1217        }
1218
1219        return ret;
1220}
1221
1222#endif /* CONFIG_HIBERNATE_CALLBACKS */
1223
1224/* modalias support enables more hands-off userspace setup:
1225 * (a) environment variable lets new-style hotplug events work once system is
1226 *     fully running:  "modprobe $MODALIAS"
1227 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
1228 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
1229 */
1230static ssize_t modalias_show(struct device *dev,
1231                             struct device_attribute *attr, char *buf)
1232{
1233        struct platform_device *pdev = to_platform_device(dev);
1234        int len;
1235
1236        len = of_device_modalias(dev, buf, PAGE_SIZE);
1237        if (len != -ENODEV)
1238                return len;
1239
1240        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
1241        if (len != -ENODEV)
1242                return len;
1243
1244        return sysfs_emit(buf, "platform:%s\n", pdev->name);
1245}
1246static DEVICE_ATTR_RO(modalias);
1247
1248static ssize_t numa_node_show(struct device *dev,
1249                              struct device_attribute *attr, char *buf)
1250{
1251        return sysfs_emit(buf, "%d\n", dev_to_node(dev));
1252}
1253static DEVICE_ATTR_RO(numa_node);
1254
1255static ssize_t driver_override_show(struct device *dev,
1256                                    struct device_attribute *attr, char *buf)
1257{
1258        struct platform_device *pdev = to_platform_device(dev);
1259        ssize_t len;
1260
1261        device_lock(dev);
1262        len = sysfs_emit(buf, "%s\n", pdev->driver_override);
1263        device_unlock(dev);
1264
1265        return len;
1266}
1267
1268static ssize_t driver_override_store(struct device *dev,
1269                                     struct device_attribute *attr,
1270                                     const char *buf, size_t count)
1271{
1272        struct platform_device *pdev = to_platform_device(dev);
1273        char *driver_override, *old, *cp;
1274
1275        /* We need to keep extra room for a newline */
1276        if (count >= (PAGE_SIZE - 1))
1277                return -EINVAL;
1278
1279        driver_override = kstrndup(buf, count, GFP_KERNEL);
1280        if (!driver_override)
1281                return -ENOMEM;
1282
1283        cp = strchr(driver_override, '\n');
1284        if (cp)
1285                *cp = '\0';
1286
1287        device_lock(dev);
1288        old = pdev->driver_override;
1289        if (strlen(driver_override)) {
1290                pdev->driver_override = driver_override;
1291        } else {
1292                kfree(driver_override);
1293                pdev->driver_override = NULL;
1294        }
1295        device_unlock(dev);
1296
1297        kfree(old);
1298
1299        return count;
1300}
1301static DEVICE_ATTR_RW(driver_override);
1302
1303static struct attribute *platform_dev_attrs[] = {
1304        &dev_attr_modalias.attr,
1305        &dev_attr_numa_node.attr,
1306        &dev_attr_driver_override.attr,
1307        NULL,
1308};
1309
1310static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
1311                int n)
1312{
1313        struct device *dev = container_of(kobj, typeof(*dev), kobj);
1314
1315        if (a == &dev_attr_numa_node.attr &&
1316                        dev_to_node(dev) == NUMA_NO_NODE)
1317                return 0;
1318
1319        return a->mode;
1320}
1321
1322static const struct attribute_group platform_dev_group = {
1323        .attrs = platform_dev_attrs,
1324        .is_visible = platform_dev_attrs_visible,
1325};
1326__ATTRIBUTE_GROUPS(platform_dev);
1327
1328
1329/**
1330 * platform_match - bind platform device to platform driver.
1331 * @dev: device.
1332 * @drv: driver.
1333 *
1334 * Platform device IDs are assumed to be encoded like this:
1335 * "<name><instance>", where <name> is a short description of the type of
1336 * device, like "pci" or "floppy", and <instance> is the enumerated
1337 * instance of the device, like '0' or '42'.  Driver IDs are simply
1338 * "<name>".  So, extract the <name> from the platform_device structure,
1339 * and compare it against the name of the driver. Return whether they match
1340 * or not.
1341 */
1342static int platform_match(struct device *dev, struct device_driver *drv)
1343{
1344        struct platform_device *pdev = to_platform_device(dev);
1345        struct platform_driver *pdrv = to_platform_driver(drv);
1346
1347        /* When driver_override is set, only bind to the matching driver */
1348        if (pdev->driver_override)
1349                return !strcmp(pdev->driver_override, drv->name);
1350
1351        /* Attempt an OF style match first */
1352        if (of_driver_match_device(dev, drv))
1353                return 1;
1354
1355        /* Then try ACPI style match */
1356        if (acpi_driver_match_device(dev, drv))
1357                return 1;
1358
1359        /* Then try to match against the id table */
1360        if (pdrv->id_table)
1361                return platform_match_id(pdrv->id_table, pdev) != NULL;
1362
1363        /* fall-back to driver name match */
1364        return (strcmp(pdev->name, drv->name) == 0);
1365}
1366
1367static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
1368{
1369        struct platform_device  *pdev = to_platform_device(dev);
1370        int rc;
1371
1372        /* Some devices have extra OF data and an OF-style MODALIAS */
1373        rc = of_device_uevent_modalias(dev, env);
1374        if (rc != -ENODEV)
1375                return rc;
1376
1377        rc = acpi_device_uevent_modalias(dev, env);
1378        if (rc != -ENODEV)
1379                return rc;
1380
1381        add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
1382                        pdev->name);
1383        return 0;
1384}
1385
1386static int platform_probe(struct device *_dev)
1387{
1388        struct platform_driver *drv = to_platform_driver(_dev->driver);
1389        struct platform_device *dev = to_platform_device(_dev);
1390        int ret;
1391
1392        /*
1393         * A driver registered using platform_driver_probe() cannot be bound
1394         * again later because the probe function usually lives in __init code
1395         * and so is gone. For these drivers .probe is set to
1396         * platform_probe_fail in __platform_driver_probe(). Don't even prepare
1397         * clocks and PM domains for these to match the traditional behaviour.
1398         */
1399        if (unlikely(drv->probe == platform_probe_fail))
1400                return -ENXIO;
1401
1402        ret = of_clk_set_defaults(_dev->of_node, false);
1403        if (ret < 0)
1404                return ret;
1405
1406        ret = dev_pm_domain_attach(_dev, true);
1407        if (ret)
1408                goto out;
1409
1410        if (drv->probe) {
1411                ret = drv->probe(dev);
1412                if (ret)
1413                        dev_pm_domain_detach(_dev, true);
1414        }
1415
1416out:
1417        if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
1418                dev_warn(_dev, "probe deferral not supported\n");
1419                ret = -ENXIO;
1420        }
1421
1422        return ret;
1423}
1424
1425static void platform_remove(struct device *_dev)
1426{
1427        struct platform_driver *drv = to_platform_driver(_dev->driver);
1428        struct platform_device *dev = to_platform_device(_dev);
1429
1430        if (drv->remove) {
1431                int ret = drv->remove(dev);
1432
1433                if (ret)
1434                        dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
1435        }
1436        dev_pm_domain_detach(_dev, true);
1437}
1438
1439static void platform_shutdown(struct device *_dev)
1440{
1441        struct platform_device *dev = to_platform_device(_dev);
1442        struct platform_driver *drv;
1443
1444        if (!_dev->driver)
1445                return;
1446
1447        drv = to_platform_driver(_dev->driver);
1448        if (drv->shutdown)
1449                drv->shutdown(dev);
1450}
1451
1452
1453int platform_dma_configure(struct device *dev)
1454{
1455        enum dev_dma_attr attr;
1456        int ret = 0;
1457
1458        if (dev->of_node) {
1459                ret = of_dma_configure(dev, dev->of_node, true);
1460        } else if (has_acpi_companion(dev)) {
1461                attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
1462                ret = acpi_dma_configure(dev, attr);
1463        }
1464
1465        return ret;
1466}
1467
1468static const struct dev_pm_ops platform_dev_pm_ops = {
1469        .runtime_suspend = pm_generic_runtime_suspend,
1470        .runtime_resume = pm_generic_runtime_resume,
1471        USE_PLATFORM_PM_SLEEP_OPS
1472};
1473
1474struct bus_type platform_bus_type = {
1475        .name           = "platform",
1476        .dev_groups     = platform_dev_groups,
1477        .match          = platform_match,
1478        .uevent         = platform_uevent,
1479        .probe          = platform_probe,
1480        .remove         = platform_remove,
1481        .shutdown       = platform_shutdown,
1482        .dma_configure  = platform_dma_configure,
1483        .pm             = &platform_dev_pm_ops,
1484};
1485EXPORT_SYMBOL_GPL(platform_bus_type);
1486
1487static inline int __platform_match(struct device *dev, const void *drv)
1488{
1489        return platform_match(dev, (struct device_driver *)drv);
1490}
1491
1492/**
1493 * platform_find_device_by_driver - Find a platform device with a given
1494 * driver.
1495 * @start: The device to start the search from.
1496 * @drv: The device driver to look for.
1497 */
1498struct device *platform_find_device_by_driver(struct device *start,
1499                                              const struct device_driver *drv)
1500{
1501        return bus_find_device(&platform_bus_type, start, drv,
1502                               __platform_match);
1503}
1504EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1505
1506void __weak __init early_platform_cleanup(void) { }
1507
1508int __init platform_bus_init(void)
1509{
1510        int error;
1511
1512        early_platform_cleanup();
1513
1514        error = device_register(&platform_bus);
1515        if (error) {
1516                put_device(&platform_bus);
1517                return error;
1518        }
1519        error =  bus_register(&platform_bus_type);
1520        if (error)
1521                device_unregister(&platform_bus);
1522        of_platform_register_reconfig_notifier();
1523        return error;
1524}
1525