// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"
#include "trace.h"

struct devres_node {
        struct list_head                entry;
        dr_release_t                    release;
        const char                      *name;
        size_t                          size;
};

struct devres {
        struct devres_node              node;
        /*
         * Some archs want to perform DMA into kmalloc caches
         * and need a guaranteed alignment larger than
         * the alignment of a 64-bit integer.
         * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
         * buffer alignment as if it was allocated by plain kmalloc().
         */
        u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
        struct devres_node              node[2];
        void                            *id;
        int                             color;
        /* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
                             size_t size)
{
        node->name = name;
        node->size = size;
}

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void devres_dbg(struct device *dev, struct devres_node *node,
                       const char *op)
{
        if (unlikely(log_devres))
                dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
                        op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op)       do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

static void devres_log(struct device *dev, struct devres_node *node,
                       const char *op)
{
        trace_devres_log(dev, op, node, node->name, node->size);
        devres_dbg(dev, node, op);
}

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
        /* noop */
}

static void group_close_release(struct device *dev, void *res)
{
        /* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
        if (node->release == &group_open_release)
                return container_of(node, struct devres_group, node[0]);
        if (node->release == &group_close_release)
                return container_of(node, struct devres_group, node[1]);
        return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
        /* We must catch any near-SIZE_MAX cases that could overflow. */
        if (unlikely(check_add_overflow(sizeof(struct devres),
                                        size, tot_size)))
                return false;

        return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
                                                size_t size, gfp_t gfp, int nid)
{
        size_t tot_size;
        struct devres *dr;

        if (!check_dr_size(size, &tot_size))
                return NULL;

        dr = kmalloc_node_track_caller(tot_size, gfp, nid);
        if (unlikely(!dr))
                return NULL;

        memset(dr, 0, offsetof(struct devres, data));

        INIT_LIST_HEAD(&dr->node.entry);
        dr->node.release = release;
        return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
        devres_log(dev, node, "ADD");
        BUG_ON(!list_empty(&node->entry));
        list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
                       struct devres_node *old, struct devres_node *new)
{
        devres_log(dev, old, "REPLACE");
        BUG_ON(!list_empty(&new->entry));
        list_replace(&old->entry, &new->entry);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
                          const char *name)
{
        struct devres *dr;

        dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
        if (unlikely(!dr))
                return NULL;
        set_node_dbginfo(&dr->node, name, size);
        return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *      void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
                        dr_match_t match, void *match_data,
                        void (*fn)(struct device *, void *, void *),
                        void *data)
{
        struct devres_node *node;
        struct devres_node *tmp;
        unsigned long flags;

        if (!fn)
                return;

        spin_lock_irqsave(&dev->devres_lock, flags);
        list_for_each_entry_safe_reverse(node, tmp,
                        &dev->devres_head, entry) {
                struct devres *dr = container_of(node, struct devres, node);

                if (node->release != release)
                        continue;
                if (match && !match(dev, dr->data, match_data))
                        continue;
                fn(dev, dr->data, data);
        }
        spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
        if (res) {
                struct devres *dr = container_of(res, struct devres, data);

                BUG_ON(!list_empty(&dr->node.entry));
                kfree(dr);
        }
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
        struct devres *dr = container_of(res, struct devres, data);
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);
        add_dr(dev, &dr->node);
        spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
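
/*
 * Example (a minimal sketch, not taken from any in-tree driver): the
 * usual devres_alloc()/devres_add() pairing.  "struct foo_ctx" and
 * foo_ctx_release() are hypothetical names.
 *
 *	static void foo_ctx_release(struct device *dev, void *res)
 *	{
 *		struct foo_ctx *ctx = res;
 *
 *		// undo whatever setup was done on ctx
 *	}
 *
 *	ctx = devres_alloc(foo_ctx_release, sizeof(*ctx), GFP_KERNEL);
 *	if (!ctx)
 *		return -ENOMEM;
 *	// ... initialize *ctx ...
 *	devres_add(dev, ctx);	// released automatically on driver detach
 */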

static struct devres *find_dr(struct device *dev, dr_release_t release,
                              dr_match_t match, void *match_data)
{
        struct devres_node *node;

        list_for_each_entry_reverse(node, &dev->devres_head, entry) {
                struct devres *dr = container_of(node, struct devres, node);

                if (node->release != release)
                        continue;
                if (match && !match(dev, dr->data, match_data))
                        continue;
                return dr;
        }

        return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data)
{
        struct devres *dr;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);
        dr = find_dr(dev, release, match, match_data);
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        if (dr)
                return dr->data;
        return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
                  dr_match_t match, void *match_data)
{
        struct devres *new_dr = container_of(new_res, struct devres, data);
        struct devres *dr;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);
        dr = find_dr(dev, new_dr->node.release, match, match_data);
        if (!dr) {
                add_dr(dev, &new_dr->node);
                dr = new_dr;
                new_res = NULL;
        }
        spin_unlock_irqrestore(&dev->devres_lock, flags);
        devres_free(new_res);

        return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
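
/*
 * Example (informal sketch): allocate-if-absent with devres_get().
 * foo_state_release() and "struct foo_state" are hypothetical.
 *
 *	new = devres_alloc(foo_state_release, sizeof(*new), GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	// Returns the already-registered state if one exists (freeing
 *	// "new"), otherwise registers and returns "new".
 *	state = devres_get(dev, new, NULL, NULL);
 */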

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
                     dr_match_t match, void *match_data)
{
        struct devres *dr;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);
        dr = find_dr(dev, release, match, match_data);
        if (dr) {
                list_del_init(&dr->node.entry);
                devres_log(dev, &dr->node, "REM");
        }
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        if (dr)
                return dr->data;
        return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data)
{
        void *res;

        res = devres_remove(dev, release, match, match_data);
        if (unlikely(!res))
                return -ENOENT;

        devres_free(res);
        return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data)
{
        void *res;

        res = devres_remove(dev, release, match, match_data);
        if (unlikely(!res))
                return -ENOENT;

        (*release)(dev, res);
        devres_free(res);
        return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
                        struct list_head *first, struct list_head *end,
                        struct list_head *todo)
{
        struct devres_node *node, *n;
        int cnt = 0, nr_groups = 0;

        /* First pass - move normal devres entries to @todo and clear
         * devres_group colors.
         */
        node = list_entry(first, struct devres_node, entry);
        list_for_each_entry_safe_from(node, n, end, entry) {
                struct devres_group *grp;

                grp = node_to_group(node);
                if (grp) {
                        /* clear color of group markers in the first pass */
                        grp->color = 0;
                        nr_groups++;
                } else {
                        /* regular devres entry */
                        if (&node->entry == first)
                                first = first->next;
                        list_move_tail(&node->entry, todo);
                        cnt++;
                }
        }

        if (!nr_groups)
                return cnt;

        /* Second pass - Scan groups and color them.  A group gets
         * color value of two iff the group is wholly contained in
         * [current node, end). That is, for a closed group, both opening
         * and closing markers should be in the range, while just the
         * opening marker is enough for an open group.
         */
        node = list_entry(first, struct devres_node, entry);
        list_for_each_entry_safe_from(node, n, end, entry) {
                struct devres_group *grp;

                grp = node_to_group(node);
                BUG_ON(!grp || list_empty(&grp->node[0].entry));

                grp->color++;
                if (list_empty(&grp->node[1].entry))
                        grp->color++;

                BUG_ON(grp->color <= 0 || grp->color > 2);
                if (grp->color == 2) {
                        /* No need to update current node or end. The removed
                         * nodes are always before both.
                         */
                        list_move_tail(&grp->node[0].entry, todo);
                        list_del_init(&grp->node[1].entry);
                }
        }

        return cnt;
}
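
/*
 * Illustration (informal): given a range containing
 *
 *	grpA<  res1  grpB<  res2  grpB>  grpC<  res3
 *
 * the first pass moves res1..res3 to @todo.  The second pass then sees
 * only the group markers: grpB has both markers in the range and gets
 * color 2, while grpA and grpC are still open (no closing marker
 * exists), so their single opening marker also yields color 2.  All
 * three groups are therefore unlinked along with their resources.  A
 * closed group whose opening marker lies before the range would only
 * reach color 1 and survives.
 */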

static void release_nodes(struct device *dev, struct list_head *todo)
{
        struct devres *dr, *tmp;

        /* Release.  Note that both devres and devres_group are
         * handled as devres in the following loop.  This is safe.
         */
        list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
                devres_log(dev, &dr->node, "REL");
                dr->node.release(dev, dr->data);
                kfree(dr);
        }
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 *
 * RETURNS:
 * The number of released non-group resources, or -ENODEV if the
 * devres list looks uninitialized.
 */
int devres_release_all(struct device *dev)
{
        unsigned long flags;
        LIST_HEAD(todo);
        int cnt;

        /* Looks like an uninitialized device structure */
        if (WARN_ON(dev->devres_head.next == NULL))
                return -ENODEV;

        /* Nothing to release if list is empty */
        if (list_empty(&dev->devres_head))
                return 0;

        spin_lock_irqsave(&dev->devres_lock, flags);
        cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        release_nodes(dev, &todo);
        return cnt;
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
        struct devres_group *grp;
        unsigned long flags;

        grp = kmalloc(sizeof(*grp), gfp);
        if (unlikely(!grp))
                return NULL;

        grp->node[0].release = &group_open_release;
        grp->node[1].release = &group_close_release;
        INIT_LIST_HEAD(&grp->node[0].entry);
        INIT_LIST_HEAD(&grp->node[1].entry);
        set_node_dbginfo(&grp->node[0], "grp<", 0);
        set_node_dbginfo(&grp->node[1], "grp>", 0);
        grp->id = grp;
        if (id)
                grp->id = id;

        spin_lock_irqsave(&dev->devres_lock, flags);
        add_dr(dev, &grp->node[0]);
        spin_unlock_irqrestore(&dev->devres_lock, flags);
        return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
        struct devres_node *node;

        list_for_each_entry_reverse(node, &dev->devres_head, entry) {
                struct devres_group *grp;

                if (node->release != &group_open_release)
                        continue;

                grp = container_of(node, struct devres_group, node[0]);

                if (id) {
                        if (grp->id == id)
                                return grp;
                } else if (list_empty(&grp->node[1].entry))
                        return grp;
        }

        return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
        struct devres_group *grp;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);

        grp = find_group(dev, id);
        if (grp)
                add_dr(dev, &grp->node[1]);
        else
                WARN_ON(1);

        spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
        struct devres_group *grp;
        unsigned long flags;

        spin_lock_irqsave(&dev->devres_lock, flags);

        grp = find_group(dev, id);
        if (grp) {
                list_del_init(&grp->node[0].entry);
                list_del_init(&grp->node[1].entry);
                devres_log(dev, &grp->node[0], "REM");
        } else
                WARN_ON(1);

        spin_unlock_irqrestore(&dev->devres_lock, flags);

        kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
        struct devres_group *grp;
        unsigned long flags;
        LIST_HEAD(todo);
        int cnt = 0;

        spin_lock_irqsave(&dev->devres_lock, flags);

        grp = find_group(dev, id);
        if (grp) {
                struct list_head *first = &grp->node[0].entry;
                struct list_head *end = &dev->devres_head;

                if (!list_empty(&grp->node[1].entry))
                        end = grp->node[1].entry.next;

                cnt = remove_nodes(dev, first, end, &todo);
                spin_unlock_irqrestore(&dev->devres_lock, flags);

                release_nodes(dev, &todo);
        } else {
                WARN_ON(1);
                spin_unlock_irqrestore(&dev->devres_lock, flags);
        }

        return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
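
/*
 * Example (a minimal sketch): rolling back a failed sub-initialization
 * with a devres group.  foo_request_resources() is a hypothetical
 * helper that internally acquires several managed resources.
 *
 *	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	ret = foo_request_resources(dev);
 *	if (ret) {
 *		devres_release_group(dev, NULL);	// undo partial setup
 *		return ret;
 *	}
 *	devres_close_group(dev, NULL);
 */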

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
        void *data;
        void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
        struct action_devres *devres = res;
        struct action_devres *target = p;

        return devres->action == target->action &&
               devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
        struct action_devres *devres = res;

        devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 *
 * RETURNS:
 * 0 on success, -ENOMEM if the devres could not be allocated.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
        struct action_devres *devres;

        devres = devres_alloc(devm_action_release,
                              sizeof(struct action_devres), GFP_KERNEL);
        if (!devres)
                return -ENOMEM;

        devres->data = data;
        devres->action = action;

        devres_add(dev, devres);
        return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
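
/*
 * Example (sketch): registering a managed undo callback.  foo_enable(),
 * foo_disable() and "foo" are hypothetical.
 *
 *	static void foo_disable_action(void *data)
 *	{
 *		foo_disable(data);
 *	}
 *
 *	ret = foo_enable(foo);
 *	if (ret)
 *		return ret;
 *	ret = devm_add_action(dev, foo_disable_action, foo);
 *	if (ret) {
 *		foo_disable(foo);	// registration failed, undo by hand
 *		return ret;
 *	}
 *
 * The devm_add_action_or_reset() helper in <linux/device.h> wraps this
 * failure handling.
 */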

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
        struct action_devres devres = {
                .data = data,
                .action = action,
        };

        WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
                               &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
        struct action_devres devres = {
                .data = data,
                .action = action,
        };

        WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
                               &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
        /* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
        return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
        struct devres *dr;

        if (unlikely(!size))
                return ZERO_SIZE_PTR;

        /* use raw alloc_dr for kmalloc caller tracing */
        dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
        if (unlikely(!dr))
                return NULL;

        /*
         * This is named devm_kzalloc_release for historical reasons:
         * the initial implementation supported only kzalloc, not kmalloc.
         */
        set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
        devres_add(dev, dr->data);
        return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
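
/*
 * Example (sketch): the typical probe()-time allocation.  "struct
 * foo_priv" is hypothetical; devm_kzalloc() is the zeroing wrapper
 * from <linux/device.h>.
 *
 *	struct foo_priv *priv;
 *
 *	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *	if (!priv)
 *		return -ENOMEM;
 *	// freed automatically when the driver detaches
 */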

/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc(). If @new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when @new_size is zero). The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 *
 * RETURNS:
 * Pointer to the resized memory on success (which may differ from @ptr),
 * NULL on failure, in which case the original memory is left untouched.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
        size_t total_new_size, total_old_size;
        struct devres *old_dr, *new_dr;
        unsigned long flags;

        if (unlikely(!new_size)) {
                devm_kfree(dev, ptr);
                return ZERO_SIZE_PTR;
        }

        if (unlikely(ZERO_OR_NULL_PTR(ptr)))
                return devm_kmalloc(dev, new_size, gfp);

        if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
                /*
                 * We cannot reliably realloc a const string returned by
                 * devm_kstrdup_const().
                 */
                return NULL;

        if (!check_dr_size(new_size, &total_new_size))
                return NULL;

        total_old_size = ksize(container_of(ptr, struct devres, data));
        if (total_old_size == 0) {
                WARN(1, "Pointer doesn't point to dynamically allocated memory.");
                return NULL;
        }

        /*
         * If new size is smaller or equal to the actual number of bytes
         * allocated previously - just return the same pointer.
         */
        if (total_new_size <= total_old_size)
                return ptr;

        /*
         * Otherwise: allocate new, larger chunk. We need to allocate before
         * taking the lock as most probably the caller uses GFP_KERNEL.
         */
        new_dr = alloc_dr(devm_kmalloc_release,
                          total_new_size, gfp, dev_to_node(dev));
        if (!new_dr)
                return NULL;

        /*
         * The spinlock protects the linked list against concurrent
         * modifications but not the resource itself.
         */
        spin_lock_irqsave(&dev->devres_lock, flags);

        old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
        if (!old_dr) {
                spin_unlock_irqrestore(&dev->devres_lock, flags);
                kfree(new_dr);
                WARN(1, "Memory chunk not managed or managed by a different device.");
                return NULL;
        }

        replace_dr(dev, &old_dr->node, &new_dr->node);

        spin_unlock_irqrestore(&dev->devres_lock, flags);

        /*
         * We can copy the memory contents after releasing the lock as we're
         * no longer modifying the list links.
         */
        memcpy(new_dr->data, old_dr->data,
               total_old_size - offsetof(struct devres, data));
        /*
         * Same for releasing the old devres - it's now been removed from the
         * list. This is also the reason why we must not use devm_kfree() - the
         * links are no longer valid.
         */
        kfree(old_dr);

        return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);
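
/*
 * Example (sketch): growing a buffer previously obtained from
 * devm_kmalloc() for the same @dev.  "buf" and "new_len" are
 * hypothetical.
 *
 *	new_buf = devm_krealloc(dev, buf, new_len, GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;	// "buf" is still valid and still managed
 *	buf = new_buf;
 */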

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
        size_t size;
        char *buf;

        if (!s)
                return NULL;

        size = strlen(s) + 1;
        buf = devm_kmalloc(dev, size, gfp);
        if (buf)
                memcpy(buf, s, size);
        return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const() will be automatically freed
 * when the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section otherwise it falls back to
 * devm_kstrdup().
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *                   into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
                      va_list ap)
{
        unsigned int len;
        char *p;
        va_list aq;

        va_copy(aq, ap);
        len = vsnprintf(NULL, 0, fmt, aq);
        va_end(aq);

        p = devm_kmalloc(dev, len+1, gfp);
        if (!p)
                return NULL;

        vsnprintf(p, len+1, fmt, ap);

        return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *                  into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
        va_list ap;
        char *p;

        va_start(ap, fmt);
        p = devm_kvasprintf(dev, gfp, fmt, ap);
        va_end(ap);

        return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
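
/*
 * Example (sketch): building a managed name string.  "idx" is a
 * hypothetical instance number.
 *
 *	name = devm_kasprintf(dev, GFP_KERNEL, "foo%d", idx);
 *	if (!name)
 *		return -ENOMEM;
 */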

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
        int rc;

        /*
         * Special cases: pointer to a string in .rodata returned by
         * devm_kstrdup_const() or NULL/ZERO ptr.
         */
        if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
                return;

        rc = devres_destroy(dev, devm_kmalloc_release,
                            devm_kmalloc_match, (void *)p);
        WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 *
 * RETURNS:
 * Pointer to the duplicated memory on success, NULL on failure.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = devm_kmalloc(dev, len, gfp);
        if (p)
                memcpy(p, src, len);

        return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
        unsigned long addr;
        unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
        struct pages_devres *devres = res;
        struct pages_devres *target = p;

        return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
        struct pages_devres *devres = res;

        free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
                                  gfp_t gfp_mask, unsigned int order)
{
        struct pages_devres *devres;
        unsigned long addr;

        addr = __get_free_pages(gfp_mask, order);

        if (unlikely(!addr))
                return 0;

        devres = devres_alloc(devm_pages_release,
                              sizeof(struct pages_devres), GFP_KERNEL);
        if (unlikely(!devres)) {
                free_pages(addr, order);
                return 0;
        }

        devres->addr = addr;
        devres->order = order;

        devres_add(dev, devres);
        return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
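
/*
 * Example (sketch): a managed buffer of two pages (order 1 means
 * 1 << 1 pages).
 *
 *	addr = devm_get_free_pages(dev, GFP_KERNEL, 1);
 *	if (!addr)
 *		return -ENOMEM;
 */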

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
        struct pages_devres devres = { .addr = addr };

        WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
                               &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
        void __percpu *p;

        p = *(void __percpu **)pdata;
        free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
        struct devres *devr = container_of(data, struct devres, data);

        return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
                size_t align)
{
        void *p;
        void __percpu *pcpu;

        pcpu = __alloc_percpu(size, align);
        if (!pcpu)
                return NULL;

        p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
        if (!p) {
                free_percpu(pcpu);
                return NULL;
        }

        *(void __percpu **)p = pcpu;

        devres_add(dev, p);

        return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
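
/*
 * Example (sketch): per-cpu statistics freed on detach.  "struct
 * foo_stats" is hypothetical; devm_alloc_percpu() is the type-safe
 * wrapper macro from <linux/device.h>.
 *
 *	struct foo_stats __percpu *stats;
 *
 *	stats = devm_alloc_percpu(dev, struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 */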

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
        WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
                               (__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);