linux/drivers/base/memory.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Memory subsystem support
   4 *
   5 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
   6 *            Dave Hansen <haveblue@us.ibm.com>
   7 *
   8 * This file provides the necessary infrastructure to represent
   9 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
  10 * All arch-independent code that assumes MEMORY_HOTPLUG requires
  11 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
  12 */
  13
  14#include <linux/module.h>
  15#include <linux/init.h>
  16#include <linux/topology.h>
  17#include <linux/capability.h>
  18#include <linux/device.h>
  19#include <linux/memory.h>
  20#include <linux/memory_hotplug.h>
  21#include <linux/mm.h>
  22#include <linux/stat.h>
  23#include <linux/slab.h>
  24#include <linux/xarray.h>
  25
  26#include <linux/atomic.h>
  27#include <linux/uaccess.h>
  28
  29#define MEMORY_CLASS_NAME       "memory"
  30
  31static const char *const online_type_to_str[] = {
  32        [MMOP_OFFLINE] = "offline",
  33        [MMOP_ONLINE] = "online",
  34        [MMOP_ONLINE_KERNEL] = "online_kernel",
  35        [MMOP_ONLINE_MOVABLE] = "online_movable",
  36};
  37
  38int mhp_online_type_from_str(const char *str)
  39{
  40        int i;
  41
  42        for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
  43                if (sysfs_streq(str, online_type_to_str[i]))
  44                        return i;
  45        }
  46        return -EINVAL;
  47}
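/*
 * Illustrative note (not from the original source): because sysfs_streq() is
 * used for the comparison, a trailing newline from userspace is accepted, so
 * both "online_kernel" and "online_kernel\n" map to MMOP_ONLINE_KERNEL, while
 * any unknown string yields -EINVAL.
 */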
  48
  49#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
  50
  51static int sections_per_block;
  52
  53static inline unsigned long memory_block_id(unsigned long section_nr)
  54{
  55        return section_nr / sections_per_block;
  56}
  57
  58static inline unsigned long pfn_to_block_id(unsigned long pfn)
  59{
  60        return memory_block_id(pfn_to_section_nr(pfn));
  61}
  62
  63static inline unsigned long phys_to_block_id(unsigned long phys)
  64{
  65        return pfn_to_block_id(PFN_DOWN(phys));
  66}
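/*
 * Worked example (a sketch assuming the common x86-64 defaults of 128 MiB
 * sections and a 128 MiB memory block size, i.e. sections_per_block == 1):
 * physical address 0x100000000 (4 GiB) has pfn 0x100000, which lies in
 * section 32, so phys_to_block_id(0x100000000) == 32 and the block shows up
 * as /sys/devices/system/memory/memory32.
 */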
  67
  68static int memory_subsys_online(struct device *dev);
  69static int memory_subsys_offline(struct device *dev);
  70
  71static struct bus_type memory_subsys = {
  72        .name = MEMORY_CLASS_NAME,
  73        .dev_name = MEMORY_CLASS_NAME,
  74        .online = memory_subsys_online,
  75        .offline = memory_subsys_offline,
  76};
  77
  78/*
   79 * Memory blocks are cached in a local xarray to avoid
  80 * a costly linear search for the corresponding device on
  81 * the subsystem bus.
  82 */
  83static DEFINE_XARRAY(memory_blocks);
  84
  85/*
  86 * Memory groups, indexed by memory group id (mgid).
  87 */
  88static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
  89#define MEMORY_GROUP_MARK_DYNAMIC       XA_MARK_1
  90
  91static BLOCKING_NOTIFIER_HEAD(memory_chain);
  92
  93int register_memory_notifier(struct notifier_block *nb)
  94{
  95        return blocking_notifier_chain_register(&memory_chain, nb);
  96}
  97EXPORT_SYMBOL(register_memory_notifier);
  98
  99void unregister_memory_notifier(struct notifier_block *nb)
 100{
 101        blocking_notifier_chain_unregister(&memory_chain, nb);
 102}
 103EXPORT_SYMBOL(unregister_memory_notifier);
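/*
 * Usage sketch (illustrative only; the callback and variable names below are
 * made up): a subsystem that wants to react to memory block state transitions
 * registers a notifier on this chain and receives a struct memory_notify.
 *
 *        static int example_mem_cb(struct notifier_block *nb,
 *                                  unsigned long action, void *arg)
 *        {
 *                struct memory_notify *mn = arg;
 *
 *                if (action == MEM_GOING_OFFLINE)
 *                        pr_info("going offline: pfn %lx, %lu pages\n",
 *                                mn->start_pfn, mn->nr_pages);
 *                return NOTIFY_OK;
 *        }
 *
 *        static struct notifier_block example_mem_nb = {
 *                .notifier_call = example_mem_cb,
 *        };
 *
 *        register_memory_notifier(&example_mem_nb);
 */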
 104
 105static void memory_block_release(struct device *dev)
 106{
 107        struct memory_block *mem = to_memory_block(dev);
 108
 109        kfree(mem);
 110}
 111
 112unsigned long __weak memory_block_size_bytes(void)
 113{
 114        return MIN_MEMORY_BLOCK_SIZE;
 115}
 116EXPORT_SYMBOL_GPL(memory_block_size_bytes);
 117
 118/*
 119 * Show the first physical section index (number) of this memory block.
 120 */
 121static ssize_t phys_index_show(struct device *dev,
 122                               struct device_attribute *attr, char *buf)
 123{
 124        struct memory_block *mem = to_memory_block(dev);
 125        unsigned long phys_index;
 126
 127        phys_index = mem->start_section_nr / sections_per_block;
 128
 129        return sysfs_emit(buf, "%08lx\n", phys_index);
 130}
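/*
 * Example (illustrative): with sections_per_block == 1, the block covering
 * section 32 is /sys/devices/system/memory/memory32, and reading its
 * "phys_index" attribute returns "00000020" (the block id as zero-padded hex).
 */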
 131
 132/*
 133 * Legacy interface that we cannot remove. Always indicate "removable"
  134 * when CONFIG_MEMORY_HOTREMOVE is enabled - a bad heuristic.
 135 */
 136static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
 137                              char *buf)
 138{
 139        return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
 140}
 141
 142/*
 143 * online, offline, going offline, etc.
 144 */
 145static ssize_t state_show(struct device *dev, struct device_attribute *attr,
 146                          char *buf)
 147{
 148        struct memory_block *mem = to_memory_block(dev);
 149        const char *output;
 150
 151        /*
 152         * We can probably put these states in a nice little array
 153         * so that they're not open-coded
 154         */
 155        switch (mem->state) {
 156        case MEM_ONLINE:
 157                output = "online";
 158                break;
 159        case MEM_OFFLINE:
 160                output = "offline";
 161                break;
 162        case MEM_GOING_OFFLINE:
 163                output = "going-offline";
 164                break;
 165        default:
 166                WARN_ON(1);
 167                return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
 168        }
 169
 170        return sysfs_emit(buf, "%s\n", output);
 171}
 172
 173int memory_notify(unsigned long val, void *v)
 174{
 175        return blocking_notifier_call_chain(&memory_chain, val, v);
 176}
 177
 178static int memory_block_online(struct memory_block *mem)
 179{
 180        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 181        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 182        unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
 183        struct zone *zone;
 184        int ret;
 185
 186        zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
 187                                  start_pfn, nr_pages);
 188
 189        /*
 190         * Although vmemmap pages have a different lifecycle than the pages
 191         * they describe (they remain until the memory is unplugged), doing
 192         * their initialization and accounting at memory onlining/offlining
  193         * stage helps to keep accounting easier to follow - e.g. vmemmap
  194         * pages belong to the same zone as the memory they describe.
 195         */
 196        if (nr_vmemmap_pages) {
 197                ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
 198                if (ret)
 199                        return ret;
 200        }
 201
 202        ret = online_pages(start_pfn + nr_vmemmap_pages,
 203                           nr_pages - nr_vmemmap_pages, zone, mem->group);
 204        if (ret) {
 205                if (nr_vmemmap_pages)
 206                        mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
 207                return ret;
 208        }
 209
 210        /*
 211         * Account once onlining succeeded. If the zone was unpopulated, it is
 212         * now already properly populated.
 213         */
 214        if (nr_vmemmap_pages)
 215                adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
 216                                          nr_vmemmap_pages);
 217
 218        return ret;
 219}
 220
 221static int memory_block_offline(struct memory_block *mem)
 222{
 223        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 224        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 225        unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
 226        int ret;
 227
 228        /*
 229         * Unaccount before offlining, such that unpopulated zone and kthreads
 230         * can properly be torn down in offline_pages().
 231         */
 232        if (nr_vmemmap_pages)
 233                adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
 234                                          -nr_vmemmap_pages);
 235
 236        ret = offline_pages(start_pfn + nr_vmemmap_pages,
 237                            nr_pages - nr_vmemmap_pages, mem->group);
 238        if (ret) {
 239                /* offline_pages() failed. Account back. */
 240                if (nr_vmemmap_pages)
 241                        adjust_present_page_count(pfn_to_page(start_pfn),
 242                                                  mem->group, nr_vmemmap_pages);
 243                return ret;
 244        }
 245
 246        if (nr_vmemmap_pages)
 247                mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
 248
 249        return ret;
 250}
 251
 252/*
 253 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 254 * OK to have direct references to sparsemem variables in here.
 255 */
 256static int
 257memory_block_action(struct memory_block *mem, unsigned long action)
 258{
 259        int ret;
 260
 261        switch (action) {
 262        case MEM_ONLINE:
 263                ret = memory_block_online(mem);
 264                break;
 265        case MEM_OFFLINE:
 266                ret = memory_block_offline(mem);
 267                break;
 268        default:
  269                WARN(1, "%s(%ld) unknown action: %ld\n",
  270                     __func__, mem->start_section_nr, action);
 271                ret = -EINVAL;
 272        }
 273
 274        return ret;
 275}
 276
 277static int memory_block_change_state(struct memory_block *mem,
 278                unsigned long to_state, unsigned long from_state_req)
 279{
 280        int ret = 0;
 281
 282        if (mem->state != from_state_req)
 283                return -EINVAL;
 284
 285        if (to_state == MEM_OFFLINE)
 286                mem->state = MEM_GOING_OFFLINE;
 287
 288        ret = memory_block_action(mem, to_state);
 289        mem->state = ret ? from_state_req : to_state;
 290
 291        return ret;
 292}
 293
 294/* The device lock serializes operations on memory_subsys_[online|offline] */
 295static int memory_subsys_online(struct device *dev)
 296{
 297        struct memory_block *mem = to_memory_block(dev);
 298        int ret;
 299
 300        if (mem->state == MEM_ONLINE)
 301                return 0;
 302
 303        /*
 304         * When called via device_online() without configuring the online_type,
 305         * we want to default to MMOP_ONLINE.
 306         */
 307        if (mem->online_type == MMOP_OFFLINE)
 308                mem->online_type = MMOP_ONLINE;
 309
 310        ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
 311        mem->online_type = MMOP_OFFLINE;
 312
 313        return ret;
 314}
 315
 316static int memory_subsys_offline(struct device *dev)
 317{
 318        struct memory_block *mem = to_memory_block(dev);
 319
 320        if (mem->state == MEM_OFFLINE)
 321                return 0;
 322
 323        return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
 324}
 325
 326static ssize_t state_store(struct device *dev, struct device_attribute *attr,
 327                           const char *buf, size_t count)
 328{
 329        const int online_type = mhp_online_type_from_str(buf);
 330        struct memory_block *mem = to_memory_block(dev);
 331        int ret;
 332
 333        if (online_type < 0)
 334                return -EINVAL;
 335
 336        ret = lock_device_hotplug_sysfs();
 337        if (ret)
 338                return ret;
 339
 340        switch (online_type) {
 341        case MMOP_ONLINE_KERNEL:
 342        case MMOP_ONLINE_MOVABLE:
 343        case MMOP_ONLINE:
 344                /* mem->online_type is protected by device_hotplug_lock */
 345                mem->online_type = online_type;
 346                ret = device_online(&mem->dev);
 347                break;
 348        case MMOP_OFFLINE:
 349                ret = device_offline(&mem->dev);
 350                break;
 351        default:
 352                ret = -EINVAL; /* should never happen */
 353        }
 354
 355        unlock_device_hotplug();
 356
 357        if (ret < 0)
 358                return ret;
 359        if (ret)
 360                return -EINVAL;
 361
 362        return count;
 363}
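/*
 * Usage sketch (illustrative; see Documentation/admin-guide/mm/memory-hotplug.rst
 * for the authoritative description): the block state is driven from userspace
 * through this attribute, e.g.
 *
 *        # cat /sys/devices/system/memory/memory32/state
 *        offline
 *        # echo online_movable > /sys/devices/system/memory/memory32/state
 *        # cat /sys/devices/system/memory/memory32/state
 *        online
 *
 * Anything other than "online", "online_kernel", "online_movable" or "offline"
 * is rejected with -EINVAL.
 */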
 364
 365/*
 366 * Legacy interface that we cannot remove: s390x exposes the storage increment
 367 * covered by a memory block, allowing for identifying which memory blocks
 368 * comprise a storage increment. Since a memory block spans complete
 369 * storage increments nowadays, this interface is basically unused. Other
  370 * architectures have only ever exposed 0.
 371 */
 372static ssize_t phys_device_show(struct device *dev,
 373                                struct device_attribute *attr, char *buf)
 374{
 375        struct memory_block *mem = to_memory_block(dev);
 376        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 377
 378        return sysfs_emit(buf, "%d\n",
 379                          arch_get_memory_phys_device(start_pfn));
 380}
 381
 382#ifdef CONFIG_MEMORY_HOTREMOVE
 383static int print_allowed_zone(char *buf, int len, int nid,
 384                              struct memory_group *group,
 385                              unsigned long start_pfn, unsigned long nr_pages,
 386                              int online_type, struct zone *default_zone)
 387{
 388        struct zone *zone;
 389
 390        zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
 391        if (zone == default_zone)
 392                return 0;
 393
 394        return sysfs_emit_at(buf, len, " %s", zone->name);
 395}
 396
 397static ssize_t valid_zones_show(struct device *dev,
 398                                struct device_attribute *attr, char *buf)
 399{
 400        struct memory_block *mem = to_memory_block(dev);
 401        unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 402        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 403        struct memory_group *group = mem->group;
 404        struct zone *default_zone;
 405        int nid = mem->nid;
 406        int len = 0;
 407
 408        /*
  409         * Check the existing zone. Only do this for online memory blocks,
  410         * otherwise page_zone() is not reliable.
 411         */
 412        if (mem->state == MEM_ONLINE) {
 413                /*
  414                 * A block that contains more than one zone cannot be offlined.
  415                 * This can happen e.g. for ZONE_DMA and ZONE_DMA32.
 416                 */
 417                default_zone = test_pages_in_a_zone(start_pfn,
 418                                                    start_pfn + nr_pages);
 419                if (!default_zone)
 420                        return sysfs_emit(buf, "%s\n", "none");
 421                len += sysfs_emit_at(buf, len, "%s", default_zone->name);
 422                goto out;
 423        }
 424
 425        default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
 426                                          start_pfn, nr_pages);
 427
 428        len += sysfs_emit_at(buf, len, "%s", default_zone->name);
 429        len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
 430                                  MMOP_ONLINE_KERNEL, default_zone);
 431        len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
 432                                  MMOP_ONLINE_MOVABLE, default_zone);
 433out:
 434        len += sysfs_emit_at(buf, len, "\n");
 435        return len;
 436}
 437static DEVICE_ATTR_RO(valid_zones);
 438#endif
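/*
 * Example output (illustrative): for an offline block, "valid_zones" lists the
 * default zone first followed by the other zones the block could be onlined
 * to, e.g. "Normal Movable"; for an online block it shows only the zone the
 * memory currently sits in, or "none" if the block spans more than one zone.
 */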
 439
 440static DEVICE_ATTR_RO(phys_index);
 441static DEVICE_ATTR_RW(state);
 442static DEVICE_ATTR_RO(phys_device);
 443static DEVICE_ATTR_RO(removable);
 444
 445/*
 446 * Show the memory block size (shared by all memory blocks).
 447 */
 448static ssize_t block_size_bytes_show(struct device *dev,
 449                                     struct device_attribute *attr, char *buf)
 450{
 451        return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
 452}
 453
 454static DEVICE_ATTR_RO(block_size_bytes);
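/*
 * Example (illustrative): the size is printed in hex without a "0x" prefix,
 * so with a 128 MiB block size:
 *
 *        # cat /sys/devices/system/memory/block_size_bytes
 *        8000000
 */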
 455
 456/*
 457 * Memory auto online policy.
 458 */
 459
 460static ssize_t auto_online_blocks_show(struct device *dev,
 461                                       struct device_attribute *attr, char *buf)
 462{
 463        return sysfs_emit(buf, "%s\n",
 464                          online_type_to_str[mhp_default_online_type]);
 465}
 466
 467static ssize_t auto_online_blocks_store(struct device *dev,
 468                                        struct device_attribute *attr,
 469                                        const char *buf, size_t count)
 470{
 471        const int online_type = mhp_online_type_from_str(buf);
 472
 473        if (online_type < 0)
 474                return -EINVAL;
 475
 476        mhp_default_online_type = online_type;
 477        return count;
 478}
 479
 480static DEVICE_ATTR_RW(auto_online_blocks);
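/*
 * Usage sketch (illustrative): the policy applied to newly hot-added memory
 * blocks can be changed at runtime, e.g.
 *
 *        # echo online_movable > /sys/devices/system/memory/auto_online_blocks
 *
 * Valid values are the strings in online_type_to_str[]: "offline", "online",
 * "online_kernel" and "online_movable".
 */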
 481
 482/*
 483 * Some architectures will have custom drivers to do this, and
 484 * will not need to do it from userspace.  The fake hot-add code
 485 * as well as ppc64 will do all of their discovery in userspace
 486 * and will require this interface.
 487 */
 488#ifdef CONFIG_ARCH_MEMORY_PROBE
 489static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
 490                           const char *buf, size_t count)
 491{
 492        u64 phys_addr;
 493        int nid, ret;
 494        unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
 495
 496        ret = kstrtoull(buf, 0, &phys_addr);
 497        if (ret)
 498                return ret;
 499
 500        if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
 501                return -EINVAL;
 502
 503        ret = lock_device_hotplug_sysfs();
 504        if (ret)
 505                return ret;
 506
 507        nid = memory_add_physaddr_to_nid(phys_addr);
 508        ret = __add_memory(nid, phys_addr,
 509                           MIN_MEMORY_BLOCK_SIZE * sections_per_block,
 510                           MHP_NONE);
 511
 512        if (ret)
 513                goto out;
 514
 515        ret = count;
 516out:
 517        unlock_device_hotplug();
 518        return ret;
 519}
 520
 521static DEVICE_ATTR_WO(probe);
 522#endif
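/*
 * Usage sketch for the probe interface above (illustrative; only built with
 * CONFIG_ARCH_MEMORY_PROBE): the written physical address must be aligned to
 * the memory block size, e.g. with 128 MiB blocks:
 *
 *        # echo 0x100000000 > /sys/devices/system/memory/probe
 *
 * On success, new memoryXXX block devices appear for the probed range.
 */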
 523
 524#ifdef CONFIG_MEMORY_FAILURE
 525/*
 526 * Support for offlining pages of memory
 527 */
 528
 529/* Soft offline a page */
 530static ssize_t soft_offline_page_store(struct device *dev,
 531                                       struct device_attribute *attr,
 532                                       const char *buf, size_t count)
 533{
 534        int ret;
 535        u64 pfn;
 536        if (!capable(CAP_SYS_ADMIN))
 537                return -EPERM;
 538        if (kstrtoull(buf, 0, &pfn) < 0)
 539                return -EINVAL;
 540        pfn >>= PAGE_SHIFT;
 541        ret = soft_offline_page(pfn, 0);
 542        return ret == 0 ? count : ret;
 543}
 544
 545/* Forcibly offline a page, including killing processes. */
 546static ssize_t hard_offline_page_store(struct device *dev,
 547                                       struct device_attribute *attr,
 548                                       const char *buf, size_t count)
 549{
 550        int ret;
 551        u64 pfn;
 552        if (!capable(CAP_SYS_ADMIN))
 553                return -EPERM;
 554        if (kstrtoull(buf, 0, &pfn) < 0)
 555                return -EINVAL;
 556        pfn >>= PAGE_SHIFT;
 557        ret = memory_failure(pfn, 0);
 558        return ret ? ret : count;
 559}
 560
 561static DEVICE_ATTR_WO(soft_offline_page);
 562static DEVICE_ATTR_WO(hard_offline_page);
 563#endif
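/*
 * Usage sketch for the two attributes above (illustrative; CAP_SYS_ADMIN and
 * CONFIG_MEMORY_FAILURE required): both take a physical address in bytes,
 * which is converted to a pfn internally, e.g.
 *
 *        # echo 0x200000000 > /sys/devices/system/memory/soft_offline_page
 *        # echo 0x200000000 > /sys/devices/system/memory/hard_offline_page
 *
 * Soft offlining tries to migrate the page contents away first; hard offlining
 * behaves as if a memory error had been reported at that address.
 */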
 564
 565/* See phys_device_show(). */
 566int __weak arch_get_memory_phys_device(unsigned long start_pfn)
 567{
 568        return 0;
 569}
 570
 571/*
 572 * A reference for the returned memory block device is acquired.
 573 *
 574 * Called under device_hotplug_lock.
 575 */
 576static struct memory_block *find_memory_block_by_id(unsigned long block_id)
 577{
 578        struct memory_block *mem;
 579
 580        mem = xa_load(&memory_blocks, block_id);
 581        if (mem)
 582                get_device(&mem->dev);
 583        return mem;
 584}
 585
 586/*
 587 * Called under device_hotplug_lock.
 588 */
 589struct memory_block *find_memory_block(unsigned long section_nr)
 590{
 591        unsigned long block_id = memory_block_id(section_nr);
 592
 593        return find_memory_block_by_id(block_id);
 594}
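/*
 * Illustrative note: both lookup helpers above return the memory_block with an
 * elevated device reference, so a typical caller (holding device_hotplug_lock)
 * looks like
 *
 *        mem = find_memory_block(pfn_to_section_nr(pfn));
 *        if (mem) {
 *                ...
 *                put_device(&mem->dev);
 *        }
 */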
 595
 596static struct attribute *memory_memblk_attrs[] = {
 597        &dev_attr_phys_index.attr,
 598        &dev_attr_state.attr,
 599        &dev_attr_phys_device.attr,
 600        &dev_attr_removable.attr,
 601#ifdef CONFIG_MEMORY_HOTREMOVE
 602        &dev_attr_valid_zones.attr,
 603#endif
 604        NULL
 605};
 606
 607static const struct attribute_group memory_memblk_attr_group = {
 608        .attrs = memory_memblk_attrs,
 609};
 610
 611static const struct attribute_group *memory_memblk_attr_groups[] = {
 612        &memory_memblk_attr_group,
 613        NULL,
 614};
 615
 616/*
 617 * register_memory - Setup a sysfs device for a memory block
 618 */
 619static
 620int register_memory(struct memory_block *memory)
 621{
 622        int ret;
 623
 624        memory->dev.bus = &memory_subsys;
 625        memory->dev.id = memory->start_section_nr / sections_per_block;
 626        memory->dev.release = memory_block_release;
 627        memory->dev.groups = memory_memblk_attr_groups;
 628        memory->dev.offline = memory->state == MEM_OFFLINE;
 629
 630        ret = device_register(&memory->dev);
 631        if (ret) {
 632                put_device(&memory->dev);
 633                return ret;
 634        }
 635        ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
 636                              GFP_KERNEL));
 637        if (ret) {
 638                put_device(&memory->dev);
 639                device_unregister(&memory->dev);
 640        }
 641        return ret;
 642}
 643
 644static int init_memory_block(unsigned long block_id, unsigned long state,
 645                             unsigned long nr_vmemmap_pages,
 646                             struct memory_group *group)
 647{
 648        struct memory_block *mem;
 649        int ret = 0;
 650
 651        mem = find_memory_block_by_id(block_id);
 652        if (mem) {
 653                put_device(&mem->dev);
 654                return -EEXIST;
 655        }
 656        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 657        if (!mem)
 658                return -ENOMEM;
 659
 660        mem->start_section_nr = block_id * sections_per_block;
 661        mem->state = state;
 662        mem->nid = NUMA_NO_NODE;
 663        mem->nr_vmemmap_pages = nr_vmemmap_pages;
 664        INIT_LIST_HEAD(&mem->group_next);
 665
 666        if (group) {
 667                mem->group = group;
 668                list_add(&mem->group_next, &group->memory_blocks);
 669        }
 670
 671        ret = register_memory(mem);
 672
 673        return ret;
 674}
 675
 676static int add_memory_block(unsigned long base_section_nr)
 677{
 678        int section_count = 0;
 679        unsigned long nr;
 680
 681        for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
 682             nr++)
 683                if (present_section_nr(nr))
 684                        section_count++;
 685
 686        if (section_count == 0)
 687                return 0;
 688        return init_memory_block(memory_block_id(base_section_nr),
 689                                 MEM_ONLINE, 0,  NULL);
 690}
 691
 692static void unregister_memory(struct memory_block *memory)
 693{
 694        if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
 695                return;
 696
 697        WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
 698
 699        if (memory->group) {
 700                list_del(&memory->group_next);
 701                memory->group = NULL;
 702        }
 703
 704        /* drop the ref. we got via find_memory_block() */
 705        put_device(&memory->dev);
 706        device_unregister(&memory->dev);
 707}
 708
 709/*
 710 * Create memory block devices for the given memory area. Start and size
 711 * have to be aligned to memory block granularity. Memory block devices
 712 * will be initialized as offline.
 713 *
 714 * Called under device_hotplug_lock.
 715 */
 716int create_memory_block_devices(unsigned long start, unsigned long size,
 717                                unsigned long vmemmap_pages,
 718                                struct memory_group *group)
 719{
 720        const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
 721        unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
 722        struct memory_block *mem;
 723        unsigned long block_id;
 724        int ret = 0;
 725
 726        if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
 727                         !IS_ALIGNED(size, memory_block_size_bytes())))
 728                return -EINVAL;
 729
 730        for (block_id = start_block_id; block_id != end_block_id; block_id++) {
 731                ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
 732                                        group);
 733                if (ret)
 734                        break;
 735        }
 736        if (ret) {
 737                end_block_id = block_id;
 738                for (block_id = start_block_id; block_id != end_block_id;
 739                     block_id++) {
 740                        mem = find_memory_block_by_id(block_id);
 741                        if (WARN_ON_ONCE(!mem))
 742                                continue;
 743                        unregister_memory(mem);
 744                }
 745        }
 746        return ret;
 747}
 748
 749/*
 750 * Remove memory block devices for the given memory area. Start and size
 751 * have to be aligned to memory block granularity. Memory block devices
 752 * have to be offline.
 753 *
 754 * Called under device_hotplug_lock.
 755 */
 756void remove_memory_block_devices(unsigned long start, unsigned long size)
 757{
 758        const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
 759        const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
 760        struct memory_block *mem;
 761        unsigned long block_id;
 762
 763        if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
 764                         !IS_ALIGNED(size, memory_block_size_bytes())))
 765                return;
 766
 767        for (block_id = start_block_id; block_id != end_block_id; block_id++) {
 768                mem = find_memory_block_by_id(block_id);
 769                if (WARN_ON_ONCE(!mem))
 770                        continue;
 771                unregister_memory_block_under_nodes(mem);
 772                unregister_memory(mem);
 773        }
 774}
 775
  776/* Return true if the memory block is offline, otherwise return false. */
 777bool is_memblock_offlined(struct memory_block *mem)
 778{
 779        return mem->state == MEM_OFFLINE;
 780}
 781
 782static struct attribute *memory_root_attrs[] = {
 783#ifdef CONFIG_ARCH_MEMORY_PROBE
 784        &dev_attr_probe.attr,
 785#endif
 786
 787#ifdef CONFIG_MEMORY_FAILURE
 788        &dev_attr_soft_offline_page.attr,
 789        &dev_attr_hard_offline_page.attr,
 790#endif
 791
 792        &dev_attr_block_size_bytes.attr,
 793        &dev_attr_auto_online_blocks.attr,
 794        NULL
 795};
 796
 797static const struct attribute_group memory_root_attr_group = {
 798        .attrs = memory_root_attrs,
 799};
 800
 801static const struct attribute_group *memory_root_attr_groups[] = {
 802        &memory_root_attr_group,
 803        NULL,
 804};
 805
 806/*
 807 * Initialize the sysfs support for memory devices. At the time this function
 808 * is called, we cannot have concurrent creation/deletion of memory block
  809 * devices, so the device_hotplug_lock is not needed.
 810 */
 811void __init memory_dev_init(void)
 812{
 813        int ret;
 814        unsigned long block_sz, nr;
 815
 816        /* Validate the configured memory block size */
 817        block_sz = memory_block_size_bytes();
 818        if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
 819                panic("Memory block size not suitable: 0x%lx\n", block_sz);
 820        sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
 821
 822        ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
 823        if (ret)
 824                panic("%s() failed to register subsystem: %d\n", __func__, ret);
 825
 826        /*
 827         * Create entries for memory sections that were found
 828         * during boot and have been initialized
 829         */
 830        for (nr = 0; nr <= __highest_present_section_nr;
 831             nr += sections_per_block) {
 832                ret = add_memory_block(nr);
 833                if (ret)
 834                        panic("%s() failed to add memory block: %d\n", __func__,
 835                              ret);
 836        }
 837}
 838
 839/**
 840 * walk_memory_blocks - walk through all present memory blocks overlapped
 841 *                      by the range [start, start + size)
 842 *
 843 * @start: start address of the memory range
 844 * @size: size of the memory range
 845 * @arg: argument passed to func
  846 * @func: callback for each memory block walked
 847 *
 848 * This function walks through all present memory blocks overlapped by the
 849 * range [start, start + size), calling func on each memory block.
 850 *
 851 * In case func() returns an error, walking is aborted and the error is
 852 * returned.
 853 *
 854 * Called under device_hotplug_lock.
 855 */
 856int walk_memory_blocks(unsigned long start, unsigned long size,
 857                       void *arg, walk_memory_blocks_func_t func)
 858{
 859        const unsigned long start_block_id = phys_to_block_id(start);
 860        const unsigned long end_block_id = phys_to_block_id(start + size - 1);
 861        struct memory_block *mem;
 862        unsigned long block_id;
 863        int ret = 0;
 864
 865        if (!size)
 866                return 0;
 867
 868        for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
 869                mem = find_memory_block_by_id(block_id);
 870                if (!mem)
 871                        continue;
 872
 873                ret = func(mem, arg);
 874                put_device(&mem->dev);
 875                if (ret)
 876                        break;
 877        }
 878        return ret;
 879}
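/*
 * Usage sketch (illustrative; the callback name is made up): counting the
 * present memory blocks that overlap a physical range:
 *
 *        static int count_blocks_cb(struct memory_block *mem, void *arg)
 *        {
 *                (*(unsigned long *)arg)++;
 *                return 0;
 *        }
 *
 *        unsigned long nr_blocks = 0;
 *
 *        walk_memory_blocks(start, size, &nr_blocks, count_blocks_cb);
 */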
 880
 881struct for_each_memory_block_cb_data {
 882        walk_memory_blocks_func_t func;
 883        void *arg;
 884};
 885
 886static int for_each_memory_block_cb(struct device *dev, void *data)
 887{
 888        struct memory_block *mem = to_memory_block(dev);
 889        struct for_each_memory_block_cb_data *cb_data = data;
 890
 891        return cb_data->func(mem, cb_data->arg);
 892}
 893
 894/**
 895 * for_each_memory_block - walk through all present memory blocks
 896 *
 897 * @arg: argument passed to func
 898 * @func: callback for each memory block walked
 899 *
 900 * This function walks through all present memory blocks, calling func on
 901 * each memory block.
 902 *
 903 * In case func() returns an error, walking is aborted and the error is
 904 * returned.
 905 */
 906int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
 907{
 908        struct for_each_memory_block_cb_data cb_data = {
 909                .func = func,
 910                .arg = arg,
 911        };
 912
 913        return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
 914                                for_each_memory_block_cb);
 915}
 916
 917/*
 918 * This is an internal helper to unify allocation and initialization of
 919 * memory groups. Note that the passed memory group will be copied to a
 920 * dynamically allocated memory group. After this call, the passed
 921 * memory group should no longer be used.
 922 */
 923static int memory_group_register(struct memory_group group)
 924{
 925        struct memory_group *new_group;
 926        uint32_t mgid;
 927        int ret;
 928
 929        if (!node_possible(group.nid))
 930                return -EINVAL;
 931
 932        new_group = kzalloc(sizeof(group), GFP_KERNEL);
 933        if (!new_group)
 934                return -ENOMEM;
 935        *new_group = group;
 936        INIT_LIST_HEAD(&new_group->memory_blocks);
 937
 938        ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
 939                       GFP_KERNEL);
 940        if (ret) {
 941                kfree(new_group);
 942                return ret;
 943        } else if (group.is_dynamic) {
 944                xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
 945        }
 946        return mgid;
 947}
 948
 949/**
 950 * memory_group_register_static() - Register a static memory group.
 951 * @nid: The node id.
 952 * @max_pages: The maximum number of pages we'll have in this static memory
 953 *             group.
 954 *
 955 * Register a new static memory group and return the memory group id.
 956 * All memory in the group belongs to a single unit, such as a DIMM. All
  957 * memory belonging to a static memory group is added in one go and removed
 958 * in one go -- it's static.
 959 *
 960 * Returns an error if out of memory, if the node id is invalid, if no new
 961 * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
 962 * returns the new memory group id.
 963 */
 964int memory_group_register_static(int nid, unsigned long max_pages)
 965{
 966        struct memory_group group = {
 967                .nid = nid,
 968                .s = {
 969                        .max_pages = max_pages,
 970                },
 971        };
 972
 973        if (!max_pages)
 974                return -EINVAL;
 975        return memory_group_register(group);
 976}
 977EXPORT_SYMBOL_GPL(memory_group_register_static);
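/*
 * Usage sketch (illustrative, modelled on how a driver like dax/kmem is
 * expected to use static groups; the names and flag below are assumptions,
 * not taken from this file): register one group per device and hand the
 * returned mgid to the hotplug core when adding the memory, so all blocks of
 * the device end up in the same group.
 *
 *        mgid = memory_group_register_static(nid, PHYS_PFN(dev_size));
 *        if (mgid < 0)
 *                return mgid;
 *        rc = add_memory_driver_managed(mgid, dev_start, dev_size,
 *                                       "System RAM (example)", MHP_NID_IS_MGID);
 */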
 978
 979/**
 980 * memory_group_register_dynamic() - Register a dynamic memory group.
 981 * @nid: The node id.
  982 * @unit_pages: Unit in pages in which memory is added/removed in this dynamic
 983 *              memory group.
 984 *
 985 * Register a new dynamic memory group and return the memory group id.
 986 * Memory within a dynamic memory group is added/removed dynamically
 987 * in unit_pages.
 988 *
 989 * Returns an error if out of memory, if the node id is invalid, if no new
 990 * memory groups can be registered, or if unit_pages is invalid (0, not a
 991 * power of two, smaller than a single memory block). Otherwise, returns the
 992 * new memory group id.
 993 */
 994int memory_group_register_dynamic(int nid, unsigned long unit_pages)
 995{
 996        struct memory_group group = {
 997                .nid = nid,
 998                .is_dynamic = true,
 999                .d = {
1000                        .unit_pages = unit_pages,
1001                },
1002        };
1003
1004        if (!unit_pages || !is_power_of_2(unit_pages) ||
1005            unit_pages < PHYS_PFN(memory_block_size_bytes()))
1006                return -EINVAL;
1007        return memory_group_register(group);
1008}
1009EXPORT_SYMBOL_GPL(memory_group_register_dynamic);
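/*
 * Illustrative note: dynamic groups are intended for devices (e.g. virtio-mem
 * style drivers) that plug and unplug memory in fixed-size units at arbitrary
 * points in time; unit_pages records that granularity for per-group
 * accounting, and the returned mgid is passed to the hotplug core in the same
 * way as for static groups.
 */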
1010
1011/**
1012 * memory_group_unregister() - Unregister a memory group.
1013 * @mgid: the memory group id
1014 *
1015 * Unregister a memory group. If any memory block still belongs to this
1016 * memory group, unregistering will fail.
1017 *
1018 * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
1019 * memory blocks still belong to this memory group and returns 0 if
1020 * unregistering succeeded.
1021 */
1022int memory_group_unregister(int mgid)
1023{
1024        struct memory_group *group;
1025
1026        if (mgid < 0)
1027                return -EINVAL;
1028
1029        group = xa_load(&memory_groups, mgid);
1030        if (!group)
1031                return -EINVAL;
1032        if (!list_empty(&group->memory_blocks))
1033                return -EBUSY;
1034        xa_erase(&memory_groups, mgid);
1035        kfree(group);
1036        return 0;
1037}
1038EXPORT_SYMBOL_GPL(memory_group_unregister);
1039
1040/*
1041 * This is an internal helper only to be used in core memory hotplug code to
1042 * lookup a memory group. We don't care about locking, as we don't expect a
1043 * memory group to get unregistered while adding memory to it -- because
 1044 * the group and the memory are managed by the same driver.
1045 */
1046struct memory_group *memory_group_find_by_id(int mgid)
1047{
1048        return xa_load(&memory_groups, mgid);
1049}
1050
1051/*
1052 * This is an internal helper only to be used in core memory hotplug code to
1053 * walk all dynamic memory groups excluding a given memory group, either
1054 * belonging to a specific node, or belonging to any node.
1055 */
1056int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
1057                               struct memory_group *excluded, void *arg)
1058{
1059        struct memory_group *group;
1060        unsigned long index;
1061        int ret = 0;
1062
1063        xa_for_each_marked(&memory_groups, index, group,
1064                           MEMORY_GROUP_MARK_DYNAMIC) {
1065                if (group == excluded)
1066                        continue;
1067#ifdef CONFIG_NUMA
1068                if (nid != NUMA_NO_NODE && group->nid != nid)
1069                        continue;
1070#endif /* CONFIG_NUMA */
1071                ret = func(group, arg);
1072                if (ret)
1073                        break;
1074        }
1075        return ret;
1076}
1077