linux/lib/lmb.c
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#define LMB_ALLOC_ANYWHERE      0

struct lmb lmb;

static int lmb_debug;

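/*
 * Passing "lmb=debug" on the kernel command line makes lmb_dump_all()
 * print the memory and reserved region tables during boot.
 */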
static int __init early_lmb(char *p)
{
        if (p && strstr(p, "debug"))
                lmb_debug = 1;
        return 0;
}
early_param("lmb", early_lmb);

static void lmb_dump(struct lmb_region *region, char *name)
{
        unsigned long long base, size;
        int i;

        pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

        for (i = 0; i < region->cnt; i++) {
                base = region->region[i].base;
                size = region->region[i].size;

                pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
                    name, i, base, base + size - 1, size);
        }
}

void lmb_dump_all(void)
{
        if (!lmb_debug)
                return;

        pr_info("LMB configuration:\n");
        pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)lmb.rmo_size);
        pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);

        lmb_dump(&lmb.memory, "memory");
        lmb_dump(&lmb.reserved, "reserved");
}

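/*
 * Nonzero when the half-open ranges [base1, base1 + size1) and
 * [base2, base2 + size2) intersect.
 */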
static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
                                        u64 size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

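/*
 * Returns 1 if region 2 starts immediately after region 1 ends, -1 if
 * region 1 starts immediately after region 2 ends, and 0 if the two
 * are not adjacent.
 */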
static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn,
                unsigned long r1, unsigned long r2)
{
        u64 base1 = rgn->region[r1].base;
        u64 size1 = rgn->region[r1].size;
        u64 base2 = rgn->region[r2].base;
        u64 size2 = rgn->region[r2].size;

        return lmb_addrs_adjacent(base1, size1, base2, size2);
}

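/* Drop entry r from the table by sliding the following entries down one slot. */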
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
        unsigned long i;

        for (i = r; i < rgn->cnt - 1; i++) {
                rgn->region[i].base = rgn->region[i + 1].base;
                rgn->region[i].size = rgn->region[i + 1].size;
        }
        rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
                unsigned long r1, unsigned long r2)
{
        rgn->region[r1].size += rgn->region[r2].size;
        lmb_remove_region(rgn, r2);
}

void __init lmb_init(void)
{
        /* Create a dummy zero size LMB which will get coalesced away later.
         * This simplifies the lmb_add() code below...
         */
        lmb.memory.region[0].base = 0;
        lmb.memory.region[0].size = 0;
        lmb.memory.cnt = 1;

        /* Ditto. */
        lmb.reserved.region[0].base = 0;
        lmb.reserved.region[0].size = 0;
        lmb.reserved.cnt = 1;
}

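/* Recompute lmb.memory.size as the sum of all memory region sizes. */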
void __init lmb_analyze(void)
{
        int i;

        lmb.memory.size = 0;

        for (i = 0; i < lmb.memory.cnt; i++)
                lmb.memory.size += lmb.memory.region[i].size;
}

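/*
 * Add [base, base + size) to a region table, merging it with neighbouring
 * entries where possible and keeping the table sorted by base address.
 * Returns 0 (or a positive coalesce count) on success, -1 if the table
 * is already full.
 */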
static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
        unsigned long coalesced = 0;
        long adjacent, i;

        if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
                rgn->region[0].base = base;
                rgn->region[0].size = size;
                return 0;
        }

        /* First try to coalesce this LMB with another. */
        for (i = 0; i < rgn->cnt; i++) {
                u64 rgnbase = rgn->region[i].base;
                u64 rgnsize = rgn->region[i].size;

                if ((rgnbase == base) && (rgnsize == size))
                        /* Already have this region, so we're done */
                        return 0;

                adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
                if (adjacent > 0) {
                        /* New region ends where region i begins: grow i downward. */
                        rgn->region[i].base -= size;
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                } else if (adjacent < 0) {
                        /* New region begins where region i ends: grow i upward. */
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                }
        }

        /* Growing region i may have made it touch region i+1; merge them too. */
        if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
                lmb_coalesce_regions(rgn, i, i+1);
                coalesced++;
        }

        if (coalesced)
                return coalesced;
        if (rgn->cnt >= MAX_LMB_REGIONS)
                return -1;

        /* Couldn't coalesce the LMB, so add it to the sorted table. */
        for (i = rgn->cnt - 1; i >= 0; i--) {
                if (base < rgn->region[i].base) {
                        rgn->region[i+1].base = rgn->region[i].base;
                        rgn->region[i+1].size = rgn->region[i].size;
                } else {
                        rgn->region[i+1].base = base;
                        rgn->region[i+1].size = size;
                        break;
                }
        }

        /* New lowest base: every entry was shifted up above, so fill slot 0. */
        if (base < rgn->region[0].base) {
                rgn->region[0].base = base;
                rgn->region[0].size = size;
        }
        rgn->cnt++;

        return 0;
}

long lmb_add(u64 base, u64 size)
{
        struct lmb_region *_rgn = &lmb.memory;

        /* On pSeries LPAR systems, the first LMB is our RMO region. */
        if (base == 0)
                lmb.rmo_size = size;

        return lmb_add_region(_rgn, base, size);
}

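/*
 * Remove [base, base + size) from the memory table.  The range must lie
 * entirely within one existing region, which is then trimmed or split as
 * needed.  Returns 0 on success, -1 if no containing region was found.
 */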
long lmb_remove(u64 base, u64 size)
{
        struct lmb_region *rgn = &(lmb.memory);
        u64 rgnbegin, rgnend;
        u64 end = base + size;
        int i;

        rgnbegin = rgnend = 0; /* suppress gcc warnings */

        /* Find the region where (base, size) belongs to */
        for (i = 0; i < rgn->cnt; i++) {
                rgnbegin = rgn->region[i].base;
                rgnend = rgnbegin + rgn->region[i].size;

                if ((rgnbegin <= base) && (end <= rgnend))
                        break;
        }

        /* Didn't find the region */
        if (i == rgn->cnt)
                return -1;

        /* Check to see if we are removing entire region */
        if ((rgnbegin == base) && (rgnend == end)) {
                lmb_remove_region(rgn, i);
                return 0;
        }

        /* Check to see if region is matching at the front */
        if (rgnbegin == base) {
                rgn->region[i].base = end;
                rgn->region[i].size -= size;
                return 0;
        }

        /* Check to see if the region is matching at the end */
        if (rgnend == end) {
                rgn->region[i].size -= size;
                return 0;
        }

        /*
         * We need to split the entry - adjust the current one to the
         * beginning of the hole and add the region after the hole.
         */
        rgn->region[i].size = base - rgn->region[i].base;
        return lmb_add_region(rgn, end, rgnend - end);
}

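/* Mark [base, base + size) as reserved so later allocations steer clear of it. */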
long __init lmb_reserve(u64 base, u64 size)
{
        struct lmb_region *_rgn = &lmb.reserved;

        BUG_ON(0 == size);

        return lmb_add_region(_rgn, base, size);
}

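/*
 * Return the index of the first region in rgn that overlaps
 * [base, base + size), or -1 if there is no overlap.
 */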
long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
        unsigned long i;

        for (i = 0; i < rgn->cnt; i++) {
                u64 rgnbase = rgn->region[i].base;
                u64 rgnsize = rgn->region[i].size;
                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < rgn->cnt) ? i : -1;
}

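/* Round addr down (or up) to a multiple of size; size must be a power of two. */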
static u64 lmb_align_down(u64 addr, u64 size)
{
        return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
        return (addr + (size - 1)) & ~(size - 1);
}

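/*
 * Scan [start, end) from the top down for an aligned block of "size" bytes
 * that does not overlap any reserved region.  On success the block is
 * added to the reserved table and its base returned; ~(u64)0 means no fit
 * was found.
 */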
static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
                                           u64 size, u64 align)
{
        u64 base, res_base;
        long j;

        base = lmb_align_down((end - size), align);
        while (start <= base) {
                j = lmb_overlaps_region(&lmb.reserved, base, size);
                if (j < 0) {
                        /* this area isn't reserved, take it */
                        if (lmb_add_region(&lmb.reserved, base, size) < 0)
                                base = ~(u64)0;
                        return base;
                }
                res_base = lmb.reserved.region[j].base;
                if (res_base < size)
                        break;
                base = lmb_align_down(res_base - size, align);
        }

        return ~(u64)0;
}

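/*
 * Walk one memory region, using the caller-supplied nid_range() hook to
 * split it into per-node chunks, and try to allocate from the chunks that
 * belong to the requested node.
 */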
static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
                                       u64 (*nid_range)(u64, u64, int *),
                                       u64 size, u64 align, int nid)
{
        u64 start, end;

        start = mp->base;
        end = start + mp->size;

        start = lmb_align_up(start, align);
        while (start < end) {
                u64 this_end;
                int this_nid;

                this_end = nid_range(start, end, &this_nid);
                if (this_nid == nid) {
                        u64 ret = lmb_alloc_nid_unreserved(start, this_end,
                                                           size, align);
                        if (ret != ~(u64)0)
                                return ret;
                }
                start = this_end;
        }

        return ~(u64)0;
}

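/*
 * Node-local allocation: try each memory region for space on node "nid",
 * falling back to an ordinary lmb_alloc() if nothing node-local is free.
 */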
u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
                         u64 (*nid_range)(u64 start, u64 end, int *nid))
{
        struct lmb_region *mem = &lmb.memory;
        int i;

        BUG_ON(0 == size);

        size = lmb_align_up(size, align);

        for (i = 0; i < mem->cnt; i++) {
                u64 ret = lmb_alloc_nid_region(&mem->region[i],
                                               nid_range,
                                               size, align, nid);
                if (ret != ~(u64)0)
                        return ret;
        }

        return lmb_alloc(size, align);
}

u64 __init lmb_alloc(u64 size, u64 align)
{
        return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

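/* Like __lmb_alloc_base(), but panics instead of returning 0 on failure. */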
u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
        u64 alloc;

        alloc = __lmb_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
                      (unsigned long long) size, (unsigned long long) max_addr);

        return alloc;
}

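/*
 * Top-down allocator: walk the memory regions from the highest downwards,
 * looking for an aligned block below max_addr that misses every reserved
 * region.  The block found is added to the reserved table and its base
 * returned; 0 means the allocation failed.
 */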
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
        long i, j;
        u64 base = 0;
        u64 res_base;

        BUG_ON(0 == size);

        size = lmb_align_up(size, align);

        /* On some platforms, make sure we allocate lowmem */
        /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
        if (max_addr == LMB_ALLOC_ANYWHERE)
                max_addr = LMB_REAL_LIMIT;

        for (i = lmb.memory.cnt - 1; i >= 0; i--) {
                u64 lmbbase = lmb.memory.region[i].base;
                u64 lmbsize = lmb.memory.region[i].size;

                if (lmbsize < size)
                        continue;
                if (max_addr == LMB_ALLOC_ANYWHERE)
                        base = lmb_align_down(lmbbase + lmbsize - size, align);
                else if (lmbbase < max_addr) {
                        base = min(lmbbase + lmbsize, max_addr);
                        base = lmb_align_down(base - size, align);
                } else
                        continue;

                while (base && lmbbase <= base) {
                        j = lmb_overlaps_region(&lmb.reserved, base, size);
                        if (j < 0) {
                                /* this area isn't reserved, take it */
                                if (lmb_add_region(&lmb.reserved, base, size) < 0)
                                        return 0;
                                return base;
                        }
                        res_base = lmb.reserved.region[j].base;
                        if (res_base < size)
                                break;
                        base = lmb_align_down(res_base - size, align);
                }
        }
        return 0;
}

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
        return lmb.memory.size;
}

u64 lmb_end_of_DRAM(void)
{
        int idx = lmb.memory.cnt - 1;

        return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

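/*
 * Trim the memory map down to at most memory_limit bytes and drop or
 * truncate any reserved regions that now fall past the new end of RAM.
 */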
/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
        unsigned long i;
        u64 limit;
        struct lmb_property *p;

        if (!memory_limit)
                return;

        /* Truncate the lmb regions to satisfy the memory limit. */
        limit = memory_limit;
        for (i = 0; i < lmb.memory.cnt; i++) {
                if (limit > lmb.memory.region[i].size) {
                        limit -= lmb.memory.region[i].size;
                        continue;
                }

                lmb.memory.region[i].size = limit;
                lmb.memory.cnt = i + 1;
                break;
        }

        if (lmb.memory.region[0].size < lmb.rmo_size)
                lmb.rmo_size = lmb.memory.region[0].size;

        memory_limit = lmb_end_of_DRAM();

        /* And truncate any reserves above the limit also. */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                p = &lmb.reserved.region[i];

                if (p->base > memory_limit)
                        p->size = 0;
                else if ((p->base + p->size) > memory_limit)
                        p->size = memory_limit - p->base;

                if (p->size == 0) {
                        lmb_remove_region(&lmb.reserved, i);
                        i--;
                }
        }
}

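/* Nonzero if addr falls inside any reserved region. */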
int __init lmb_is_reserved(u64 addr)
{
        int i;

        for (i = 0; i < lmb.reserved.cnt; i++) {
                u64 upper = lmb.reserved.region[i].base +
                        lmb.reserved.region[i].size - 1;
                if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
                        return 1;
        }
        return 0;
}

/*
 * Given a <base, len> request in *res, find the first memory region that
 * overlaps it, clip the request to that region, and return the resulting
 * contiguous chunk in *res.  Returns 0 on success, -1 if nothing overlaps.
 */
int lmb_find(struct lmb_property *res)
{
        int i;
        u64 rstart, rend;

        rstart = res->base;
        rend = rstart + res->size - 1;

        for (i = 0; i < lmb.memory.cnt; i++) {
                u64 start = lmb.memory.region[i].base;
                u64 end = start + lmb.memory.region[i].size - 1;

                if (start > rend)
                        return -1;

                if ((end >= rstart) && (start < rend)) {
                        /* adjust the request */
                        if (rstart < start)
                                rstart = start;
                        if (rend > end)
                                rend = end;
                        res->base = rstart;
                        res->size = rend - rstart + 1;
                        return 0;
                }
        }
        return -1;
}