uboot/lib/lmb.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <image.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

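/*
 * Passing LMB_ALLOC_ANYWHERE as max_addr tells the allocator that the
 * block may be placed anywhere in available memory, i.e. there is no
 * upper address limit.
 */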
#define LMB_ALLOC_ANYWHERE      0

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
        unsigned long i;

        debug("lmb_dump_all:\n");
        debug("    memory.cnt              = 0x%lx\n", lmb->memory.cnt);
        debug("    memory.size             = 0x%llx\n",
              (unsigned long long)lmb->memory.size);
        for (i = 0; i < lmb->memory.cnt; i++) {
                debug("    memory.reg[0x%lx].base   = 0x%llx\n", i,
                      (unsigned long long)lmb->memory.region[i].base);
                debug("            .size   = 0x%llx\n",
                      (unsigned long long)lmb->memory.region[i].size);
        }

        debug("\n    reserved.cnt          = 0x%lx\n",
                lmb->reserved.cnt);
        debug("    reserved.size           = 0x%llx\n",
                (unsigned long long)lmb->reserved.size);
        for (i = 0; i < lmb->reserved.cnt; i++) {
                debug("    reserved.reg[0x%lx].base = 0x%llx\n", i,
                      (unsigned long long)lmb->reserved.region[i].base);
                debug("              .size = 0x%llx\n",
                      (unsigned long long)lmb->reserved.region[i].size);
        }
#endif /* DEBUG */
}

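/*
 * Check whether two address ranges overlap. End addresses are computed
 * inclusively, so ranges that merely touch do not count as overlapping.
 */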
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
                              phys_addr_t base2, phys_size_t size2)
{
        const phys_addr_t base1_end = base1 + size1 - 1;
        const phys_addr_t base2_end = base2 + size2 - 1;

        return ((base1 <= base2_end) && (base2 <= base1_end));
}

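/*
 * Return 1 if region 2 starts immediately after region 1 ends, -1 if
 * region 1 starts immediately after region 2 ends, and 0 otherwise.
 */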
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
                               phys_addr_t base2, phys_size_t size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
                                 unsigned long r2)
{
        phys_addr_t base1 = rgn->region[r1].base;
        phys_size_t size1 = rgn->region[r1].size;
        phys_addr_t base2 = rgn->region[r2].base;
        phys_size_t size2 = rgn->region[r2].size;

        return lmb_addrs_adjacent(base1, size1, base2, size2);
}

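/* Remove entry r from the region table by shifting later entries down. */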
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
        unsigned long i;

        for (i = r; i < rgn->cnt - 1; i++) {
                rgn->region[i].base = rgn->region[i + 1].base;
                rgn->region[i].size = rgn->region[i + 1].size;
        }
        rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
                                 unsigned long r2)
{
        rgn->region[r1].size += rgn->region[r2].size;
        lmb_remove_region(rgn, r2);
}

void lmb_init(struct lmb *lmb)
{
        lmb->memory.cnt = 0;
        lmb->memory.size = 0;
        lmb->reserved.cnt = 0;
        lmb->reserved.size = 0;
}

static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
        arch_lmb_reserve(lmb);
        board_lmb_reserve(lmb);

        if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob)
                boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, bd_t *bd, void *fdt_blob)
{
#ifdef CONFIG_NR_DRAM_BANKS
        int i;
#endif

        lmb_init(lmb);
#ifdef CONFIG_NR_DRAM_BANKS
        for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
                if (bd->bi_dram[i].size) {
                        lmb_add(lmb, bd->bi_dram[i].start,
                                bd->bi_dram[i].size);
                }
        }
#else
        if (bd->bi_memsize)
                lmb_add(lmb, bd->bi_memstart, bd->bi_memsize);
#endif
        lmb_reserve_common(lmb, fdt_blob);
}
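
/*
 * A minimal usage sketch (hypothetical call site; assumes a populated
 * bd_t and a valid device tree blob, as in a typical boot path):
 *
 *      struct lmb lmb;
 *      phys_addr_t addr;
 *
 *      lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 *      addr = lmb_alloc(&lmb, 0x100000, 0x1000);
 */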

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
                                phys_size_t size, void *fdt_blob)
{
        lmb_init(lmb);
        lmb_add(lmb, base, size);
        lmb_reserve_common(lmb, fdt_blob);
}

/*
 * Add (base, size) to a region table, coalescing it with an existing
 * entry when the new range duplicates, extends or bridges one.
 * Overlapping ranges and a full table (MAX_LMB_REGIONS) are rejected
 * with -1. This routine is called with relocation disabled.
 */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
{
        unsigned long coalesced = 0;
        long adjacent, i;

        if (rgn->cnt == 0) {
                rgn->region[0].base = base;
                rgn->region[0].size = size;
                rgn->cnt = 1;
                return 0;
        }

        /* First try to coalesce this LMB with another. */
        for (i = 0; i < rgn->cnt; i++) {
                phys_addr_t rgnbase = rgn->region[i].base;
                phys_size_t rgnsize = rgn->region[i].size;

                if ((rgnbase == base) && (rgnsize == size))
                        /* Already have this region, so we're done */
                        return 0;

                adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
                if (adjacent > 0) {
                        /* New range sits just below entry i: extend it downwards */
                        rgn->region[i].base -= size;
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                } else if (adjacent < 0) {
                        /* New range sits just above entry i: extend it upwards */
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
                        /* regions overlap */
                        return -1;
                }
        }

        /* The grown entry may now bridge the gap to its neighbour */
        if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
                lmb_coalesce_regions(rgn, i, i + 1);
                coalesced++;
        }

        if (coalesced)
                return coalesced;
        if (rgn->cnt >= MAX_LMB_REGIONS)
                return -1;

        /* Couldn't coalesce the LMB, so add it to the sorted table. */
        for (i = rgn->cnt - 1; i >= 0; i--) {
                if (base < rgn->region[i].base) {
                        rgn->region[i + 1].base = rgn->region[i].base;
                        rgn->region[i + 1].size = rgn->region[i].size;
                } else {
                        rgn->region[i + 1].base = base;
                        rgn->region[i + 1].size = size;
                        break;
                }
        }

        /* New range is below every existing entry: it lands in slot 0 */
        if (base < rgn->region[0].base) {
                rgn->region[0].base = base;
                rgn->region[0].size = size;
        }

        rgn->cnt++;

        return 0;
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
        struct lmb_region *_rgn = &(lmb->memory);

        return lmb_add_region(_rgn, base, size);
}

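/*
 * Remove (base, size) from the reserved list. The range must lie within
 * a single reserved region; depending on how it lines up, that region is
 * deleted outright, trimmed at one end, or split in two.
 */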
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
        struct lmb_region *rgn = &(lmb->reserved);
        phys_addr_t rgnbegin, rgnend;
        phys_addr_t end = base + size - 1;
        int i;

        rgnbegin = rgnend = 0; /* suppress gcc warnings */

        /* Find the region that (base, size) belongs to */
        for (i = 0; i < rgn->cnt; i++) {
                rgnbegin = rgn->region[i].base;
                rgnend = rgnbegin + rgn->region[i].size - 1;

                if ((rgnbegin <= base) && (end <= rgnend))
                        break;
        }

        /* Didn't find the region */
        if (i == rgn->cnt)
                return -1;

        /* Check to see if we are removing the entire region */
        if ((rgnbegin == base) && (rgnend == end)) {
                lmb_remove_region(rgn, i);
                return 0;
        }

        /* Check to see if the region is matching at the front */
        if (rgnbegin == base) {
                rgn->region[i].base = end + 1;
                rgn->region[i].size -= size;
                return 0;
        }

        /* Check to see if the region is matching at the end */
        if (rgnend == end) {
                rgn->region[i].size -= size;
                return 0;
        }

        /*
         * We need to split the entry - adjust the current one to the
         * beginning of the hole and add the region after the hole.
         */
        rgn->region[i].size = base - rgn->region[i].base;
        return lmb_add_region(rgn, end + 1, rgnend - end);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
        struct lmb_region *_rgn = &(lmb->reserved);

        return lmb_add_region(_rgn, base, size);
}

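/*
 * Return the index of the first region in rgn that overlaps (base, size),
 * or -1 if nothing overlaps.
 */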
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
                                phys_size_t size)
{
        unsigned long i;

        for (i = 0; i < rgn->cnt; i++) {
                phys_addr_t rgnbase = rgn->region[i].base;
                phys_size_t rgnsize = rgn->region[i].size;

                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < rgn->cnt) ? i : -1;
}

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
        return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __lmb_alloc_base(lmb, size, align, max_addr);

        if (alloc == 0)
                printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
                       (ulong)size, (ulong)max_addr);

        return alloc;
}

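/* Round addr down to a multiple of size; size must be a power of two. */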
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
        return addr & ~(size - 1);
}

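/*
 * Walk the memory regions from highest to lowest and try to carve an
 * aligned block of the requested size out of the top of each, below
 * max_addr. Whenever a candidate overlaps a reserved region, retry just
 * below that reservation. Returns the allocated base, or 0 on failure.
 */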
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
        long i, rgn;
        phys_addr_t base = 0;
        phys_addr_t res_base;

        for (i = lmb->memory.cnt - 1; i >= 0; i--) {
                phys_addr_t lmbbase = lmb->memory.region[i].base;
                phys_size_t lmbsize = lmb->memory.region[i].size;

                if (lmbsize < size)
                        continue;
                if (max_addr == LMB_ALLOC_ANYWHERE)
                        base = lmb_align_down(lmbbase + lmbsize - size, align);
                else if (lmbbase < max_addr) {
                        base = lmbbase + lmbsize;
                        if (base < lmbbase)
                                base = -1;
                        base = min(base, max_addr);
                        base = lmb_align_down(base - size, align);
                } else
                        continue;

                while (base && lmbbase <= base) {
                        rgn = lmb_overlaps_region(&lmb->reserved, base, size);
                        if (rgn < 0) {
                                /* This area isn't reserved, take it */
                                if (lmb_add_region(&lmb->reserved, base,
                                                   size) < 0)
                                        return 0;
                                return base;
                        }
                        res_base = lmb->reserved.region[rgn].base;
                        if (res_base < size)
                                break;
                        base = lmb_align_down(res_base - size, align);
                }
        }
        return 0;
}

/*
 * Try to allocate a specific address range: it must lie in defined memory
 * and must not already be reserved.
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
        long rgn;

        /* Check if the requested address is in one of the memory regions */
        rgn = lmb_overlaps_region(&lmb->memory, base, size);
        if (rgn >= 0) {
                /*
                 * Check if the requested end address is in the same memory
                 * region we found.
                 */
                if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
                                      lmb->memory.region[rgn].size,
                                      base + size - 1, 1)) {
                        /* ok, reserve the memory */
                        if (lmb_reserve(lmb, base, size) >= 0)
                                return base;
                }
        }
        return 0;
}
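
/*
 * A minimal sketch (hypothetical load address): try to claim a fixed
 * range and fall back to an anywhere-allocation if it is already taken:
 *
 *      if (!lmb_alloc_addr(&lmb, 0x42000000, 0x10000))
 *              addr = lmb_alloc(&lmb, 0x10000, 0x1000);
 */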

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
        int i;
        long rgn;

        /* check if the requested address is in the memory regions */
        rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
        if (rgn >= 0) {
                for (i = 0; i < lmb->reserved.cnt; i++) {
                        if (addr < lmb->reserved.region[i].base) {
                                /* first reserved range > requested address */
                                return lmb->reserved.region[i].base - addr;
                        }
                        if (lmb->reserved.region[i].base +
                            lmb->reserved.region[i].size > addr) {
                                /* requested addr is in this reserved range */
                                return 0;
                        }
                }
                /* if we come here: no reserved ranges above requested addr */
                return lmb->memory.region[lmb->memory.cnt - 1].base +
                       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
        }
        return 0;
}

int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
        int i;

        for (i = 0; i < lmb->reserved.cnt; i++) {
                phys_addr_t upper = lmb->reserved.region[i].base +
                        lmb->reserved.region[i].size - 1;

                if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
                        return 1;
        }
        return 0;
}

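/*
 * Boards and architectures override these weak stubs to reserve memory
 * that must not be handed out, e.g. U-Boot itself and its stack, before
 * images are loaded. A minimal sketch of a board override (hypothetical
 * address and size):
 *
 *      void board_lmb_reserve(struct lmb *lmb)
 *      {
 *              lmb_reserve(lmb, 0x80000000, 0x1000000);
 *      }
 */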
__weak void board_lmb_reserve(struct lmb *lmb)
{
        /* please define a platform-specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
        /* please define a platform-specific arch_lmb_reserve() */
}