uboot/lib/lmb.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

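/*
 * Typical boot-time usage, as a rough sketch (call sites vary by board,
 * and taking the device tree from gd->fdt_blob is an assumption here):
 *
 *	struct lmb lmb;
 *	phys_addr_t addr;
 *
 *	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 *	addr = lmb_alloc(&lmb, size, align);
 *	if (!addr)
 *		... handle allocation failure ...
 */
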
#include <common.h>
#include <image.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE      0

static void lmb_dump_region(struct lmb_region *rgn, char *name)
{
	unsigned long long base, size, end;
	enum lmb_flags flags;
	int i;

	printf(" %s.cnt  = 0x%lx\n", name, rgn->cnt);

	for (i = 0; i < rgn->cnt; i++) {
		base = rgn->region[i].base;
		size = rgn->region[i].size;
		end = base + size - 1;
		flags = rgn->region[i].flags;

		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
		       name, i, base, end, size, flags);
	}
}

void lmb_dump_all_force(struct lmb *lmb)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb->memory, "memory");
	lmb_dump_region(&lmb->reserved, "reserved");
}

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	lmb_dump_all_force(lmb);
#endif
}

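/*
 * Return non-zero when [base1, base1 + size1) and [base2, base2 + size2)
 * share at least one byte.  Inclusive end addresses are used so that
 * regions reaching the top of the address space do not overflow.
 */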
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

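/*
 * Return 1 if the second range starts right after the first, -1 if the
 * first starts right after the second, and 0 if they are not adjacent.
 */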
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

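/* Delete entry r from the region table, shifting later entries down. */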
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
		rgn->region[i].flags = rgn->region[i + 1].flags;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

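/*
 * Reset both region tables to empty and record their capacity.  Without
 * CONFIG_LMB_USE_MAX_REGIONS the tables live in separately sized arrays
 * inside struct lmb, so the region pointers must be wired up here too.
 */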
void lmb_init(struct lmb *lmb)
{
#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
	lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
	lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
#else
	lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
	lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
	lmb->memory.region = lmb->memory_regions;
	lmb->reserved.region = lmb->reserved_regions;
#endif
	lmb->memory.cnt = 0;
	lmb->reserved.cnt = 0;
}

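/*
 * Reserve memory from @align bytes below the current stack pointer @sp up
 * to @end (typically the end of the U-Boot area) so that a loaded image
 * cannot overwrite the running U-Boot and its stack.
 */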
void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
{
	ulong bank_end;
	int bank;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
	 * that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", sp);

	/* adjust sp by 'align' bytes to be safe */
	sp -= align;
	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    sp < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			gd->bd->bi_dram[bank].size - 1;
		if (sp > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(lmb, sp, bank_end - sp + 1);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve(lmb, (phys_addr_t)(uintptr_t)_start, gd->mon_len);

		break;
	}
}

static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
	arch_lmb_reserve(lmb);
	board_lmb_reserve(lmb);

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
{
	int i;

	lmb_init(lmb);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (bd->bi_dram[i].size) {
			lmb_add(lmb, bd->bi_dram[i].start,
				bd->bi_dram[i].size);
		}
	}

	lmb_reserve_common(lmb, fdt_blob);
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
				phys_size_t size, void *fdt_blob)
{
	lmb_init(lmb);
	lmb_add(lmb, base, size);
	lmb_reserve_common(lmb, fdt_blob);
}

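/*
 * Add [base, base + size) with @flags to a region table, coalescing with
 * identical-flag neighbours where possible.  Returns 0 when the range was
 * added (or was already present), a positive count when it was merged
 * into adjacent entries, and -1 when the range overlaps an existing
 * entry, duplicates one with different flags, or the table is full.
 */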
/* This routine is called with relocation disabled. */
static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
				 phys_size_t size, enum lmb_flags flags)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		enum lmb_flags rgnflags = rgn->region[i].flags;

		if (rgnbase == base && rgnsize == size) {
			if (flags == rgnflags)
				/* Already have this region, so we're done */
				return 0;
			else
				return -1; /* same region, conflicting flags */
		}

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			if (flags != rgnflags)
				break;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		if (rgn->region[i].flags == rgn->region[i + 1].flags) {
			lmb_coalesce_regions(rgn, i, i + 1);
			coalesced++;
		}
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= rgn->max)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
			rgn->region[i + 1].flags = rgn->region[i].flags;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			rgn->region[i + 1].flags = flags;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->region[0].flags = flags;
	}

	rgn->cnt++;

	return 0;
}

static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	return lmb_add_region_flags(rgn, base, size, LMB_NONE);
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

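/*
 * Remove [base, base + size) from the reserved list.  The range must lie
 * entirely within a single reserved region; freeing the middle of a
 * region splits it in two.  Returns 0 on success, -1 when no reserved
 * region fully contains the range (or a required split cannot be
 * recorded).
 */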
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add a region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region_flags(rgn, end + 1, rgnend - end,
				    rgn->region[i].flags);
}

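/*
 * Mark [base, base + size) as reserved with the given attribute @flags.
 * Returns the lmb_add_region_flags() result: >= 0 on success (the
 * coalesce count), -1 on failure.
 */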
long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
		       enum lmb_flags flags)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region_flags(_rgn, base, size, flags);
}

long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_reserve_flags(lmb, base, size, LMB_NONE);
}

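/*
 * Return the index of the first region in @rgn that overlaps
 * [base, base + size), or -1 if there is no overlap.
 */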
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			   phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

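/* Round @addr down to a multiple of @size; @size must be a power of two. */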
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

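/*
 * Top-down allocator: walk the memory regions from the highest downwards,
 * carve out @size bytes aligned to @align below @max_addr (or anywhere
 * when @max_addr is LMB_ALLOC_ANYWHERE), stepping below each reserved
 * region that is in the way.  Returns the allocated base, or 0 on failure.
 */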
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			/* lmbbase + lmbsize wrapped; clamp to top of space */
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	long rgn;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
	}
	return 0;
}

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
	int i;
	long rgn;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
	}
	return 0;
}

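/*
 * Return 1 if @addr lies in a reserved region whose flags include all of
 * @flags, 0 otherwise.  With LMB_NONE this degenerates to a plain
 * "is the address reserved" test.
 */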
int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;

		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return (lmb->reserved.region[i].flags & flags) == flags;
	}
	return 0;
}

int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
}

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}