linux/include/linux/memblock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
        MEMBLOCK_NONE           = 0x0,  /* No special request */
        MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
        MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
        enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
        unsigned long cnt;
        unsigned long max;
        phys_addr_t total_size;
        struct memblock_region *regions;
        char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
        bool bottom_up;  /* is bottom up direction? */
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
        if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
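
/*
 * Example (illustrative sketch, not part of the upstream header): early boot
 * code typically registers the usable RAM reported by firmware with
 * memblock_add() and then withholds ranges that must never be handed out
 * with memblock_reserve(); attribute markers can be applied afterwards.
 * All addresses, sizes and the helper name below are made up.
 *
 *	static void __init example_setup_memblock(void)
 *	{
 *		// a 512 MiB bank of usable RAM starting at 2 GiB
 *		memblock_add(0x80000000ULL, 0x20000000ULL);
 *
 *		// keep the first 2 MiB (say, firmware tables) reserved
 *		memblock_reserve(0x80000000ULL, 0x200000ULL);
 *
 *		// mark a hypothetical hot-pluggable range so it is avoided early
 *		memblock_mark_hotplug(0x90000000ULL, 0x10000000ULL);
 *	}
 */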

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
                       phys_addr_t base, phys_addr_t size,
                       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                          struct memblock_type *type_a,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
                                phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b; or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,               \
                           p_start, p_end, p_nid)                       \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range(&i, nid, flags, type_a, type_b,           \
                              p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - iterate in reverse through memblock areas from
 * type_a that are not included in type_b; or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,           \
                               p_start, p_end, p_nid)                   \
        for (i = (u64)ULLONG_MAX,                                       \
                     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
                                          p_start, p_end, p_nid);       \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)                 \
        for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);   \
             i != (u64)ULLONG_MAX;                                      \
             __next_reserved_mem_region(&i, p_start, p_end))
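
/*
 * Example (sketch): walking every reserved range, e.g. to print it while
 * debugging early boot. The helper name below is hypothetical.
 *
 *	static void __init example_dump_reserved(void)
 *	{
 *		phys_addr_t start, end;
 *		u64 i;
 *
 *		for_each_reserved_mem_region(i, &start, &end)
 *			pr_info("reserved: %pa..%pa\n", &start, &end);
 *	}
 */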

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
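
/*
 * Example (sketch, assumes CONFIG_HAVE_MEMBLOCK_NODE_MAP=y): counting the
 * pages that early memory ranges contribute to one node. The function
 * name is hypothetical.
 *
 *	static unsigned long __init example_node_present_pages(int nid)
 *	{
 *		unsigned long start_pfn, end_pfn, pages = 0;
 *		int i;
 *
 *		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
 *			pages += end_pfn - start_pfn;
 *		return pages;
 *	}
 */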
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                                  unsigned long *out_spfn,
                                  unsigned long *out_epfn);
/**
 * for_each_free_mem_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine the NUMA node, and whether a
 * given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)    \
        for (i = 0,                                                     \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);    \
             i != U64_MAX;                                              \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
        for (; i != U64_MAX;                                            \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
        for_each_mem_range(i, &memblock.memory, &memblock.reserved,     \
                           nid, flags, p_start, p_end, p_nid)
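
/*
 * Example (sketch): summing up the free (memory && !reserved) space that
 * memblock still tracks, on any node and without flag filtering. The
 * helper name is hypothetical.
 *
 *	static phys_addr_t __init example_free_space(void)
 *	{
 *		phys_addr_t start, end, total = 0;
 *		u64 i;
 *
 *		for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *					&start, &end, NULL)
 *			total += end - start;
 *		return total;
 *	}
 */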

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
                                        p_nid)                          \
        for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                               nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE       0
#define MEMBLOCK_ALLOC_KASAN            1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
                                      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
                                              phys_addr_t align)
{
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
}
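
/*
 * Example (sketch): grabbing a page-aligned physical range early in boot,
 * e.g. for a table that will be mapped by hand later. The variable name
 * and the panic message are illustrative only.
 *
 *	phys_addr_t table_pa;
 *
 *	table_pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *	if (!table_pa)
 *		panic("cannot allocate an early page table page\n");
 */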

void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid);

static inline void * __init memblock_alloc(phys_addr_t size,  phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
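
/*
 * Example (sketch): memblock_alloc() behaves like a boot-time kzalloc(),
 * returning a zeroed, directly mapped pointer or NULL on failure. The
 * table name and its size are made up.
 *
 *	static int __init example_early_table_init(void)
 *	{
 *		unsigned long *table;
 *
 *		table = memblock_alloc(1024 * sizeof(*table), SMP_CACHE_BYTES);
 *		if (!table)
 *			panic("%s: failed to allocate table\n", __func__);
 *		return 0;
 *	}
 */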

static inline void * __init memblock_alloc_raw(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
                                          MEMBLOCK_ALLOC_ACCESSIBLE,
                                          NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
                                                phys_addr_t align,
                                                phys_addr_t min_addr)
{
        return memblock_alloc_try_nid(size, align, min_addr,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
                                               phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
                                                phys_addr_t align, int nid)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
                                              phys_addr_t size)
{
        memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
                                                  phys_addr_t size, int nid)
{
        memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
        __memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
        memblock.bottom_up = enable;
}

/*
 * Check whether the allocation direction is bottom-up. If it is, memblock
 * will allocate memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
        return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
        if (memblock_debug)
                __memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)                                        \
        for (region = memblock.memblock_type.regions;                                   \
             region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);    \
             region++)
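
/*
 * Example (sketch): combining for_each_memblock() with the pfn accessors
 * above to visit every usable memory region as a pfn range. The helper
 * name is hypothetical.
 *
 *	static void __init example_walk_memory_pfns(void)
 *	{
 *		struct memblock_region *reg;
 *
 *		for_each_memblock(memory, reg) {
 *			unsigned long spfn = memblock_region_memory_base_pfn(reg);
 *			unsigned long epfn = memblock_region_memory_end_pfn(reg);
 *
 *			pr_info("memory pfns: %lu-%lu\n", spfn, epfn);
 *		}
 *	}
 */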

#define for_each_memblock_type(i, memblock_type, rgn)                   \
        for (i = 0, rgn = &memblock_type->regions[0];                   \
             i < memblock_type->cnt;                                    \
             i++, rgn = &memblock_type->regions[i])

extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit);

#define HASH_EARLY      0x00000001      /* Allocating during early boot? */
#define HASH_SMALL      0x00000002      /* sub-page allocation allowed, min
                                         * shift passed via *_hash_shift */
#define HASH_ZERO       0x00000004      /* Zero allocated hash table */
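
/*
 * Example (sketch): early allocation of a system hash table, in the style
 * of the fs and networking caches. The table and shift variables are
 * hypothetical; HASH_EARLY makes the memory come from memblock and
 * HASH_ZERO zeroes it. The scale value is only illustrative.
 *
 *	static unsigned int example_hash_shift __ro_after_init;
 *	static struct hlist_head *example_hash_table __ro_after_init;
 *
 *	static void __init example_hash_init(void)
 *	{
 *		example_hash_table =
 *			alloc_large_system_hash("Example-cache",
 *						sizeof(struct hlist_head),
 *						0,		// size from amount of memory
 *						13,		// scale factor
 *						HASH_EARLY | HASH_ZERO,
 *						&example_hash_shift,
 *						NULL,		// mask not needed
 *						0, 0);
 *	}
 */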

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;            /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */