dpdk/lib/eal/linux/eal_memory.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2010-2014 Intel Corporation.
   3 * Copyright(c) 2013 6WIND S.A.
   4 */
   5
   6#include <errno.h>
   7#include <fcntl.h>
   8#include <stdarg.h>
   9#include <stdbool.h>
  10#include <stdlib.h>
  11#include <stdio.h>
  12#include <stdint.h>
  13#include <inttypes.h>
  14#include <string.h>
  15#include <sys/mman.h>
  16#include <sys/types.h>
  17#include <sys/stat.h>
  18#include <sys/queue.h>
  19#include <sys/file.h>
  20#include <sys/resource.h>
  21#include <unistd.h>
  22#include <limits.h>
  23#include <sys/ioctl.h>
  24#include <sys/time.h>
  25#include <signal.h>
  26#include <setjmp.h>
  27#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
  28#include <linux/memfd.h>
  29#define MEMFD_SUPPORTED
  30#endif
  31#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
  32#include <numa.h>
  33#include <numaif.h>
  34#endif
  35
  36#include <rte_errno.h>
  37#include <rte_log.h>
  38#include <rte_memory.h>
  39#include <rte_launch.h>
  40#include <rte_eal.h>
  41#include <rte_per_lcore.h>
  42#include <rte_lcore.h>
  43#include <rte_common.h>
  44#include <rte_string_fns.h>
  45
  46#include "eal_private.h"
  47#include "eal_memalloc.h"
  48#include "eal_memcfg.h"
  49#include "eal_internal_cfg.h"
  50#include "eal_filesystem.h"
  51#include "eal_hugepages.h"
  52#include "eal_options.h"
  53
  54#define PFN_MASK_SIZE   8
  55
  56/**
  57 * @file
   58 * Huge page mapping under Linux
   59 *
   60 * To reserve a large contiguous amount of memory, we use the hugepage
   61 * feature of Linux. For that, we need to have hugetlbfs mounted. This
   62 * code will create many files in that mount directory (one per page) and
   63 * map them in virtual memory. For each page, we will retrieve its
   64 * physical address and remap it in order to have a virtually contiguous
   65 * zone as well as a physically contiguous zone.
  66 */
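     /*
      * For example (illustrative, not enforced by this file), hugetlbfs can
      * be made available with commands such as:
      *
      *     echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
      *     mkdir -p /mnt/huge
      *     mount -t hugetlbfs nodev /mnt/huge
      *
      * The mount point itself is discovered from /proc/mounts by the EAL
      * hugepage-info code, not here.
      */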
  67
  68static int phys_addrs_available = -1;
  69
  70#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
  71
  72uint64_t eal_get_baseaddr(void)
  73{
  74        /*
  75         * Linux kernel uses a really high address as starting address for
  76         * serving mmaps calls. If there exists addressing limitations and IOVA
  77         * mode is VA, this starting address is likely too high for those
  78         * devices. However, it is possible to use a lower address in the
  79         * process virtual address space as with 64 bits there is a lot of
  80         * available space.
  81         *
  82         * Current known limitations are 39 or 40 bits. Setting the starting
  83         * address at 4GB implies there are 508GB or 1020GB for mapping the
  84         * available hugepages. This is likely enough for most systems, although
  85         * a device with addressing limitations should call
  86         * rte_mem_check_dma_mask for ensuring all memory is within supported
  87         * range.
  88         */
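             /*
              * For example: a 39-bit limit gives 2^39 = 512 GB of addressable
              * space, and subtracting the 4 GB starting address leaves 508 GB;
              * with 40 bits, 1024 GB - 4 GB = 1020 GB, matching the figures
              * above.
              */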
  89        return 0x100000000ULL;
  90}
  91
  92/*
  93 * Get physical address of any mapped virtual address in the current process.
  94 */
  95phys_addr_t
  96rte_mem_virt2phy(const void *virtaddr)
  97{
  98        int fd, retval;
  99        uint64_t page, physaddr;
 100        unsigned long virt_pfn;
 101        int page_size;
 102        off_t offset;
 103
 104        if (phys_addrs_available == 0)
 105                return RTE_BAD_IOVA;
 106
 107        /* standard page size */
 108        page_size = getpagesize();
 109
 110        fd = open("/proc/self/pagemap", O_RDONLY);
 111        if (fd < 0) {
 112                RTE_LOG(INFO, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
 113                        __func__, strerror(errno));
 114                return RTE_BAD_IOVA;
 115        }
 116
 117        virt_pfn = (unsigned long)virtaddr / page_size;
 118        offset = sizeof(uint64_t) * virt_pfn;
 119        if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
 120                RTE_LOG(INFO, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
 121                                __func__, strerror(errno));
 122                close(fd);
 123                return RTE_BAD_IOVA;
 124        }
 125
 126        retval = read(fd, &page, PFN_MASK_SIZE);
 127        close(fd);
 128        if (retval < 0) {
 129                RTE_LOG(INFO, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
 130                                __func__, strerror(errno));
 131                return RTE_BAD_IOVA;
 132        } else if (retval != PFN_MASK_SIZE) {
 133                RTE_LOG(INFO, EAL, "%s(): read %d bytes from /proc/self/pagemap "
 134                                "but expected %d:\n",
 135                                __func__, retval, PFN_MASK_SIZE);
 136                return RTE_BAD_IOVA;
 137        }
 138
 139        /*
  140         * the pfn (page frame number) is held in bits 0-54 (see
  141         * pagemap.txt in the Linux documentation)
 142         */
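             /*
              * Each pagemap entry is a 64-bit word: bits 0-54 hold the PFN,
              * bit 62 flags a swapped page and bit 63 a present page. Recent
              * kernels also report a zero PFN to unprivileged readers, which
              * is one reason a zero PFN is treated as RTE_BAD_IOVA below.
              */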
 143        if ((page & 0x7fffffffffffffULL) == 0)
 144                return RTE_BAD_IOVA;
 145
 146        physaddr = ((page & 0x7fffffffffffffULL) * page_size)
 147                + ((unsigned long)virtaddr % page_size);
 148
 149        return physaddr;
 150}
 151
 152rte_iova_t
 153rte_mem_virt2iova(const void *virtaddr)
 154{
 155        if (rte_eal_iova_mode() == RTE_IOVA_VA)
 156                return (uintptr_t)virtaddr;
 157        return rte_mem_virt2phy(virtaddr);
 158}
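     /*
      * Illustrative usage (not part of this file): a caller that needs a bus
      * address for a buffer would typically do
      *
      *     rte_iova_t iova = rte_mem_virt2iova(buf);
      *     if (iova == RTE_BAD_IOVA)
      *             return -1;
      *
      * where RTE_BAD_IOVA signals e.g. an unreadable /proc/self/pagemap.
      */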
 159
 160/*
 161 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 162 * it by browsing the /proc/self/pagemap special file.
 163 */
 164static int
 165find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 166{
 167        unsigned int i;
 168        phys_addr_t addr;
 169
 170        for (i = 0; i < hpi->num_pages[0]; i++) {
 171                addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
 172                if (addr == RTE_BAD_PHYS_ADDR)
 173                        return -1;
 174                hugepg_tbl[i].physaddr = addr;
 175        }
 176        return 0;
 177}
 178
 179/*
 180 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
 181 */
 182static int
 183set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 184{
 185        unsigned int i;
 186        static phys_addr_t addr;
 187
 188        for (i = 0; i < hpi->num_pages[0]; i++) {
 189                hugepg_tbl[i].physaddr = addr;
 190                addr += hugepg_tbl[i].size;
 191        }
 192        return 0;
 193}
 194
 195/*
 196 * Check whether address-space layout randomization is enabled in
  197 * the kernel. This is important for multi-process, as randomization can
  198 * prevent two processes from mapping data at the same virtual address.
  199 * Returns:
 200 *    0 - address space randomization disabled
 201 *    1/2 - address space randomization enabled
 202 *    negative error code on error
 203 */
 204static int
 205aslr_enabled(void)
 206{
 207        char c;
 208        int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
 209        if (fd < 0)
 210                return -errno;
 211        retval = read(fd, &c, 1);
 212        close(fd);
 213        if (retval < 0)
 214                return -errno;
 215        if (retval == 0)
 216                return -EIO;
 217        switch (c) {
 218                case '0' : return 0;
 219                case '1' : return 1;
 220                case '2' : return 2;
 221                default: return -EINVAL;
 222        }
 223}
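     /*
      * For multi-process deployments ASLR is therefore often disabled, e.g.
      * (illustrative) with "sysctl -w kernel.randomize_va_space=0"; a value
      * of 1 means reduced randomization and 2 means full randomization,
      * including the heap.
      */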
 224
 225static sigjmp_buf huge_jmpenv;
 226
 227static void huge_sigbus_handler(int signo __rte_unused)
 228{
 229        siglongjmp(huge_jmpenv, 1);
 230}
 231
  232/* Put sigsetjmp into a wrapper function to avoid clobbering problems: any
  233 * non-volatile, non-static local variable in the stack frame calling
  234 * sigsetjmp might be clobbered by a call to longjmp.
 235 */
 236static int huge_wrap_sigsetjmp(void)
 237{
 238        return sigsetjmp(huge_jmpenv, 1);
 239}
 240
 241#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
 242/* Callback for numa library. */
 243void numa_error(char *where)
 244{
 245        RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
 246}
 247#endif
 248
 249/*
  250 * Mmap all hugepages of the hugepage table: for each page, first open a
  251 * file in hugetlbfs, then mmap() hugepage_sz bytes of it. The resulting
  252 * virtual address is stored in hugepg_tbl[i].orig_va; remapping into the
  253 * final (contiguous) VA space is done later by remap_segment(). Returns
  254 * the number of hugepages successfully mapped.
 255 */
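     /*
      * The per-page files are created under hpi->hugedir using
      * eal_get_hugefile_path(); with the default file prefix this yields
      * paths such as /mnt/huge/rtemap_0 (illustrative example path).
      */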
 256static unsigned
 257map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
 258                  uint64_t *essential_memory __rte_unused)
 259{
 260        int fd;
 261        unsigned i;
 262        void *virtaddr;
 263#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
 264        int node_id = -1;
 265        int essential_prev = 0;
 266        int oldpolicy;
 267        struct bitmask *oldmask = NULL;
 268        bool have_numa = true;
 269        unsigned long maxnode = 0;
 270        const struct internal_config *internal_conf =
 271                eal_get_internal_configuration();
 272
 273        /* Check if kernel supports NUMA. */
 274        if (numa_available() != 0) {
 275                RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
 276                have_numa = false;
 277        }
 278
 279        if (have_numa) {
 280                RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
 281                oldmask = numa_allocate_nodemask();
 282                if (get_mempolicy(&oldpolicy, oldmask->maskp,
 283                                  oldmask->size + 1, 0, 0) < 0) {
 284                        RTE_LOG(ERR, EAL,
 285                                "Failed to get current mempolicy: %s. "
 286                                "Assuming MPOL_DEFAULT.\n", strerror(errno));
 287                        oldpolicy = MPOL_DEFAULT;
 288                }
 289                for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
 290                        if (internal_conf->socket_mem[i])
 291                                maxnode = i + 1;
 292        }
 293#endif
 294
 295        for (i = 0; i < hpi->num_pages[0]; i++) {
 296                struct hugepage_file *hf = &hugepg_tbl[i];
 297                uint64_t hugepage_sz = hpi->hugepage_sz;
 298
 299#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
 300                if (maxnode) {
 301                        unsigned int j;
 302
 303                        for (j = 0; j < maxnode; j++)
 304                                if (essential_memory[j])
 305                                        break;
 306
 307                        if (j == maxnode) {
 308                                node_id = (node_id + 1) % maxnode;
 309                                while (!internal_conf->socket_mem[node_id]) {
 310                                        node_id++;
 311                                        node_id %= maxnode;
 312                                }
 313                                essential_prev = 0;
 314                        } else {
 315                                node_id = j;
 316                                essential_prev = essential_memory[j];
 317
 318                                if (essential_memory[j] < hugepage_sz)
 319                                        essential_memory[j] = 0;
 320                                else
 321                                        essential_memory[j] -= hugepage_sz;
 322                        }
 323
 324                        RTE_LOG(DEBUG, EAL,
 325                                "Setting policy MPOL_PREFERRED for socket %d\n",
 326                                node_id);
 327                        numa_set_preferred(node_id);
 328                }
 329#endif
 330
 331                hf->file_id = i;
 332                hf->size = hugepage_sz;
 333                eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
 334                                hpi->hugedir, hf->file_id);
 335                hf->filepath[sizeof(hf->filepath) - 1] = '\0';
 336
 337                /* try to create hugepage file */
 338                fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
 339                if (fd < 0) {
 340                        RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
 341                                        strerror(errno));
 342                        goto out;
 343                }
 344
 345                /* map the segment, and populate page tables,
 346                 * the kernel fills this segment with zeros. we don't care where
 347                 * this gets mapped - we already have contiguous memory areas
 348                 * ready for us to map into.
 349                 */
 350                virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
 351                                MAP_SHARED | MAP_POPULATE, fd, 0);
 352                if (virtaddr == MAP_FAILED) {
 353                        RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
 354                                        strerror(errno));
 355                        close(fd);
 356                        goto out;
 357                }
 358
 359                hf->orig_va = virtaddr;
 360
  361                /* In Linux, hugetlb limits such as cgroup limits are
  362                 * enforced at fault time rather than at mmap() time, even
  363                 * with MAP_POPULATE, and the kernel sends SIGBUS when a
  364                 * fault fails. To avoid being killed, save the stack
  365                 * environment here; if SIGBUS happens, we can jump
  366                 * back to it.
 367                 */
 368                if (huge_wrap_sigsetjmp()) {
 369                        RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
 370                                "hugepages of size %u MB\n",
 371                                (unsigned int)(hugepage_sz / 0x100000));
 372                        munmap(virtaddr, hugepage_sz);
 373                        close(fd);
 374                        unlink(hugepg_tbl[i].filepath);
 375#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
 376                        if (maxnode)
 377                                essential_memory[node_id] =
 378                                        essential_prev;
 379#endif
 380                        goto out;
 381                }
 382                *(int *)virtaddr = 0;
 383
 384                /* set shared lock on the file. */
 385                if (flock(fd, LOCK_SH) < 0) {
  386                        RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
 387                                __func__, strerror(errno));
 388                        close(fd);
 389                        goto out;
 390                }
 391
 392                close(fd);
 393        }
 394
 395out:
 396#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
 397        if (maxnode) {
 398                RTE_LOG(DEBUG, EAL,
 399                        "Restoring previous memory policy: %d\n", oldpolicy);
 400                if (oldpolicy == MPOL_DEFAULT) {
 401                        numa_set_localalloc();
 402                } else if (set_mempolicy(oldpolicy, oldmask->maskp,
 403                                         oldmask->size + 1) < 0) {
 404                        RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
 405                                strerror(errno));
 406                        numa_set_localalloc();
 407                }
 408        }
 409        if (oldmask != NULL)
 410                numa_free_cpumask(oldmask);
 411#endif
 412        return i;
 413}
 414
 415/*
 416 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 417 * page.
 418 */
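     /*
      * An entry for one of our hugepages looks roughly like this
      * (illustrative):
      *
      *   7f2a40000000 prefer:0 file=/mnt/huge/rtemap_0 huge dirty=1 N0=1 kernelpagesize_kB=2048
      *
      * i.e. the mapping start address followed by attributes, including the
      * " N<node>=<count>" token from which the socket ID is parsed below.
      */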
 419static int
 420find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 421{
 422        int socket_id;
 423        char *end, *nodestr;
 424        unsigned i, hp_count = 0;
 425        uint64_t virt_addr;
 426        char buf[BUFSIZ];
 427        char hugedir_str[PATH_MAX];
 428        FILE *f;
 429
 430        f = fopen("/proc/self/numa_maps", "r");
 431        if (f == NULL) {
  432                RTE_LOG(NOTICE, EAL, "NUMA support not available,"
  433                        " assuming all memory is on socket 0\n");
 434                return 0;
 435        }
 436
 437        snprintf(hugedir_str, sizeof(hugedir_str),
 438                        "%s/%s", hpi->hugedir, eal_get_hugefile_prefix());
 439
 440        /* parse numa map */
 441        while (fgets(buf, sizeof(buf), f) != NULL) {
 442
 443                /* ignore non huge page */
 444                if (strstr(buf, " huge ") == NULL &&
 445                                strstr(buf, hugedir_str) == NULL)
 446                        continue;
 447
 448                /* get zone addr */
 449                virt_addr = strtoull(buf, &end, 16);
 450                if (virt_addr == 0 || end == buf) {
 451                        RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
 452                        goto error;
 453                }
 454
 455                /* get node id (socket id) */
 456                nodestr = strstr(buf, " N");
 457                if (nodestr == NULL) {
 458                        RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
 459                        goto error;
 460                }
 461                nodestr += 2;
 462                end = strstr(nodestr, "=");
 463                if (end == NULL) {
 464                        RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
 465                        goto error;
 466                }
 467                end[0] = '\0';
 468                end = NULL;
 469
 470                socket_id = strtoul(nodestr, &end, 0);
 471                if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
 472                        RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
 473                        goto error;
 474                }
 475
 476                /* if we find this page in our mappings, set socket_id */
 477                for (i = 0; i < hpi->num_pages[0]; i++) {
 478                        void *va = (void *)(unsigned long)virt_addr;
 479                        if (hugepg_tbl[i].orig_va == va) {
 480                                hugepg_tbl[i].socket_id = socket_id;
 481                                hp_count++;
 482#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
 483                                RTE_LOG(DEBUG, EAL,
 484                                        "Hugepage %s is on socket %d\n",
 485                                        hugepg_tbl[i].filepath, socket_id);
 486#endif
 487                        }
 488                }
 489        }
 490
 491        if (hp_count < hpi->num_pages[0])
 492                goto error;
 493
 494        fclose(f);
 495        return 0;
 496
 497error:
 498        fclose(f);
 499        return -1;
 500}
 501
 502static int
 503cmp_physaddr(const void *a, const void *b)
 504{
 505#ifndef RTE_ARCH_PPC_64
 506        const struct hugepage_file *p1 = a;
 507        const struct hugepage_file *p2 = b;
 508#else
 509        /* PowerPC needs memory sorted in reverse order from x86 */
 510        const struct hugepage_file *p1 = b;
 511        const struct hugepage_file *p2 = a;
 512#endif
 513        if (p1->physaddr < p2->physaddr)
 514                return -1;
 515        else if (p1->physaddr > p2->physaddr)
 516                return 1;
 517        else
 518                return 0;
 519}
 520
 521/*
  522 * Uses mmap to create a shared memory area for storage of data.
  523 * Used in this file to store the hugepage file map on disk.
 524 */
 525static void *
 526create_shared_memory(const char *filename, const size_t mem_size)
 527{
 528        void *retval;
 529        int fd;
 530        const struct internal_config *internal_conf =
 531                eal_get_internal_configuration();
 532
 533        /* if no shared files mode is used, create anonymous memory instead */
 534        if (internal_conf->no_shconf) {
 535                retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
 536                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 537                if (retval == MAP_FAILED)
 538                        return NULL;
 539                return retval;
 540        }
 541
 542        fd = open(filename, O_CREAT | O_RDWR, 0600);
 543        if (fd < 0)
 544                return NULL;
 545        if (ftruncate(fd, mem_size) < 0) {
 546                close(fd);
 547                return NULL;
 548        }
 549        retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 550        close(fd);
 551        if (retval == MAP_FAILED)
 552                return NULL;
 553        return retval;
 554}
 555
 556/*
 557 * this copies *active* hugepages from one hugepage table to another.
 558 * destination is typically the shared memory.
 559 */
 560static int
 561copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
 562                const struct hugepage_file * src, int src_size)
 563{
 564        int src_pos, dst_pos = 0;
 565
 566        for (src_pos = 0; src_pos < src_size; src_pos++) {
 567                if (src[src_pos].orig_va != NULL) {
 568                        /* error on overflow attempt */
 569                        if (dst_pos == dest_size)
 570                                return -1;
 571                        memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
 572                        dst_pos++;
 573                }
 574        }
 575        return 0;
 576}
 577
 578static int
 579unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
 580                unsigned num_hp_info)
 581{
 582        unsigned socket, size;
 583        int page, nrpages = 0;
 584        const struct internal_config *internal_conf =
 585                eal_get_internal_configuration();
 586
 587        /* get total number of hugepages */
 588        for (size = 0; size < num_hp_info; size++)
 589                for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
 590                        nrpages +=
 591                        internal_conf->hugepage_info[size].num_pages[socket];
 592
 593        for (page = 0; page < nrpages; page++) {
 594                struct hugepage_file *hp = &hugepg_tbl[page];
 595
 596                if (hp->orig_va != NULL && unlink(hp->filepath)) {
 597                        RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
 598                                __func__, hp->filepath, strerror(errno));
 599                }
 600        }
 601        return 0;
 602}
 603
 604/*
 605 * unmaps hugepages that are not going to be used. since we originally allocate
 606 * ALL hugepages (not just those we need), additional unmapping needs to be done.
 607 */
 608static int
 609unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
 610                struct hugepage_info *hpi,
 611                unsigned num_hp_info)
 612{
 613        unsigned socket, size;
 614        int page, nrpages = 0;
 615        const struct internal_config *internal_conf =
 616                eal_get_internal_configuration();
 617
 618        /* get total number of hugepages */
 619        for (size = 0; size < num_hp_info; size++)
 620                for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
 621                        nrpages += internal_conf->hugepage_info[size].num_pages[socket];
 622
 623        for (size = 0; size < num_hp_info; size++) {
 624                for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
 625                        unsigned pages_found = 0;
 626
 627                        /* traverse until we have unmapped all the unused pages */
 628                        for (page = 0; page < nrpages; page++) {
 629                                struct hugepage_file *hp = &hugepg_tbl[page];
 630
 631                                /* find a page that matches the criteria */
 632                                if ((hp->size == hpi[size].hugepage_sz) &&
 633                                                (hp->socket_id == (int) socket)) {
 634
 635                                        /* if we skipped enough pages, unmap the rest */
 636                                        if (pages_found == hpi[size].num_pages[socket]) {
 637                                                uint64_t unmap_len;
 638
 639                                                unmap_len = hp->size;
 640
 641                                                /* get start addr and len of the remaining segment */
 642                                                munmap(hp->orig_va,
 643                                                        (size_t)unmap_len);
 644
 645                                                hp->orig_va = NULL;
 646                                                if (unlink(hp->filepath) == -1) {
 647                                                        RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
 648                                                                        __func__, hp->filepath, strerror(errno));
 649                                                        return -1;
 650                                                }
 651                                        } else {
  652                                                /* this page is needed; count it and keep it mapped */
 653                                                pages_found++;
 654                                        }
 655
 656                                } /* match page */
 657                        } /* foreach page */
 658                } /* foreach socket */
 659        } /* foreach pagesize */
 660
 661        return 0;
 662}
 663
 664static int
 665remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
 666{
 667        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 668        struct rte_memseg_list *msl;
 669        struct rte_fbarray *arr;
 670        int cur_page, seg_len;
 671        unsigned int msl_idx;
 672        int ms_idx;
 673        uint64_t page_sz;
 674        size_t memseg_len;
 675        int socket_id;
 676#ifndef RTE_ARCH_64
 677        const struct internal_config *internal_conf =
 678                eal_get_internal_configuration();
 679#endif
 680        page_sz = hugepages[seg_start].size;
 681        socket_id = hugepages[seg_start].socket_id;
 682        seg_len = seg_end - seg_start;
 683
 684        RTE_LOG(DEBUG, EAL, "Attempting to map %" PRIu64 "M on socket %i\n",
 685                        (seg_len * page_sz) >> 20ULL, socket_id);
 686
 687        /* find free space in memseg lists */
 688        for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
 689                bool empty;
 690                msl = &mcfg->memsegs[msl_idx];
 691                arr = &msl->memseg_arr;
 692
 693                if (msl->page_sz != page_sz)
 694                        continue;
 695                if (msl->socket_id != socket_id)
 696                        continue;
 697
 698                /* leave space for a hole if array is not empty */
 699                empty = arr->count == 0;
 700                ms_idx = rte_fbarray_find_next_n_free(arr, 0,
 701                                seg_len + (empty ? 0 : 1));
 702
 703                /* memseg list is full? */
 704                if (ms_idx < 0)
 705                        continue;
 706
 707                /* leave some space between memsegs, they are not IOVA
 708                 * contiguous, so they shouldn't be VA contiguous either.
 709                 */
 710                if (!empty)
 711                        ms_idx++;
 712                break;
 713        }
 714        if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
 715                RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
 716                                RTE_STR(RTE_MAX_MEMSEG_PER_TYPE),
 717                                RTE_STR(RTE_MAX_MEM_MB_PER_TYPE));
 718                return -1;
 719        }
 720
 721#ifdef RTE_ARCH_PPC_64
 722        /* for PPC64 we go through the list backwards */
 723        for (cur_page = seg_end - 1; cur_page >= seg_start;
 724                        cur_page--, ms_idx++) {
 725#else
 726        for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
 727#endif
 728                struct hugepage_file *hfile = &hugepages[cur_page];
 729                struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
 730                void *addr;
 731                int fd;
 732
 733                fd = open(hfile->filepath, O_RDWR);
 734                if (fd < 0) {
 735                        RTE_LOG(ERR, EAL, "Could not open '%s': %s\n",
 736                                        hfile->filepath, strerror(errno));
 737                        return -1;
 738                }
 739                /* set shared lock on the file. */
 740                if (flock(fd, LOCK_SH) < 0) {
 741                        RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
 742                                        hfile->filepath, strerror(errno));
 743                        close(fd);
 744                        return -1;
 745                }
 746                memseg_len = (size_t)page_sz;
 747                addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);
 748
 749                /* we know this address is already mmapped by memseg list, so
 750                 * using MAP_FIXED here is safe
 751                 */
 752                addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
 753                                MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
 754                if (addr == MAP_FAILED) {
 755                        RTE_LOG(ERR, EAL, "Couldn't remap '%s': %s\n",
 756                                        hfile->filepath, strerror(errno));
 757                        close(fd);
 758                        return -1;
 759                }
 760
 761                /* we have a new address, so unmap previous one */
 762#ifndef RTE_ARCH_64
 763                /* in 32-bit legacy mode, we have already unmapped the page */
 764                if (!internal_conf->legacy_mem)
 765                        munmap(hfile->orig_va, page_sz);
 766#else
 767                munmap(hfile->orig_va, page_sz);
 768#endif
 769
 770                hfile->orig_va = NULL;
 771                hfile->final_va = addr;
 772
  773                /* rewrite physical addresses in IOVA-as-VA mode */
 774                if (rte_eal_iova_mode() == RTE_IOVA_VA)
 775                        hfile->physaddr = (uintptr_t)addr;
 776
 777                /* set up memseg data */
 778                ms->addr = addr;
 779                ms->hugepage_sz = page_sz;
 780                ms->len = memseg_len;
 781                ms->iova = hfile->physaddr;
 782                ms->socket_id = hfile->socket_id;
 783                ms->nchannel = rte_memory_get_nchannel();
 784                ms->nrank = rte_memory_get_nrank();
 785
 786                rte_fbarray_set_used(arr, ms_idx);
 787
 788                /* store segment fd internally */
 789                if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
 790                        RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
 791                                rte_strerror(rte_errno));
 792        }
 793        RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
 794                        (seg_len * page_sz) >> 20, socket_id);
 795        return 0;
 796}
 797
 798static uint64_t
 799get_mem_amount(uint64_t page_sz, uint64_t max_mem)
 800{
 801        uint64_t area_sz, max_pages;
 802
 803        /* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
 804        max_pages = RTE_MAX_MEMSEG_PER_LIST;
 805        max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
 806
 807        area_sz = RTE_MIN(page_sz * max_pages, max_mem);
 808
 809        /* make sure the list isn't smaller than the page size */
 810        area_sz = RTE_MAX(area_sz, page_sz);
 811
 812        return RTE_ALIGN(area_sz, page_sz);
 813}
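     /*
      * Illustrative example, assuming common default build-time values such
      * as RTE_MAX_MEMSEG_PER_LIST = 8192 and RTE_MAX_MEM_MB_PER_LIST = 32768:
      * a 2 MB page list is capped at 8192 * 2 MB = 16 GB by the page count,
      * while a 1 GB page list is capped at 32 GB by the memory limit; the
      * result is then aligned up to the page size.
      */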
 814
 815static int
 816memseg_list_free(struct rte_memseg_list *msl)
 817{
 818        if (rte_fbarray_destroy(&msl->memseg_arr)) {
 819                RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
 820                return -1;
 821        }
 822        memset(msl, 0, sizeof(*msl));
 823        return 0;
 824}
 825
 826/*
 827 * Our VA space is not preallocated yet, so preallocate it here. We need to know
 828 * how many segments there are in order to map all pages into one address space,
 829 * and leave appropriate holes between segments so that rte_malloc does not
 830 * concatenate them into one big segment.
 831 *
 832 * we also need to unmap original pages to free up address space.
 833 */
 834static int __rte_unused
 835prealloc_segments(struct hugepage_file *hugepages, int n_pages)
 836{
 837        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 838        int cur_page, seg_start_page, end_seg, new_memseg;
 839        unsigned int hpi_idx, socket, i;
 840        int n_contig_segs, n_segs;
 841        int msl_idx;
 842        const struct internal_config *internal_conf =
 843                eal_get_internal_configuration();
 844
 845        /* before we preallocate segments, we need to free up our VA space.
 846         * we're not removing files, and we already have information about
 847         * PA-contiguousness, so it is safe to unmap everything.
 848         */
 849        for (cur_page = 0; cur_page < n_pages; cur_page++) {
 850                struct hugepage_file *hpi = &hugepages[cur_page];
 851                munmap(hpi->orig_va, hpi->size);
 852                hpi->orig_va = NULL;
 853        }
 854
 855        /* we cannot know how many page sizes and sockets we have discovered, so
 856         * loop over all of them
 857         */
 858        for (hpi_idx = 0; hpi_idx < internal_conf->num_hugepage_sizes;
 859                        hpi_idx++) {
 860                uint64_t page_sz =
 861                        internal_conf->hugepage_info[hpi_idx].hugepage_sz;
 862
 863                for (i = 0; i < rte_socket_count(); i++) {
 864                        struct rte_memseg_list *msl;
 865
 866                        socket = rte_socket_id_by_idx(i);
 867                        n_contig_segs = 0;
 868                        n_segs = 0;
 869                        seg_start_page = -1;
 870
 871                        for (cur_page = 0; cur_page < n_pages; cur_page++) {
 872                                struct hugepage_file *prev, *cur;
 873                                int prev_seg_start_page = -1;
 874
 875                                cur = &hugepages[cur_page];
 876                                prev = cur_page == 0 ? NULL :
 877                                                &hugepages[cur_page - 1];
 878
 879                                new_memseg = 0;
 880                                end_seg = 0;
 881
 882                                if (cur->size == 0)
 883                                        end_seg = 1;
 884                                else if (cur->socket_id != (int) socket)
 885                                        end_seg = 1;
 886                                else if (cur->size != page_sz)
 887                                        end_seg = 1;
 888                                else if (cur_page == 0)
 889                                        new_memseg = 1;
 890#ifdef RTE_ARCH_PPC_64
  891                                /* On the PPC64 architecture, mmap always starts
  892                                 * from higher addresses and works downwards, so
  893                                 * physical addresses are in descending order here.
 894                                 */
 895                                else if ((prev->physaddr - cur->physaddr) !=
 896                                                cur->size)
 897                                        new_memseg = 1;
 898#else
 899                                else if ((cur->physaddr - prev->physaddr) !=
 900                                                cur->size)
 901                                        new_memseg = 1;
 902#endif
 903                                if (new_memseg) {
 904                                        /* if we're already inside a segment,
 905                                         * new segment means end of current one
 906                                         */
 907                                        if (seg_start_page != -1) {
 908                                                end_seg = 1;
 909                                                prev_seg_start_page =
 910                                                                seg_start_page;
 911                                        }
 912                                        seg_start_page = cur_page;
 913                                }
 914
 915                                if (end_seg) {
 916                                        if (prev_seg_start_page != -1) {
 917                                                /* we've found a new segment */
 918                                                n_contig_segs++;
 919                                                n_segs += cur_page -
 920                                                        prev_seg_start_page;
 921                                        } else if (seg_start_page != -1) {
 922                                                /* we didn't find new segment,
 923                                                 * but did end current one
 924                                                 */
 925                                                n_contig_segs++;
 926                                                n_segs += cur_page -
 927                                                                seg_start_page;
 928                                                seg_start_page = -1;
 929                                                continue;
 930                                        } else {
 931                                                /* we're skipping this page */
 932                                                continue;
 933                                        }
 934                                }
 935                                /* segment continues */
 936                        }
 937                        /* check if we missed last segment */
 938                        if (seg_start_page != -1) {
 939                                n_contig_segs++;
 940                                n_segs += cur_page - seg_start_page;
 941                        }
 942
 943                        /* if no segments were found, do not preallocate */
 944                        if (n_segs == 0)
 945                                continue;
 946
 947                        /* we now have total number of pages that we will
 948                         * allocate for this segment list. add separator pages
 949                         * to the total count, and preallocate VA space.
 950                         */
 951                        n_segs += n_contig_segs - 1;
 952
 953                        /* now, preallocate VA space for these segments */
 954
 955                        /* first, find suitable memseg list for this */
 956                        for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
 957                                        msl_idx++) {
 958                                msl = &mcfg->memsegs[msl_idx];
 959
 960                                if (msl->base_va != NULL)
 961                                        continue;
 962                                break;
 963                        }
 964                        if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
 965                                RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
 966                                        RTE_STR(RTE_MAX_MEMSEG_LISTS));
 967                                return -1;
 968                        }
 969
 970                        /* now, allocate fbarray itself */
 971                        if (eal_memseg_list_init(msl, page_sz, n_segs,
 972                                        socket, msl_idx, true) < 0)
 973                                return -1;
 974
 975                        /* finally, allocate VA space */
 976                        if (eal_memseg_list_alloc(msl, 0) < 0) {
 977                                RTE_LOG(ERR, EAL, "Cannot preallocate 0x%"PRIx64"kB hugepages\n",
 978                                        page_sz >> 10);
 979                                return -1;
 980                        }
 981                }
 982        }
 983        return 0;
 984}
 985
 986/*
 987 * We cannot reallocate memseg lists on the fly because PPC64 stores pages
 988 * backwards, therefore we have to process the entire memseg first before
 989 * remapping it into memseg list VA space.
 990 */
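     /*
      * Pages are treated as part of the same segment only if they share a
      * page size and socket ID and their physical addresses are adjacent
      * (in reverse order on PPC64), which is what the checks below test for.
      */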
 991static int
 992remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
 993{
 994        int cur_page, seg_start_page, new_memseg, ret;
 995
 996        seg_start_page = 0;
 997        for (cur_page = 0; cur_page < n_pages; cur_page++) {
 998                struct hugepage_file *prev, *cur;
 999
1000                new_memseg = 0;
1001
1002                cur = &hugepages[cur_page];
1003                prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];
1004
1005                /* if size is zero, no more pages left */
1006                if (cur->size == 0)
1007                        break;
1008
1009                if (cur_page == 0)
1010                        new_memseg = 1;
1011                else if (cur->socket_id != prev->socket_id)
1012                        new_memseg = 1;
1013                else if (cur->size != prev->size)
1014                        new_memseg = 1;
1015#ifdef RTE_ARCH_PPC_64
 1016                /* On the PPC64 architecture, mmap always starts from higher
 1017                 * addresses and works downwards, so physical addresses are
 1018                 * in descending order here.
1019                 */
1020                else if ((prev->physaddr - cur->physaddr) != cur->size)
1021                        new_memseg = 1;
1022#else
1023                else if ((cur->physaddr - prev->physaddr) != cur->size)
1024                        new_memseg = 1;
1025#endif
1026
1027                if (new_memseg) {
1028                        /* if this isn't the first time, remap segment */
1029                        if (cur_page != 0) {
1030                                ret = remap_segment(hugepages, seg_start_page,
1031                                                cur_page);
1032                                if (ret != 0)
1033                                        return -1;
1034                        }
1035                        /* remember where we started */
1036                        seg_start_page = cur_page;
1037                }
1038                /* continuation of previous memseg */
1039        }
1040        /* we were stopped, but we didn't remap the last segment, do it now */
1041        if (cur_page != 0) {
1042                ret = remap_segment(hugepages, seg_start_page,
1043                                cur_page);
1044                if (ret != 0)
1045                        return -1;
1046        }
1047        return 0;
1048}
1049
1050static inline size_t
1051eal_get_hugepage_mem_size(void)
1052{
1053        uint64_t size = 0;
1054        unsigned i, j;
1055        struct internal_config *internal_conf =
1056                eal_get_internal_configuration();
1057
1058        for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
1059                struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
1060                if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
1061                        for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1062                                size += hpi->hugepage_sz * hpi->num_pages[j];
1063                        }
1064                }
1065        }
1066
1067        return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
1068}
1069
1070static struct sigaction huge_action_old;
1071static int huge_need_recover;
1072
1073static void
1074huge_register_sigbus(void)
1075{
1076        sigset_t mask;
1077        struct sigaction action;
1078
1079        sigemptyset(&mask);
1080        sigaddset(&mask, SIGBUS);
1081        action.sa_flags = 0;
1082        action.sa_mask = mask;
1083        action.sa_handler = huge_sigbus_handler;
1084
1085        huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
1086}
1087
1088static void
1089huge_recover_sigbus(void)
1090{
1091        if (huge_need_recover) {
1092                sigaction(SIGBUS, &huge_action_old, NULL);
1093                huge_need_recover = 0;
1094        }
1095}
1096
1097/*
 1098 * Prepare the physical memory mapping: fill the configuration structure
 1099 * with this information and return 0 on success.
1100 *  1. map N huge pages in separate files in hugetlbfs
1101 *  2. find associated physical addr
1102 *  3. find associated NUMA socket ID
1103 *  4. sort all huge pages by physical address
1104 *  5. remap these N huge pages in the correct order
1105 *  6. unmap the first mapping
1106 *  7. fill memsegs in configuration with contiguous zones
1107 */
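     /*
      * In terms of the helpers above, these steps roughly correspond to
      * map_all_hugepages() (1), find_physaddrs()/set_physaddrs() (2),
      * find_numasocket() (3), qsort() with cmp_physaddr() (4), and
      * remap_needed_hugepages()/remap_segment() together with the memseg
      * bookkeeping for steps 5-7.
      */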
1108static int
1109eal_legacy_hugepage_init(void)
1110{
1111        struct rte_mem_config *mcfg;
1112        struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
1113        struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
1114        struct internal_config *internal_conf =
1115                eal_get_internal_configuration();
1116
1117        uint64_t memory[RTE_MAX_NUMA_NODES];
1118
1119        unsigned hp_offset;
1120        int i, j;
1121        int nr_hugefiles, nr_hugepages = 0;
1122        void *addr;
1123
1124        memset(used_hp, 0, sizeof(used_hp));
1125
1126        /* get pointer to global configuration */
1127        mcfg = rte_eal_get_configuration()->mem_config;
1128
1129        /* hugetlbfs can be disabled */
1130        if (internal_conf->no_hugetlbfs) {
1131                void *prealloc_addr;
1132                size_t mem_sz;
1133                struct rte_memseg_list *msl;
1134                int n_segs, fd, flags;
1135#ifdef MEMFD_SUPPORTED
1136                int memfd;
1137#endif
1138                uint64_t page_sz;
1139
1140                /* nohuge mode is legacy mode */
1141                internal_conf->legacy_mem = 1;
1142
1143                /* nohuge mode is single-file segments mode */
1144                internal_conf->single_file_segments = 1;
1145
1146                /* create a memseg list */
1147                msl = &mcfg->memsegs[0];
1148
1149                mem_sz = internal_conf->memory;
1150                page_sz = RTE_PGSIZE_4K;
1151                n_segs = mem_sz / page_sz;
1152
1153                if (eal_memseg_list_init_named(
1154                                msl, "nohugemem", page_sz, n_segs, 0, true)) {
1155                        return -1;
1156                }
1157
1158                /* set up parameters for anonymous mmap */
1159                fd = -1;
1160                flags = MAP_PRIVATE | MAP_ANONYMOUS;
1161
1162#ifdef MEMFD_SUPPORTED
1163                /* create a memfd and store it in the segment fd table */
1164                memfd = memfd_create("nohuge", 0);
1165                if (memfd < 0) {
1166                        RTE_LOG(DEBUG, EAL, "Cannot create memfd: %s\n",
1167                                        strerror(errno));
1168                        RTE_LOG(DEBUG, EAL, "Falling back to anonymous map\n");
1169                } else {
1170                        /* we got an fd - now resize it */
1171                        if (ftruncate(memfd, internal_conf->memory) < 0) {
1172                                RTE_LOG(ERR, EAL, "Cannot resize memfd: %s\n",
1173                                                strerror(errno));
1174                                RTE_LOG(ERR, EAL, "Falling back to anonymous map\n");
1175                                close(memfd);
1176                        } else {
1177                                /* creating memfd-backed file was successful.
1178                                 * we want changes to memfd to be visible to
1179                                 * other processes (such as vhost backend), so
1180                                 * map it as shared memory.
1181                                 */
1182                                RTE_LOG(DEBUG, EAL, "Using memfd for anonymous memory\n");
1183                                fd = memfd;
1184                                flags = MAP_SHARED;
1185                        }
1186                }
1187#endif
 1188                /* preallocate address space for the memory, so that it can
 1189                 * fit within the DMA mask.
1190                 */
1191                if (eal_memseg_list_alloc(msl, 0)) {
1192                        RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
1193                        return -1;
1194                }
1195
1196                prealloc_addr = msl->base_va;
1197                addr = mmap(prealloc_addr, mem_sz, PROT_READ | PROT_WRITE,
1198                                flags | MAP_FIXED, fd, 0);
1199                if (addr == MAP_FAILED || addr != prealloc_addr) {
1200                        RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
1201                                        strerror(errno));
1202                        munmap(prealloc_addr, mem_sz);
1203                        return -1;
1204                }
1205
1206                /* we're in single-file segments mode, so only the segment list
1207                 * fd needs to be set up.
1208                 */
1209                if (fd != -1) {
1210                        if (eal_memalloc_set_seg_list_fd(0, fd) < 0) {
1211                                RTE_LOG(ERR, EAL, "Cannot set up segment list fd\n");
1212                                /* not a serious error, proceed */
1213                        }
1214                }
1215
1216                eal_memseg_list_populate(msl, addr, n_segs);
1217
1218                if (mcfg->dma_maskbits &&
1219                    rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1220                        RTE_LOG(ERR, EAL,
1221                                "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
1222                                __func__);
1223                        if (rte_eal_iova_mode() == RTE_IOVA_VA &&
1224                            rte_eal_using_phys_addrs())
1225                                RTE_LOG(ERR, EAL,
1226                                        "%s(): Please try initializing EAL with --iova-mode=pa parameter.\n",
1227                                        __func__);
1228                        goto fail;
1229                }
1230                return 0;
1231        }
1232
1233        /* calculate total number of hugepages available. at this point we haven't
 1234         * yet started sorting them, so they are all on socket 0 */
1235        for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
 1236                /* meanwhile, also initialize the hugepage sizes in used_hp */
1237                used_hp[i].hugepage_sz = internal_conf->hugepage_info[i].hugepage_sz;
1238
1239                nr_hugepages += internal_conf->hugepage_info[i].num_pages[0];
1240        }
1241
1242        /*
1243         * allocate a memory area for hugepage table.
 1244         * this isn't shared memory yet. because we still need some
 1245         * processing done on these pages, shared memory will be created
1246         * at a later stage.
1247         */
1248        tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
1249        if (tmp_hp == NULL)
1250                goto fail;
1251
1252        memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
1253
1254        hp_offset = 0; /* where we start the current page size entries */
1255
1256        huge_register_sigbus();
1257
1258        /* make a copy of socket_mem, needed for balanced allocation. */
1259        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1260                memory[i] = internal_conf->socket_mem[i];
1261
1262        /* map all hugepages and sort them */
1263        for (i = 0; i < (int)internal_conf->num_hugepage_sizes; i++) {
1264                unsigned pages_old, pages_new;
1265                struct hugepage_info *hpi;
1266
1267                /*
1268                 * we don't yet mark hugepages as used at this stage, so
 1269                 * we just map all hugepages available to the system;
 1270                 * all hugepages are still located on socket 0
1271                 */
1272                hpi = &internal_conf->hugepage_info[i];
1273
1274                if (hpi->num_pages[0] == 0)
1275                        continue;
1276
1277                /* map all hugepages available */
1278                pages_old = hpi->num_pages[0];
1279                pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory);
1280                if (pages_new < pages_old) {
1281                        RTE_LOG(DEBUG, EAL,
1282                                "%d not %d hugepages of size %u MB allocated\n",
1283                                pages_new, pages_old,
1284                                (unsigned)(hpi->hugepage_sz / 0x100000));
1285
1286                        int pages = pages_old - pages_new;
1287
1288                        nr_hugepages -= pages;
1289                        hpi->num_pages[0] = pages_new;
1290                        if (pages_new == 0)
1291                                continue;
1292                }
1293
1294                if (rte_eal_using_phys_addrs() &&
1295                                rte_eal_iova_mode() != RTE_IOVA_VA) {
1296                        /* find physical addresses for each hugepage */
1297                        if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1298                                RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
1299                                        "for %u MB pages\n",
1300                                        (unsigned int)(hpi->hugepage_sz / 0x100000));
1301                                goto fail;
1302                        }
1303                } else {
1304                        /* set physical addresses for each hugepage */
1305                        if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1306                                RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
1307                                        "for %u MB pages\n",
1308                                        (unsigned int)(hpi->hugepage_sz / 0x100000));
1309                                goto fail;
1310                        }
1311                }
1312
1313                if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
1314                        RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
1315                                        (unsigned)(hpi->hugepage_sz / 0x100000));
1316                        goto fail;
1317                }
1318
1319                qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
1320                      sizeof(struct hugepage_file), cmp_physaddr);
1321
 1322                /* we have processed a number of hugepages of this size, so advance the offset */
1323                hp_offset += hpi->num_pages[0];
1324        }
1325
1326        huge_recover_sigbus();
1327
1328        if (internal_conf->memory == 0 && internal_conf->force_sockets == 0)
1329                internal_conf->memory = eal_get_hugepage_mem_size();
1330
1331        nr_hugefiles = nr_hugepages;
1332
1333
1334        /* clean out the numbers of pages */
1335        for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++)
1336                for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
1337                        internal_conf->hugepage_info[i].num_pages[j] = 0;
1338
1339        /* get hugepages for each socket */
1340        for (i = 0; i < nr_hugefiles; i++) {
1341                int socket = tmp_hp[i].socket_id;
1342
1343                /* find a hugepage info with right size and increment num_pages */
1344                const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
1345                                (int)internal_conf->num_hugepage_sizes);
1346                for (j = 0; j < nb_hpsizes; j++) {
1347                        if (tmp_hp[i].size ==
1348                                        internal_conf->hugepage_info[j].hugepage_sz) {
1349                                internal_conf->hugepage_info[j].num_pages[socket]++;
1350                        }
1351                }
1352        }
1353
1354        /* make a copy of socket_mem, needed for number of pages calculation */
1355        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1356                memory[i] = internal_conf->socket_mem[i];
1357
1358        /* calculate final number of pages */
1359        nr_hugepages = eal_dynmem_calc_num_pages_per_socket(memory,
1360                        internal_conf->hugepage_info, used_hp,
1361                        internal_conf->num_hugepage_sizes);
1362
1363        /* error if not enough memory available */
1364        if (nr_hugepages < 0)
1365                goto fail;
1366
1367        /* report the per-socket page counts that will be requested */
1368        for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
1369                for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1370                        if (used_hp[i].num_pages[j] > 0) {
1371                                RTE_LOG(DEBUG, EAL,
1372                                        "Requesting %u pages of size %uMB"
1373                                        " from socket %i\n",
1374                                        used_hp[i].num_pages[j],
1375                                        (unsigned)
1376                                        (used_hp[i].hugepage_sz / 0x100000),
1377                                        j);
1378                        }
1379                }
1380        }
1381
1382        /* create shared memory */
1383        hugepage = create_shared_memory(eal_hugepage_data_path(),
1384                        nr_hugefiles * sizeof(struct hugepage_file));
1385
1386        if (hugepage == NULL) {
1387                RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
1388                goto fail;
1389        }
1390        memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
1391
1392        /*
1393         * unmap pages that we won't need (looks at used_hp).
1394         * also, sets final_va to NULL on pages that were unmapped.
1395         */
1396        if (unmap_unneeded_hugepages(tmp_hp, used_hp,
1397                        internal_conf->num_hugepage_sizes) < 0) {
1398                RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
1399                goto fail;
1400        }
1401
1402        /*
1403         * copy the hugepage file entries from the malloc'd temporary array
1404         * into the actual shared memory. only hugepages with a non-NULL
1405         * orig_va are copied, and the copy has overflow protection.
1406         */
1407        if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
1408                        tmp_hp, nr_hugefiles) < 0) {
1409                RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
1410                goto fail;
1411        }
1412
1413#ifndef RTE_ARCH_64
1414        /* for legacy 32-bit mode, we did not preallocate VA space, so do it */
1415        if (internal_conf->legacy_mem &&
1416                        prealloc_segments(hugepage, nr_hugefiles)) {
1417                RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
1418                goto fail;
1419        }
1420#endif
1421
1422        /* remap all pages we do need into memseg list VA space, so that those
1423         * pages become first-class citizens in the DPDK memory subsystem
1424         */
1425        if (remap_needed_hugepages(hugepage, nr_hugefiles)) {
1426                RTE_LOG(ERR, EAL, "Couldn't remap hugepage files into memseg lists\n");
1427                goto fail;
1428        }
1429
1430        /* unlink the hugepage backing files if --huge-unlink was requested */
1431        if (internal_conf->hugepage_unlink &&
1432                unlink_hugepage_files(tmp_hp, internal_conf->num_hugepage_sizes) < 0) {
1433                RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
1434                goto fail;
1435        }
1436
1437        /* free the temporary hugepage table */
1438        free(tmp_hp);
1439        tmp_hp = NULL;
1440
1441        munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1442        hugepage = NULL;
1443
1444        /* we're not going to allocate more pages, so release VA space for
1445         * unused memseg lists
1446         */
1447        for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
1448                struct rte_memseg_list *msl = &mcfg->memsegs[i];
1449                size_t mem_sz;
1450
1451                /* skip inactive lists */
1452                if (msl->base_va == NULL)
1453                        continue;
1454                /* skip lists where there is at least one page allocated */
1455                if (msl->memseg_arr.count > 0)
1456                        continue;
1457                /* this is an unused list, deallocate it */
1458                mem_sz = msl->len;
1459                munmap(msl->base_va, mem_sz);
1460                msl->base_va = NULL;
1461                msl->heap = 0;
1462
1463                /* destroy backing fbarray */
1464                rte_fbarray_destroy(&msl->memseg_arr);
1465        }
1466
1467        if (mcfg->dma_maskbits &&
1468            rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1469                RTE_LOG(ERR, EAL,
1470                        "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
1471                        __func__);
1472                goto fail;
1473        }
1474
1475        return 0;
1476
1477fail:
1478        huge_recover_sigbus();
1479        free(tmp_hp);
1480        if (hugepage != NULL)
1481                munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1482
1483        return -1;
1484}
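
/*
 * Editor's note: the block below is an illustrative sketch, not part of the
 * original file. It shows how a caller with device addressing limits could
 * use the public rte_mem_check_dma_mask() API, i.e. the same mechanism as
 * the mcfg->dma_maskbits check above. The EAL_MEMORY_EXAMPLES guard, the
 * example_* name and the 39-bit mask are assumptions made for illustration.
 */
#ifdef EAL_MEMORY_EXAMPLES
static int
example_check_device_dma_limit(void)
{
        /* fail if any hugepage IOVA lies outside a 39-bit address space */
        if (rte_mem_check_dma_mask(39) != 0)
                return -1;
        return 0;
}
#endif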
1485
1486/*
1487 * uses fstat to report the size of a file on disk
1488 */
1489static off_t
1490getFileSize(int fd)
1491{
1492        struct stat st;
1493        if (fstat(fd, &st) < 0)
1494                return 0;
1495        return st.st_size;
1496}
1497
1498/*
1499 * This creates the memory mappings in the secondary process to match that of
1500 * the server process. It goes through each memory segment in the DPDK runtime
1501 * configuration and finds the hugepages which form that segment, mapping them
1502 * in order to form a contiguous block in the virtual memory space
1503 */
1504static int
1505eal_legacy_hugepage_attach(void)
1506{
1507        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1508        struct hugepage_file *hp = NULL;
1509        unsigned int num_hp = 0;
1510        unsigned int i = 0;
1511        unsigned int cur_seg;
1512        off_t size = 0;
1513        int fd, fd_hugepage = -1;
1514
1515        if (aslr_enabled() > 0) {
1516                RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
1517                                "(ASLR) is enabled in the kernel.\n");
1518                RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
1519                                "into secondary processes\n");
1520        }
1521
1522        fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
1523        if (fd_hugepage < 0) {
1524                RTE_LOG(ERR, EAL, "Could not open %s\n",
1525                                eal_hugepage_data_path());
1526                goto error;
1527        }
1528
1529        size = getFileSize(fd_hugepage);
1530        hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
1531        if (hp == MAP_FAILED) {
1532                RTE_LOG(ERR, EAL, "Could not mmap %s\n",
1533                                eal_hugepage_data_path());
1534                goto error;
1535        }
1536
1537        num_hp = size / sizeof(struct hugepage_file);
1538        RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
1539
1540        /* map all segments into memory to make sure we get their addresses.
1541         * the segments themselves are already in the memseg list (which is
1542         * shared and has its VA space preallocated), so we just need to map
1543         * everything at the correct addresses.
1544         */
1545        for (i = 0; i < num_hp; i++) {
1546                struct hugepage_file *hf = &hp[i];
1547                size_t map_sz = hf->size;
1548                void *map_addr = hf->final_va;
1549                int msl_idx, ms_idx;
1550                struct rte_memseg_list *msl;
1551                struct rte_memseg *ms;
1552
1553                /* if size is zero, no more pages left */
1554                if (map_sz == 0)
1555                        break;
1556
1557                fd = open(hf->filepath, O_RDWR);
1558                if (fd < 0) {
1559                        RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
1560                                hf->filepath, strerror(errno));
1561                        goto error;
1562                }
1563
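		/* map at the exact VA recorded by the primary process; MAP_FIXED
		 * is safe here because this range lies within the preallocated
		 * memseg list VA space described above
		 */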
1564                map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE,
1565                                MAP_SHARED | MAP_FIXED, fd, 0);
1566                if (map_addr == MAP_FAILED) {
1567                        RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
1568                                hf->filepath, strerror(errno));
1569                        goto fd_error;
1570                }
1571
1572                /* take a shared lock on the file so it stays marked as in use */
1573                if (flock(fd, LOCK_SH) < 0) {
1574                        RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
1575                                __func__, strerror(errno));
1576                        goto mmap_error;
1577                }
1578
1579                /* find segment data */
1580                msl = rte_mem_virt2memseg_list(map_addr);
1581                if (msl == NULL) {
1582                        RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg list\n",
1583                                __func__);
1584                        goto mmap_error;
1585                }
1586                ms = rte_mem_virt2memseg(map_addr, msl);
1587                if (ms == NULL) {
1588                        RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg\n",
1589                                __func__);
1590                        goto mmap_error;
1591                }
1592
1593                msl_idx = msl - mcfg->memsegs;
1594                ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
1595                if (ms_idx < 0) {
1596                        RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg idx\n",
1597                                __func__);
1598                        goto mmap_error;
1599                }
1600
1601                /* store segment fd internally */
1602                if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
1603                        RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
1604                                rte_strerror(rte_errno));
1605        }
1606        /* unmap the hugepage config file, since we are done using it */
1607        munmap(hp, size);
1608        close(fd_hugepage);
1609        return 0;
1610
1611mmap_error:
1612        munmap(hp[i].final_va, hp[i].size);
1613fd_error:
1614        close(fd);
1615error:
1616        /* unwind mmap's done so far */
1617        for (cur_seg = 0; cur_seg < i; cur_seg++)
1618                munmap(hp[cur_seg].final_va, hp[cur_seg].size);
1619
1620        if (hp != NULL && hp != MAP_FAILED)
1621                munmap(hp, size);
1622        if (fd_hugepage >= 0)
1623                close(fd_hugepage);
1624        return -1;
1625}
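
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * The rte_mem_virt2memseg_list()/rte_mem_virt2memseg() lookups used above
 * are public API, so application code can resolve any pointer into DPDK
 * memory back to its segment metadata. EAL_MEMORY_EXAMPLES and the
 * example_* name are assumptions made for illustration.
 */
#ifdef EAL_MEMORY_EXAMPLES
static void
example_describe_addr(const void *addr)
{
        const struct rte_memseg_list *msl = rte_mem_virt2memseg_list(addr);
        const struct rte_memseg *ms;

        if (msl == NULL)
                return; /* address is not managed by DPDK */
        ms = rte_mem_virt2memseg(addr, msl);
        if (ms == NULL)
                return;
        RTE_LOG(DEBUG, EAL,
                "%p: page size %" PRIu64 ", socket %d, iova 0x%" PRIx64 "\n",
                addr, ms->hugepage_sz, (int)ms->socket_id, (uint64_t)ms->iova);
}
#endif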
1626
1627static int
1628eal_hugepage_attach(void)
1629{
1630        if (eal_memalloc_sync_with_primary()) {
1631                RTE_LOG(ERR, EAL, "Could not map memory from primary process\n");
1632                if (aslr_enabled() > 0)
1633                        RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes\n");
1634                return -1;
1635        }
1636        return 0;
1637}
1638
1639int
1640rte_eal_hugepage_init(void)
1641{
1642        const struct internal_config *internal_conf =
1643                eal_get_internal_configuration();
1644
1645        return internal_conf->legacy_mem ?
1646                        eal_legacy_hugepage_init() :
1647                        eal_dynmem_hugepage_init();
1648}
1649
1650int
1651rte_eal_hugepage_attach(void)
1652{
1653        const struct internal_config *internal_conf =
1654                eal_get_internal_configuration();
1655
1656        return internal_conf->legacy_mem ?
1657                        eal_legacy_hugepage_attach() :
1658                        eal_hugepage_attach();
1659}
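
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * The attach path above runs when EAL starts as a secondary process, which
 * an application selects with the standard --proc-type option; the argv
 * below, the EAL_MEMORY_EXAMPLES guard and the example_* name are only
 * assumptions made for illustration.
 */
#ifdef EAL_MEMORY_EXAMPLES
static int
example_secondary_init(void)
{
        char *argv[] = {"example", "--proc-type=secondary"};

        /* for secondary processes, rte_eal_init() ends up in the
         * rte_eal_hugepage_attach() path to map the primary's hugepage memory
         */
        return rte_eal_init((int)RTE_DIM(argv), argv);
}
#endif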
1660
1661int
1662rte_eal_using_phys_addrs(void)
1663{
1664        if (phys_addrs_available == -1) {
1665                uint64_t tmp = 0;
1666
1667                if (rte_eal_has_hugepages() != 0 &&
1668                    rte_mem_virt2phy(&tmp) != RTE_BAD_PHYS_ADDR)
1669                        phys_addrs_available = 1;
1670                else
1671                        phys_addrs_available = 0;
1672        }
1673        return phys_addrs_available;
1674}
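
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * It shows how the check above relates to obtaining a usable bus address:
 * rte_mem_virt2iova() resolves to the VA itself in IOVA-as-VA mode and
 * otherwise needs the physical-address access reported by
 * rte_eal_using_phys_addrs(). EAL_MEMORY_EXAMPLES and the example_* name
 * are assumptions made for illustration.
 */
#ifdef EAL_MEMORY_EXAMPLES
static rte_iova_t
example_get_bus_addr(const void *addr)
{
        if (rte_eal_iova_mode() != RTE_IOVA_VA && !rte_eal_using_phys_addrs())
                return RTE_BAD_IOVA; /* no way to obtain a usable IOVA */
        return rte_mem_virt2iova(addr);
}
#endif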
1675
1676static int __rte_unused
1677memseg_primary_init_32(void)
1678{
1679        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1680        int active_sockets, hpi_idx, msl_idx = 0;
1681        unsigned int socket_id, i;
1682        struct rte_memseg_list *msl;
1683        uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
1684        uint64_t max_mem;
1685        struct internal_config *internal_conf =
1686                eal_get_internal_configuration();
1687
1688        /* no-huge does not need this at all */
1689        if (internal_conf->no_hugetlbfs)
1690                return 0;
1691
1692        /* this is a giant hack, but desperate times call for desperate
1693         * measures. in legacy 32-bit mode, we cannot preallocate VA space,
1694         * because having upwards of 2 gigabytes of VA space already mapped will
1695         * interfere with our ability to map and sort hugepages.
1696         *
1697         * therefore, in legacy 32-bit mode, we will be initializing memseg
1698         * lists much later - in eal_memory.c, right after we unmap all the
1699         * unneeded pages. this will not affect secondary processes, as those
1700         * should be able to mmap the space without (too many) problems.
1701         */
1702        if (internal_conf->legacy_mem)
1703                return 0;
1704
1705        /* 32-bit mode is a very special case. we cannot know in advance where
1706         * the user will want to allocate their memory, so we have to do some
1707         * heuristics.
1708         */
1709        active_sockets = 0;
1710        total_requested_mem = 0;
1711        if (internal_conf->force_sockets)
1712                for (i = 0; i < rte_socket_count(); i++) {
1713                        uint64_t mem;
1714
1715                        socket_id = rte_socket_id_by_idx(i);
1716                        mem = internal_conf->socket_mem[socket_id];
1717
1718                        if (mem == 0)
1719                                continue;
1720
1721                        active_sockets++;
1722                        total_requested_mem += mem;
1723                }
1724        else
1725                total_requested_mem = internal_conf->memory;
1726
1727        max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
1728        if (total_requested_mem > max_mem) {
1729                RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can use at most %uM of memory\n",
1730                                (unsigned int)(max_mem >> 20));
1731                return -1;
1732        }
1733        total_extra_mem = max_mem - total_requested_mem;
1734        extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
1735                        total_extra_mem / active_sockets;
1736
1737        /* the allocation logic is a little bit convoluted, but here's how it
1738         * works, in a nutshell:
1739         *  - if the user hasn't specified which sockets to allocate memory on
1740         *    via --socket-mem, we allocate all memory on the main lcore's socket.
1741         *  - if the user has specified sockets to allocate memory on, there may
1742         *    be some "unused" memory left (e.g. if --socket-mem does not add up
1743         *    to the full 2 gigabytes), so spread it equally across all sockets
1744         *    in use (a worked example follows this comment).
1745         *
1746         * page sizes are sorted by size in descending order, so we can safely
1747         * assume that we dispense with bigger page sizes first.
1748         */
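        /*
         * Worked example (illustrative numbers only, assuming max_mem comes
         * to the 2 gigabytes mentioned above): with --socket-mem=512,256 we
         * get active_sockets = 2 and total_requested_mem = 768M, so
         * total_extra_mem = 2048M - 768M = 1280M and extra_mem_per_socket =
         * 640M; the loop below will then try to preallocate up to
         * 512M + 640M = 1152M on socket 0 and 256M + 640M = 896M on socket 1.
         */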
1749
1750        /* create memseg lists */
1751        for (i = 0; i < rte_socket_count(); i++) {
1752                int hp_sizes = (int) internal_conf->num_hugepage_sizes;
1753                uint64_t max_socket_mem, cur_socket_mem;
1754                unsigned int main_lcore_socket;
1755                struct rte_config *cfg = rte_eal_get_configuration();
1756                bool skip;
1757
1758                socket_id = rte_socket_id_by_idx(i);
1759
1760#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1761                /* we can still sort pages by socket in legacy mode */
1762                if (!internal_conf->legacy_mem && socket_id > 0)
1763                        break;
1764#endif
1765
1766                /* if we didn't specifically request memory on this socket */
1767                skip = active_sockets != 0 &&
1768                                internal_conf->socket_mem[socket_id] == 0;
1769                /* ...or if we didn't specifically request memory on *any*
1770                 * socket, and this is not main lcore
1771                 */
1772                main_lcore_socket = rte_lcore_to_socket_id(cfg->main_lcore);
1773                skip |= active_sockets == 0 && socket_id != main_lcore_socket;
1774
1775                if (skip) {
1776                        RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
1777                                        socket_id);
1778                        continue;
1779                }
1780
1781                /* max amount of memory on this socket */
1782                max_socket_mem = (active_sockets != 0 ?
1783                                        internal_conf->socket_mem[socket_id] :
1784                                        internal_conf->memory) +
1785                                        extra_mem_per_socket;
1786                cur_socket_mem = 0;
1787
1788                for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
1789                        uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
1790                        uint64_t hugepage_sz;
1791                        struct hugepage_info *hpi;
1792                        int type_msl_idx, max_segs, total_segs = 0;
1793
1794                        hpi = &internal_conf->hugepage_info[hpi_idx];
1795                        hugepage_sz = hpi->hugepage_sz;
1796
1797                        /* check if pages are actually available */
1798                        if (hpi->num_pages[socket_id] == 0)
1799                                continue;
1800
1801                        max_segs = RTE_MAX_MEMSEG_PER_TYPE;
1802                        max_pagesz_mem = max_socket_mem - cur_socket_mem;
1803
1804                        /* make it multiple of page size */
1805                        max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
1806                                        hugepage_sz);
1807
1808                        RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
1809                                        "%" PRIu64 "M on socket %i\n",
1810                                        max_pagesz_mem >> 20, socket_id);
1811
1812                        type_msl_idx = 0;
1813                        while (cur_pagesz_mem < max_pagesz_mem &&
1814                                        total_segs < max_segs) {
1815                                uint64_t cur_mem;
1816                                unsigned int n_segs;
1817
1818                                if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
1819                                        RTE_LOG(ERR, EAL,
1820                                                "No more space in memseg lists, please increase %s\n",
1821                                                RTE_STR(RTE_MAX_MEMSEG_LISTS));
1822                                        return -1;
1823                                }
1824
1825                                msl = &mcfg->memsegs[msl_idx];
1826
1827                                cur_mem = get_mem_amount(hugepage_sz,
1828                                                max_pagesz_mem);
1829                                n_segs = cur_mem / hugepage_sz;
1830
1831                                if (eal_memseg_list_init(msl, hugepage_sz,
1832                                                n_segs, socket_id, type_msl_idx,
1833                                                true)) {
1834                                        /* failing to allocate a memseg list is
1835                                         * a serious error.
1836                                         */
1837                                        RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
1838                                        return -1;
1839                                }
1840
1841                                if (eal_memseg_list_alloc(msl, 0)) {
1842                                        /* if we couldn't allocate VA space, we
1843                                         * can try with smaller page sizes.
1844                                         */
1845                                        RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
1846                                        /* deallocate memseg list */
1847                                        if (memseg_list_free(msl))
1848                                                return -1;
1849                                        break;
1850                                }
1851
1852                                total_segs += msl->memseg_arr.len;
1853                                cur_pagesz_mem = total_segs * hugepage_sz;
1854                                type_msl_idx++;
1855                                msl_idx++;
1856                        }
1857                        cur_socket_mem += cur_pagesz_mem;
1858                }
1859                if (cur_socket_mem == 0) {
1860                        RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
1861                                socket_id);
1862                        return -1;
1863                }
1864        }
1865
1866        return 0;
1867}
1868
1869static int __rte_unused
1870memseg_primary_init(void)
1871{
1872        return eal_dynmem_memseg_lists_init();
1873}
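
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * Once the memseg lists are initialized, the public rte_memseg_walk() API is
 * the usual way to inspect them; this example just totals the memory that
 * ended up on each NUMA socket. EAL_MEMORY_EXAMPLES and the example_* names
 * are assumptions made for illustration.
 */
#ifdef EAL_MEMORY_EXAMPLES
static int
example_sum_socket_mem(const struct rte_memseg_list *msl,
                const struct rte_memseg *ms, void *arg)
{
        uint64_t *per_socket = arg; /* array of RTE_MAX_NUMA_NODES entries */

        RTE_SET_USED(msl);
        if (ms->socket_id >= 0 && ms->socket_id < RTE_MAX_NUMA_NODES)
                per_socket[ms->socket_id] += ms->len;
        return 0; /* a non-zero return would stop the walk early */
}

static void
example_report_socket_mem(void)
{
        uint64_t per_socket[RTE_MAX_NUMA_NODES] = {0};
        int i;

        rte_memseg_walk(example_sum_socket_mem, per_socket);
        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
                if (per_socket[i] != 0)
                        RTE_LOG(DEBUG, EAL, "socket %d: %" PRIu64 "M\n",
                                i, per_socket[i] >> 20);
}
#endif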
1874
1875static int
1876memseg_secondary_init(void)
1877{
1878        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1879        int msl_idx = 0;
1880        struct rte_memseg_list *msl;
1881
1882        for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
1883
1884                msl = &mcfg->memsegs[msl_idx];
1885
1886                /* skip empty memseg lists */
1887                if (msl->memseg_arr.len == 0)
1888                        continue;
1889
1890                if (rte_fbarray_attach(&msl->memseg_arr)) {
1891                        RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
1892                        return -1;
1893                }
1894
1895                /* preallocate VA space */
1896                if (eal_memseg_list_alloc(msl, 0)) {
1897                        RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
1898                        return -1;
1899                }
1900        }
1901
1902        return 0;
1903}
1904
1905int
1906rte_eal_memseg_init(void)
1907{
1908        /* increase rlimit to maximum */
1909        struct rlimit lim;
1910
1911#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1912        const struct internal_config *internal_conf =
1913                eal_get_internal_configuration();
1914#endif
1915        if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
1916                /* set limit to maximum */
1917                lim.rlim_cur = lim.rlim_max;
1918
1919                if (setrlimit(RLIMIT_NOFILE, &lim) < 0) {
1920                        RTE_LOG(DEBUG, EAL, "Setting maximum number of open files failed: %s\n",
1921                                        strerror(errno));
1922                } else {
1923                        RTE_LOG(DEBUG, EAL, "Setting maximum number of open files to %"
1924                                        PRIu64 "\n",
1925                                        (uint64_t)lim.rlim_cur);
1926                }
1927        } else {
1928                RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
1929        }
1930#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1931        if (!internal_conf->legacy_mem && rte_socket_count() > 1) {
1932                RTE_LOG(WARNING, EAL, "DPDK is running on a NUMA system, but is compiled without NUMA support.\n");
1933                RTE_LOG(WARNING, EAL, "This will have adverse consequences for performance and usability.\n");
1934                RTE_LOG(WARNING, EAL, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.\n");
1935        }
1936#endif
1937
1938        return rte_eal_process_type() == RTE_PROC_PRIMARY ?
1939#ifndef RTE_ARCH_64
1940                        memseg_primary_init_32() :
1941#else
1942                        memseg_primary_init() :
1943#endif
1944                        memseg_secondary_init();
1945}
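
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * The RLIMIT_NOFILE bump above generalizes to any resource limit; this
 * helper raises the soft limit to the hard limit and is shown purely as a
 * usage example of getrlimit()/setrlimit(). EAL_MEMORY_EXAMPLES and the
 * example_* name are assumptions made for illustration.
 */
#ifdef EAL_MEMORY_EXAMPLES
static int
example_raise_rlimit(int resource)
{
        struct rlimit lim;

        if (getrlimit(resource, &lim) != 0)
                return -1;
        lim.rlim_cur = lim.rlim_max; /* raise soft limit up to the hard limit */
        return setrlimit(resource, &lim);
}
#endif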
1946