linux/arch/ia64/kernel/uncached.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * a granule at a time. Node awareness is implemented by having a pool
 * of pages per node.
 */
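/*
 * Roadmap: uncached_build_memmap() seeds each node's pool with EFI
 * spill pages at boot; at runtime uncached_add_chunk() converts one
 * cached granule to uncached whenever uncached_alloc_page() finds a
 * node's pool exhausted.
 */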

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/nmi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
        struct gen_pool *pool;
        struct mutex add_chunk_mutex;   /* serialize adding a converted chunk */
        int nchunks_added;              /* #of converted chunks added to pool */
        atomic_t status;                /* smp called function's return status */
};

#define MAX_CONVERTED_CHUNKS_PER_NODE   2

struct uncached_pool uncached_pools[MAX_NUMNODES];


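/*
 * The two IPI handlers below run on every other CPU via
 * smp_call_function(). A cross-CPU call cannot return a value, so any
 * PAL failure is recorded by bumping uc_pool->status, which the caller
 * zeroes beforehand and checks afterwards.
 */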
static void uncached_ipi_visibility(void *data)
{
        int status;
        struct uncached_pool *uc_pool = (struct uncached_pool *)data;

        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
        if ((status != PAL_VISIBILITY_OK) &&
            (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
                atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
        int status;
        struct uncached_pool *uc_pool = (struct uncached_pool *)data;

        status = ia64_pal_mc_drain();
        if (status != PAL_STATUS_SUCCESS)
                atomic_inc(&uc_pool->status);
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory
 * pages and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
        struct page *page;
        int status, i, nchunks_added = uc_pool->nchunks_added;
        unsigned long c_addr, uc_addr;

        if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
                return -1;      /* interrupted by a signal */

        if (uc_pool->nchunks_added > nchunks_added) {
                /* someone added a new chunk while we were waiting */
                mutex_unlock(&uc_pool->add_chunk_mutex);
                return 0;
        }

        if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
                mutex_unlock(&uc_pool->add_chunk_mutex);
                return -1;
        }

        /* attempt to allocate a granule's worth of cached memory pages */

        page = __alloc_pages_node(nid,
                                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
                                IA64_GRANULE_SHIFT-PAGE_SHIFT);
        if (!page) {
                mutex_unlock(&uc_pool->add_chunk_mutex);
                return -1;
        }

        /* convert the memory pages from cached to uncached */

        c_addr = (unsigned long)page_address(page);
        uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
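        /*
         * Worked example of the arithmetic above (a sketch; a granule
         * is typically 16MB, i.e. IA64_GRANULE_SHIFT == 24). Both
         * addresses are identity mappings of the same physical granule:
         *
         *      c_addr  = PAGE_OFFSET            + paddr  (cached region)
         *      uc_addr = __IA64_UNCACHED_OFFSET + paddr  (uncached region)
         *
         * Converting merely swaps the region bits at the top of the
         * virtual address; the physical memory does not move.
         */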

        /*
         * There's a small race here where it's possible for someone to
         * access the page through /dev/mem halfway through the conversion
         * to uncached - not sure it's really worth bothering about
         */
        for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
                SetPageUncached(&page[i]);

        flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
        if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
                atomic_set(&uc_pool->status, 0);
                smp_call_function(uncached_ipi_visibility, uc_pool, 1);
                if (atomic_read(&uc_pool->status))
                        goto failed;
        } else if (status != PAL_VISIBILITY_OK)
                goto failed;

        preempt_disable();

        flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

        /* flush the just introduced uncached translation from the TLB */
        local_flush_tlb_all();

        preempt_enable();

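        /*
         * Drain outstanding memory transactions on this CPU, then on
         * every other CPU via IPI, before handing the granule out as
         * uncached.
         */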
        status = ia64_pal_mc_drain();
        if (status != PAL_STATUS_SUCCESS)
                goto failed;
        atomic_set(&uc_pool->status, 0);
        smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
        if (atomic_read(&uc_pool->status))
                goto failed;

        /*
         * The chunk of memory pages has been converted to uncached so now we
         * can add it to the pool.
         */
        status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
        if (status)
                goto failed;

        uc_pool->nchunks_added++;
        mutex_unlock(&uc_pool->add_chunk_mutex);
        return 0;

        /* failed to convert or add the chunk so give it back to the kernel */
failed:
        for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
                ClearPageUncached(&page[i]);

        free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
        mutex_unlock(&uc_pool->add_chunk_mutex);
        return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
        unsigned long uc_addr;
        struct uncached_pool *uc_pool;
        int nid;

        if (unlikely(starting_nid >= MAX_NUMNODES))
                return 0;

        if (starting_nid < 0)
                starting_nid = numa_node_id();
        nid = starting_nid;

        do {
                if (!node_state(nid, N_HIGH_MEMORY))
                        continue;
                uc_pool = &uncached_pools[nid];
                if (uc_pool->pool == NULL)
                        continue;
                do {
                        uc_addr = gen_pool_alloc(uc_pool->pool,
                                                 n_pages * PAGE_SIZE);
                        if (uc_addr != 0)
                                return uc_addr;
                } while (uncached_add_chunk(uc_pool, nid) == 0);

        } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

        return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
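
/*
 * Illustrative use of the exported API (a sketch, not code from this
 * file). Historically the SGI mspec driver was the main in-tree user:
 *
 *      unsigned long uc_addr;
 *
 *      uc_addr = uncached_alloc_page(numa_node_id(), 1);
 *      if (uc_addr == 0)
 *              return -ENOMEM;
 *      ... access the memory through the uncached address ...
 *      uncached_free_page(uc_addr, 1);
 */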


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
        int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
        struct gen_pool *pool = uncached_pools[nid].pool;

        if (unlikely(pool == NULL))
                return;

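        /*
         * The region bits at the top of an ia64 virtual address select
         * the address space; a valid uncached address must lie in the
         * identity-mapped region that begins at __IA64_UNCACHED_OFFSET,
         * hence the check on the uppermost nibble.
         */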
        if ((uc_addr & (0xFUL << 60)) != __IA64_UNCACHED_OFFSET)
                panic("uncached_free_page invalid address %lx\n", uc_addr);

        gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (NULL is passed in by efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * special memory operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
        int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
        struct gen_pool *pool = uncached_pools[nid].pool;
        size_t size = uc_end - uc_start;

        touch_softlockup_watchdog();

        if (pool != NULL) {
                memset((char *)uc_start, 0, size);
                (void) gen_pool_add(pool, uc_start, size, nid);
        }
        return 0;
}


static int __init uncached_init(void)
{
        int nid;

        for_each_node_state(nid, N_ONLINE) {
                uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
                mutex_init(&uncached_pools[nid].add_chunk_mutex);
        }

        efi_memmap_walk_uc(uncached_build_memmap, NULL);
        return 0;
}

__initcall(uncached_init);