linux/arch/ia64/kernel/uncached.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * one granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */
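
/*
 * A "granule" is ia64's unit of kernel identity mapping: 16 MB with the
 * default configuration (IA64_GRANULE_SHIFT == 24). Memory attributes
 * (cached vs. uncached) can only be changed a whole granule at a time,
 * hence the granule-sized conversions below. One in-tree user of this
 * interface is the SGI mspec driver (drivers/char/mspec.c).
 */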

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/nmi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>
#include <asm/efi.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* # of converted chunks added to pool */
	atomic_t status;		/* SMP-called functions' return status */
};
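
/*
 * The embedded gen_pool (lib/genalloc) does the actual carving of the
 * uncached space into pages; this structure just adds the per-node
 * bookkeeping and the serialization needed while a cached granule is
 * being converted to uncached.
 */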

#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];


static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}
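
/*
 * Both IPI handlers above are run on every other CPU via
 * smp_call_function() from uncached_add_chunk(); a failure on any CPU
 * bumps uc_pool->status, so a non-zero value after the IPIs complete
 * means at least one CPU could not perform its PAL call.
 */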


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = __alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
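
	/*
	 * Note on the arithmetic above: c_addr is the granule's address
	 * in the kernel's cached identity mapping (region 7, PAGE_OFFSET);
	 * rebasing it to __IA64_UNCACHED_OFFSET (region 6) yields a second
	 * identity mapping of the same physical memory, but with the
	 * uncacheable attribute.
	 */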

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

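	/*
	 * Converting the granule is a multi-step, all-CPU affair: make
	 * prefetch visibility physical on every CPU, flush the local
	 * caches and TLB, then drain machine-check state everywhere.
	 * A failure at any step on any CPU abandons the conversion.
	 */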
	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
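
/*
 * Usage sketch (hypothetical caller, for illustration; the in-tree user
 * of this interface is drivers/char/mspec.c):
 *
 *	unsigned long uc_addr = uncached_alloc_page(-1, 1);
 *
 *	if (uc_addr == 0)
 *		return -ENOMEM;
 *	// ... touch the memory through its uncached address ...
 *	uncached_free_page(uc_addr, 1);
 */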


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0xfUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
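
/*
 * Note: the owning node is recovered from the address itself.
 * Subtracting __IA64_UNCACHED_OFFSET turns the region 6 address back
 * into a physical address, and paddr_to_nid() maps that to a node, so
 * callers need not remember where their pages came from.
 */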


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (efi_memmap_walk_uc() is called with a NULL argument)
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}
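
/*
 * The chunks added here are the "spill" pages mentioned at the top of
 * this file: memory that the EFI memmap already describes as
 * uncacheable, so it only needs to be zeroed before being handed to
 * the generic allocator; no cached-to-uncached conversion is needed.
 */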


static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);