linux/arch/s390/mm/mem_detect.c
/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#define ADDR2G (1ULL << 31)

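/*
 * Scan storage with the tprot instruction in rzm-sized increments and
 * merge consecutive increments of the same access type into one chunk.
 * Scanning stops at memsize (derived from the SCLP values and capped by
 * maxsize) or when the chunk array is full.
 */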
static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	if (maxsize)
		memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize;
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			if (memsize && (addr + size > memsize))
				size = memsize - addr;
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

/**
 * detect_memory_layout - fill mem_chunk array with memory layout data
 * @chunk: mem_chunk array to be filled
 * @maxsize: maximum address where memory detection should stop
 *
 * Fills the passed in memory chunk array with the memory layout of the
 * machine. The array must have a size of at least MEMORY_CHUNKS and will
 * be fully initialized afterwards.
 * If the maxsize parameter has a value > 0, memory detection will stop at
 * that address. It is guaranteed that all chunks have an ending address
 * that is smaller than maxsize.
 * If maxsize is 0, all memory will be detected.
 */
void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long flags, flags_dat, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/*
	 * Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	local_irq_save(flags);
	flags_dat = __arch_local_irq_stnsm(0xfb);
	/*
	 * In case DAT was enabled, make sure chunk doesn't reside in vmalloc
	 * space. We have disabled DAT and any access to vmalloc area will
	 * cause an exception.
	 * If DAT was disabled we are called from early ipl code.
	 */
	if (test_bit(5, &flags_dat)) {
		if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk)))
			goto out;
	}
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable low address protection */
	find_memory_chunks(chunk, maxsize);
	__ctl_load(cr0, 0, 0);
out:
	__arch_local_irq_ssm(flags_dat);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
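
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * early setup code could detect the layout into a static array and walk
 * the non-empty chunks. The helper name print_memory_layout() and the
 * pr_info() output format are assumptions for the example.
 *
 *	static struct mem_chunk memory_chunk[MEMORY_CHUNKS] __initdata;
 *
 *	static void __init print_memory_layout(void)
 *	{
 *		int i;
 *
 *		detect_memory_layout(memory_chunk, 0);
 *		for (i = 0; i < MEMORY_CHUNKS; i++) {
 *			if (memory_chunk[i].size == 0)
 *				continue;
 *			pr_info("chunk %d: addr=0x%lx size=0x%lx type=%d\n",
 *				i, memory_chunk[i].addr, memory_chunk[i].size,
 *				memory_chunk[i].type);
 *		}
 *	}
 */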

/*
 * Create memory hole with given address and size.
 */
void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
		     unsigned long size)
{
	int i;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &mem_chunk[i];

		if (chunk->size == 0)
			continue;
		if (addr > chunk->addr + chunk->size)
			continue;
		if (addr + size <= chunk->addr)
			continue;
		/* Split: the hole lies strictly inside the chunk */
		if ((addr > chunk->addr) &&
		    (addr + size < chunk->addr + chunk->size)) {
			struct mem_chunk *new = chunk + 1;

			memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new));
			new->addr = addr + size;
			new->size = chunk->addr + chunk->size - new->addr;
			chunk->size = addr - chunk->addr;
			continue;
		} else if ((addr <= chunk->addr) &&
			   (addr + size >= chunk->addr + chunk->size)) {
			/* Remove: the hole covers the whole chunk */
			memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
			memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
		} else if (addr + size < chunk->addr + chunk->size) {
			/* Trim the front of the chunk */
			chunk->size = chunk->addr + chunk->size - addr - size;
			chunk->addr = addr + size;
		} else if (addr > chunk->addr) {
			/* Trim the end of the chunk */
			chunk->size = addr - chunk->addr;
		}
	}
}

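/*
 * Hedged usage sketch (illustrative, not part of this file): after
 * detecting the layout, a caller could punch a hole for a reserved
 * region, e.g. a crash kernel area. The symbols crash_base and
 * crash_size are assumptions for the example.
 *
 *	detect_memory_layout(memory_chunk, 0);
 *	create_mem_hole(memory_chunk, crash_base, crash_size);
 */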