linux/mm/page_poison.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

static bool want_page_poisoning __read_mostly;

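/*
 * Parse the "page_poison=" boot parameter; strtobool() accepts the
 * usual y/n, 1/0 and on/off spellings.
 */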
static int __init early_page_poison_param(char *buf)
{
        if (!buf)
                return -EINVAL;
        return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

bool page_poisoning_enabled(void)
{
        /*
         * Assumes that debug_pagealloc_enabled is set before
         * free_all_bootmem.
         * Page poisoning is debug page alloc for some arches. If
         * either of those options is enabled, enable poisoning.
         */
        return (want_page_poisoning ||
                (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
                debug_pagealloc_enabled()));
}

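/*
 * kmap_atomic() is used so that highmem pages, which have no
 * permanent kernel mapping, can be poisoned as well.
 */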
static void poison_page(struct page *page)
{
        void *addr = kmap_atomic(page);

        /* KASAN still thinks the page is in-use, so skip it. */
        kasan_disable_current();
        memset(addr, PAGE_POISON, PAGE_SIZE);
        kasan_enable_current();
        kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
        int i;

        for (i = 0; i < n; i++)
                poison_page(page + i);
}

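/*
 * a ^ b has exactly the differing bits set, and x & (x - 1) clears
 * the lowest set bit, so the AND is zero iff at most one bit is set:
 * a non-zero XOR with a zero AND means exactly one flipped bit.
 * E.g. 0xaa ^ 0xab == 0x01, and 0x01 & 0x00 == 0.
 */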
static bool single_bit_flip(unsigned char a, unsigned char b)
{
        unsigned char error = a ^ b;

        return error && !(error & (error - 1));
}

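/*
 * memchr_inv() returns the first byte that differs from PAGE_POISON
 * (0xaa, see linux/poison.h); walking back from the tail finds the
 * last corrupted byte, so only the damaged range is hex-dumped.
 */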
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
        static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
        unsigned char *start;
        unsigned char *end;

        if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
                return;

        start = memchr_inv(mem, PAGE_POISON, bytes);
        if (!start)
                return;

        for (end = mem + bytes - 1; end > start; end--) {
                if (*end != PAGE_POISON)
                        break;
        }

        if (!__ratelimit(&ratelimit))
                return;
        else if (start == end && single_bit_flip(*start, PAGE_POISON))
                pr_err("pagealloc: single bit error\n");
        else
                pr_err("pagealloc: memory corruption\n");

        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
                        end - start + 1, 1);
        dump_stack();
}

static void unpoison_page(struct page *page)
{
        void *addr;

        addr = kmap_atomic(page);
        /*
         * When enabled, page poisoning poisons each and every page
         * that is freed to buddy, so no extra check is needed here
         * to see whether this page was poisoned.
         */
        check_poison_mem(addr, PAGE_SIZE);
        kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
        int i;

        for (i = 0; i < n; i++)
                unpoison_page(page + i);
}

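/*
 * Called from the page allocator: enable == 1 when pages leave the
 * buddy on allocation (verify the poison pattern), enable == 0 when
 * pages are freed back (fill them with PAGE_POISON).
 */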
void kernel_poison_pages(struct page *page, int numpages, int enable)
{
        if (!page_poisoning_enabled())
                return;

        if (enable)
                unpoison_pages(page, numpages);
        else
                poison_pages(page, numpages);
}

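/*
 * Without native debug_pagealloc support, the poison pattern itself
 * catches modifications of freed pages, so there is nothing to map
 * or unmap here.
 */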
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        /* This function does nothing, all work is done via poison pages */
}
#endif