linux/mm/page_poison.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>

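/*
 * Set at boot via the "page_poison=on|off" early parameter handled
 * below; consulted on every page allocation and free, hence
 * __read_mostly.
 */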
static bool want_page_poisoning __read_mostly;

static int __init early_page_poison_param(char *buf)
{
        if (!buf)
                return -EINVAL;
        return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

bool page_poisoning_enabled(void)
{
        /*
         * Assumes that debug_pagealloc_enabled is set before
         * free_all_bootmem.
         * Page poisoning is debug page alloc for some arches. If
         * either of those options is enabled, enable poisoning.
         */
        return (want_page_poisoning ||
                (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
                debug_pagealloc_enabled()));
}

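/*
 * Fill a single page with the poison pattern.  kmap_atomic() is used
 * so that highmem pages can be poisoned too.
 */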
static void poison_page(struct page *page)
{
        void *addr = kmap_atomic(page);

        memset(addr, PAGE_POISON, PAGE_SIZE);
        kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
        int i;

        for (i = 0; i < n; i++)
                poison_page(page + i);
}

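/*
 * a ^ b sets exactly the bits in which the two bytes differ;
 * "error & (error - 1)" clears the lowest set bit, so the whole
 * expression is true iff exactly one bit differs.
 */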
static bool single_bit_flip(unsigned char a, unsigned char b)
{
        unsigned char error = a ^ b;

        return error && !(error & (error - 1));
}

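/*
 * Verify that a range still holds the poison pattern.  memchr_inv()
 * locates the first unexpected byte and the loop below the last one,
 * so only the corrupted span is hex-dumped.  Reports are rate-limited
 * to at most 10 per 5 seconds.
 */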
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
        static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
        unsigned char *start;
        unsigned char *end;

        if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
                return;

        start = memchr_inv(mem, PAGE_POISON, bytes);
        if (!start)
                return;

        for (end = mem + bytes - 1; end > start; end--) {
                if (*end != PAGE_POISON)
                        break;
        }

        if (!__ratelimit(&ratelimit))
                return;
        else if (start == end && single_bit_flip(*start, PAGE_POISON))
                pr_err("pagealloc: single bit error\n");
        else
                pr_err("pagealloc: memory corruption\n");

        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
                        end - start + 1, 1);
        dump_stack();
}

static void unpoison_page(struct page *page)
{
        void *addr;

        addr = kmap_atomic(page);
        /*
         * Page poisoning when enabled poisons each and every page
         * that is freed to buddy. Thus no extra check is done to
         * see if a page was poisoned.
         */
        check_poison_mem(addr, PAGE_SIZE);
        kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
        int i;

        for (i = 0; i < n; i++)
                unpoison_page(page + i);
}

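/*
 * Hook called from the page allocator.  enable != 0 means the pages
 * are being allocated, so verify that the poison pattern is still
 * intact; enable == 0 means they are being freed, so write the
 * pattern.
 */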
void kernel_poison_pages(struct page *page, int numpages, int enable)
{
        if (!page_poisoning_enabled())
                return;

        if (enable)
                unpoison_pages(page, numpages);
        else
                poison_pages(page, numpages);
}

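/*
 * On arches without native debug_pagealloc support, page poisoning
 * stands in for it, so mapping and unmapping pages is a no-op.
 */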
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        /* This function does nothing, all work is done via poison pages */
}
#endif