// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc_selftest.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

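/*
 * One-shot selftest state: binder_selftest_alloc() runs the test once,
 * counts failures in binder_selftest_failures, and is serialized by
 * binder_selftest_lock.
 */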
static bool binder_selftest_run = true;
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to the buffer that precedes
 * buf2 in address order. Symbol [ means the start of a
 * buffer, ] means the end of a buffer, and | means a
 * page boundary.
 */
enum buf_end_align_type {
        /**
         * @SAME_PAGE_UNALIGNED: The end of this buffer is on
         * the same page as the end of the previous buffer and
         * is not page aligned. Examples:
         * buf1 ][ buf2 ][ ...
         * buf1 ]|[ buf2 ][ ...
         */
        SAME_PAGE_UNALIGNED = 0,
        /**
         * @SAME_PAGE_ALIGNED: When the end of the previous buffer
         * is not page aligned, the end of this buffer is on the
         * same page as the end of the previous buffer and is page
         * aligned. When the previous buffer is page aligned, the
         * end of this buffer is aligned to the next page boundary.
         * Examples:
         * buf1 ][ buf2 ]| ...
         * buf1 ]|[ buf2 ]| ...
         */
        SAME_PAGE_ALIGNED,
        /**
         * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
         * the page next to the end of the previous buffer and
         * is not page aligned. Examples:
         * buf1 ][ buf2 | buf2 ][ ...
         * buf1 ]|[ buf2 | buf2 ][ ...
         */
        NEXT_PAGE_UNALIGNED,
        /**
         * @NEXT_PAGE_ALIGNED: The end of this buffer is on
         * the page next to the end of the previous buffer and
         * is page aligned. Examples:
         * buf1 ][ buf2 | buf2 ]| ...
         * buf1 ]|[ buf2 | buf2 ]| ...
         */
        NEXT_PAGE_ALIGNED,
        /**
         * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
         * the page that follows the page after the end of the
         * previous buffer and is not page aligned. Examples:
         * buf1 ][ buf2 | buf2 | buf2 ][ ...
         * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
         */
        NEXT_NEXT_UNALIGNED,
        LOOP_END,
};

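/*
 * Dump the buffer sizes and free order of the failing case so the
 * exact permutation can be reproduced from the kernel log.
 */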
static void pr_err_size_seq(size_t *sizes, int *seq)
{
        int i;

        pr_err("alloc sizes: ");
        for (i = 0; i < BUFFER_NUM; i++)
                pr_cont("[%zu]", sizes[i]);
        pr_cont("\n");
        pr_err("free seq: ");
        for (i = 0; i < BUFFER_NUM; i++)
                pr_cont("[%d]", seq[i]);
        pr_cont("\n");
}

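/*
 * Check that every page spanned by @buffer is resident and not on the
 * binder shrinker's lru list, i.e. genuinely allocated to the buffer.
 */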
static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
                                         struct binder_buffer *buffer,
                                         size_t size)
{
        void __user *page_addr;
        void __user *end;
        int page_index;

        end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
        page_addr = buffer->user_data;
        for (; page_addr < end; page_addr += PAGE_SIZE) {
                page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
                if (!alloc->pages[page_index].page_ptr ||
                    !list_empty(&alloc->pages[page_index].lru)) {
                        pr_err("expect alloc but is %s at page index %d\n",
                               alloc->pages[page_index].page_ptr ?
                               "lru" : "free", page_index);
                        return false;
                }
        }
        return true;
}

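/*
 * Allocate BUFFER_NUM buffers of the given sizes and check that the
 * backing pages of each were actually allocated.
 */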
static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
                                      struct binder_buffer *buffers[],
                                      size_t *sizes, int *seq)
{
        int i;

        for (i = 0; i < BUFFER_NUM; i++) {
                buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
                if (IS_ERR(buffers[i]) ||
                    !check_buffer_pages_allocated(alloc, buffers[i],
                                                  sizes[i])) {
                        pr_err_size_seq(sizes, seq);
                        binder_selftest_failures++;
                }
        }
}

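/*
 * Free all BUFFER_NUM buffers in the order given by @seq, then check
 * that the pages up to @end were moved to the lru list rather than
 * freed outright.
 */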
static void binder_selftest_free_buf(struct binder_alloc *alloc,
                                     struct binder_buffer *buffers[],
                                     size_t *sizes, int *seq, size_t end)
{
        int i;

        for (i = 0; i < BUFFER_NUM; i++)
                binder_alloc_free_buf(alloc, buffers[seq[i]]);

        for (i = 0; i < end / PAGE_SIZE; i++) {
                /*
                 * An error message on a free page can be a false
                 * positive if the binder shrinker ran during the
                 * binder_alloc_free_buf calls above.
                 */
                if (list_empty(&alloc->pages[i].lru)) {
                        pr_err_size_seq(sizes, seq);
                        pr_err("expect lru but is %s at page index %d\n",
                               alloc->pages[i].page_ptr ? "alloc" : "free", i);
                        binder_selftest_failures++;
                }
        }
}

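/*
 * Drain the binder shrinker's lru list via binder_alloc_free_page(),
 * then check that every page of the mmap'ed region is actually freed.
 */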
static void binder_selftest_free_page(struct binder_alloc *alloc)
{
        int i;
        unsigned long count;

        while ((count = list_lru_count(&binder_alloc_lru))) {
                list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                              NULL, count);
        }

        for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
                if (alloc->pages[i].page_ptr) {
                        pr_err("expect free but is %s at page index %d\n",
                               list_empty(&alloc->pages[i].lru) ?
                               "alloc" : "lru", i);
                        binder_selftest_failures++;
                }
        }
}

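/*
 * Run one full test case: allocate, free (which populates the lru),
 * allocate again so pages are reclaimed from the lru, then free and
 * release every page.
 */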
static void binder_selftest_alloc_free(struct binder_alloc *alloc,
                                       size_t *sizes, int *seq, size_t end)
{
        struct binder_buffer *buffers[BUFFER_NUM];

        binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
        binder_selftest_free_buf(alloc, buffers, sizes, seq, end);

        /* Allocate from lru. */
        binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
        if (list_lru_count(&binder_alloc_lru))
                pr_err("lru list should be empty but is not\n");

        binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
        binder_selftest_free_page(alloc);
}

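/* Return true if @val already appears in the first @index entries of @seq. */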
static bool is_dup(int *seq, int index, int val)
{
        int i;

        for (i = 0; i < index; i++) {
                if (seq[i] == val)
                        return true;
        }
        return false;
}

/* Recursively generate all BUFFER_NUM! permutations of free order. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
                                     size_t *sizes, int *seq,
                                     int index, size_t end)
{
        int i;

        if (index == BUFFER_NUM) {
                binder_selftest_alloc_free(alloc, sizes, seq, end);
                return;
        }
        for (i = 0; i < BUFFER_NUM; i++) {
                if (is_dup(seq, index, i))
                        continue;
                seq[index] = i;
                binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
        }
}

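/*
 * Convert the end offsets into buffer sizes and run the free-order
 * permutations twice: once with the sizes as computed (front of the
 * buffer space) and once mirrored to the back, with the first back
 * buffer enlarged to reach the last page.
 */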
static void binder_selftest_alloc_size(struct binder_alloc *alloc,
                                       size_t *end_offset)
{
        int i;
        int seq[BUFFER_NUM] = {0};
        size_t front_sizes[BUFFER_NUM];
        size_t back_sizes[BUFFER_NUM];
        size_t last_offset, offset = 0;

        for (i = 0; i < BUFFER_NUM; i++) {
                last_offset = offset;
                offset = end_offset[i];
                front_sizes[i] = offset - last_offset;
                back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
        }
        /*
         * Buffers share the first or last few pages.
         * Only BUFFER_NUM - 1 buffer sizes are adjustable since
         * we need one giant buffer before getting to the last page.
         */
        back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
        binder_selftest_free_seq(alloc, front_sizes, seq, 0,
                                 end_offset[BUFFER_NUM - 1]);
        binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}

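/*
 * Recursively choose an end offset for each of the BUFFER_NUM buffers,
 * covering every combination of buf_end_align_type cases, and run the
 * size and free-order tests for each combination.
 */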
static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
                                         size_t *end_offset, int index)
{
        int align;
        size_t end, prev;

        if (index == BUFFER_NUM) {
                binder_selftest_alloc_size(alloc, end_offset);
                return;
        }
        prev = index == 0 ? 0 : end_offset[index - 1];
        end = prev;

        BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

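        /*
         * Odd buf_end_align_type values are the page-aligned cases;
         * even values add BUFFER_MIN_SIZE, which the BUILD_BUG_ON
         * above guarantees is small enough that the unaligned cases
         * stay on the intended page.
         */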
        for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
                if (align % 2)
                        end = ALIGN(end, PAGE_SIZE);
                else
                        end += BUFFER_MIN_SIZE;
                end_offset[index] = end;
                binder_selftest_alloc_offset(alloc, end_offset, index + 1);
        }
}

/**
 * binder_selftest_alloc() - Test alloc and free of buffer pages.
 * @alloc: Pointer to alloc struct.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page is called.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
        size_t end_offset[BUFFER_NUM];

        if (!binder_selftest_run)
                return;
        mutex_lock(&binder_selftest_lock);
        if (!binder_selftest_run || !alloc->vma)
                goto done;
        pr_info("STARTED\n");
        binder_selftest_alloc_offset(alloc, end_offset, 0);
        binder_selftest_run = false;
        if (binder_selftest_failures > 0)
                pr_info("%d tests FAILED\n", binder_selftest_failures);
        else
                pr_info("PASSED\n");

done:
        mutex_unlock(&binder_selftest_lock);
}
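/*
 * Usage sketch (an assumption about the surrounding driver, not part
 * of this file): with CONFIG_ANDROID_BINDER_IPC_SELFTEST enabled, the
 * binder driver is expected to call this from its mmap path once
 * alloc->vma has been set up, e.g.:
 *
 *         binder_selftest_alloc(&proc->alloc);
 *
 * binder_selftest_run then ensures the test body executes only once.
 */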