linux/lib/sort.c
// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <mpm@selenic.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

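/*
 * alignment_ok() - true if @base can safely be accessed as an @align-byte
 * word, either because the architecture handles unaligned accesses
 * efficiently or because @base is suitably aligned.
 */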
static int alignment_ok(const void *base, int align)
{
        return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
                ((unsigned long)base & (align - 1)) == 0;
}

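/*
 * Built-in swap helpers: sort() falls back on these when the caller passes
 * swap_func == NULL, picking the 4- or 8-byte version when the element size
 * and alignment allow it.
 */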
static void u32_swap(void *a, void *b, int size)
{
        u32 t = *(u32 *)a;
        *(u32 *)a = *(u32 *)b;
        *(u32 *)b = t;
}

static void u64_swap(void *a, void *b, int size)
{
        u64 t = *(u64 *)a;
        *(u64 *)a = *(u64 *)b;
        *(u64 *)b = t;
}

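/*
 * generic_swap() - byte-by-byte fallback for element sizes or alignments
 * that the word-sized helpers above cannot handle.
 */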
static void generic_swap(void *a, void *b, int size)
{
        char t;

        do {
                t = *(char *)a;
                *(char *)a++ = *(char *)b;
                *(char *)b++ = t;
        } while (--size > 0);
}

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func optimized for your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 *
 * A usage sketch follows EXPORT_SYMBOL() at the end of this file.
 */
void sort(void *base, size_t num, size_t size,
          int (*cmp_func)(const void *, const void *),
          void (*swap_func)(void *, void *, int size))
{
        /* pre-scale counters for performance */
        int i = (num/2 - 1) * size, n = num * size, c, r;

        if (!swap_func) {
                if (size == 4 && alignment_ok(base, 4))
                        swap_func = u32_swap;
                else if (size == 8 && alignment_ok(base, 8))
                        swap_func = u64_swap;
                else
                        swap_func = generic_swap;
        }

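        /*
         * The array is treated as an implicit binary max-heap of byte
         * offsets: the element at offset r has children at offsets
         * r * 2 + size and r * 2 + 2 * size.  Both loops below sift a
         * parent down until the heap property holds.
         */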
        /* heapify */
        for ( ; i >= 0; i -= size) {
                for (r = i; r * 2 + size < n; r = c) {
                        c = r * 2 + size;
                        if (c < n - size &&
                                        cmp_func(base + c, base + c + size) < 0)
                                c += size;
                        if (cmp_func(base + r, base + c) >= 0)
                                break;
                        swap_func(base + r, base + c, size);
                }
        }

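        /*
         * Extraction phase: repeatedly move the current maximum (the root)
         * to the end of the unsorted region, shrink the heap by one element
         * and sift the new root back down.
         */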
        /* sort */
        for (i = n - size; i > 0; i -= size) {
                swap_func(base, base + i, size);
                for (r = 0; r * 2 + size < i; r = c) {
                        c = r * 2 + size;
                        if (c < i - size &&
                                        cmp_func(base + c, base + c + size) < 0)
                                c += size;
                        if (cmp_func(base + r, base + c) >= 0)
                                break;
                        swap_func(base + r, base + c, size);
                }
        }
}
EXPORT_SYMBOL(sort);
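
/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * one way a caller might invoke sort() on an array of ints.  The cmp_int()
 * and example_sort_ints() helpers and the vals[] array are hypothetical
 * names; ARRAY_SIZE() is assumed to come from <linux/kernel.h>.  The block
 * is guarded by #if 0 so it does not affect the compiled object.
 */
#if 0
#include <linux/kernel.h>

/* Return <0, 0 or >0, as sort() expects from cmp_func. */
static int cmp_int(const void *a, const void *b)
{
        int x = *(const int *)a;
        int y = *(const int *)b;

        return (x > y) - (x < y);
}

static void example_sort_ints(void)
{
        int vals[] = { 42, 7, 19, 3, 25 };

        /* Passing NULL lets sort() pick a built-in swap helper. */
        sort(vals, ARRAY_SIZE(vals), sizeof(vals[0]), cmp_int, NULL);
        /* vals[] now holds { 3, 7, 19, 25, 42 }. */
}
#endif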