root/lib/sort.c


DEFINITIONS

This source file includes the following definitions:
  1. is_aligned
  2. swap_words_32
  3. swap_words_64
  4. swap_bytes
  5. do_swap
  6. do_cmp
  7. parent
  8. sort_r
  9. sort

// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address
 * must be, too, unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
        unsigned char lsbits = (unsigned char)size;

        (void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        lsbits |= (unsigned char)(uintptr_t)base;
#endif
        return (lsbits & (align - 1)) == 0;
}
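
/*
 * For illustration, with align == 4 the single mask test above covers
 * size and base at once:
 *
 *      is_aligned(p, 6, 4)     false for any p (size has bit 1 set)
 *      is_aligned(p, 12, 4)    true if p is 4-byte aligned; also true
 *                              for unaligned p on architectures that
 *                              set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 */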

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
        do {
                u32 t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;
        } while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not.  If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
        do {
#ifdef CONFIG_64BIT
                u64 t = *(u64 *)(a + (n -= 8));
                *(u64 *)(a + n) = *(u64 *)(b + n);
                *(u64 *)(b + n) = t;
#else
                /* Use two 32-bit transfers to avoid base+index+4 addressing */
                u32 t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;

                t = *(u32 *)(a + (n -= 4));
                *(u32 *)(a + n) = *(u32 *)(b + n);
                *(u32 *)(b + n) = t;
#endif
        } while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
        do {
                char t = ((char *)a)[--n];
                ((char *)a)[n] = ((char *)b)[n];
                ((char *)b)[n] = t;
        } while (n);
}

typedef void (*swap_func_t)(void *a, void *b, int size);

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_func_t)0
#define SWAP_WORDS_32 (swap_func_t)1
#define SWAP_BYTES    (swap_func_t)2

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
{
        if (swap_func == SWAP_WORDS_64)
                swap_words_64(a, b, size);
        else if (swap_func == SWAP_WORDS_32)
                swap_words_32(a, b, size);
        else if (swap_func == SWAP_BYTES)
                swap_bytes(a, b, size);
        else
                swap_func(a, b, (int)size);
}

typedef int (*cmp_func_t)(const void *, const void *);
typedef int (*cmp_r_func_t)(const void *, const void *, const void *);
#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b,
                  cmp_r_func_t cmp, const void *priv)
{
        if (cmp == _CMP_WRAPPER)
                return ((cmp_func_t)(priv))(a, b);
        return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought.  Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2.  But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit.  That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
        i -= size;
        i -= size & -(i & lsbit);
        return i / 2;
}
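
/*
 * Worked example: for size == 24, lsbit == 8.  Take the child at byte
 * offset i == 96 (array index 4).  After i -= size, i == 72, an odd
 * multiple of 24 with bit 3 set, so size is subtracted once more to
 * give 48; 48/2 == 24, which is array index 1 == (4-1)/2.  For the
 * child at i == 72 (index 3), i -= size gives 48, an even multiple of
 * 24 with bit 3 clear, so nothing more is subtracted and 48/2 == 24
 * is again index 1 == (3-1)/2.
 */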

/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array.  You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case.  While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
            int (*cmp_func)(const void *, const void *, const void *),
            void (*swap_func)(void *, void *, int size),
            const void *priv)
{
        /* pre-scale counters for performance */
        size_t n = num * size, a = (num/2) * size;
        const unsigned int lsbit = size & -size;  /* Used to find parent */

        if (!a)         /* num < 2 || size == 0 */
                return;

        if (!swap_func) {
                if (is_aligned(base, size, 8))
                        swap_func = SWAP_WORDS_64;
                else if (is_aligned(base, size, 4))
                        swap_func = SWAP_WORDS_32;
                else
                        swap_func = SWAP_BYTES;
        }

        /*
         * Loop invariants:
         * 1. elements [a,n) satisfy the heap property (compare greater than
         *    all of their children),
         * 2. elements [n,num*size) are sorted, and
         * 3. a <= b <= c <= d <= n (whenever they are valid).
         */
        for (;;) {
                size_t b, c, d;

                if (a)                  /* Building heap: sift down --a */
                        a -= size;
                else if (n -= size)     /* Sorting: Extract root to --n */
                        do_swap(base, base + n, size, swap_func);
                else                    /* Sort complete */
                        break;

                /*
                 * Sift element at "a" down into heap.  This is the
                 * "bottom-up" variant, which significantly reduces
                 * calls to cmp_func(): we find the sift-down path all
                 * the way to the leaves (one compare per level), then
                 * backtrack to find where to insert the target element.
                 *
                 * Because elements tend to sift down close to the leaves,
                 * this uses fewer compares than doing two per level
                 * on the way down.  (A bit more than half as many on
                 * average, 3/4 worst-case.)
                 */
                for (b = a; c = 2*b + size, (d = c + size) < n;)
                        b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
                if (d == n)     /* Special case last leaf with no sibling */
                        b = c;

                /* Now backtrack from "b" to the correct location for "a" */
                while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
                        b = parent(b, lsbit, size);
                c = b;                  /* Where "a" belongs */
                while (b != a) {        /* Shift it into place */
                        b = parent(b, lsbit, size);
                        do_swap(base + b, base + c, size, swap_func);
                }
        }
}
EXPORT_SYMBOL(sort_r);
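
/*
 * Usage sketch (cmp_by_key() and struct item are hypothetical, shown
 * only to illustrate the contract): @priv lets a single comparison
 * function consult caller-supplied context, here a flag that selects
 * ascending or descending order.
 *
 *      struct item { int key; };
 *
 *      static int cmp_by_key(const void *a, const void *b, const void *priv)
 *      {
 *              const struct item *x = a, *y = b;
 *              int diff = (x->key > y->key) - (x->key < y->key);
 *
 *              return *(const bool *)priv ? -diff : diff;
 *      }
 *
 *      bool descending = true;
 *
 *      sort_r(items, nitems, sizeof(items[0]), cmp_by_key, NULL, &descending);
 */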

void sort(void *base, size_t num, size_t size,
          int (*cmp_func)(const void *, const void *),
          void (*swap_func)(void *, void *, int size))
{
        return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
}
EXPORT_SYMBOL(sort);
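
/*
 * Usage sketch (cmp_int() is a hypothetical helper): passing NULL for
 * swap_func selects the built-in word-wide swap, the common and
 * fastest case.
 *
 *      static int cmp_int(const void *a, const void *b)
 *      {
 *              int x = *(const int *)a, y = *(const int *)b;
 *
 *              return (x > y) - (x < y);
 *      }
 *
 *      int v[] = { 3, 1, 2 };
 *
 *      sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
 *      // v is now { 1, 2, 3 }
 */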
