// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */
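
/*
 * To put those bounds in concrete terms (an illustrative calculation,
 * not part of the original comment): for n = 1024 elements,
 * n*log2(n) + 0.37*n is roughly 10240 + 379, i.e. about 10600 compares
 * on average, versus roughly 8950 for glibc qsort() on random input
 * and about 15400 in this sort's worst case.
 */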

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address
 * must be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 * is set.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
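
/*
 * A worked example of the "(a | b) & mask" trick above (illustrative,
 * not part of the original source): with size = 12 and align = 8, the
 * low byte of size is 0x0c, 0x0c & 0x07 is nonzero, so 8-byte copies
 * are rejected and the caller falls back to 4-byte (or byte) copies.
 * With size = 16 but a base address ending in ...4, the OR folds the
 * misaligned base bit in, so 8-byte copies are likewise rejected on
 * architectures without CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 */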

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not available,
 * emulating one requires base+index+4 addressing which x86 has but most
 * other processors do not. If CONFIG_64BIT, we definitely have 64-bit
 * loads, but it's possible to have 64-bit loads without 64-bit pointers
 * (e.g. x32 ABI). Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_func_t)0
#define SWAP_WORDS_32 (swap_func_t)1
#define SWAP_BYTES    (swap_func_t)2

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
{
	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((cmp_func_t)(priv))(a, b);
	return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought. Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2. But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit. That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}
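
/*
 * A worked example of the branch-free math above (illustrative, not in
 * the original source): with size = 12 the precomputed lsbit is 4. For
 * the child at index 6 (byte offset i = 72): i -= 12 gives 60; 60 & 4 is
 * set, so another 12 is subtracted, giving 48; 48 / 2 = 24, which is
 * index 2, matching (6-1)/2. For index 5 (i = 60): i -= 12 gives 48;
 * 48 & 4 is clear, so nothing more is subtracted and 48 / 2 = 24 is
 * again index 2, matching (5-1)/2.
 */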

/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array. You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
	    cmp_r_func_t cmp_func,
	    swap_func_t swap_func,
	    const void *priv)
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;  /* Used to find parent */

	if (!a)		/* num < 2 || size == 0 */
		return;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			a -= size;
		else if (n -= size)	/* Sorting: Extract root to --n */
			do_swap(base, base + n, size, swap_func);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap. This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down. (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func);
		}
	}
}
EXPORT_SYMBOL(sort_r);
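
/*
 * Usage sketch for sort_r(), kept out of the build with #if 0: it is
 * illustrative only, and the callback, array, and flag names below are
 * made up for the example (ARRAY_SIZE() is assumed available to the
 * caller). The @priv pointer is handed through to every comparison, so
 * one callback can serve several orderings without global state.
 */
#if 0
static int cmp_ints_r(const void *a, const void *b, const void *priv)
{
	bool descending = *(const bool *)priv;
	int x = *(const int *)a, y = *(const int *)b;
	int r = (x > y) - (x < y);

	return descending ? -r : r;
}

static void sort_r_example(void)
{
	int v[] = { 3, 1, 4, 1, 5 };
	bool descending = true;

	/* NULL swap_func selects the built-in word-sized swap */
	sort_r(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_ints_r, NULL, &descending);
}
#endif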

void sort(void *base, size_t num, size_t size,
	  cmp_func_t cmp_func,
	  swap_func_t swap_func)
{
	return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
}
EXPORT_SYMBOL(sort);
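
/*
 * Matching sketch for the plain sort() wrapper (again #if 0'd out and
 * purely illustrative; the names are invented): the two-argument
 * callback is smuggled through sort_r()'s @priv slot by the
 * _CMP_WRAPPER sentinel above.
 */
#if 0
static int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return (x > y) - (x < y);
}

static void sort_example(void)
{
	u32 v[] = { 3, 1, 2 };

	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_u32, NULL);
}
#endif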