1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_HASH_H
3*4882a593Smuzhiyun #define _ASM_HASH_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun /*
6*4882a593Smuzhiyun * HP-PA only implements integer multiply in the FPU. However, for
7*4882a593Smuzhiyun * integer multiplies by constant, it has a number of shift-and-add
8*4882a593Smuzhiyun * (but no shift-and-subtract, sigh!) instructions that a compiler
9*4882a593Smuzhiyun * can synthesize a code sequence with.
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * Unfortunately, GCC isn't very efficient at using them. For example
12*4882a593Smuzhiyun * it uses three instructions for "x *= 21" when only two are needed.
13*4882a593Smuzhiyun * But we can find a sequence manually.
14*4882a593Smuzhiyun */
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #define HAVE_ARCH__HASH_32 1
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun /*
19*4882a593Smuzhiyun * This is a multiply by GOLDEN_RATIO_32 = 0x61C88647 optimized for the
20*4882a593Smuzhiyun * PA7100 pairing rules. This is an in-order 2-way superscalar processor.
21*4882a593Smuzhiyun * Only one instruction in a pair may be a shift (by more than 3 bits),
22*4882a593Smuzhiyun * but other than that, simple ALU ops (including shift-and-add by up
23*4882a593Smuzhiyun * to 3 bits) may be paired arbitrarily.
24*4882a593Smuzhiyun *
25*4882a593Smuzhiyun * PA8xxx processors also dual-issue ALU instructions, although with
26*4882a593Smuzhiyun * fewer constraints, so this schedule is good for them, too.
27*4882a593Smuzhiyun *
28*4882a593Smuzhiyun * This 6-step sequence was found by Yevgen Voronenko's implementation
29*4882a593Smuzhiyun * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
30*4882a593Smuzhiyun */
/*
 * Multiply x by GOLDEN_RATIO_32 = 0x61C88647 using only shifts and adds.
 *
 * The statements are deliberately grouped two per line: each line is one
 * issue pair on the 2-way superscalar PA7100 (see the pairing rules in
 * the comment above).  Do not reorder or merge these statements.
 */
static inline u32 __attribute_const__ __hash_32(u32 x)
{
	u32 a, b, c;

	/*
	 * Phase 1: Compute a = (x << 19) + x,
	 * b = (x << 9) + a, c = (x << 23) + b.
	 */
	a = x << 19;		/* Two shifts can't be paired */
	b = x << 9;	a += x;
	c = x << 23;	b += a;
	c += b;
	/* Phase 2: Return (b<<11) + (c<<6) + (a<<3) - c */
	b <<= 11;
	/* (a + (c << 3)) << 3 below contributes the (c << 6) + (a << 3) terms */
	a += c << 3;	b -= c;
	return (a << 3) + b;
}
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun #if BITS_PER_LONG == 64
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun #define HAVE_ARCH_HASH_64 1
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun /*
54*4882a593Smuzhiyun * Finding a good shift-and-add chain for GOLDEN_RATIO_64 is tricky,
55*4882a593Smuzhiyun * because available software for the purpose chokes on constants this
56*4882a593Smuzhiyun * large. (It's mostly designed for compiling FIR filter coefficients
57*4882a593Smuzhiyun * into FPGAs.)
58*4882a593Smuzhiyun *
59*4882a593Smuzhiyun * However, Jason Thong pointed out a work-around. The Hcub software
60*4882a593Smuzhiyun * (http://spiral.ece.cmu.edu/mcm/gen.html) is designed for *multiple*
61*4882a593Smuzhiyun * constant multiplication, and is good at finding shift-and-add chains
62*4882a593Smuzhiyun * which share common terms.
63*4882a593Smuzhiyun *
 * Looking at 0x61C8864680B583EB in binary:
65*4882a593Smuzhiyun * 0110000111001000100001100100011010000000101101011000001111101011
66*4882a593Smuzhiyun * \______________/ \__________/ \_______/ \________/
67*4882a593Smuzhiyun * \____________________________/ \____________________/
68*4882a593Smuzhiyun * you can see the non-zero bits are divided into several well-separated
69*4882a593Smuzhiyun * blocks. Hcub can find algorithms for those terms separately, which
70*4882a593Smuzhiyun * can then be shifted and added together.
71*4882a593Smuzhiyun *
72*4882a593Smuzhiyun * Dividing the input into 2, 3 or 4 blocks, Hcub can find solutions
73*4882a593Smuzhiyun * with 10, 9 or 8 adds, respectively, making a total of 11 for the
74*4882a593Smuzhiyun * whole number.
75*4882a593Smuzhiyun *
76*4882a593Smuzhiyun * Using just two large blocks, 0xC3910C8D << 31 in the high bits,
77*4882a593Smuzhiyun * and 0xB583EB in the low bits, produces as good an algorithm as any,
78*4882a593Smuzhiyun * and with one more small shift than alternatives.
79*4882a593Smuzhiyun *
80*4882a593Smuzhiyun * The high bits are a larger number and more work to compute, as well
81*4882a593Smuzhiyun * as needing one extra cycle to shift left 31 bits before the final
82*4882a593Smuzhiyun * addition, so they are the critical path for scheduling. The low bits
83*4882a593Smuzhiyun * can fit into the scheduling slots left over.
84*4882a593Smuzhiyun */
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun /*
88*4882a593Smuzhiyun * This _ASSIGN(dst, src) macro performs "dst = src", but prevents GCC
 * from inferring anything about the value assigned to "dst".
90*4882a593Smuzhiyun *
91*4882a593Smuzhiyun * This prevents it from mis-optimizing certain sequences.
92*4882a593Smuzhiyun * In particular, gcc is annoyingly eager to combine consecutive shifts.
93*4882a593Smuzhiyun * Given "x <<= 19; y += x; z += x << 1;", GCC will turn this into
94*4882a593Smuzhiyun * "y += x << 19; z += x << 20;" even though the latter sequence needs
95*4882a593Smuzhiyun * an additional instruction and temporary register.
96*4882a593Smuzhiyun *
97*4882a593Smuzhiyun * Because no actual assembly code is generated, this construct is
98*4882a593Smuzhiyun * usefully portable across all GCC platforms, and so can be test-compiled
99*4882a593Smuzhiyun * on non-PA systems.
100*4882a593Smuzhiyun *
101*4882a593Smuzhiyun * In two places, additional unused input dependencies are added. This
102*4882a593Smuzhiyun * forces GCC's scheduling so it does not rearrange instructions too much.
103*4882a593Smuzhiyun * Because the PA-8xxx is out of order, I'm not sure how much this matters,
104*4882a593Smuzhiyun * but why make it more difficult for the processor than necessary?
105*4882a593Smuzhiyun */
106*4882a593Smuzhiyun #define _ASSIGN(dst, src, ...) asm("" : "=r" (dst) : "0" (src), ##__VA_ARGS__)
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun /*
 * Multiply by GOLDEN_RATIO_64 = 0x61C8864680B583EB using a heavily
110*4882a593Smuzhiyun * optimized shift-and-add sequence.
111*4882a593Smuzhiyun *
112*4882a593Smuzhiyun * Without the final shift, the multiply proper is 19 instructions,
113*4882a593Smuzhiyun * 10 cycles and uses only 4 temporaries. Whew!
114*4882a593Smuzhiyun *
115*4882a593Smuzhiyun * You are not expected to understand this.
116*4882a593Smuzhiyun */
/*
 * hash_64 - fold a 64-bit value down to the top @bits bits of
 * a * GOLDEN_RATIO_64, computed via the shift-and-add schedule
 * described above (two independent operations per line pair with
 * statements grouped for dual-issue).
 *
 * _ASSIGN() is a compiler barrier only (no code emitted); it stops GCC
 * from re-combining consecutive shifts and from reordering the schedule.
 * The extra "X" (...) operands are artificial input dependencies that
 * further constrain GCC's scheduler.
 */
static __always_inline u32 __attribute_const__
hash_64(u64 a, unsigned int bits)
{
	u64 b, c, d;

	/*
	 * Encourage GCC to move a dynamic shift to %sar early,
	 * thereby freeing up an additional temporary register.
	 * ("=q" is the PA-RISC constraint for the shift-amount register.)
	 * Either way, bits becomes the right-shift count 64 - bits.
	 */
	if (!__builtin_constant_p(bits))
		asm("" : "=q" (bits) : "0" (64 - bits));
	else
		bits = 64 - bits;

	_ASSIGN(b, a*5);	c = a << 13;
	b = (b << 2) + a;	_ASSIGN(d, a << 17);
	a = b + (a << 1);	c += d;
	d = a << 10;		_ASSIGN(a, a << 19);
	d = a - d;		_ASSIGN(a, a << 4, "X" (d));
	c += b;			a += b;
	d -= c;			c += a << 1;
	a += c << 3;		_ASSIGN(b, b << (7+31), "X" (c), "X" (d));
	a <<= 31;		b += d;
	a += b;
	/* a now holds the full 64-bit product; keep its high @bits bits. */
	return a >> bits;
}
143*4882a593Smuzhiyun #undef _ASSIGN /* We're a widely-used header file, so don't litter! */
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun #endif /* BITS_PER_LONG == 64 */
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun #endif /* _ASM_HASH_H */
148