/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HASH_H
#define _ASM_HASH_H

/*
 * The later H8SX models have a 32x32-bit multiply, but the H8/300H
 * and H8S have only 16x16->32.  Since it's tolerably compact, this is
 * basically an inlined version of the __mulsi3 code.  Since the inputs
 * are not expected to be small, it's also simplified by skipping the
 * early-out checks.  (The partial-product decomposition being inlined
 * is sketched below.)
 *
 * (Since neither CPU has any multi-bit shift instructions, a
 * shift-and-add version is a non-starter.)
 *
 * TODO: come up with an arch-specific version of the hashing in fs/namei.c,
 * since that is heavily dependent on rotates, which, as mentioned, suck
 * horribly on H8.
 */
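
/*
 * For reference, a sketch of the identity being exploited (writing
 * x = (x_hi << 16) | x_lo and k = (k_hi << 16) | k_lo; only the low
 * 32 bits of the product survive):
 *
 *	x * k == x_lo * k_lo + ((x_hi * k_lo + x_lo * k_hi) << 16)  (mod 2^32)
 *
 * The x_hi * k_hi term shifts entirely out of the low 32 bits, so only
 * three 16x16->32 multiplies are needed.
 */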

#if defined(CONFIG_CPU_H8300H) || defined(CONFIG_CPU_H8S)

#define HAVE_ARCH__HASH_32 1

/*
 * Multiply by k = 0x61C88647.  Fitting this into three registers requires
 * one extra instruction, but reducing register pressure will probably
 * make that back and then some.
 *
 * GCC asm note: %e1 is the high half of operand %1, while %f1 is the
 * low half.  So if %1 is er4, then %e1 is e4 and %f1 is r4.
 *
 * This has been designed to modify x in place, since that's the most
 * common usage, but preserve k, since hash_64() makes two calls in
 * quick succession.
 */
static inline u32 __attribute_const__ __hash_32(u32 x)
{
	u32 temp;

	asm(   "mov.w	%e1,%f0"	/* temp.low = x.high */
	"\n	mulxu.w	%f2,%0"		/* klow * xhigh */
	"\n	mov.w	%f0,%e1"	/* The extra instruction */
	"\n	mov.w	%f1,%f0"	/* temp.low = x.low */
	"\n	mulxu.w	%e2,%0"		/* khigh * xlow */
	"\n	add.w	%e1,%f0"	/* sum of the two cross products */
	"\n	mulxu.w	%f2,%1"		/* klow * xlow */
	"\n	add.w	%f0,%e1"	/* add the cross products << 16 */
	: "=&r" (temp), "=r" (x)
	: "%r" (GOLDEN_RATIO_32), "1" (x));
	return x;
}
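
/*
 * A plain-C sketch of the same three-multiply sequence, handy for
 * checking the asm above against the generic __hash_32() on a host.
 * The helper name __hash_32_check is hypothetical and not part of any
 * kernel API; it assumes nothing beyond the 16x16->32 multiplies the
 * asm uses.
 */
static inline u32 __attribute_const__ __hash_32_check(u32 x)
{
	u32 k = GOLDEN_RATIO_32;
	u32 lo  = (x & 0xffff) * (k & 0xffff);	/* klow * xlow */
	u32 mid = (x >> 16) * (k & 0xffff)	/* klow * xhigh */
		+ (x & 0xffff) * (k >> 16);	/* khigh * xlow */

	/* xhigh * khigh would shift entirely out of the low 32 bits. */
	return lo + (mid << 16);
}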

#endif
#endif /* _ASM_HASH_H */