/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

#include <linux/kernel.h>

/*
 * This is largely generic for little-endian machines, but the
 * optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
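/*
 * For reference: REPEAT_BYTE() replicates the byte across the word, so
 * on 64-bit these constants are 0x0101010101010101 and
 * 0x8080808080808080 (with the corresponding 32-bit values on 32-bit
 * kernels).
 */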

#ifdef CONFIG_64BIT

/*
 * Jan Achrenius on G+: microoptimized version of
 * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
 * that works for the bytemasks without having to
 * mask them first.
 */
static inline long count_masked_bytes(unsigned long mask)
{
	return mask*0x0001020304050608ul >> 56;
}
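/*
 * For illustration: the bytemasks produced by create_zero_mask() below
 * are 0, 0xff, 0xffff, ... i.e. (1 << 8*n) - 1, where n is the index
 * of the first zero byte.  The multiply pushes n into the top byte,
 * e.g. 0xff * 0x0001020304050608 = 0x01010101010101f8, and ">> 56"
 * then yields 1.
 */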

#else	/* 32-bit case */

/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline long count_masked_bytes(long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}
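/*
 * For illustration: the possible bytemasks here are 0, 0xff, 0xffff
 * and 0xffffff.  Adding 0x0ff0001 and shifting right by 23 maps them
 * to 1, 1, 2, 3 respectively (e.g. 0xffff + 0x0ff0001 = 0x1000000,
 * >> 23 = 2), and the final "a & mask" turns the spurious 1 for
 * mask == 0 back into 0.
 */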

#endif

/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}
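/*
 * Worked example: with a = 0x12340078 on 32-bit,
 * a - 0x01010101 = 0x1132ff77 and ~a = 0xedcbff87; ANDing those with
 * 0x80808080 leaves 0x00008000, i.e. the high bit of byte 1, which is
 * exactly the zero byte.  A word without a zero byte yields 0.
 */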

static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}
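/*
 * On little-endian x86 the value from has_zero() is already in the
 * right form, so this hook is a no-op; it exists so callers can share
 * code with architectures that need to massage the mask first.
 */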

static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}
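/*
 * For illustration: if has_zero() reported 0x00008000 (first zero byte
 * at index 1), then (bits - 1) & ~bits = 0x00007fff, and ">> 7" gives
 * 0xff: a bytemask covering exactly the bytes before the first zero
 * byte.
 */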

/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}
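/*
 * Rough sketch of the intended calling pattern (hypothetical example):
 * a strlen-style word-at-a-time loop would string the helpers together
 * roughly like this:
 *
 *	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 *	unsigned long a, bits, len = 0;
 *
 *	for (;;) {
 *		a = load_unaligned_zeropad(str + len);
 *		if (has_zero(a, &bits, &constants))
 *			break;
 *		len += sizeof(unsigned long);
 *	}
 *	bits = prep_zero_mask(a, bits, &constants);
 *	bits = create_zero_mask(bits);
 *	len += find_zero(bits);		\/* len == strlen(str) *\/
 */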

/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret, dummy;

	asm(
		"1:\tmov %2,%0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\t"
		"lea %2,%1\n\t"
		"and %3,%1\n\t"
		"mov (%1),%0\n\t"
		"leal %2,%%ecx\n\t"
		"andl %4,%%ecx\n\t"
		"shll $3,%%ecx\n\t"
		"shr %%cl,%0\n\t"
		"jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		:"=&r" (ret),"=&c" (dummy)
		:"m" (*(unsigned long *)addr),
		 "i" (-sizeof(unsigned long)),
		 "i" (sizeof(unsigned long)-1));
	return ret;
}
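/*
 * How the fixup works: when the straight load at 1: faults, the code
 * at 3: re-reads from the address rounded down to a word boundary
 * (which lies entirely within the mapped page) and shifts the result
 * right by 8 * (addr & (sizeof(long) - 1)) bits, so the bytes that do
 * exist land at the low end and the missing bytes come back as zeroes.
 */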

#endif /* _ASM_WORD_AT_A_TIME_H */