1 /* SPDX-License-Identifier: Apache-2.0 OR MIT */
2 /*
3 * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
4 */
5
6 #ifndef __MPP_HASH_H__
7 #define __MPP_HASH_H__
8
9 #include <stdbool.h>
10
11 #include "rk_type.h"
12
13 #ifdef __cplusplus
14 extern "C" {
15 #endif
16
/*
 * Multiplicative hash constants from the Linux kernel: 2^32 / golden
 * ratio and 2^64 / golden ratio, chosen for good bit dispersion.
 */
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull

/* Pick the pointer-width hash variant at compile time. */
#if __SIZEOF_POINTER__ == 4
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
#define hash_long(val, bits) hash_32(val, bits)
#elif __SIZEOF_POINTER__ == 8
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
#define hash_long(val, bits) hash_64(val, bits)
#else
#error __SIZEOF_POINTER__ not 4 or 8
#endif
29
/*
 * Double-linked list with a single-pointer head, as in the Linux kernel.
 * pprev points at the previous node's "next" field (or the head's "first"),
 * so a node can be unlinked without knowing its list head.
 */
struct hlist_node {
    struct hlist_node *next, **pprev;
};

struct hlist_head {
    struct hlist_node *first;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

/* Poison values written into unlinked nodes so stale use faults early. */
#define LIST_POISON1 ((void *) 0x100)
#define LIST_POISON2 ((void *) 0x200)

/* Force a single, untorn access to a field that lockless readers may see. */
#define WRITE_ONCE(var, val) \
    (*((volatile typeof(val) *)(&(var))) = (val))

#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
49
INIT_HLIST_NODE(struct hlist_node * h)50 static inline void INIT_HLIST_NODE(struct hlist_node *h)
51 {
52 h->next = NULL;
53 h->pprev = NULL;
54 }
55
hlist_unhashed(const struct hlist_node * h)56 static inline int hlist_unhashed(const struct hlist_node *h)
57 {
58 return !h->pprev;
59 }
60
hlist_empty(const struct hlist_head * h)61 static inline int hlist_empty(const struct hlist_head *h)
62 {
63 return !READ_ONCE(h->first);
64 }
65
/*
 * Unlink @n from its list without re-initializing it.
 * The caller must fix up (or poison) n->next / n->pprev afterwards.
 */
static inline void __hlist_del(struct hlist_node *n)
{
    struct hlist_node *next = n->next;
    struct hlist_node **pprev = n->pprev;

    /* Redirect the predecessor's next (or the head's first) past @n... */
    WRITE_ONCE(*pprev, next);
    /* ...then fix the successor's back-link, if a successor exists. */
    if (next)
        next->pprev = pprev;
}
75
hlist_del(struct hlist_node * n)76 static inline void hlist_del(struct hlist_node *n)
77 {
78 __hlist_del(n);
79 n->next = (struct hlist_node*)LIST_POISON1;
80 n->pprev = (struct hlist_node**)LIST_POISON2;
81 }
82
/* Unlink @n (if it is linked at all) and return it to the unhashed state. */
static inline void hlist_del_init(struct hlist_node *n)
{
    if (hlist_unhashed(n))
        return;

    __hlist_del(n);
    INIT_HLIST_NODE(n);
}
90
/*
 * Insert @n as the new first element of @h.
 * @n is fully linked before WRITE_ONCE publishes it through h->first,
 * so a lockless reader never sees a half-initialized node.
 */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
    struct hlist_node *first = h->first;
    n->next = first;
    if (first)
        first->pprev = &n->next;
    WRITE_ONCE(h->first, n);
    n->pprev = &h->first;
}
100
/* Insert @n immediately before @next; @next must already be in a list. */
static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next)
{
    n->pprev = next->pprev;
    n->next = next;
    next->pprev = &n->next;
    /* Publish @n through the predecessor's next pointer last. */
    WRITE_ONCE(*(n->pprev), n);
}
108
/* Insert @n immediately after @prev; @prev must already be in a list. */
static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev)
{
    n->next = prev->next;
    WRITE_ONCE(prev->next, n);
    n->pprev = &prev->next;

    /* Fix the old successor's back-link, if @prev was not the tail. */
    if (n->next)
        n->next->pprev = &n->next;
}
118
/*
 * Make a detached node appear hashed by pointing pprev at its own next
 * field; hlist_unhashed() then reports it as linked.
 */
static inline void hlist_add_fake(struct hlist_node *n)
{
    n->pprev = &n->next;
}
123
/* True if @h was marked as fake-hashed by hlist_add_fake(). */
static inline int hlist_fake(struct hlist_node *h)
{
    return h->pprev == &h->next;
}
128
129 static inline int
hlist_is_singular_node(struct hlist_node * n,struct hlist_head * h)130 hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
131 {
132 return !n->next && n->pprev == &h->first;
133 }
134
hlist_move_list(struct hlist_head * old,struct hlist_head * _new)135 static inline void hlist_move_list(struct hlist_head *old,
136 struct hlist_head *_new)
137 {
138 _new->first = old->first;
139 if (_new->first)
140 _new->first->pprev = &_new->first;
141 old->first = NULL;
142 }
143
/* Resolve the enclosing struct from an embedded hlist_node pointer. */
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

/* Iterate raw nodes; @pos must not be removed during iteration. */
#define hlist_for_each(pos, head) \
    for (pos = (head)->first; pos ; pos = pos->next)

/* Raw-node iteration safe against removal of @pos (@n caches next). */
#define hlist_for_each_safe(pos, n, head) \
    for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
         pos = n)

/* hlist_entry() that maps a NULL node pointer to a NULL entry. */
#define hlist_entry_safe(ptr, type, member) \
    ({ typeof(ptr) ____ptr = (ptr); \
       ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
    })
157
/* Iterate over the typed entries of a chain headed at @head. */
#define hlist_for_each_entry(pos, head, member) \
    for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
         pos; \
         pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

/* Continue typed iteration after the current @pos. */
#define hlist_for_each_entry_continue(pos, member) \
    for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
         pos; \
         pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

/* Continue typed iteration from the current @pos (inclusive). */
#define hlist_for_each_entry_from(pos, member) \
    for (; pos; \
         pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

/* Typed iteration safe against removal of @pos; @n caches the next node. */
#define hlist_for_each_entry_safe(pos, n, head, member) \
    for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
         pos && ({ n = pos->member.next; 1; }); \
         pos = hlist_entry_safe(n, typeof(*pos), member))
176
/* Define a hashtable of 2^bits buckets, all initialized empty. */
#define DEFINE_HASHTABLE(name, bits) \
    struct hlist_head name[1 << (bits)] = \
        { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }

/* Declaration only; the caller must initialize the buckets. */
#define DECLARE_HASHTABLE(name, bits) \
    struct hlist_head name[1 << (bits)]

/* Element count of an actual array (NOT valid on pointers). */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
185
/*
 * Constant-expression floor(log2(n)); evaluates to 0 for n <= 1.
 * Spelled out as a ternary chain so it can size arrays at compile time
 * (used by HASH_BITS on statically declared hashtables).
 */
#define ilog2(n) \
( \
    (n) & (1ULL << 63) ? 63 : \
    (n) & (1ULL << 62) ? 62 : \
    (n) & (1ULL << 61) ? 61 : \
    (n) & (1ULL << 60) ? 60 : \
    (n) & (1ULL << 59) ? 59 : \
    (n) & (1ULL << 58) ? 58 : \
    (n) & (1ULL << 57) ? 57 : \
    (n) & (1ULL << 56) ? 56 : \
    (n) & (1ULL << 55) ? 55 : \
    (n) & (1ULL << 54) ? 54 : \
    (n) & (1ULL << 53) ? 53 : \
    (n) & (1ULL << 52) ? 52 : \
    (n) & (1ULL << 51) ? 51 : \
    (n) & (1ULL << 50) ? 50 : \
    (n) & (1ULL << 49) ? 49 : \
    (n) & (1ULL << 48) ? 48 : \
    (n) & (1ULL << 47) ? 47 : \
    (n) & (1ULL << 46) ? 46 : \
    (n) & (1ULL << 45) ? 45 : \
    (n) & (1ULL << 44) ? 44 : \
    (n) & (1ULL << 43) ? 43 : \
    (n) & (1ULL << 42) ? 42 : \
    (n) & (1ULL << 41) ? 41 : \
    (n) & (1ULL << 40) ? 40 : \
    (n) & (1ULL << 39) ? 39 : \
    (n) & (1ULL << 38) ? 38 : \
    (n) & (1ULL << 37) ? 37 : \
    (n) & (1ULL << 36) ? 36 : \
    (n) & (1ULL << 35) ? 35 : \
    (n) & (1ULL << 34) ? 34 : \
    (n) & (1ULL << 33) ? 33 : \
    (n) & (1ULL << 32) ? 32 : \
    (n) & (1ULL << 31) ? 31 : \
    (n) & (1ULL << 30) ? 30 : \
    (n) & (1ULL << 29) ? 29 : \
    (n) & (1ULL << 28) ? 28 : \
    (n) & (1ULL << 27) ? 27 : \
    (n) & (1ULL << 26) ? 26 : \
    (n) & (1ULL << 25) ? 25 : \
    (n) & (1ULL << 24) ? 24 : \
    (n) & (1ULL << 23) ? 23 : \
    (n) & (1ULL << 22) ? 22 : \
    (n) & (1ULL << 21) ? 21 : \
    (n) & (1ULL << 20) ? 20 : \
    (n) & (1ULL << 19) ? 19 : \
    (n) & (1ULL << 18) ? 18 : \
    (n) & (1ULL << 17) ? 17 : \
    (n) & (1ULL << 16) ? 16 : \
    (n) & (1ULL << 15) ? 15 : \
    (n) & (1ULL << 14) ? 14 : \
    (n) & (1ULL << 13) ? 13 : \
    (n) & (1ULL << 12) ? 12 : \
    (n) & (1ULL << 11) ? 11 : \
    (n) & (1ULL << 10) ? 10 : \
    (n) & (1ULL << 9) ? 9 : \
    (n) & (1ULL << 8) ? 8 : \
    (n) & (1ULL << 7) ? 7 : \
    (n) & (1ULL << 6) ? 6 : \
    (n) & (1ULL << 5) ? 5 : \
    (n) & (1ULL << 4) ? 4 : \
    (n) & (1ULL << 3) ? 3 : \
    (n) & (1ULL << 2) ? 2 : \
    (n) & (1ULL << 1) ? 1 : 0 \
)
252
/* Bucket count of a hashtable declared as an array. */
#define HASH_SIZE(name) (ARRAY_SIZE(name))
/* log2 of the bucket count, i.e. the "bits" the hash functions need. */
#define HASH_BITS(name) ilog2(HASH_SIZE(name))

/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
    (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))

/* Add @node to the bucket selected by @key; duplicate keys are allowed. */
#define hash_add(hashtable, node, key) \
    hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
262
/**
 * hash_empty - check whether a hashtable is empty
 * @hashtable: hashtable to check
 *
 * This has to be a macro since HASH_BITS() will not work on pointers since
 * it calculates the size during preprocessing.
 */
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))

/**
 * hash_for_each - iterate over a hashtable
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 */
#define hash_for_each(name, bkt, obj, member) \
    for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name); \
         (bkt)++) \
        hlist_for_each_entry(obj, &name[bkt], member)

/**
 * hash_for_each_safe - iterate over a hashtable safe against removal of
 * hash entry
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @tmp: a &struct used for temporary storage
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 */
#define hash_for_each_safe(name, bkt, tmp, obj, member) \
    for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name); \
         (bkt)++) \
        hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)

/**
 * hash_for_each_possible - iterate over all objects hashing to one bucket
 * @name: hashtable to iterate
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 * @key: the key whose bucket is walked
 */
#define hash_for_each_possible(name, obj, member, key) \
    hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
300
/*
 * Multiplicative (Fibonacci) hash of a 32-bit value down to @bits bits.
 * The high bits of the product are the best mixed, so the result is
 * taken from the top of the 32-bit product. @bits must be in 1..32.
 */
static inline RK_U32 hash_32(RK_U32 val, unsigned int bits)
{
    RK_U32 product = val * GOLDEN_RATIO_32;

    return product >> (32 - bits);
}
309
/* Full 32-bit multiplicative mix of @val; callers select the bits needed. */
static inline RK_U32 __hash_32(RK_U32 val)
{
    return (RK_U32)(GOLDEN_RATIO_32 * val);
}
314
/*
 * Hash a 64-bit value down to @bits bits. On 32-bit targets the value
 * is first folded to 32 bits so only 32x32 multiplies are needed.
 */
static inline RK_U32 hash_64(RK_U64 val, unsigned int bits)
{
#if __SIZEOF_POINTER__ == 8
    /* 64x64-bit multiply is efficient on all 64-bit processors */
    return val * GOLDEN_RATIO_64 >> (64 - bits);
#else
    /* Hash 64 bits using only 32x32-bit multiply. */
    return hash_32((RK_U32)val ^ ((val >> 32) * GOLDEN_RATIO_32), bits);
#endif
}
325
hash_ptr(const void * ptr,unsigned int bits)326 static inline RK_U32 hash_ptr(const void *ptr, unsigned int bits)
327 {
328 return hash_long((unsigned long)ptr, bits);
329 }
330
/* This really should be called fold32_ptr; it does no hashing to speak of. */
static inline RK_U32 hash32_ptr(const void *ptr)
{
    unsigned long val = (unsigned long)ptr;

#if __SIZEOF_POINTER__ == 8
    /* Fold the high half into the low half so no pointer bits are lost. */
    val ^= (val >> 32);
#endif
    return (RK_U32)val;
}
341
342 /**
343 * hash_hashed - check whether an object is in any hashtable
344 * @node: the &struct hlist_node of the object to be checked
345 */
hash_hashed(struct hlist_node * node)346 static inline bool hash_hashed(struct hlist_node *node)
347 {
348 return !hlist_unhashed(node);
349 }
350
__hash_empty(struct hlist_head * ht,unsigned int sz)351 static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
352 {
353 unsigned int i;
354
355 for (i = 0; i < sz; i++)
356 if (!hlist_empty(&ht[i]))
357 return false;
358
359 return true;
360 }
361
362 /**
363 * hash_del - remove an object from a hashtable
364 * @node: &struct hlist_node of the object to remove
365 */
static inline void hash_del(struct hlist_node *node)
{
    /* del_init (not plain del) so a repeated hash_del() stays harmless. */
    hlist_del_init(node);
}
370
371 #ifdef __cplusplus
372 }
373 #endif
374
375 #endif /*__MPP_HASH_H__*/
376