/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
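
/*
 * A minimal illustration of the encoding (not an API example): for a
 * caller-chosen nulls_base and a bucket whose first element hashed to a
 * given hash value, the chain terminator is built as
 *
 *	unsigned long end = NULLS_MARKER(nulls_base + hash);
 *
 * NULLS_MARKER() (from linux/list_nulls.h) shifts the value left by one
 * and sets the low bit; rht_is_a_nulls() below tests that bit and
 * rht_get_nulls_value() shifts it back out.
 */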

struct rhash_head {
	struct rhash_head __rcu		*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		insecure_max_entries;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
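
/*
 * A usage sketch (the struct and names here are hypothetical, not part of
 * this header): an object keyed by a u32 id would typically be set up as
 *
 *	struct example_obj {
 *		u32			id;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params example_params = {
 *		.key_len	= sizeof(u32),
 *		.key_offset	= offsetof(struct example_obj, id),
 *		.head_offset	= offsetof(struct example_obj, node),
 *		.automatic_shrinking = true,
 *	};
 *
 * Passing a compile-time constant params to the *_fast() helpers below
 * allows the hash function selection in rht_key_hashfn() to be resolved
 * at compile time.
 */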

/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}
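
/*
 * Worked example: RHT_HASH_RESERVED_SPACE is 5 (base bits plus the nulls
 * bit), so with tbl->size == 1024 this reduces to (hash >> 5) & 1023.
 */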

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables during
 * a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
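
/*
 * A walk sketch using the iterator API above (error handling elided, the
 * object type is hypothetical):
 *
 *	struct rhashtable_iter iter;
 *	struct example_obj *obj;
 *
 *	rhashtable_walk_init(ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	/* e.g. -EAGAIN after a concurrent resize */
 *		/* use obj */
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */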

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ? \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = next, \
	     next = !rht_is_a_nulls(pos) ? \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
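
/*
 * A teardown-style sketch for the _safe variant (assumes the caller has
 * exclusive access to the table, e.g. during teardown; names hypothetical):
 *
 *	struct example_obj *obj;
 *	struct rhash_head *pos, *next;
 *
 *	rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
 *		kfree(obj);
 *
 * Freeing the cursor's object is possible because @next is loaded before
 * the loop body runs.
 */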

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash], \
					tbl, hash, member)
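
/*
 * An RCU read-side sketch (hypothetical object type): walking one bucket
 * under rcu_read_lock(), much like rhashtable_lookup_fast() below.
 *
 *	struct example_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
 *		if (obj->id == id)
 *			break;
 *	}
 *	rcu_read_unlock();
 */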

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return (_rtw_memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len) == _TRUE) ? 0 : 1;
}

/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}
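
/*
 * A lookup sketch (reuses the hypothetical example_obj/example_params from
 * above):
 *
 *	u32 id = 42;
 *	struct example_obj *obj;
 *
 *	obj = rhashtable_lookup_fast(ht, &id, example_params);
 *
 * The key pointer must reference key_len bytes laid out exactly like the
 * key field inside the hashed objects, since rhashtable_compare() does a
 * plain memory comparison.
 */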

/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int elasticity;
	unsigned int hash;
	int err;

restart:
	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, params);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
		if (!IS_ERR_OR_NULL(tbl))
			goto slow_path;

		err = PTR_ERR(tbl);
		goto out;
	}

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
		spin_unlock_bh(lock);
		err = rhashtable_insert_rehash(ht, tbl);
		rcu_read_unlock();
		if (err)
			return err;

		goto restart;
	}

	err = -EEXIST;
	elasticity = ht->elasticity;
	rht_for_each(head, tbl, hash) {
		if (key &&
		    unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
		if (!--elasticity)
			goto slow_path;
	}

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% occupancy watermark (see rht_grow_above_75()).
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_insert_fast(ht, NULL, obj, params);
}
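
/*
 * An insertion sketch (hypothetical names; error paths abbreviated):
 *
 *	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->id = id;
 *	err = rhashtable_insert_fast(ht, &obj->node, example_params);
 *	if (err)
 *		kfree(obj);
 *
 * Note that rhashtable_insert_fast() passes a NULL key and therefore does
 * not reject duplicates; use rhashtable_lookup_insert_fast() below when
 * the key must be unique.
 */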

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% occupancy watermark (see rht_grow_above_75()).
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% occupancy watermark (see rht_grow_above_75()).
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will schedule an automatic, deferred shrink of the table when
 * automatic_shrinking is enabled and occupancy drops below 30%
 * (see rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (unlikely(ht->p.automatic_shrinking &&
		     rht_shrink_below_30(ht, tbl)))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}
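
/*
 * A removal sketch (hypothetical names): because concurrent RCU readers
 * may still hold the object, it must only be freed after a grace period,
 * e.g. assuming example_obj embeds a struct rcu_head named rcu:
 *
 *	if (rhashtable_remove_fast(ht, &obj->node, example_params) == 0)
 *		kfree_rcu(obj, rcu);
 */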

#endif /* _LINUX_RHASHTABLE_H */