Lines Matching +full:depth +full:-

1 /* SPDX-License-Identifier: GPL-2.0-only */
6 * Copyright (C) 2013-2014 Jens Axboe
18 * struct sbitmap_word - Word in a &struct sbitmap.
22 * @depth: Number of bits being used in @word/@cleared
24 unsigned long depth; member
37 * @swap_lock: Held while swapping word <-> cleared
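Pulling the scattered matches together, the per-word bookkeeping looks roughly like this in the header this search hits; a sketch under the assumption of this kernel generation (the cacheline alignment annotations keep @word and @cleared off each other's cachelines to avoid ping-pong between allocators and freers):

    struct sbitmap_word {
            unsigned long depth;    /* bits in use in @word/@cleared */
            unsigned long word ____cacheline_aligned_in_smp;        /* free bits */
            unsigned long cleared ____cacheline_aligned_in_smp;     /* deferred-cleared bits */
            spinlock_t swap_lock;   /* held while swapping word <-> cleared */
    } ____cacheline_aligned_in_smp;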
43 * struct sbitmap - Scalable bitmap.
45 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
50 * @depth: Number of bits used in the whole bitmap.
52 unsigned int depth; member
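For orientation, the top-level structure the matches above belong to has roughly these fields; a sketch, not the verbatim header:

    struct sbitmap {
            unsigned int depth;             /* bits used in the whole bitmap */
            unsigned int shift;             /* log2(number of bits per word) */
            unsigned int map_nr;            /* number of sbitmap_word structs */
            struct sbitmap_word *map;       /* the allocated words */
    };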
74 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
89 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
106 * This is per-cpu, which allows multiple users to stick to different
133 * @round_robin: Allocate bits in strict round-robin order.
138 * @min_shallow_depth: The minimum shallow depth which may be passed to
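The queue wrapper layers wakeup machinery on top of the plain bitmap. A rough sketch of its members, matching the kernel-doc fragments above (field order and exact names assumed from this kernel generation):

    struct sbitmap_queue {
            struct sbitmap sb;                      /* the underlying bitmap */
            unsigned int __percpu *alloc_hint;      /* per-cpu cache of the last allocated bit */
            unsigned int wake_batch;                /* frees between waiter wakeups */
            atomic_t wake_index;                    /* next wait queue to wake */
            struct sbq_wait_state *ws;              /* array of SBQ_WAIT_QUEUES wait queues */
            atomic_t ws_active;                     /* count of currently active waiters */
            bool round_robin;                       /* allocate bits in strict round-robin order */
            unsigned int min_shallow_depth;         /* smallest shallow depth callers will pass */
    };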
145 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
147 * @depth: Number of bits to allocate.
155 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
159 * sbitmap_free() - Free memory used by a &struct sbitmap.
164 kfree(sb->map); in sbitmap_free()
165 sb->map = NULL; in sbitmap_free()
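A minimal pairing of the two calls, as a usage sketch (error handling trimmed; a shift of -1 requests the default per-word granularity, and the depth of 128 is just an illustrative value):

    #include <linux/sbitmap.h>

    struct sbitmap sb;
    int ret;

    /* 128 bits, default shift, allocated on the local NUMA node. */
    ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
    if (ret)
            return ret;
    /* ... allocate and free bits ... */
    sbitmap_free(&sb);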
169 * sbitmap_resize() - Resize a &struct sbitmap.
171 * @depth: New number of bits to resize to.
174 * depth doesn't exceed the depth that the sb was initialized with.
176 void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
179 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
188 * Return: Non-negative allocated bit number if successful, -1 otherwise.
193 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
194 * limiting the depth used from each word.
200 * different allocation limits. E.g., there can be a high-priority class that
201 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
202 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
204 * from starving out the high-priority class.
206 * Return: Non-negative allocated bit number if successful, -1 otherwise.
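The two-class scheme described above can be written out directly. A sketch assuming the three-argument sbitmap_get() and the hint-taking sbitmap_get_shallow() of this kernel generation (the hint of 0 and the derived shallow depth are illustrative):

    /* High-priority users may claim any free bit in a word... */
    int hi_nr = sbitmap_get(&sb, 0, false);

    /* ...while low-priority users only see the lower half of each word. */
    int lo_nr = sbitmap_get_shallow(&sb, 0, 1UL << (sb.shift - 1));

    /* Both return -1 once no bit within their respective limit is free. */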
212 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
219 #define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
220 #define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
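As a worked example: with shift == 6 (64-bit words), bit number 130 resolves to word index 130 >> 6 == 2 and in-word offset 130 & 63 == 2.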
225 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
231 * This is inline even though it's non-trivial so that the function calls to the
242 if (start >= sb->depth) in __sbitmap_for_each_set()
247 while (scanned < sb->depth) { in __sbitmap_for_each_set()
249 unsigned int depth = min_t(unsigned int, in __sbitmap_for_each_set() local
250 sb->map[index].depth - nr, in __sbitmap_for_each_set()
251 sb->depth - scanned); in __sbitmap_for_each_set()
253 scanned += depth; in __sbitmap_for_each_set()
254 word = sb->map[index].word & ~sb->map[index].cleared; in __sbitmap_for_each_set()
263 depth += nr; in __sbitmap_for_each_set()
265 nr = find_next_bit(&word, depth, nr); in __sbitmap_for_each_set()
266 if (nr >= depth) in __sbitmap_for_each_set()
268 if (!fn(sb, (index << sb->shift) + nr, data)) in __sbitmap_for_each_set()
275 if (++index >= sb->map_nr) in __sbitmap_for_each_set()
281 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
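The iterator takes a callback of type sb_for_each_fn, which returns false to stop the walk early. A usage sketch with a hypothetical counting callback:

    static bool count_set(struct sbitmap *sb, unsigned int bitnr, void *data)
    {
            (*(unsigned int *)data)++;      /* tally this set bit */
            return true;                    /* keep scanning */
    }

    /* ... */
    unsigned int count = 0;
    sbitmap_for_each_set(&sb, count_set, &count);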
295 return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word; in __sbitmap_word()
312 * sets the corresponding bit in the ->cleared mask instead. Paired with
314 * will clear the previously freed entries in the corresponding ->word.
318 unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared; in sbitmap_deferred_clear_bit()
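Completing the fragment at line 318, the helper is nothing more than a set_bit() on the word's ->cleared mask; a reconstruction consistent with the surrounding matches:

    static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
    {
            unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

            set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
    }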
335 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
344 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
355 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
358 * @depth: See sbitmap_init_node().
366 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
370 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
376 kfree(sbq->ws); in sbitmap_queue_free()
377 free_percpu(sbq->alloc_hint); in sbitmap_queue_free()
378 sbitmap_free(&sbq->sb); in sbitmap_queue_free()
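The queue variant pairs up the same way; a usage sketch assuming the six-argument signature of this kernel generation (the depth and round_robin values are illustrative):

    struct sbitmap_queue sbq;
    int ret;

    /* 256 bits, default shift, first-fit rather than round-robin allocation. */
    ret = sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL, NUMA_NO_NODE);
    if (ret)
            return ret;
    /* ... */
    sbitmap_queue_free(&sbq);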
382 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
384 * @depth: New number of bits to resize to.
390 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
393 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
397 * Return: Non-negative allocated bit number if successful, -1 otherwise.
402 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
403 * sbitmap_queue, limiting the depth used from each word, with preemption
412 * Return: Non-negative allocated bit number if successful, -1 otherwise.
418 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
424 * Return: Non-negative allocated bit number if successful, -1 otherwise.
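Allocation and release pair up as below: sbitmap_queue_get() records the caller's CPU so the matching sbitmap_queue_clear() can update the same per-cpu allocation hint. A sketch assuming the two-argument get of this kernel generation:

    unsigned int cpu;
    int nr;

    nr = sbitmap_queue_get(&sbq, &cpu);
    if (nr >= 0) {
            /* ... use bit nr as a tag ... */
            sbitmap_queue_clear(&sbq, nr, cpu);
    }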
438 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
439 * sbitmap_queue, limiting the depth used from each word.
449 * Return: Non-negative allocated bit number if successful, -1 otherwise.
464 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
465 * minimum shallow depth that will be used.
467 * @min_shallow_depth: The minimum shallow depth that will be passed to
471 * depends on the depth of the bitmap. Since the shallow allocation functions
472 * effectively operate with a different depth, the shallow depth must be taken
474 * with the minimum shallow depth that will be used. Failure to do so can result
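In practice this means declaring the floor before the first shallow allocation. A sketch (the value 4 is illustrative, and the argument order of sbitmap_queue_get_shallow() is assumed):

    /* We will never pass a shallow depth smaller than 4... */
    sbitmap_queue_min_shallow_depth(&sbq, 4);

    /* ...so the wake batch is sized accordingly and waiters cannot hang. */
    nr = sbitmap_queue_get_shallow(&sbq, &cpu, 4);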
481 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
492 return (index + 1) & (SBQ_WAIT_QUEUES - 1); in sbq_index_inc()
503 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
513 ws = &sbq->ws[atomic_read(wait_index)]; in sbq_wait_ptr()
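Completing the fragment at line 513, the helper returns the current wait queue and advances the caller's index for next time; a reconstruction consistent with this kernel generation:

    static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
                                                      atomic_t *wait_index)
    {
            struct sbq_wait_state *ws;

            ws = &sbq->ws[atomic_read(wait_index)];
            sbq_index_atomic_inc(wait_index);
            return ws;
    }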
519 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
526 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
533 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct