Lines Matching refs:sbq (lib/sbitmap.c); usage sketches follow the listing.

324 static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,  in sbq_calc_wake_batch()  argument
346 shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth); in sbq_calc_wake_batch()
347 depth = ((depth >> sbq->sb.shift) * shallow_depth + in sbq_calc_wake_batch()
348 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth)); in sbq_calc_wake_batch()
355 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, in sbitmap_queue_init_node() argument
361 ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node); in sbitmap_queue_init_node()
365 sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags); in sbitmap_queue_init_node()
366 if (!sbq->alloc_hint) { in sbitmap_queue_init_node()
367 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
373 *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth; in sbitmap_queue_init_node()
376 sbq->min_shallow_depth = UINT_MAX; in sbitmap_queue_init_node()
377 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_init_node()
378 atomic_set(&sbq->wake_index, 0); in sbitmap_queue_init_node()
379 atomic_set(&sbq->ws_active, 0); in sbitmap_queue_init_node()
381 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); in sbitmap_queue_init_node()
382 if (!sbq->ws) { in sbitmap_queue_init_node()
383 free_percpu(sbq->alloc_hint); in sbitmap_queue_init_node()
384 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
389 init_waitqueue_head(&sbq->ws[i].wait); in sbitmap_queue_init_node()
390 atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch); in sbitmap_queue_init_node()
393 sbq->round_robin = round_robin; in sbitmap_queue_init_node()
398 static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, in sbitmap_queue_update_wake_batch() argument
401 unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_update_wake_batch()
404 if (sbq->wake_batch != wake_batch) { in sbitmap_queue_update_wake_batch()
405 WRITE_ONCE(sbq->wake_batch, wake_batch); in sbitmap_queue_update_wake_batch()
413 atomic_set(&sbq->ws[i].wait_cnt, 1); in sbitmap_queue_update_wake_batch()
417 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) in sbitmap_queue_resize() argument
419 sbitmap_queue_update_wake_batch(sbq, depth); in sbitmap_queue_resize()
420 sbitmap_resize(&sbq->sb, depth); in sbitmap_queue_resize()
424 int __sbitmap_queue_get(struct sbitmap_queue *sbq) in __sbitmap_queue_get() argument
429 hint = this_cpu_read(*sbq->alloc_hint); in __sbitmap_queue_get()
430 depth = READ_ONCE(sbq->sb.depth); in __sbitmap_queue_get()
433 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get()
435 nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin); in __sbitmap_queue_get()
439 this_cpu_write(*sbq->alloc_hint, 0); in __sbitmap_queue_get()
440 } else if (nr == hint || unlikely(sbq->round_robin)) { in __sbitmap_queue_get()
445 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get()
452 int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, in __sbitmap_queue_get_shallow() argument
458 WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); in __sbitmap_queue_get_shallow()
460 hint = this_cpu_read(*sbq->alloc_hint); in __sbitmap_queue_get_shallow()
461 depth = READ_ONCE(sbq->sb.depth); in __sbitmap_queue_get_shallow()
464 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get_shallow()
466 nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth); in __sbitmap_queue_get_shallow()
470 this_cpu_write(*sbq->alloc_hint, 0); in __sbitmap_queue_get_shallow()
471 } else if (nr == hint || unlikely(sbq->round_robin)) { in __sbitmap_queue_get_shallow()
476 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get_shallow()
483 void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, in sbitmap_queue_min_shallow_depth() argument
486 sbq->min_shallow_depth = min_shallow_depth; in sbitmap_queue_min_shallow_depth()
487 sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth); in sbitmap_queue_min_shallow_depth()
491 static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) in sbq_wake_ptr() argument
495 if (!atomic_read(&sbq->ws_active)) in sbq_wake_ptr()
498 wake_index = atomic_read(&sbq->wake_index); in sbq_wake_ptr()
500 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbq_wake_ptr()
503 if (wake_index != atomic_read(&sbq->wake_index)) in sbq_wake_ptr()
504 atomic_set(&sbq->wake_index, wake_index); in sbq_wake_ptr()
514 static bool __sbq_wake_up(struct sbitmap_queue *sbq) in __sbq_wake_up() argument
520 ws = sbq_wake_ptr(sbq); in __sbq_wake_up()
528 wake_batch = READ_ONCE(sbq->wake_batch); in __sbq_wake_up()
544 sbq_index_atomic_inc(&sbq->wake_index); in __sbq_wake_up()
555 void sbitmap_queue_wake_up(struct sbitmap_queue *sbq) in sbitmap_queue_wake_up() argument
557 while (__sbq_wake_up(sbq)) in sbitmap_queue_wake_up()
562 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, in sbitmap_queue_clear() argument
576 sbitmap_deferred_clear_bit(&sbq->sb, nr); in sbitmap_queue_clear()
585 sbitmap_queue_wake_up(sbq); in sbitmap_queue_clear()
587 if (likely(!sbq->round_robin && nr < sbq->sb.depth)) in sbitmap_queue_clear()
588 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; in sbitmap_queue_clear()
592 void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) in sbitmap_queue_wake_all() argument
601 wake_index = atomic_read(&sbq->wake_index); in sbitmap_queue_wake_all()
603 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbitmap_queue_wake_all()
613 void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) in sbitmap_queue_show() argument
618 sbitmap_show(&sbq->sb, m); in sbitmap_queue_show()
626 seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i)); in sbitmap_queue_show()
630 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); in sbitmap_queue_show()
631 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); in sbitmap_queue_show()
632 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); in sbitmap_queue_show()
636 struct sbq_wait_state *ws = &sbq->ws[i]; in sbitmap_queue_show()
644 seq_printf(m, "round_robin=%d\n", sbq->round_robin); in sbitmap_queue_show()
645 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); in sbitmap_queue_show()
649 void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, in sbitmap_add_wait_queue() argument
653 if (!sbq_wait->sbq) { in sbitmap_add_wait_queue()
654 sbq_wait->sbq = sbq; in sbitmap_add_wait_queue()
655 atomic_inc(&sbq->ws_active); in sbitmap_add_wait_queue()
664 if (sbq_wait->sbq) { in sbitmap_del_wait_queue()
665 atomic_dec(&sbq_wait->sbq->ws_active); in sbitmap_del_wait_queue()
666 sbq_wait->sbq = NULL; in sbitmap_del_wait_queue()
671 void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, in sbitmap_prepare_to_wait() argument
675 if (!sbq_wait->sbq) { in sbitmap_prepare_to_wait()
676 atomic_inc(&sbq->ws_active); in sbitmap_prepare_to_wait()
677 sbq_wait->sbq = sbq; in sbitmap_prepare_to_wait()
683 void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, in sbitmap_finish_wait() argument
687 if (sbq_wait->sbq) { in sbitmap_finish_wait()
688 atomic_dec(&sbq->ws_active); in sbitmap_finish_wait()
689 sbq_wait->sbq = NULL; in sbitmap_finish_wait()
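
The references above trace the sbitmap_queue lifecycle: sbitmap_queue_init_node() sizes the map, allocates the per-CPU alloc_hint and the SBQ_WAIT_QUEUES wait queues, and derives wake_batch via sbq_calc_wake_batch(); __sbitmap_queue_get() hands out free bits starting from the per-CPU hint; sbitmap_queue_clear() returns a bit and, once enough clears have accumulated, wakes a batch of waiters through sbitmap_queue_wake_up(). The clamp at lines 346-348 recomputes the effective depth as if every word held at most min_shallow_depth usable bits (for example, with 64-bit words, depth 128 and min_shallow_depth 16, the effective depth becomes 2 * 16 + min(0, 16) = 32), so wake batches never exceed what shallow allocations can actually release. A minimal usage sketch, assuming kernel context; MY_TAG_DEPTH, my_tags and the my_tag_* helpers are illustrative names, not part of the API:

#include <linux/sbitmap.h>
#include <linux/gfp.h>
#include <linux/numa.h>
#include <linux/smp.h>

#define MY_TAG_DEPTH 128	/* illustrative depth */

static struct sbitmap_queue my_tags;

static int my_tags_init(void)
{
	/* shift < 0 lets sbitmap pick the per-word size; round_robin=false
	 * keeps allocations clustered around the per-CPU hint. */
	return sbitmap_queue_init_node(&my_tags, MY_TAG_DEPTH, -1, false,
				       GFP_KERNEL, NUMA_NO_NODE);
}

static int my_tag_get(void)
{
	/* Returns a free bit number, or -1 when the map is exhausted. */
	return __sbitmap_queue_get(&my_tags);
}

static void my_tag_put(unsigned int tag)
{
	/* Clears the bit, wakes a batch of waiters once wake_batch clears
	 * have accumulated, and refreshes the allocation hint for this CPU.
	 * Callers normally pass the CPU the tag was allocated on. */
	sbitmap_queue_clear(&my_tags, tag, raw_smp_processor_id());
}

static void my_tags_exit(void)
{
	sbitmap_queue_free(&my_tags);
}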
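
__sbitmap_queue_get_shallow() and sbitmap_queue_min_shallow_depth() support callers (such as I/O schedulers) that cap how many bits of each word a given allocation class may take; the WARN_ON_ONCE at line 458 insists that the smallest limit be declared up front so wake_batch stays consistent with it. A sketch, again with hypothetical names and an arbitrary limit of 16:

/* Declare, once at setup time, the smallest shallow depth we will use. */
static void my_tags_limit_setup(struct sbitmap_queue *sbq)
{
	sbitmap_queue_min_shallow_depth(sbq, 16);
}

/* Allocation path for the throttled class: only the first 16 bits of each
 * word are eligible, so this class cannot monopolize the map. */
static int my_tag_get_throttled(struct sbitmap_queue *sbq)
{
	return __sbitmap_queue_get_shallow(sbq, 16);
}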
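
The sbq_wait helpers at the end of the listing (sbitmap_add_wait_queue(), sbitmap_prepare_to_wait(), sbitmap_finish_wait()) keep ws_active accurate so sbq_wake_ptr() can skip the wakeup scan when nobody is waiting. The sleeping retry loop below is a sketch of how blk-mq-style callers combine them; my_wait_index and my_tag_get_sleeping are illustrative, and error/abort handling is omitted.

#include <linux/sbitmap.h>
#include <linux/sched.h>

static atomic_t my_wait_index = ATOMIC_INIT(0);

static int my_tag_get_sleeping(struct sbitmap_queue *sbq)
{
	DEFINE_SBQ_WAIT(wait);
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(sbq);
	if (nr >= 0)
		return nr;

	ws = sbq_wait_ptr(sbq, &my_wait_index);
	for (;;) {
		/* Queue on one of the SBQ_WAIT_QUEUES wait heads and bump
		 * ws_active so sbq_wake_ptr() knows someone is waiting. */
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);

		/* Re-check after queueing to close the race with a clear
		 * that happened before we were on the wait queue. */
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;

		io_schedule();

		/* Drop ws_active while picking the next wait queue. */
		sbitmap_finish_wait(sbq, ws, &wait);
		ws = sbq_wait_ptr(sbq, &my_wait_index);
	}
	sbitmap_finish_wait(sbq, ws, &wait);
	return nr;
}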