Lines matching refs:chunk in lib/genalloc.c (the Linux kernel's general-purpose special memory pool allocator). Each match shows the kernel source line number and the enclosing function.

38 static inline size_t chunk_size(const struct gen_pool_chunk *chunk)  in chunk_size()  argument
40 return chunk->end_addr - chunk->start_addr + 1; in chunk_size()
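The "+ 1" works because end_addr stores the last valid byte of the chunk (inclusive bounds), as set by gen_pool_add_owner() below (line 197). A worked example with hypothetical addresses:

    /* start_addr = 0x1000, end_addr = 0x1fff (inclusive last byte) */
    /* chunk_size() = 0x1fff - 0x1000 + 1 = 0x1000 bytes (4 KiB)    */
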
186 struct gen_pool_chunk *chunk; in gen_pool_add_owner() local
191 chunk = vzalloc_node(nbytes, nid); in gen_pool_add_owner()
192 if (unlikely(chunk == NULL)) in gen_pool_add_owner()
195 chunk->phys_addr = phys; in gen_pool_add_owner()
196 chunk->start_addr = virt; in gen_pool_add_owner()
197 chunk->end_addr = virt + size - 1; in gen_pool_add_owner()
198 chunk->owner = owner; in gen_pool_add_owner()
199 atomic_long_set(&chunk->avail, size); in gen_pool_add_owner()
202 list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_owner()
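gen_pool_add_owner() is the backend behind the public gen_pool_add()/gen_pool_add_virt() helpers: it vzalloc's the chunk header plus allocation bitmap (line 191), records the inclusive address range (lines 196-197), and publishes the chunk on the pool's RCU-protected list (line 202). A minimal caller-side sketch; the SRAM addresses, the order-3 granularity, and example_pool_init() itself are hypothetical:

    #include <linux/genalloc.h>
    #include <linux/sizes.h>

    /* Hypothetical setup: one 64 KiB chunk of device SRAM, with
     * 8-byte (order-3) allocation granularity. */
    static struct gen_pool *example_pool_init(unsigned long sram_virt,
                                              phys_addr_t sram_phys)
    {
        struct gen_pool *pool = gen_pool_create(3, -1);

        if (!pool)
            return NULL;
        /* gen_pool_add_virt() lands in gen_pool_add_owner() above. */
        if (gen_pool_add_virt(pool, sram_virt, sram_phys, SZ_64K, -1)) {
            gen_pool_destroy(pool);
            return NULL;
        }
        return pool;
    }
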
218 struct gen_pool_chunk *chunk; in gen_pool_virt_to_phys() local
222 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
223 if (addr >= chunk->start_addr && addr <= chunk->end_addr) { in gen_pool_virt_to_phys()
224 paddr = chunk->phys_addr + (addr - chunk->start_addr); in gen_pool_virt_to_phys()
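The translation walks the chunk list under rcu_read_lock() and, on a hit, returns the chunk's physical base plus the offset of addr within the chunk (line 224); if no chunk contains addr, the function returns -1. A hedged sketch reusing the hypothetical pool from above:

    /* Sketch: turn a pool allocation into a physical address, e.g. to
     * program a DMA engine. Returns (phys_addr_t)-1 on failure. */
    static phys_addr_t example_dma_addr(struct gen_pool *pool)
    {
        unsigned long vaddr = gen_pool_alloc(pool, 256);

        if (!vaddr)
            return (phys_addr_t)-1;
        return gen_pool_virt_to_phys(pool, vaddr);
    }
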
244 struct gen_pool_chunk *chunk; in gen_pool_destroy() local
249 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); in gen_pool_destroy()
250 list_del(&chunk->next_chunk); in gen_pool_destroy()
252 end_bit = chunk_size(chunk) >> order; in gen_pool_destroy()
253 bit = find_next_bit(chunk->bits, end_bit, 0); in gen_pool_destroy()
256 vfree(chunk); in gen_pool_destroy()
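Teardown is unforgiving: for each chunk, lines 252-253 scan the allocation bitmap, and a BUG_ON (not shown among the matches) fires if any bit is still set, i.e. if the pool is destroyed with live allocations. A sketch of the required ordering:

    static void example_pool_teardown(struct gen_pool *pool,
                                      unsigned long vaddr)
    {
        /* Return every allocation first; gen_pool_destroy() BUGs
         * on a non-empty pool. */
        gen_pool_free(pool, vaddr, 256);
        gen_pool_destroy(pool);
    }
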
279 struct gen_pool_chunk *chunk; in gen_pool_alloc_algo_owner() local
296 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc_algo_owner()
297 if (size > atomic_long_read(&chunk->avail)) in gen_pool_alloc_algo_owner()
301 end_bit = chunk_size(chunk) >> order; in gen_pool_alloc_algo_owner()
303 start_bit = algo(chunk->bits, end_bit, start_bit, in gen_pool_alloc_algo_owner()
304 nbits, data, pool, chunk->start_addr); in gen_pool_alloc_algo_owner()
307 remain = bitmap_set_ll(chunk->bits, start_bit, nbits); in gen_pool_alloc_algo_owner()
309 remain = bitmap_clear_ll(chunk->bits, start_bit, in gen_pool_alloc_algo_owner()
315 addr = chunk->start_addr + ((unsigned long)start_bit << order); in gen_pool_alloc_algo_owner()
317 atomic_long_sub(size, &chunk->avail); in gen_pool_alloc_algo_owner()
319 *owner = chunk->owner; in gen_pool_alloc_algo_owner()
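This is the allocation core shared by gen_pool_alloc(), gen_pool_alloc_algo() and friends: chunks without enough free space are skipped via the avail counter (line 297), the fit algorithm searches the chunk bitmap (lines 303-304), bitmap_set_ll() claims the bits with rollback and retry on a concurrent-update race (lines 307-309), and the final address is derived from the winning bit index shifted by the pool's minimum order (line 315). A hedged sketch showing explicit algorithm selection; gen_pool_best_fit is one of the stock algorithms exported by genalloc:

    /* Sketch: allocate 512 bytes using best-fit instead of the
     * pool's default algorithm. */
    static unsigned long example_alloc_best_fit(struct gen_pool *pool)
    {
        return gen_pool_alloc_algo(pool, 512, gen_pool_best_fit, NULL);
    }
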
489 struct gen_pool_chunk *chunk; in gen_pool_free_owner() local
502 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free_owner()
503 if (addr >= chunk->start_addr && addr <= chunk->end_addr) { in gen_pool_free_owner()
504 BUG_ON(addr + size - 1 > chunk->end_addr); in gen_pool_free_owner()
505 start_bit = (addr - chunk->start_addr) >> order; in gen_pool_free_owner()
506 remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); in gen_pool_free_owner()
509 atomic_long_add(size, &chunk->avail); in gen_pool_free_owner()
511 *owner = chunk->owner; in gen_pool_free_owner()
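The free path mirrors allocation: the owning chunk is found by address range (line 503), the BUG_ON on line 504 catches frees that run past the chunk's end, the bitmap bits are cleared (line 506), and the bytes are returned to the chunk's avail counter (line 509). Note that genalloc keeps no per-allocation size metadata, so the caller must pass back the exact size it allocated:

    /* Sketch: size must match the original allocation; genalloc
     * does not record allocation sizes. */
    static void example_release(struct gen_pool *pool, unsigned long vaddr)
    {
        gen_pool_free(pool, vaddr, 512);
    }
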
531 void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), in gen_pool_for_each_chunk() argument
534 struct gen_pool_chunk *chunk; in gen_pool_for_each_chunk() local
537 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
538 func(pool, chunk, data); in gen_pool_for_each_chunk()
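The iterator invokes the callback for every chunk while holding rcu_read_lock(), so the callback must not sleep. Since chunk_size() is static to genalloc.c, a callback recomputes the inclusive size itself. A hedged sketch with a hypothetical byte-counting callback:

    /* Hypothetical callback: tally the total bytes across all chunks.
     * Runs under rcu_read_lock(); must be non-blocking. */
    static void example_count_bytes(struct gen_pool *pool,
                                    struct gen_pool_chunk *chunk,
                                    void *data)
    {
        *(size_t *)data += chunk->end_addr - chunk->start_addr + 1;
    }

    /* Caller: size_t total = 0;
     *         gen_pool_for_each_chunk(pool, example_count_bytes, &total); */
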
557 struct gen_pool_chunk *chunk; in gen_pool_has_addr() local
560 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in gen_pool_has_addr()
561 if (start >= chunk->start_addr && start <= chunk->end_addr) { in gen_pool_has_addr()
562 if (end <= chunk->end_addr) { in gen_pool_has_addr()
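A range only counts as present if it lies entirely within one chunk: line 561 checks that start falls inside the chunk, and line 562 that the end does too, so a range straddling two adjacent chunks is reported as absent. Sketch:

    /* Sketch: free only if [vaddr, vaddr + 512) sits fully inside
     * a single chunk of the pool. */
    static void example_checked_free(struct gen_pool *pool,
                                     unsigned long vaddr)
    {
        if (gen_pool_has_addr(pool, vaddr, 512))
            gen_pool_free(pool, vaddr, 512);
    }
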
581 struct gen_pool_chunk *chunk; in gen_pool_avail() local
585 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
586 avail += atomic_long_read(&chunk->avail); in gen_pool_avail()
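gen_pool_avail() sums the per-chunk atomic avail counters under rcu_read_lock(); concurrent allocations and frees may move the counters while the walk is in progress, so the result is a best-effort snapshot. A combined usage sketch follows the gen_pool_size() matches below.
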
600 struct gen_pool_chunk *chunk; in gen_pool_size() local
604 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
605 size += chunk_size(chunk); in gen_pool_size()
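gen_pool_size() is the static-capacity counterpart, summing chunk_size() over all chunks. A hedged sketch combining the two accessors for a simple occupancy report:

    /* Sketch: report pool occupancy. Both accessors are snapshots,
     * so the numbers may be slightly stale when printed. */
    static void example_report(struct gen_pool *pool)
    {
        size_t size  = gen_pool_size(pool);
        size_t avail = gen_pool_avail(pool);

        pr_info("genpool: %zu of %zu bytes in use\n",
                size - avail, size);
    }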