Lines matching refs:zhdr — every line in mm/z3fold.c that references a struct z3fold_header pointer named zhdr. Each entry gives the source line number, the line itself, and the enclosing function, with zhdr's role there tagged as argument or local.

239 static inline void z3fold_page_lock(struct z3fold_header *zhdr)  in z3fold_page_lock()  argument
241 spin_lock(&zhdr->page_lock); in z3fold_page_lock()
245 static inline int z3fold_page_trylock(struct z3fold_header *zhdr) in z3fold_page_trylock() argument
247 return spin_trylock(&zhdr->page_lock); in z3fold_page_trylock()
251 static inline void z3fold_page_unlock(struct z3fold_header *zhdr) in z3fold_page_unlock() argument
253 spin_unlock(&zhdr->page_lock); in z3fold_page_unlock()
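
The three wrappers above (lines 239-253) are thin shims around a per-page spinlock embedded in the header. A minimal userspace sketch of the same pattern, with pthread_mutex_t standing in for the kernel spinlock (the struct here is a stub, not the real z3fold_header):

    #include <pthread.h>
    #include <stdio.h>

    struct z3fold_header {
        pthread_mutex_t page_lock;   /* spinlock_t in the kernel */
    };

    static void z3fold_page_lock(struct z3fold_header *zhdr)
    {
        pthread_mutex_lock(&zhdr->page_lock);
    }

    static int z3fold_page_trylock(struct z3fold_header *zhdr)
    {
        /* nonzero on success, matching spin_trylock() */
        return pthread_mutex_trylock(&zhdr->page_lock) == 0;
    }

    static void z3fold_page_unlock(struct z3fold_header *zhdr)
    {
        pthread_mutex_unlock(&zhdr->page_lock);
    }

    int main(void)
    {
        struct z3fold_header zhdr = { PTHREAD_MUTEX_INITIALIZER };

        z3fold_page_lock(&zhdr);
        /* trylock on a locked page fails, as in the kernel */
        printf("trylock while held: %d\n", z3fold_page_trylock(&zhdr));
        z3fold_page_unlock(&zhdr);
        return 0;
    }
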
261 struct z3fold_header *zhdr; in __get_z3fold_header() local
271 zhdr = (struct z3fold_header *)(addr & PAGE_MASK); in __get_z3fold_header()
273 locked = z3fold_page_trylock(zhdr); in __get_z3fold_header()
280 zhdr = (struct z3fold_header *)(handle & PAGE_MASK); in __get_z3fold_header()
283 return zhdr; in __get_z3fold_header()
298 static inline void put_z3fold_header(struct z3fold_header *zhdr) in put_z3fold_header() argument
300 struct page *page = virt_to_page(zhdr); in put_z3fold_header()
303 z3fold_page_unlock(zhdr); in put_z3fold_header()
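
__get_z3fold_header() and put_z3fold_header() (lines 261-303) rely on the header always sitting at the start of its page, so any address within the page can be masked down to the header with PAGE_MASK, as line 271 does. A self-contained demonstration of that arithmetic (4 KiB pages assumed):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        /* aligned_alloc (C11) stands in for a page allocation */
        unsigned char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        unsigned char *inside = page + 1234;   /* any address within the page */

        /* same arithmetic as line 271: mask the low bits away */
        void *zhdr = (void *)((uintptr_t)inside & PAGE_MASK);

        printf("recovered header == page start: %d\n", zhdr == (void *)page);
        free(page);
        return 0;
    }
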
306 static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr) in free_handle() argument
327 if (zhdr->slots != slots) in free_handle()
328 zhdr->foreign_handles--; in free_handle()
342 if (zhdr->slots == slots) in free_handle()
343 zhdr->slots = NULL; in free_handle()
400 struct z3fold_header *zhdr = page_address(page); in init_z3fold_page() local
410 return zhdr; in init_z3fold_page()
416 spin_lock_init(&zhdr->page_lock); in init_z3fold_page()
417 kref_init(&zhdr->refcount); in init_z3fold_page()
418 zhdr->first_chunks = 0; in init_z3fold_page()
419 zhdr->middle_chunks = 0; in init_z3fold_page()
420 zhdr->last_chunks = 0; in init_z3fold_page()
421 zhdr->first_num = 0; in init_z3fold_page()
422 zhdr->start_middle = 0; in init_z3fold_page()
423 zhdr->cpu = -1; in init_z3fold_page()
424 zhdr->foreign_handles = 0; in init_z3fold_page()
425 zhdr->mapped_count = 0; in init_z3fold_page()
426 zhdr->slots = slots; in init_z3fold_page()
427 zhdr->pool = pool; in init_z3fold_page()
428 INIT_LIST_HEAD(&zhdr->buddy); in init_z3fold_page()
429 INIT_WORK(&zhdr->work, compact_page_work); in init_z3fold_page()
430 return zhdr; in init_z3fold_page()
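
init_z3fold_page() (lines 400-430) zeroes the per-buddy chunk counts and wires up the locking, refcount and work machinery. A plain-C stand-in for the header with the fields the listing touches; kernel types (spinlock_t, kref, list_head, work_struct) are reduced to scalars and the exact field widths are an assumption:

    #include <stdio.h>

    struct z3fold_header {
        int page_lock;                  /* spinlock_t */
        int refcount;                   /* struct kref */
        unsigned short first_chunks;    /* chunks used by the FIRST buddy */
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;    /* chunk offset of the MIDDLE buddy */
        unsigned short first_num:2;     /* rotated buddy numbering base */
        short cpu;                      /* CPU owning the unbuddied entry */
        unsigned short foreign_handles; /* handles living in other slots */
        unsigned short mapped_count;    /* currently mapped buddies */
        void *slots;                    /* struct z3fold_buddy_slots * */
        void *pool;                     /* struct z3fold_pool * */
    };

    static void init_header(struct z3fold_header *zhdr, void *slots, void *pool)
    {
        zhdr->refcount = 1;             /* kref_init() starts at 1 */
        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        zhdr->cpu = -1;                 /* not on any per-CPU list yet */
        zhdr->foreign_handles = 0;
        zhdr->mapped_count = 0;
        zhdr->slots = slots;
        zhdr->pool = pool;
    }

    int main(void)
    {
        struct z3fold_header zhdr;
        init_header(&zhdr, NULL, NULL);
        printf("fresh page: cpu=%d refcount=%d\n", zhdr.cpu, zhdr.refcount);
        return 0;
    }
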
446 static inline int __idx(struct z3fold_header *zhdr, enum buddy bud) in __idx() argument
448 return (bud + zhdr->first_num) & BUDDY_MASK; in __idx()
455 static unsigned long __encode_handle(struct z3fold_header *zhdr, in __encode_handle() argument
459 unsigned long h = (unsigned long)zhdr; in __encode_handle()
470 idx = __idx(zhdr, bud); in __encode_handle()
473 h |= (zhdr->last_chunks << BUDDY_SHIFT); in __encode_handle()
481 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) in encode_handle() argument
483 return __encode_handle(zhdr, zhdr->slots, bud); in encode_handle()
505 struct z3fold_header *zhdr; in handle_to_buddy() local
513 zhdr = (struct z3fold_header *)(addr & PAGE_MASK); in handle_to_buddy()
514 return (addr - zhdr->first_num) & BUDDY_MASK; in handle_to_buddy()
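
__idx(), __encode_handle() and handle_to_buddy() (lines 446-514) form a round trip: the buddy index lands in the low bits of the page-aligned header address, offset by first_num so compaction can renumber buddies without invalidating handles; line 473 additionally stashes last_chunks in the handle via BUDDY_SHIFT. A sketch of just the index round trip, using the same mask:

    #include <stdio.h>

    #define BUDDY_MASK 0x3

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    /* encode, as on line 448 */
    static int __idx(unsigned int first_num, enum buddy bud)
    {
        return (bud + first_num) & BUDDY_MASK;
    }

    /* decode, as on line 514, with idx standing in for the handle's low bits */
    static enum buddy idx_to_buddy(unsigned int first_num, int idx)
    {
        return (idx - first_num) & BUDDY_MASK;
    }

    int main(void)
    {
        unsigned int first_num;
        enum buddy bud;

        for (first_num = 0; first_num < 4; first_num++)
            for (bud = FIRST; bud <= LAST; bud++)
                if (idx_to_buddy(first_num, __idx(first_num, bud)) != bud)
                    printf("round trip broken!\n");

        printf("index round trip holds for every first_num\n");
        return 0;
    }
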
517 static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr) in zhdr_to_pool() argument
519 return zhdr->pool; in zhdr_to_pool()
522 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) in __release_z3fold_page() argument
524 struct page *page = virt_to_page(zhdr); in __release_z3fold_page()
525 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in __release_z3fold_page()
527 WARN_ON(!list_empty(&zhdr->buddy)); in __release_z3fold_page()
536 z3fold_page_unlock(zhdr); in __release_z3fold_page()
539 list_add(&zhdr->buddy, &pool->stale); in __release_z3fold_page()
547 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, in release_z3fold_page() local
549 __release_z3fold_page(zhdr, false); in release_z3fold_page()
554 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, in release_z3fold_page_locked() local
556 WARN_ON(z3fold_page_trylock(zhdr)); in release_z3fold_page_locked()
557 __release_z3fold_page(zhdr, true); in release_z3fold_page_locked()
562 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, in release_z3fold_page_locked_list() local
564 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in release_z3fold_page_locked_list()
567 list_del_init(&zhdr->buddy); in release_z3fold_page_locked_list()
570 WARN_ON(z3fold_page_trylock(zhdr)); in release_z3fold_page_locked_list()
571 __release_z3fold_page(zhdr, true); in release_z3fold_page_locked_list()
580 struct z3fold_header *zhdr = list_first_entry(&pool->stale, in free_pages_work() local
582 struct page *page = virt_to_page(zhdr); in free_pages_work()
584 list_del(&zhdr->buddy); in free_pages_work()
588 cancel_work_sync(&zhdr->work); in free_pages_work()
600 static int num_free_chunks(struct z3fold_header *zhdr) in num_free_chunks() argument
608 if (zhdr->middle_chunks != 0) { in num_free_chunks()
609 int nfree_before = zhdr->first_chunks ? in num_free_chunks()
610 0 : zhdr->start_middle - ZHDR_CHUNKS; in num_free_chunks()
611 int nfree_after = zhdr->last_chunks ? in num_free_chunks()
613 (zhdr->start_middle + zhdr->middle_chunks); in num_free_chunks()
616 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; in num_free_chunks()
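
num_free_chunks() (lines 600-616) reports the largest contiguous free run in a page: with a middle buddy present it is the bigger of the gap before start_middle (minus the header chunks) and the gap after the middle buddy; otherwise it is simply NCHUNKS minus first and last. A standalone model with a worked case (chunk constants assume 4 KiB pages; the one-chunk header is an assumption):

    #include <stdio.h>

    #define TOTAL_CHUNKS 64     /* PAGE_SIZE >> CHUNK_SHIFT for 4 KiB pages */
    #define ZHDR_CHUNKS  1      /* assumed header size, in chunks */
    #define NCHUNKS      (TOTAL_CHUNKS - ZHDR_CHUNKS)

    struct zh { int first_chunks, middle_chunks, last_chunks, start_middle; };

    static int num_free_chunks(const struct zh *z)
    {
        if (z->middle_chunks) {
            int nfree_before = z->first_chunks ?
                0 : z->start_middle - ZHDR_CHUNKS;
            int nfree_after = z->last_chunks ?
                0 : TOTAL_CHUNKS -
                    (z->start_middle + z->middle_chunks);
            return nfree_before > nfree_after ? nfree_before : nfree_after;
        }
        return NCHUNKS - z->first_chunks - z->last_chunks;
    }

    int main(void)
    {
        /* 10-chunk middle buddy at chunk 20, first/last empty:
         * 19 chunks free before it, 34 after -> the larger gap wins */
        struct zh z = { 0, 10, 0, 20 };
        printf("free chunks: %d\n", num_free_chunks(&z));   /* 34 */
        return 0;
    }
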
622 struct z3fold_header *zhdr) in add_to_unbuddied() argument
624 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || in add_to_unbuddied()
625 zhdr->middle_chunks == 0) { in add_to_unbuddied()
628 int freechunks = num_free_chunks(zhdr); in add_to_unbuddied()
630 list_add(&zhdr->buddy, &unbuddied[freechunks]); in add_to_unbuddied()
632 zhdr->cpu = smp_processor_id(); in add_to_unbuddied()
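
add_to_unbuddied() (lines 622-632) files any page with at least one free buddy into a per-CPU array of lists indexed by its free chunk count, so the allocator can jump straight to a bucket that fits. A userspace model with plain singly linked lists instead of per-CPU list_heads (the bucket count assumes a one-chunk header on 4 KiB pages):

    #include <stdio.h>
    #include <stddef.h>

    #define NCHUNKS 63   /* buckets 0..NCHUNKS-1, like the kernel's unbuddied[] */

    struct zh { int freechunks; struct zh *next; };

    static struct zh *unbuddied[NCHUNKS];

    static void add_to_unbuddied(struct zh *z)
    {
        z->next = unbuddied[z->freechunks];
        unbuddied[z->freechunks] = z;
    }

    /* the allocator's first-fit scan: start at the bucket matching the
     * request and walk upward, as __z3fold_alloc() does */
    static struct zh *find_unbuddied(int chunks)
    {
        int i;

        for (i = chunks; i < NCHUNKS; i++)
            if (unbuddied[i])
                return unbuddied[i];
        return NULL;
    }

    int main(void)
    {
        struct zh a = { 10 }, b = { 40 };

        add_to_unbuddied(&a);
        add_to_unbuddied(&b);
        printf("request 20 chunks -> page with %d free\n",
               find_unbuddied(20)->freechunks);   /* 40 */
        return 0;
    }
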
637 static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) in get_free_buddy() argument
641 if (zhdr->middle_chunks) { in get_free_buddy()
642 if (!zhdr->first_chunks && in get_free_buddy()
643 chunks <= zhdr->start_middle - ZHDR_CHUNKS) in get_free_buddy()
645 else if (!zhdr->last_chunks) in get_free_buddy()
648 if (!zhdr->first_chunks) in get_free_buddy()
650 else if (!zhdr->last_chunks) in get_free_buddy()
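
get_free_buddy() (lines 637-650) decides where a request of a given chunk count can go: with a middle buddy present, try the gap before it, then LAST; otherwise FIRST, LAST, MIDDLE in that order. A standalone version mirroring the listing's branches, where HEADLESS doubles as "no fit" (ZHDR_CHUNKS value is an assumption):

    #include <stdio.h>

    #define ZHDR_CHUNKS 1   /* assumed header size, in chunks */

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    struct zh { int first_chunks, middle_chunks, last_chunks, start_middle; };

    static enum buddy get_free_buddy(const struct zh *z, int chunks)
    {
        enum buddy bud = HEADLESS;   /* means "no suitable buddy" here */

        if (z->middle_chunks) {
            /* a middle buddy exists: try the gap before it, then LAST */
            if (!z->first_chunks &&
                chunks <= z->start_middle - ZHDR_CHUNKS)
                bud = FIRST;
            else if (!z->last_chunks)
                bud = LAST;
        } else {
            if (!z->first_chunks)
                bud = FIRST;
            else if (!z->last_chunks)
                bud = LAST;
            else
                bud = MIDDLE;
        }
        return bud;
    }

    int main(void)
    {
        /* middle buddy at chunk 30, first/last free: 20 chunks fit FIRST */
        struct zh z = { 0, 8, 0, 30 };
        printf("bud = %d (1 == FIRST)\n", get_free_buddy(&z, 20));
        return 0;
    }
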
659 static inline void *mchunk_memmove(struct z3fold_header *zhdr, in mchunk_memmove() argument
662 void *beg = zhdr; in mchunk_memmove()
664 beg + (zhdr->start_middle << CHUNK_SHIFT), in mchunk_memmove()
665 zhdr->middle_chunks << CHUNK_SHIFT); in mchunk_memmove()
668 static inline bool buddy_single(struct z3fold_header *zhdr) in buddy_single() argument
670 return !((zhdr->first_chunks && zhdr->middle_chunks) || in buddy_single()
671 (zhdr->first_chunks && zhdr->last_chunks) || in buddy_single()
672 (zhdr->middle_chunks && zhdr->last_chunks)); in buddy_single()
675 static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr) in compact_single_buddy() argument
677 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in compact_single_buddy()
678 void *p = zhdr; in compact_single_buddy()
682 int first_idx = __idx(zhdr, FIRST); in compact_single_buddy()
683 int middle_idx = __idx(zhdr, MIDDLE); in compact_single_buddy()
684 int last_idx = __idx(zhdr, LAST); in compact_single_buddy()
691 if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) { in compact_single_buddy()
693 sz = zhdr->first_chunks << CHUNK_SHIFT; in compact_single_buddy()
694 old_handle = (unsigned long)&zhdr->slots->slot[first_idx]; in compact_single_buddy()
695 moved_chunks = &zhdr->first_chunks; in compact_single_buddy()
696 } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) { in compact_single_buddy()
697 p += zhdr->start_middle << CHUNK_SHIFT; in compact_single_buddy()
698 sz = zhdr->middle_chunks << CHUNK_SHIFT; in compact_single_buddy()
699 old_handle = (unsigned long)&zhdr->slots->slot[middle_idx]; in compact_single_buddy()
700 moved_chunks = &zhdr->middle_chunks; in compact_single_buddy()
701 } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) { in compact_single_buddy()
702 p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); in compact_single_buddy()
703 sz = zhdr->last_chunks << CHUNK_SHIFT; in compact_single_buddy()
704 old_handle = (unsigned long)&zhdr->slots->slot[last_idx]; in compact_single_buddy()
705 moved_chunks = &zhdr->last_chunks; in compact_single_buddy()
717 if (WARN_ON(new_zhdr == zhdr)) in compact_single_buddy()
742 write_lock(&zhdr->slots->lock); in compact_single_buddy()
748 write_unlock(&zhdr->slots->lock); in compact_single_buddy()
772 static int z3fold_compact_page(struct z3fold_header *zhdr) in z3fold_compact_page() argument
774 struct page *page = virt_to_page(zhdr); in z3fold_compact_page()
782 if (zhdr->middle_chunks == 0) in z3fold_compact_page()
785 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { in z3fold_compact_page()
787 mchunk_memmove(zhdr, ZHDR_CHUNKS); in z3fold_compact_page()
788 zhdr->first_chunks = zhdr->middle_chunks; in z3fold_compact_page()
789 zhdr->middle_chunks = 0; in z3fold_compact_page()
790 zhdr->start_middle = 0; in z3fold_compact_page()
791 zhdr->first_num++; in z3fold_compact_page()
799 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && in z3fold_compact_page()
800 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= in z3fold_compact_page()
802 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); in z3fold_compact_page()
803 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; in z3fold_compact_page()
805 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && in z3fold_compact_page()
806 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle in z3fold_compact_page()
807 + zhdr->middle_chunks) >= in z3fold_compact_page()
809 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - in z3fold_compact_page()
810 zhdr->middle_chunks; in z3fold_compact_page()
811 mchunk_memmove(zhdr, new_start); in z3fold_compact_page()
812 zhdr->start_middle = new_start; in z3fold_compact_page()
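
z3fold_compact_page() (lines 772-812) reshuffles buddies within one page using mchunk_memmove() (lines 659-665). The sketch below models its first case: a lone middle buddy is promoted to the FIRST position right after the header, and first_num is bumped so that stale MIDDLE handles keep decoding to the right buddy (constants assume 4 KiB pages and a one-chunk header):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define CHUNK_SHIFT  6
    #define PAGE_SIZE    4096
    #define ZHDR_CHUNKS  1   /* assumed header size, in chunks */

    struct zh {
        unsigned short first_chunks, middle_chunks, last_chunks;
        unsigned short start_middle, first_num;
    };

    /* mirrors mchunk_memmove(): slide the middle buddy to dst_chunk */
    static void mchunk_memmove(struct zh *z, void *page, unsigned short dst_chunk)
    {
        char *beg = page;

        memmove(beg + (dst_chunk << CHUNK_SHIFT),
                beg + (z->start_middle << CHUNK_SHIFT),
                z->middle_chunks << CHUNK_SHIFT);
    }

    int main(void)
    {
        char *page = calloc(1, PAGE_SIZE);
        struct zh z = { 0, 4, 0, 20, 0 };   /* only a 4-chunk middle buddy */

        memset(page + (z.start_middle << CHUNK_SHIFT), 0xaa,
               z.middle_chunks << CHUNK_SHIFT);

        /* middle is alone: promote it to FIRST, right after the header */
        mchunk_memmove(&z, page, ZHDR_CHUNKS);
        z.first_chunks = z.middle_chunks;
        z.middle_chunks = 0;
        z.start_middle = 0;
        z.first_num++;   /* old MIDDLE handles now decode as FIRST */

        printf("first byte of promoted buddy: 0x%x\n",
               (unsigned char)page[ZHDR_CHUNKS << CHUNK_SHIFT]);
        free(page);
        return 0;
    }
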
819 static void do_compact_page(struct z3fold_header *zhdr, bool locked) in do_compact_page() argument
821 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in do_compact_page()
824 page = virt_to_page(zhdr); in do_compact_page()
826 WARN_ON(z3fold_page_trylock(zhdr)); in do_compact_page()
828 z3fold_page_lock(zhdr); in do_compact_page()
830 z3fold_page_unlock(zhdr); in do_compact_page()
834 list_del_init(&zhdr->buddy); in do_compact_page()
837 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { in do_compact_page()
844 z3fold_page_unlock(zhdr); in do_compact_page()
848 if (!zhdr->foreign_handles && buddy_single(zhdr) && in do_compact_page()
849 zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) { in do_compact_page()
850 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) in do_compact_page()
854 z3fold_page_unlock(zhdr); in do_compact_page()
859 z3fold_compact_page(zhdr); in do_compact_page()
860 add_to_unbuddied(pool, zhdr); in do_compact_page()
862 z3fold_page_unlock(zhdr); in do_compact_page()
867 struct z3fold_header *zhdr = container_of(w, struct z3fold_header, in compact_page_work() local
870 do_compact_page(zhdr, false); in compact_page_work()
877 struct z3fold_header *zhdr = NULL; in __z3fold_alloc() local
888 zhdr = list_first_entry_or_null(READ_ONCE(l), in __z3fold_alloc()
891 if (!zhdr) in __z3fold_alloc()
897 if (unlikely(zhdr != list_first_entry(READ_ONCE(l), in __z3fold_alloc()
899 !z3fold_page_trylock(zhdr)) { in __z3fold_alloc()
901 zhdr = NULL; in __z3fold_alloc()
907 list_del_init(&zhdr->buddy); in __z3fold_alloc()
908 zhdr->cpu = -1; in __z3fold_alloc()
911 page = virt_to_page(zhdr); in __z3fold_alloc()
914 z3fold_page_unlock(zhdr); in __z3fold_alloc()
915 zhdr = NULL; in __z3fold_alloc()
928 kref_get(&zhdr->refcount); in __z3fold_alloc()
933 if (!zhdr) { in __z3fold_alloc()
944 zhdr = list_first_entry_or_null(READ_ONCE(l), in __z3fold_alloc()
947 if (!zhdr || !z3fold_page_trylock(zhdr)) { in __z3fold_alloc()
949 zhdr = NULL; in __z3fold_alloc()
952 list_del_init(&zhdr->buddy); in __z3fold_alloc()
953 zhdr->cpu = -1; in __z3fold_alloc()
956 page = virt_to_page(zhdr); in __z3fold_alloc()
959 z3fold_page_unlock(zhdr); in __z3fold_alloc()
960 zhdr = NULL; in __z3fold_alloc()
965 kref_get(&zhdr->refcount); in __z3fold_alloc()
970 if (zhdr && !zhdr->slots) in __z3fold_alloc()
971 zhdr->slots = alloc_slots(pool, in __z3fold_alloc()
973 return zhdr; in __z3fold_alloc()
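
__z3fold_alloc() (lines 877-973) is deliberately optimistic: it peeks at a per-CPU unbuddied list without the page lock, trylocks the page, then re-checks that the page is still first on the list (lines 897-899); on any mismatch it drops everything and retries another bucket or CPU. A pthread sketch of the trylock-or-skip half of that pattern:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

    /* returns 1 if we own the page, 0 if someone beat us to it and the
     * caller should fall back to another bucket, as lines 899-901 do */
    static int try_take_page(void)
    {
        if (pthread_mutex_trylock(&page_lock) != 0)
            return 0;
        /* ... here the kernel re-checks the page is still list-first ... */
        return 1;
    }

    int main(void)
    {
        if (try_take_page()) {
            printf("page taken\n");
            pthread_mutex_unlock(&page_lock);
        } else {
            printf("contended, retrying elsewhere\n");
        }
        return 0;
    }
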
1093 struct z3fold_header *zhdr = NULL; in z3fold_alloc() local
1108 zhdr = __z3fold_alloc(pool, size, can_sleep); in z3fold_alloc()
1109 if (zhdr) { in z3fold_alloc()
1110 bud = get_free_buddy(zhdr, chunks); in z3fold_alloc()
1112 if (kref_put(&zhdr->refcount, in z3fold_alloc()
1116 z3fold_page_unlock(zhdr); in z3fold_alloc()
1121 page = virt_to_page(zhdr); in z3fold_alloc()
1130 zhdr = list_first_entry_or_null(&pool->stale, in z3fold_alloc()
1137 if (zhdr) { in z3fold_alloc()
1138 list_del(&zhdr->buddy); in z3fold_alloc()
1140 cancel_work_sync(&zhdr->work); in z3fold_alloc()
1141 page = virt_to_page(zhdr); in z3fold_alloc()
1152 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); in z3fold_alloc()
1153 if (!zhdr) { in z3fold_alloc()
1173 z3fold_page_lock(zhdr); in z3fold_alloc()
1177 zhdr->first_chunks = chunks; in z3fold_alloc()
1179 zhdr->last_chunks = chunks; in z3fold_alloc()
1181 zhdr->middle_chunks = chunks; in z3fold_alloc()
1182 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; in z3fold_alloc()
1184 add_to_unbuddied(pool, zhdr); in z3fold_alloc()
1194 *handle = encode_handle(zhdr, bud); in z3fold_alloc()
1197 z3fold_page_unlock(zhdr); in z3fold_alloc()
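
z3fold_alloc() (lines 1093-1197) first converts the byte size into 64-byte chunks, then either reuses a page from __z3fold_alloc() or the stale list, or initializes a fresh one, recording the chunk counts at lines 1177-1182. The rounding below mirrors the kernel's size_to_chunks() helper; treat the exact formula as an assumption:

    #include <stdio.h>

    #define CHUNK_SHIFT 6                  /* 64-byte chunks on 4 KiB pages */
    #define CHUNK_SIZE  (1 << CHUNK_SHIFT)

    /* ceiling division of the request into chunks */
    static int size_to_chunks(unsigned long size)
    {
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
    }

    int main(void)
    {
        printf("100 bytes  -> %d chunks\n", size_to_chunks(100));   /* 2 */
        printf("2000 bytes -> %d chunks\n", size_to_chunks(2000));  /* 32 */
        return 0;
    }
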
1214 struct z3fold_header *zhdr; in z3fold_free() local
1219 zhdr = get_z3fold_header(handle); in z3fold_free()
1220 page = virt_to_page(zhdr); in z3fold_free()
1233 put_z3fold_header(zhdr); in z3fold_free()
1245 zhdr->first_chunks = 0; in z3fold_free()
1248 zhdr->middle_chunks = 0; in z3fold_free()
1251 zhdr->last_chunks = 0; in z3fold_free()
1256 put_z3fold_header(zhdr); in z3fold_free()
1261 free_handle(handle, zhdr); in z3fold_free()
1262 if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) { in z3fold_free()
1268 z3fold_page_unlock(zhdr); in z3fold_free()
1272 put_z3fold_header(zhdr); in z3fold_free()
1276 if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) { in z3fold_free()
1278 list_del_init(&zhdr->buddy); in z3fold_free()
1280 zhdr->cpu = -1; in z3fold_free()
1281 kref_get(&zhdr->refcount); in z3fold_free()
1283 do_compact_page(zhdr, true); in z3fold_free()
1286 kref_get(&zhdr->refcount); in z3fold_free()
1288 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); in z3fold_free()
1289 put_z3fold_header(zhdr); in z3fold_free()
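
The free path (lines 1214-1289) is reference-counted: free_handle() retires the handle, and the kref_put() at line 1262 only tears the page down (via release_z3fold_page_locked_list) when the last reference disappears; otherwise the page is handed to the compaction worker. A minimal model of that kref_put() contract:

    #include <stdio.h>

    struct obj { int refcount; };

    static void release(struct obj *o)
    {
        printf("page released\n");
    }

    /* mirrors kref_put(): calls the release function and returns 1 only
     * when this put dropped the final reference */
    static int kref_put_model(struct obj *o, void (*rel)(struct obj *))
    {
        if (--o->refcount == 0) {
            rel(o);
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct obj page = { 2 };   /* e.g. allocator ref + worker ref */

        if (!kref_put_model(&page, release))
            printf("still referenced, queueing compaction instead\n");
        if (kref_put_model(&page, release))
            printf("last put freed the page\n");
        return 0;
    }
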
1331 struct z3fold_header *zhdr = NULL; in z3fold_reclaim_page() local
1353 zhdr = page_address(page); in z3fold_reclaim_page()
1371 if (kref_get_unless_zero(&zhdr->refcount) == 0) { in z3fold_reclaim_page()
1372 zhdr = NULL; in z3fold_reclaim_page()
1375 if (!z3fold_page_trylock(zhdr)) { in z3fold_reclaim_page()
1376 if (kref_put(&zhdr->refcount, in z3fold_reclaim_page()
1379 zhdr = NULL; in z3fold_reclaim_page()
1387 if (zhdr->foreign_handles || in z3fold_reclaim_page()
1389 if (kref_put(&zhdr->refcount, in z3fold_reclaim_page()
1393 z3fold_page_unlock(zhdr); in z3fold_reclaim_page()
1394 zhdr = NULL; in z3fold_reclaim_page()
1397 list_del_init(&zhdr->buddy); in z3fold_reclaim_page()
1398 zhdr->cpu = -1; in z3fold_reclaim_page()
1402 if (!zhdr) in z3fold_reclaim_page()
1419 if (zhdr->first_chunks) in z3fold_reclaim_page()
1420 first_handle = __encode_handle(zhdr, &slots, in z3fold_reclaim_page()
1422 if (zhdr->middle_chunks) in z3fold_reclaim_page()
1423 middle_handle = __encode_handle(zhdr, &slots, in z3fold_reclaim_page()
1425 if (zhdr->last_chunks) in z3fold_reclaim_page()
1426 last_handle = __encode_handle(zhdr, &slots, in z3fold_reclaim_page()
1432 z3fold_page_unlock(zhdr); in z3fold_reclaim_page()
1434 first_handle = encode_handle(zhdr, HEADLESS); in z3fold_reclaim_page()
1465 struct z3fold_buddy_slots *slots = zhdr->slots; in z3fold_reclaim_page()
1466 z3fold_page_lock(zhdr); in z3fold_reclaim_page()
1467 if (kref_put(&zhdr->refcount, in z3fold_reclaim_page()
1481 z3fold_page_unlock(zhdr); in z3fold_reclaim_page()
1504 struct z3fold_header *zhdr; in z3fold_map() local
1509 zhdr = get_z3fold_header(handle); in z3fold_map()
1510 addr = zhdr; in z3fold_map()
1511 page = virt_to_page(zhdr); in z3fold_map()
1522 addr += zhdr->start_middle << CHUNK_SHIFT; in z3fold_map()
1536 zhdr->mapped_count++; in z3fold_map()
1538 put_z3fold_header(zhdr); in z3fold_map()
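
z3fold_map() (lines 1504-1538) turns a handle into a kernel virtual address by adding a per-buddy offset to the page start: FIRST sits right after the aligned header, MIDDLE at start_middle chunks (line 1522), and LAST flush against the end of the page, with its chunk count recovered from the handle itself (encoded at line 473). A sketch of that address math, with an assumed one-chunk header:

    #include <stdio.h>

    #define CHUNK_SHIFT        6
    #define PAGE_SIZE          4096
    #define ZHDR_SIZE_ALIGNED  (1 << CHUNK_SHIFT)   /* assumed header size */

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    static unsigned long buddy_offset(enum buddy bud, unsigned short start_middle,
                                      unsigned short last_chunks)
    {
        switch (bud) {
        case FIRST:
            return ZHDR_SIZE_ALIGNED;
        case MIDDLE:
            return (unsigned long)start_middle << CHUNK_SHIFT;
        case LAST:
            return PAGE_SIZE -
                   ((unsigned long)last_chunks << CHUNK_SHIFT);
        default:
            return 0;   /* HEADLESS: the whole page is the object */
        }
    }

    int main(void)
    {
        printf("FIRST at %lu, MIDDLE(20) at %lu, LAST(4) at %lu\n",
               buddy_offset(FIRST, 0, 0),
               buddy_offset(MIDDLE, 20, 0),
               buddy_offset(LAST, 0, 4));
        return 0;
    }
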
1549 struct z3fold_header *zhdr; in z3fold_unmap() local
1553 zhdr = get_z3fold_header(handle); in z3fold_unmap()
1554 page = virt_to_page(zhdr); in z3fold_unmap()
1562 zhdr->mapped_count--; in z3fold_unmap()
1563 put_z3fold_header(zhdr); in z3fold_unmap()
1579 struct z3fold_header *zhdr; in z3fold_page_isolate() local
1588 zhdr = page_address(page); in z3fold_page_isolate()
1589 z3fold_page_lock(zhdr); in z3fold_page_isolate()
1594 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) in z3fold_page_isolate()
1599 pool = zhdr_to_pool(zhdr); in z3fold_page_isolate()
1601 if (!list_empty(&zhdr->buddy)) in z3fold_page_isolate()
1602 list_del_init(&zhdr->buddy); in z3fold_page_isolate()
1607 kref_get(&zhdr->refcount); in z3fold_page_isolate()
1608 z3fold_page_unlock(zhdr); in z3fold_page_isolate()
1612 z3fold_page_unlock(zhdr); in z3fold_page_isolate()
1619 struct z3fold_header *zhdr, *new_zhdr; in z3fold_page_migrate() local
1628 zhdr = page_address(page); in z3fold_page_migrate()
1629 pool = zhdr_to_pool(zhdr); in z3fold_page_migrate()
1631 if (!z3fold_page_trylock(zhdr)) in z3fold_page_migrate()
1633 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) { in z3fold_page_migrate()
1634 z3fold_page_unlock(zhdr); in z3fold_page_migrate()
1638 if (work_pending(&zhdr->work)) { in z3fold_page_migrate()
1639 z3fold_page_unlock(zhdr); in z3fold_page_migrate()
1643 memcpy(new_zhdr, zhdr, PAGE_SIZE); in z3fold_page_migrate()
1646 z3fold_page_unlock(zhdr); in z3fold_page_migrate()
1684 struct z3fold_header *zhdr; in z3fold_page_putback() local
1687 zhdr = page_address(page); in z3fold_page_putback()
1688 pool = zhdr_to_pool(zhdr); in z3fold_page_putback()
1690 z3fold_page_lock(zhdr); in z3fold_page_putback()
1691 if (!list_empty(&zhdr->buddy)) in z3fold_page_putback()
1692 list_del_init(&zhdr->buddy); in z3fold_page_putback()
1694 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { in z3fold_page_putback()
1702 z3fold_page_unlock(zhdr); in z3fold_page_putback()