Lines Matching refs:pcl

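Each hit below reads <source line> <code> in <enclosing function>() <reference kind>. The fragments appear to come from the EROFS compressed-data I/O path (fs/erofs/zdata.c in the Linux kernel). Short reconstruction sketches follow some groups of hits; any identifier in a sketch that does not occur in a hit line is an inference from mainline EROFS, not a quotation from this tree.
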
70 struct z_erofs_pcluster *pcl; in z_erofs_alloc_pcluster() local
75 pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); in z_erofs_alloc_pcluster()
76 if (!pcl) in z_erofs_alloc_pcluster()
78 pcl->pclusterpages = nrpages; in z_erofs_alloc_pcluster()
79 return pcl; in z_erofs_alloc_pcluster()
84 static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl) in z_erofs_free_pcluster() argument
91 if (pcl->pclusterpages > pcs->maxpages) in z_erofs_free_pcluster()
94 kmem_cache_free(pcs->slab, pcl); in z_erofs_free_pcluster()
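
Lines 70-94 outline a size-classed slab allocator for pclusters: allocation takes a zeroed object from the smallest slab class whose maxpages covers nrpages, and freeing matches pclusterpages against the same classes. A plausible reconstruction; the pcluster_pool[] array and struct z_erofs_pcluster_slab are assumed names that never appear in the hits:

static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
                struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
                struct z_erofs_pcluster *pcl;

                /* skip size classes too small for this pcluster */
                if (nrpages > pcs->maxpages)
                        continue;

                pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
                if (!pcl)
                        return ERR_PTR(-ENOMEM);
                pcl->pclusterpages = nrpages;
                return pcl;
        }
        return ERR_PTR(-EINVAL);
}

static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
                struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;

                /* free into the same size class it was allocated from */
                if (pcl->pclusterpages > pcs->maxpages)
                        continue;

                kmem_cache_free(pcs->slab, pcl);
                return;
        }
        DBG_BUGON(1);
}
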
205 struct z_erofs_pcluster *pcl, *tailpcl; member
242 struct z_erofs_pcluster *pcl = clt->pcl; in preload_compressed_pages() local
252 pages = pcl->compressed_pages; in preload_compressed_pages()
253 index = pcl->obj.index; in preload_compressed_pages()
254 for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) { in preload_compressed_pages()
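
preload_compressed_pages() (lines 242-254) walks compressed_pages[] in step with the physical page index starting at pcl->obj.index, attaching pages that already sit in the managed cache so submission can reuse them. A deliberately simplified sketch of the loop shape; the mainline version additionally tags the stored pointers and can preallocate pages, and mc here stands for the managed-cache address_space:

        struct page **pages = pcl->compressed_pages;
        pgoff_t index = pcl->obj.index;

        for (; index < pcl->obj.index + pcl->pclusterpages; ++index, ++pages) {
                struct page *page;

                /* this slot was already filled (in-place or cached page) */
                if (READ_ONCE(*pages))
                        continue;

                page = find_get_page(mc, index);
                if (!page)
                        continue;       /* leave it for submission time */

                /* publish the cached page unless another path won the slot */
                if (cmpxchg(pages, NULL, page))
                        put_page(page);
        }
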
311 struct z_erofs_pcluster *const pcl = in erofs_try_to_free_all_cached_pages() local
320 for (i = 0; i < pcl->pclusterpages; ++i) { in erofs_try_to_free_all_cached_pages()
321 struct page *page = pcl->compressed_pages[i]; in erofs_try_to_free_all_cached_pages()
334 WRITE_ONCE(pcl->compressed_pages[i], NULL); in erofs_try_to_free_all_cached_pages()
344 struct z_erofs_pcluster *const pcl = (void *)page_private(page); in erofs_try_to_free_cached_page() local
347 if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) { in erofs_try_to_free_cached_page()
350 for (i = 0; i < pcl->pclusterpages; ++i) { in erofs_try_to_free_cached_page()
351 if (pcl->compressed_pages[i] == page) { in erofs_try_to_free_cached_page()
352 WRITE_ONCE(pcl->compressed_pages[i], NULL); in erofs_try_to_free_cached_page()
357 erofs_workgroup_unfreeze(&pcl->obj, 1); in erofs_try_to_free_cached_page()
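
Lines 344-357 are the reclaim hook for a cached compressed page: page_private(page) points back at the owning pcluster (attached at line 1190), and the slot scan only runs if the pcluster's reference count can be frozen, i.e. nobody is using it concurrently. A reconstruction consistent with the hits; the ret bookkeeping and detach_page_private() call are inferred:

int erofs_try_to_free_cached_page(struct address_space *mapping,
                                  struct page *page)
{
        struct z_erofs_pcluster *const pcl = (void *)page_private(page);
        int ret = 0;    /* 0 - busy */

        if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
                unsigned int i;

                for (i = 0; i < pcl->pclusterpages; ++i) {
                        if (pcl->compressed_pages[i] == page) {
                                WRITE_ONCE(pcl->compressed_pages[i], NULL);
                                ret = 1;
                                break;
                        }
                }
                erofs_workgroup_unfreeze(&pcl->obj, 1);

                if (ret)
                        detach_page_private(page);
        }
        return ret;
}
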
369 struct z_erofs_pcluster *const pcl = clt->pcl; in z_erofs_try_inplace_io() local
371 while (clt->icpage_ptr > pcl->compressed_pages) in z_erofs_try_inplace_io()
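
z_erofs_try_inplace_io() (lines 369-371) donates a file page to the pcluster for in-place I/O by claiming a free slot from the tail of compressed_pages[]; clt->icpage_ptr starts one past the end of the array (line 582) and walks backwards. Reconstructed:

static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
                                   struct page *page)
{
        struct z_erofs_pcluster *const pcl = clt->pcl;

        /* scan backwards for an empty slot and claim it atomically */
        while (clt->icpage_ptr > pcl->compressed_pages)
                if (!cmpxchg(--clt->icpage_ptr, NULL, page))
                        return true;
        return false;
}
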
397 try_to_claim_pcluster(struct z_erofs_pcluster *pcl, in try_to_claim_pcluster() argument
402 if (pcl->next == Z_EROFS_PCLUSTER_NIL) { in try_to_claim_pcluster()
404 if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, in try_to_claim_pcluster()
408 *owned_head = &pcl->next; in try_to_claim_pcluster()
411 } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) { in try_to_claim_pcluster()
417 if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, in try_to_claim_pcluster()
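
try_to_claim_pcluster() (lines 397-417) links pclusters into a lockless chain through cmpxchg on pcl->next. Three sentinels show up across the hits: Z_EROFS_PCLUSTER_NIL (on no chain), Z_EROFS_PCLUSTER_TAIL (tail of an open chain), and Z_EROFS_PCLUSTER_TAIL_CLOSED (closed for submission, lines 1254 and 1301). A reconstruction; the COLLECT_* return values follow mainline naming and are not visible in the hits:

static enum z_erofs_collectmode
try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
                      z_erofs_next_pcluster_t *owned_head)
{
retry:
        if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
                /* type 1: the pcluster is on no chain; prepend it to ours */
                if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
                            *owned_head) != Z_EROFS_PCLUSTER_NIL)
                        goto retry;     /* lost a race, reexamine the state */

                *owned_head = &pcl->next;
                return COLLECT_PRIMARY_FOLLOWED;
        } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
                /*
                 * type 2: hook our chain onto the tail of an existing open
                 * chain; submission stays governed by the original owner.
                 */
                if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
                            *owned_head) != Z_EROFS_PCLUSTER_TAIL)
                        goto retry;
                *owned_head = Z_EROFS_PCLUSTER_TAIL;
                return COLLECT_PRIMARY_HOOKED;
        }
        return COLLECT_PRIMARY; /* already claimed by someone else */
}
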
430 struct z_erofs_pcluster *pcl = clt->pcl; in z_erofs_lookup_collection() local
435 if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) { in z_erofs_lookup_collection()
440 cl = z_erofs_primarycollection(pcl); in z_erofs_lookup_collection()
446 length = READ_ONCE(pcl->length); in z_erofs_lookup_collection()
459 length != cmpxchg_relaxed(&pcl->length, length, llen)) { in z_erofs_lookup_collection()
461 length = READ_ONCE(pcl->length); in z_erofs_lookup_collection()
467 clt->tailpcl = pcl; in z_erofs_lookup_collection()
468 clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head); in z_erofs_lookup_collection()
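
In z_erofs_lookup_collection() (lines 430-468), line 435 rejects a self-loop (the pcluster is already our chain's head or recorded tail, which only happens with a corrupted image), and lines 446-461 grow pcl->length monotonically without a lock: the value packs the logical length above Z_EROFS_PCLUSTER_LENGTH_BIT plus a Z_EROFS_PCLUSTER_FULL_LENGTH flag (compare lines 493 and 971-974), and a cmpxchg_relaxed loop retries until the stored value is at least as large. A sketch of the update; EROFS_MAP_FULL_MAPPED is the mainline flag name and is assumed here:

        length = READ_ONCE(pcl->length);
        if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
                /* the length is final; a larger llen would mean corruption */
                if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
                        DBG_BUGON(1);
                        return -EFSCORRUPTED;
                }
        } else {
                unsigned long llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;

                if (map->m_flags & EROFS_MAP_FULL_MAPPED)
                        llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;

                /* raise pcl->length to llen unless a racer raised it more */
                while (llen > length &&
                       length != cmpxchg_relaxed(&pcl->length, length, llen)) {
                        cpu_relax();
                        length = READ_ONCE(pcl->length);
                }
        }
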
480 struct z_erofs_pcluster *pcl; in z_erofs_register_collection() local
486 pcl = z_erofs_alloc_pcluster(map->m_plen >> PAGE_SHIFT); in z_erofs_register_collection()
487 if (IS_ERR(pcl)) in z_erofs_register_collection()
488 return PTR_ERR(pcl); in z_erofs_register_collection()
490 atomic_set(&pcl->obj.refcount, 1); in z_erofs_register_collection()
491 pcl->obj.index = map->m_pa >> PAGE_SHIFT; in z_erofs_register_collection()
493 pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | in z_erofs_register_collection()
498 pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4; in z_erofs_register_collection()
500 pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; in z_erofs_register_collection()
503 pcl->next = clt->owned_head; in z_erofs_register_collection()
506 cl = z_erofs_primarycollection(pcl); in z_erofs_register_collection()
516 grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj); in z_erofs_register_collection()
522 if (grp != &pcl->obj) { in z_erofs_register_collection()
523 clt->pcl = container_of(grp, struct z_erofs_pcluster, obj); in z_erofs_register_collection()
529 clt->tailpcl = pcl; in z_erofs_register_collection()
530 clt->owned_head = &pcl->next; in z_erofs_register_collection()
531 clt->pcl = pcl; in z_erofs_register_collection()
537 z_erofs_free_pcluster(pcl); in z_erofs_register_collection()
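
z_erofs_register_collection() (lines 480-537) allocates a pcluster sized from the extent (line 486), fills in the refcount, index, packed length, algorithm and chain link, then publishes it via erofs_insert_workgroup(). Line 522 is the insertion race: if another thread registered the same physical cluster first, grp points at the winner, so the fresh pcluster is freed and the caller falls back to the existing one. A condensed sketch of that tail; the -EEXIST convention is inferred from the mainline flow:

        grp = erofs_insert_workgroup(inode->i_sb, &pcl->obj);
        if (IS_ERR(grp)) {
                err = PTR_ERR(grp);
                goto err_out;
        }

        if (grp != &pcl->obj) {
                /* lost the race: switch to the already-registered pcluster */
                clt->pcl = container_of(grp, struct z_erofs_pcluster, obj);
                err = -EEXIST;
                goto err_out;
        }
        /* new chain head: remember the tail to detect merging loops */
        if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
                clt->tailpcl = pcl;
        clt->owned_head = &pcl->next;
        clt->pcl = pcl;
        return 0;

err_out:
        mutex_unlock(&cl->lock);
        z_erofs_free_pcluster(pcl);
        return err;
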
561 clt->pcl = container_of(grp, struct z_erofs_pcluster, obj); in z_erofs_collector_begin()
573 erofs_workgroup_put(&clt->pcl->obj); in z_erofs_collector_begin()
582 clt->icpage_ptr = clt->pcl->compressed_pages + clt->pcl->pclusterpages; in z_erofs_collector_begin()
601 struct z_erofs_pcluster *const pcl = in erofs_workgroup_free_rcu() local
603 struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl); in erofs_workgroup_free_rcu()
610 struct z_erofs_pcluster *const pcl = in z_erofs_collection_put() local
613 erofs_workgroup_put(&pcl->obj); in z_erofs_collection_put()
842 struct z_erofs_pcluster *pcl, in z_erofs_decompress_pcluster() argument
857 cl = z_erofs_primarycollection(pcl); in z_erofs_decompress_pcluster()
925 compressed_pages = pcl->compressed_pages; in z_erofs_decompress_pcluster()
927 for (i = 0; i < pcl->pclusterpages; ++i) { in z_erofs_decompress_pcluster()
971 llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT; in z_erofs_decompress_pcluster()
974 partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH); in z_erofs_decompress_pcluster()
980 inputsize = pcl->pclusterpages * PAGE_SIZE; in z_erofs_decompress_pcluster()
988 .alg = pcl->algorithmformat, in z_erofs_decompress_pcluster()
995 for (i = 0; i < pcl->pclusterpages; ++i) { in z_erofs_decompress_pcluster()
1033 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); in z_erofs_decompress_pcluster()
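
z_erofs_decompress_pcluster() (lines 842-1033) unpacks the same length word that registration packed: line 971 recovers the logical length and line 974 derives whether the output is partial from the full-length flag. A tiny standalone demo of the pack/unpack arithmetic; the constant values match mainline but should be treated as assumptions for this tree:

#include <assert.h>
#include <stdbool.h>

#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001      /* assumed value */
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1               /* assumed value */

int main(void)
{
        unsigned long llen = 4096;      /* logical (decompressed) length */
        bool full = true;               /* extent fully mapped */

        /* pack, as at line 493 */
        unsigned long length = (llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
                               (full ? Z_EROFS_PCLUSTER_FULL_LENGTH : 0);

        /* unpack, as at lines 971 and 974 */
        assert((length >> Z_EROFS_PCLUSTER_LENGTH_BIT) == llen);
        assert(!(length & Z_EROFS_PCLUSTER_FULL_LENGTH) == !full);
        return 0;
}
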
1048 struct z_erofs_pcluster *pcl; in z_erofs_decompress_queue() local
1056 pcl = container_of(owned, struct z_erofs_pcluster, next); in z_erofs_decompress_queue()
1057 owned = READ_ONCE(pcl->next); in z_erofs_decompress_queue()
1059 z_erofs_decompress_pcluster(io->sb, pcl, pagepool); in z_erofs_decompress_queue()
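
z_erofs_decompress_queue() (lines 1048-1059) consumes a completed chain: the successor is read out of pcl->next before the pcluster is decompressed (decompression may recycle it), and the walk terminates at the TAIL_CLOSED sentinel. Reconstructed; the pagepool parameter type is assumed from the call at line 1059:

static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
                                     struct list_head *pagepool)
{
        z_erofs_next_pcluster_t owned = io->head;

        while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
                struct z_erofs_pcluster *pcl;

                /* neither an open (TAIL) nor an unlinked (NIL) link */
                DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
                DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);

                pcl = container_of(owned, struct z_erofs_pcluster, next);
                /* fetch the successor first: pcl may go away below */
                owned = READ_ONCE(pcl->next);

                z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
        }
}
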
1076 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, in pickup_page_for_submission() argument
1082 const pgoff_t index = pcl->obj.index; in pickup_page_for_submission()
1092 page = READ_ONCE(pcl->compressed_pages[nr]); in pickup_page_for_submission()
1117 WRITE_ONCE(pcl->compressed_pages[nr], page); in pickup_page_for_submission()
1143 WRITE_ONCE(pcl->compressed_pages[nr], page); in pickup_page_for_submission()
1155 set_page_private(page, (unsigned long)pcl); in pickup_page_for_submission()
1179 if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { in pickup_page_for_submission()
1190 attach_page_private(page, pcl); in pickup_page_for_submission()
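
pickup_page_for_submission() (lines 1076-1190) turns compressed_pages[nr] into a page ready for I/O while racing against reclaim, which can NULL the slot at any moment (lines 334 and 352); hence the cmpxchg install at line 1179 and the restart when the slot changed underneath. A self-contained userspace model of just that install-or-retry pattern, using C11 atomics in place of the kernel's cmpxchg (struct slot and pick_page() are illustrative names):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct slot { _Atomic(void *) page; };

static void *pick_page(struct slot *s)
{
        void *oldpage, *page;
repeat:
        oldpage = page = atomic_load(&s->page);
        if (!page) {
                page = malloc(64);      /* stands in for a fresh page */
                /* install it, unless the slot changed since we looked */
                if (!atomic_compare_exchange_strong(&s->page, &oldpage, page)) {
                        free(page);     /* a racer won the slot: start over */
                        goto repeat;
                }
        }
        return page;
}

int main(void)
{
        struct slot s = { .page = NULL };

        printf("picked %p\n", pick_page(&s));
        free(atomic_load(&s.page));
        return 0;
}
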
1243 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, in move_to_bypass_jobqueue() argument
1254 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED); in move_to_bypass_jobqueue()
1257 WRITE_ONCE(*bypass_qtail, &pcl->next); in move_to_bypass_jobqueue()
1259 qtail[JQ_BYPASS] = &pcl->next; in move_to_bypass_jobqueue()
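
move_to_bypass_jobqueue() (lines 1243-1259) handles a pcluster whose compressed pages all turned out to be up to date: it is spliced out of the submit chain and appended to the bypass chain, so decompression still runs but no bio is issued for it. A reconstruction; the JQ_SUBMIT/JQ_BYPASS indexing follows the hits at lines 1257-1259 and 1341:

static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
                                    z_erofs_next_pcluster_t qtail[],
                                    z_erofs_next_pcluster_t owned_head)
{
        z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
        z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];

        DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
        /* this pcluster now terminates the bypass chain */
        WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);

        /* bridge the submit chain over it, then hook it onto bypass */
        WRITE_ONCE(*submit_qtail, owned_head);
        WRITE_ONCE(*bypass_qtail, &pcl->next);

        qtail[JQ_BYPASS] = &pcl->next;
}
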
1286 struct z_erofs_pcluster *pcl; in z_erofs_submit_queue() local
1295 pcl = container_of(owned_head, struct z_erofs_pcluster, next); in z_erofs_submit_queue()
1297 cur = pcl->obj.index; in z_erofs_submit_queue()
1298 end = cur + pcl->pclusterpages; in z_erofs_submit_queue()
1301 owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, in z_erofs_submit_queue()
1307 page = pickup_page_for_submission(pcl, i++, pagepool, in z_erofs_submit_queue()
1341 qtail[JQ_SUBMIT] = &pcl->next; in z_erofs_submit_queue()
1343 move_to_bypass_jobqueue(pcl, qtail, owned_head); in z_erofs_submit_queue()
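
Putting lines 1286-1343 together: z_erofs_submit_queue() walks the owned chain, closes each link (the cmpxchg to TAIL_CLOSED at line 1301 also yields the successor), picks up every compressed page of the pcluster's physical extent, and routes the pcluster to either the submit or the bypass jobqueue. A condensed skeleton consistent with the hits; bio batching and foreground/background details are omitted:

        do {
                struct z_erofs_pcluster *pcl;
                pgoff_t cur, end;
                unsigned int i = 0;
                bool bypass = true;

                pcl = container_of(owned_head, struct z_erofs_pcluster, next);

                cur = pcl->obj.index;
                end = cur + pcl->pclusterpages;

                /* close this link and fetch the next chained pcluster */
                owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
                                     Z_EROFS_PCLUSTER_TAIL_CLOSED);

                do {
                        struct page *page;

                        page = pickup_page_for_submission(pcl, i++, pagepool,
                                                          mc, GFP_NOFS);
                        if (!page)
                                continue;       /* slot needs no read I/O */

                        /* ... append the page to the current bio, opening a
                         * new bio whenever the block run is discontiguous ... */
                        bypass = false;
                } while (++cur < end);

                if (!bypass)
                        qtail[JQ_SUBMIT] = &pcl->next;
                else
                        move_to_bypass_jobqueue(pcl, qtail, owned_head);
        } while (owned_head != Z_EROFS_PCLUSTER_TAIL);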