Lines matching refs: ppl_conf

87 struct ppl_conf {  struct
116 struct ppl_conf *ppl_conf; /* shared between all log instances */ member
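For orientation, here is a rough sketch of the structure itself, reconstructed only from the members referenced in the lines below; field order, comments, and any members not referenced here are assumptions rather than the verbatim definition from line 87:

struct ppl_conf {
	struct mddev *mddev;		/* owning array */

	/* one child log per member disk */
	struct ppl_log *child_logs;
	int count;

	u32 signature;			/* PPL header signature for this array */
	atomic64_t seq;			/* current log write sequence number */
	int block_size;			/* block size used for PPL entries */

	/* allocation pools for io units, log bios and flush bios */
	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	struct bio_set flush_bs;

	/* recovery bookkeeping */
	int recovered_entries;
	int mismatch_count;

	/* stripes parked because no io unit could be allocated */
	struct list_head no_mem_stripes;
	spinlock_t no_mem_stripes_lock;

	unsigned short write_hint;	/* exposed via the ppl_write_hint sysfs attribute */
};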
235 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_new_iounit() local
240 io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT); in ppl_new_iounit()
258 pplhdr->signature = cpu_to_le32(ppl_conf->signature); in ppl_new_iounit()
260 io->seq = atomic64_add_return(1, &ppl_conf->seq); in ppl_new_iounit()
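Stitched together, the three references above give roughly this allocation path in ppl_new_iounit(); the io_unit/header field names around the referenced lines (header_page, generation) are assumptions:

static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
					  struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_io_unit *io;
	struct ppl_header *pplhdr;

	/* non-blocking: on failure the caller parks the stripe for a retry */
	io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
	if (!io)
		return NULL;

	/* ... reset the io unit and its preallocated header page ... */

	pplhdr = page_address(io->header_page);		/* assumed field name */
	pplhdr->signature = cpu_to_le32(ppl_conf->signature);

	/* every io unit gets the next sequence number of the shared log */
	io->seq = atomic64_add_return(1, &ppl_conf->seq);
	pplhdr->generation = cpu_to_le64(io->seq);	/* assumed header field */

	return io;
}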
362 struct ppl_conf *ppl_conf = conf->log_private; in ppl_write_stripe() local
373 log = &ppl_conf->child_logs[sh->pd_idx]; in ppl_write_stripe()
387 spin_lock_irq(&ppl_conf->no_mem_stripes_lock); in ppl_write_stripe()
388 list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes); in ppl_write_stripe()
389 spin_unlock_irq(&ppl_conf->no_mem_stripes_lock); in ppl_write_stripe()
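The ppl_write_stripe() references show the routing of a stripe to its per-disk child log and the fallback when no io unit is available; a condensed sketch (the ppl_log_stripe() helper name and the surrounding checks are assumptions):

int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;

	/* ... bail out early for stripes that do not need PPL ... */

	/* one child log per member disk; pick the one for the parity disk */
	log = &ppl_conf->child_logs[sh->pd_idx];

	if (ppl_log_stripe(log, sh)) {		/* assumed helper name */
		/* no io unit available: park the stripe, retried later
		 * from ppl_io_unit_finished() */
		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
	}

	return 0;
}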
401 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_log_endio() local
407 md_error(ppl_conf->mddev, log->rdev); in ppl_log_endio()
432 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_submit_iounit() local
453 ilog2(ppl_conf->block_size >> 9)); in ppl_submit_iounit()
472 bio->bi_write_hint = ppl_conf->write_hint; in ppl_submit_iounit()
486 if ((ppl_conf->child_logs[i].wb_cache_on) && in ppl_submit_iounit()
500 &ppl_conf->bs); in ppl_submit_iounit()
540 struct ppl_conf *ppl_conf = conf->log_private; in ppl_write_stripe_run() local
544 for (i = 0; i < ppl_conf->count; i++) { in ppl_write_stripe_run()
545 log = &ppl_conf->child_logs[i]; in ppl_write_stripe_run()
556 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_io_unit_finished() local
557 struct r5conf *conf = ppl_conf->mddev->private; in ppl_io_unit_finished()
568 mempool_free(io, &ppl_conf->io_pool); in ppl_io_unit_finished()
570 spin_lock(&ppl_conf->no_mem_stripes_lock); in ppl_io_unit_finished()
571 if (!list_empty(&ppl_conf->no_mem_stripes)) { in ppl_io_unit_finished()
574 sh = list_first_entry(&ppl_conf->no_mem_stripes, in ppl_io_unit_finished()
580 spin_unlock(&ppl_conf->no_mem_stripes_lock); in ppl_io_unit_finished()
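ppl_io_unit_finished() is the consumer side of the no_mem_stripes list: freeing an io unit back to the pool may unblock one parked stripe. A sketch along the lines of the references above (the io->log back pointer, the STRIPE_HANDLE re-queueing and the wake_up are assumptions):

static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;			/* assumed back pointer */
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct r5conf *conf = ppl_conf->mddev->private;
	struct stripe_head *sh;

	/* ... unlink the io unit from the per-log list ... */

	mempool_free(io, &ppl_conf->io_pool);

	/* a freed io unit may let a parked stripe make progress again */
	spin_lock(&ppl_conf->no_mem_stripes_lock);
	if (!list_empty(&ppl_conf->no_mem_stripes)) {
		sh = list_first_entry(&ppl_conf->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);	/* assumed re-queue step */
		raid5_release_stripe(sh);
	}
	spin_unlock(&ppl_conf->no_mem_stripes_lock);

	wake_up(&conf->wait_for_quiescent);		/* assumed: lets quiesce proceed */
}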
591 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_flush_endio() local
592 struct r5conf *conf = ppl_conf->mddev->private; in ppl_flush_endio()
618 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_do_flush() local
619 struct r5conf *conf = ppl_conf->mddev->private; in ppl_do_flush()
640 bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs); in ppl_do_flush()
675 struct ppl_conf *ppl_conf = conf->log_private; in ppl_quiesce() local
679 for (i = 0; i < ppl_conf->count; i++) { in ppl_quiesce()
680 struct ppl_log *log = &ppl_conf->child_logs[i]; in ppl_quiesce()
798 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_recover_entry() local
799 struct mddev *mddev = ppl_conf->mddev; in ppl_recover_entry()
801 int block_size = ppl_conf->block_size; in ppl_recover_entry()
973 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_recover() local
1028 ppl_conf->mismatch_count++; in ppl_recover()
1033 ppl_conf->recovered_entries++; in ppl_recover()
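The two counters referenced above drive recovery bookkeeping in ppl_recover(): entries whose data checksum no longer matches are counted as mismatches and skipped, everything else is replayed. A heavily condensed fragment (all local names and the checksum step are assumptions):

		crc = ~crc32c_le(~0, page_address(page), s);	/* assumed data checksum */
		if (crc != crc_stored) {
			/* stale or torn entry: count it, do not replay */
			ppl_conf->mismatch_count++;
		} else {
			ret = ppl_recover_entry(log, e, ppl_sector);
			if (ret)
				goto out;
			ppl_conf->recovered_entries++;
		}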
1065 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature); in ppl_write_empty_header()
1081 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_load_distributed() local
1138 ppl_conf->signature = signature; in ppl_load_distributed()
1139 } else if (ppl_conf->signature != signature) { in ppl_load_distributed()
1141 __func__, signature, ppl_conf->signature, in ppl_load_distributed()
1172 ppl_conf->mismatch_count++; in ppl_load_distributed()
1190 __func__, ret, ppl_conf->mismatch_count, in ppl_load_distributed()
1191 ppl_conf->recovered_entries); in ppl_load_distributed()
1195 static int ppl_load(struct ppl_conf *ppl_conf) in ppl_load() argument
1202 for (i = 0; i < ppl_conf->count; i++) { in ppl_load()
1203 struct ppl_log *log = &ppl_conf->child_logs[i]; in ppl_load()
1218 if (ppl_conf->mddev->external) { in ppl_load()
1220 signature = ppl_conf->signature; in ppl_load()
1222 } else if (signature != ppl_conf->signature) { in ppl_load()
1224 mdname(ppl_conf->mddev)); in ppl_load()
1232 __func__, ret, ppl_conf->mismatch_count, in ppl_load()
1233 ppl_conf->recovered_entries); in ppl_load()
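Put together, the ppl_load() references describe a loop over all child logs that loads each distributed log and, for externally managed metadata, checks that every member disk carries the same header signature. A sketch (the missing-drive check and the exact message text are assumptions):

static int ppl_load(struct ppl_conf *ppl_conf)
{
	int ret = 0;
	u32 signature = 0;
	bool signature_set = false;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];

		/* skip missing member disks */
		if (!log->rdev)
			continue;

		ret = ppl_load_distributed(log);
		if (ret)
			break;

		/*
		 * With external metadata a single header cannot be validated
		 * on its own, but all member disks must at least agree.
		 */
		if (ppl_conf->mddev->external) {
			if (!signature_set) {
				signature = ppl_conf->signature;
				signature_set = true;
			} else if (signature != ppl_conf->signature) {
				pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
					mdname(ppl_conf->mddev));
				ret = -EINVAL;
				break;
			}
		}
	}

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);
	return ret;
}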
1237 static void __ppl_exit_log(struct ppl_conf *ppl_conf) in __ppl_exit_log() argument
1239 clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags); in __ppl_exit_log()
1240 clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags); in __ppl_exit_log()
1242 kfree(ppl_conf->child_logs); in __ppl_exit_log()
1244 bioset_exit(&ppl_conf->bs); in __ppl_exit_log()
1245 bioset_exit(&ppl_conf->flush_bs); in __ppl_exit_log()
1246 mempool_exit(&ppl_conf->io_pool); in __ppl_exit_log()
1247 kmem_cache_destroy(ppl_conf->io_kc); in __ppl_exit_log()
1249 kfree(ppl_conf); in __ppl_exit_log()
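The __ppl_exit_log() references cover essentially the whole teardown; read in order they amount to the following (assuming nothing of substance happens between the listed lines), which is also the reverse of the allocation order in ppl_init_log():

static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

	kfree(ppl_conf->child_logs);

	bioset_exit(&ppl_conf->bs);
	bioset_exit(&ppl_conf->flush_bs);
	mempool_exit(&ppl_conf->io_pool);
	kmem_cache_destroy(ppl_conf->io_kc);

	kfree(ppl_conf);
}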
1254 struct ppl_conf *ppl_conf = conf->log_private; in ppl_exit_log() local
1256 if (ppl_conf) { in ppl_exit_log()
1257 __ppl_exit_log(ppl_conf); in ppl_exit_log()
1318 &log->ppl_conf->mddev->flags); in ppl_init_child_log()
1334 struct ppl_conf *ppl_conf; in ppl_init_log() local
1372 ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL); in ppl_init_log()
1373 if (!ppl_conf) in ppl_init_log()
1376 ppl_conf->mddev = mddev; in ppl_init_log()
1378 ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0); in ppl_init_log()
1379 if (!ppl_conf->io_kc) { in ppl_init_log()
1384 ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc, in ppl_init_log()
1385 ppl_io_pool_free, ppl_conf->io_kc); in ppl_init_log()
1389 ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS); in ppl_init_log()
1393 ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0); in ppl_init_log()
1397 ppl_conf->count = conf->raid_disks; in ppl_init_log()
1398 ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log), in ppl_init_log()
1400 if (!ppl_conf->child_logs) { in ppl_init_log()
1405 atomic64_set(&ppl_conf->seq, 0); in ppl_init_log()
1406 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes); in ppl_init_log()
1407 spin_lock_init(&ppl_conf->no_mem_stripes_lock); in ppl_init_log()
1408 ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET; in ppl_init_log()
1411 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid)); in ppl_init_log()
1412 ppl_conf->block_size = 512; in ppl_init_log()
1414 ppl_conf->block_size = queue_logical_block_size(mddev->queue); in ppl_init_log()
1417 for (i = 0; i < ppl_conf->count; i++) { in ppl_init_log()
1418 struct ppl_log *log = &ppl_conf->child_logs[i]; in ppl_init_log()
1425 log->ppl_conf = ppl_conf; in ppl_init_log()
1438 ret = ppl_load(ppl_conf); in ppl_init_log()
1443 ppl_conf->recovered_entries > 0 && in ppl_init_log()
1444 ppl_conf->mismatch_count == 0) { in ppl_init_log()
1451 } else if (mddev->pers && ppl_conf->mismatch_count > 0) { in ppl_init_log()
1457 conf->log_private = ppl_conf; in ppl_init_log()
1458 set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags); in ppl_init_log()
1462 __ppl_exit_log(ppl_conf); in ppl_init_log()
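The ppl_init_log() references give the full setup order for ppl_conf. A condensed sketch that keeps only the referenced steps (the error-path labels, the external/native metadata condition, and the omitted per-disk setup and resync decisions are assumptions):

static int ppl_init_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf;
	struct mddev *mddev = conf->mddev;
	int ret = 0;
	int i;

	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
	if (!ppl_conf)
		return -ENOMEM;

	ppl_conf->mddev = mddev;

	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
	if (!ppl_conf->io_kc) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
			   ppl_io_pool_free, ppl_conf->io_kc);
	if (ret)
		goto err;

	ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto err;

	ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
	if (ret)
		goto err;

	ppl_conf->count = conf->raid_disks;
	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
				       GFP_KERNEL);
	if (!ppl_conf->child_logs) {
		ret = -ENOMEM;
		goto err;
	}

	atomic64_set(&ppl_conf->seq, 0);
	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
	ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;

	if (!mddev->external) {
		/* native metadata: derive the signature from the array uuid */
		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
		ppl_conf->block_size = 512;
	} else {
		ppl_conf->block_size = queue_logical_block_size(mddev->queue);
	}

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];

		/* ... per-disk state, see ppl_init_child_log() ... */
		log->ppl_conf = ppl_conf;
	}

	/* load the existing logs and recover from them if possible */
	ret = ppl_load(ppl_conf);
	if (ret)
		goto err;

	/* ... decisions based on recovered_entries / mismatch_count omitted ... */

	conf->log_private = ppl_conf;
	set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

	return 0;
err:
	__ppl_exit_log(ppl_conf);
	return ret;
}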
1468 struct ppl_conf *ppl_conf = conf->log_private; in ppl_modify_log() local
1483 if (rdev->raid_disk >= ppl_conf->count) in ppl_modify_log()
1486 log = &ppl_conf->child_logs[rdev->raid_disk]; in ppl_modify_log()
1509 struct ppl_conf *ppl_conf = NULL; in ppl_write_hint_show() local
1514 ppl_conf = conf->log_private; in ppl_write_hint_show()
1515 ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0); in ppl_write_hint_show()
1525 struct ppl_conf *ppl_conf; in ppl_write_hint_store() local
1542 ppl_conf = conf->log_private; in ppl_write_hint_store()
1543 if (!ppl_conf) in ppl_write_hint_store()
1546 ppl_conf->write_hint = new; in ppl_write_hint_store()
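Finally, the two sysfs accessors above expose write_hint; a sketch of both, with the locking, the raid5_has_ppl() check and the kstrtou16() parsing being assumptions:

static ssize_t ppl_write_hint_show(struct mddev *mddev, char *buf)
{
	struct r5conf *conf;
	struct ppl_conf *ppl_conf = NULL;
	size_t ret;

	spin_lock(&mddev->lock);
	conf = mddev->private;
	if (conf && raid5_has_ppl(conf))
		ppl_conf = conf->log_private;
	ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
	spin_unlock(&mddev->lock);

	return ret;
}

static ssize_t ppl_write_hint_store(struct mddev *mddev, const char *page,
				    size_t len)
{
	struct r5conf *conf;
	struct ppl_conf *ppl_conf;
	unsigned short new;
	int err;

	if (len >= PAGE_SIZE || kstrtou16(page, 10, &new))
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;

	conf = mddev->private;
	if (!conf) {
		err = -ENODEV;
	} else if (raid5_has_ppl(conf)) {
		ppl_conf = conf->log_private;
		if (!ppl_conf)
			err = -EINVAL;
		else
			ppl_conf->write_hint = new;
	} else {
		err = -EINVAL;
	}

	mddev_unlock(mddev);
	return err ?: len;
}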