Lines matching refs: pd_idx (all references to the stripe_head parity-disk index; the functions below are from the MD RAID4/5/6 driver, drivers/md/raid5.c)
142 if (idx == sh->pd_idx) in raid6_idx_to_slot()
275 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); in do_release_stripe()
871 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
1567 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
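
The hits at 142 and 1567 are the RAID6 syndrome path: set_syndrome_sources() skips or repositions the P and Q devices when building the source list, and raid6_idx_to_slot() translates a device index into a syndrome slot, with data disks filling slots 0..syndrome_disks-1 and P and Q taking the last two. A minimal user-space sketch of that translation for the non-DDF layout, with the pd_idx/qd_idx values below chosen arbitrarily:

#include <stdio.h>

/* Simplified rendition of raid6_idx_to_slot() for the non-DDF layout:
 * data devices get slots 0..syndrome_disks-1 in walk order, P always
 * maps to slot syndrome_disks, Q to syndrome_disks + 1. */
static int idx_to_slot(int idx, int pd_idx, int qd_idx,
		       int *count, int syndrome_disks)
{
	int slot = *count;

	if (idx == qd_idx)
		return syndrome_disks + 1;
	if (idx == pd_idx)
		return syndrome_disks;
	(*count)++;
	return slot;
}

int main(void)
{
	int disks = 6, syndrome_disks = disks - 2;
	int pd_idx = 4, qd_idx = 5;	/* hypothetical parity placement */
	int count = 0;

	for (int i = 0; i < disks; i++)
		printf("dev %d -> slot %d\n", i,
		       idx_to_slot(i, pd_idx, qd_idx, &count, syndrome_disks));
	return 0;
}
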
1798 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5() local
1802 unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_prexor5()
1803 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
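
ops_run_prexor5() sets up the read-modify-write case: the old parity page (dev[pd_idx]) is both the XOR destination and the first source, so XORing in the old contents of the blocks about to be rewritten cancels them out of the parity; the reconstruct step then XORs the new data in. A toy demonstration of that identity on plain buffers (all names here are illustrative, not the kernel's):

#include <stdio.h>
#include <string.h>

#define BLK 8

/* dst ^= src, byte by byte */
static void xor_blk(unsigned char *dst, const unsigned char *src)
{
	for (int i = 0; i < BLK; i++)
		dst[i] ^= src[i];
}

int main(void)
{
	unsigned char d0[BLK] = "AAAAAAA", d1[BLK] = "BBBBBBB";
	unsigned char new_d1[BLK] = "XXXXXXX";
	unsigned char parity[BLK] = { 0 };

	/* initial parity = d0 ^ d1 */
	xor_blk(parity, d0);
	xor_blk(parity, d1);

	/* prexor step: fold the OLD contents of the block being
	 * rewritten back out of the parity... */
	xor_blk(parity, d1);
	/* ...then (the reconstruct step) fold the NEW data in */
	xor_blk(parity, new_d1);
	memcpy(d1, new_d1, BLK);

	/* parity now equals d0 ^ new_d1 without re-reading d0 */
	unsigned char check[BLK] = { 0 };
	xor_blk(check, d0);
	xor_blk(check, d1);
	printf("rmw parity %s full recompute\n",
	       memcmp(parity, check, BLK) ? "differs from" : "matches");
	return 0;
}
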
1931 int pd_idx = sh->pd_idx; in ops_complete_reconstruct() local
1948 if (dev->written || i == pd_idx || i == qd_idx) { in ops_complete_reconstruct()
1982 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5() local
1995 if (pd_idx == i) in ops_run_reconstruct5()
2002 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
2015 off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2016 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2026 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2027 off_dest = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2030 if (i != pd_idx) { in ops_run_reconstruct5()
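
ops_run_reconstruct5() covers the other write path, reconstruct-write: xor_dest points at dev[pd_idx] and every device with i != pd_idx contributes a source, so parity is recomputed from scratch as the XOR of all data blocks (the R5_Discard hit at 2002 is the shortcut for a fully discarded stripe). A minimal sketch of the full recompute, assuming invented buffer names:

#include <stdio.h>

#define DISKS 5
#define BLK 4

int main(void)
{
	/* DISKS - 1 data blocks plus one parity slot; pd_idx is chosen
	 * arbitrarily here, the kernel rotates it per stripe */
	unsigned char dev[DISKS][BLK] = {
		"abc", "def", "ghi", "jkl", { 0 }
	};
	int pd_idx = DISKS - 1;

	/* xor_dest = dev[pd_idx]; every other device is a source */
	for (int i = 0; i < DISKS; i++) {
		if (i == pd_idx)
			continue;
		for (int b = 0; b < BLK; b++)
			dev[pd_idx][b] ^= dev[i][b];
	}

	for (int b = 0; b < BLK; b++)
		printf("%02x ", dev[pd_idx][b]);
	printf("\n");
	return 0;
}
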
2088 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
2095 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
2150 int pd_idx = sh->pd_idx; in ops_run_check_p() local
2166 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
2167 off_dest = sh->dev[pd_idx].offset; in ops_run_check_p()
2171 if (i == pd_idx || i == qd_idx) in ops_run_check_p()
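
ops_run_check_p() verifies RAID5 parity without modifying anything: it XOR-sums every block, parity included, and the stripe is clean only if the result is zero. The same invariant in a few lines of standalone C:

#include <stdio.h>

#define DISKS 4
#define BLK 4

int main(void)
{
	unsigned char dev[DISKS][BLK] = { "one", "two", "six", { 0 } };
	int pd_idx = DISKS - 1;

	/* make the parity slot consistent first */
	for (int i = 0; i < pd_idx; i++)
		for (int b = 0; b < BLK; b++)
			dev[pd_idx][b] ^= dev[i][b];

	/* the check: XOR of ALL blocks, parity included, must be zero */
	unsigned char sum[BLK] = { 0 };
	for (int i = 0; i < DISKS; i++)
		for (int b = 0; b < BLK; b++)
			sum[b] ^= dev[i][b];

	int clean = 1;
	for (int b = 0; b < BLK; b++)
		if (sum[b])
			clean = 0;
	printf("parity %s\n", clean ? "clean" : "MISMATCH");
	return 0;
}
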
2770 if (sh->qd_idx >= 0 && sh->pd_idx == i) in raid5_end_read_request()
2919 int pd_idx, qd_idx; in raid5_compute_sector() local
2947 pd_idx = qd_idx = -1; in raid5_compute_sector()
2950 pd_idx = data_disks; in raid5_compute_sector()
2955 pd_idx = data_disks - sector_div(stripe2, raid_disks); in raid5_compute_sector()
2956 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2960 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
2961 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2965 pd_idx = data_disks - sector_div(stripe2, raid_disks); in raid5_compute_sector()
2966 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
2969 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
2970 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
2973 pd_idx = 0; in raid5_compute_sector()
2977 pd_idx = data_disks; in raid5_compute_sector()
2987 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
2988 qd_idx = pd_idx + 1; in raid5_compute_sector()
2989 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
2992 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
2996 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
2997 qd_idx = pd_idx + 1; in raid5_compute_sector()
2998 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
3001 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3005 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
3006 qd_idx = (pd_idx + 1) % raid_disks; in raid5_compute_sector()
3007 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
3010 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
3011 qd_idx = (pd_idx + 1) % raid_disks; in raid5_compute_sector()
3012 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
3016 pd_idx = 0; in raid5_compute_sector()
3021 pd_idx = data_disks; in raid5_compute_sector()
3029 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
3030 qd_idx = pd_idx + 1; in raid5_compute_sector()
3031 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
3034 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3045 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
3046 qd_idx = pd_idx + 1; in raid5_compute_sector()
3047 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
3050 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3057 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
3058 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; in raid5_compute_sector()
3059 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
3065 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
3066 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3072 pd_idx = sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
3073 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3079 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
3080 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
3085 pd_idx = sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
3086 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
3091 pd_idx = 0; in raid5_compute_sector()
3103 sh->pd_idx = pd_idx; in raid5_compute_sector()
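
The long run of matches from 2919 to 3103 is the layout switch in raid5_compute_sector(): each algorithm derives pd_idx (and qd_idx for RAID6) from the stripe number via sector_div(), then adjusts *dd_idx so data indices skip the parity slot(s). A user-space sketch of the four classic RAID5 layouts matching the lines above; sector_div() is approximated with a pointer-based stand-in for the kernel macro, which divides a 64-bit sector count in place and returns the remainder:

#include <stdio.h>
#include <stdint.h>

/* stand-in for the kernel's sector_div(): divide in place, return
 * the remainder */
static uint32_t sector_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = *n % base;
	*n /= base;
	return rem;
}

/* mirrors the RAID5 cases shown above; algo 0..3 corresponds to
 * ALGORITHM_{LEFT,RIGHT}_{ASYMMETRIC,SYMMETRIC} */
static void place(int algo, uint64_t stripe, int raid_disks,
		  int *dd_idx, int *pd_idx)
{
	int data_disks = raid_disks - 1;
	uint64_t stripe2 = stripe;

	switch (algo) {
	case 0:	/* left-asymmetric */
		*pd_idx = data_disks - sector_div(&stripe2, raid_disks);
		if (*dd_idx >= *pd_idx)
			(*dd_idx)++;
		break;
	case 1:	/* right-asymmetric */
		*pd_idx = sector_div(&stripe2, raid_disks);
		if (*dd_idx >= *pd_idx)
			(*dd_idx)++;
		break;
	case 2:	/* left-symmetric (the md default) */
		*pd_idx = data_disks - sector_div(&stripe2, raid_disks);
		*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
		break;
	case 3:	/* right-symmetric */
		*pd_idx = sector_div(&stripe2, raid_disks);
		*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
		break;
	}
}

int main(void)
{
	for (int algo = 0; algo < 4; algo++) {
		printf("algo %d:", algo);
		for (uint64_t s = 0; s < 5; s++) {
			int dd = 0, pd = -1;
			place(algo, s, 5, &dd, &pd);
			printf("  stripe %llu pd=%d dd0=%d",
			       (unsigned long long)s, pd, dd);
		}
		printf("\n");
	}
	return 0;
}
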
3134 if (i == sh->pd_idx) in raid5_compute_blocknr()
3142 if (i > sh->pd_idx) in raid5_compute_blocknr()
3147 if (i < sh->pd_idx) in raid5_compute_blocknr()
3149 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3168 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3170 else if (i > sh->pd_idx) in raid5_compute_blocknr()
3175 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3179 if (i < sh->pd_idx) in raid5_compute_blocknr()
3181 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
3191 if (sh->pd_idx == 0) in raid5_compute_blocknr()
3195 if (i < sh->pd_idx) in raid5_compute_blocknr()
3197 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3202 if (i > sh->pd_idx) in raid5_compute_blocknr()
3207 if (i < sh->pd_idx) in raid5_compute_blocknr()
3209 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3225 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
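
raid5_compute_blocknr() is the inverse mapping, recovering a logical block number from a physical (stripe, disk) pair by undoing the parity rotation; the hit at 3225 is its self-check, which re-runs raid5_compute_sector() and complains if sector, dd_idx, or pd_idx fail to round-trip. A small round-trip test of the same idea for the left-symmetric layout (helper names are mine):

#include <stdio.h>
#include <stdint.h>

static uint32_t sector_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = *n % base;
	*n /= base;
	return rem;
}

/* forward map, left-symmetric, as in the listing above */
static int fwd(uint64_t stripe, int raid_disks, int dd_idx, int *pd_idx)
{
	uint64_t s2 = stripe;

	*pd_idx = (raid_disks - 1) - sector_div(&s2, raid_disks);
	return (*pd_idx + 1 + dd_idx) % raid_disks;
}

/* inverse map, mirroring raid5_compute_blocknr(): strip the rotation
 * to recover the data index from a physical slot */
static int inv(int i, int raid_disks, int pd_idx)
{
	if (i < pd_idx)
		i += raid_disks;
	i -= (pd_idx + 1);
	return i;
}

int main(void)
{
	int raid_disks = 5, bad = 0;

	/* round-trip check in the spirit of the sanity test at the
	 * "check != sh->sector || dummy1 != dd_idx" line above */
	for (uint64_t stripe = 0; stripe < 10; stripe++)
		for (int dd = 0; dd < raid_disks - 1; dd++) {
			int pd, i = fwd(stripe, raid_disks, dd, &pd);

			if (i == pd || inv(i, raid_disks, pd) != dd)
				bad++;
		}
	printf("round-trip %s\n", bad ? "BROKEN" : "ok");
	return 0;
}
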
3294 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction() local
3340 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
3341 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
3348 if (i == pd_idx || i == qd_idx) in schedule_reconstruction()
3375 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3376 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3391 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) in schedule_reconstruction()
3449 if (i != sh->pd_idx && in add_stripe_bio()
3669 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3795 s->failed_num[i] == sh->pd_idx || in need_this_block()
3817 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3857 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || in fetch_block()
4019 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
4021 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4022 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4104 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4116 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4140 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_dirtying()
4169 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4194 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
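
handle_stripe_dirtying() chooses between the two write strategies shown in the ops_run_* groups above by counting reads: rmw needs the old contents of each written block plus the old parity, rcw needs every data block that is not being overwritten. A deliberately simplified cost comparison, with the kernel's flag tests (R5_LOCKED, R5_UPTODATE, R5_OVERWRITE and friends) collapsed into plain booleans, so this is a sketch of the heuristic rather than the real decision logic:

#include <stdio.h>

#define DISKS 6

int main(void)
{
	/* toy stripe state: which devices carry new writes, which are
	 * already up to date in the stripe cache; pd_idx hypothetical */
	int towrite[DISKS]  = { 1, 1, 0, 0, 0, 0 };
	int uptodate[DISKS] = { 0, 0, 0, 0, 1, 0 };
	int pd_idx = 5, rmw = 0, rcw = 0;

	for (int i = 0; i < DISKS; i++) {
		/* rmw must read each written block's old contents,
		 * plus the old parity */
		if ((towrite[i] || i == pd_idx) && !uptodate[i])
			rmw++;
		/* rcw must read every data block NOT being overwritten */
		if (i != pd_idx && !towrite[i] && !uptodate[i])
			rcw++;
	}
	printf("rmw=%d rcw=%d -> %s\n", rmw, rcw,
	       rmw < rcw ? "read-modify-write" : "reconstruct-write");
	return 0;
}
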
4254 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4263 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
4315 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4316 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
4336 int pd_idx = sh->pd_idx; in handle_parity_checks6() local
4376 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
4415 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
4485 &sh->dev[pd_idx].flags); in handle_parity_checks6()
4486 *target = pd_idx; in handle_parity_checks6()
4520 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
4552 if (j != sh2->pd_idx && in handle_stripe_expansion()
4926 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4991 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
4992 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
4999 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
5009 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
5022 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
5023 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
5024 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
5137 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
5710 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5716 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5719 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5733 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
6110 if (j == sh->pd_idx) in reshape_request()