Lines matching refs: sector_nr (drivers/md/raid1.c)
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
869 static int raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
871 int idx = sector_to_idx(sector_nr); in raise_barrier()
919 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) in lower_barrier() argument
921 int idx = sector_to_idx(sector_nr); in lower_barrier()
989 static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr) in wait_read_barrier() argument
991 int idx = sector_to_idx(sector_nr); in wait_read_barrier()
1022 static void wait_barrier(struct r1conf *conf, sector_t sector_nr) in wait_barrier() argument
1024 int idx = sector_to_idx(sector_nr); in wait_barrier()
1035 static void allow_barrier(struct r1conf *conf, sector_t sector_nr) in allow_barrier() argument
1037 int idx = sector_to_idx(sector_nr); in allow_barrier()
2614 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, in raid1_sync_request() argument
2629 int idx = sector_to_idx(sector_nr); in raid1_sync_request()
2637 if (sector_nr >= max_sector) { in raid1_sync_request()
2664 return max_sector - sector_nr; in raid1_sync_request()
2669 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in raid1_sync_request()
2687 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid1_sync_request()
2688 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid1_sync_request()
2691 if (raise_barrier(conf, sector_nr)) in raid1_sync_request()
2707 r1_bio->sector = sector_nr; in raid1_sync_request()
2711 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors); in raid1_sync_request()
2731 if (is_badblock(rdev, sector_nr, good_sectors, in raid1_sync_request()
2733 if (first_bad > sector_nr) in raid1_sync_request()
2734 good_sectors = first_bad - sector_nr; in raid1_sync_request()
2736 bad_sectors -= (sector_nr - first_bad); in raid1_sync_request()
2742 if (sector_nr < first_bad) { in raid1_sync_request()
2769 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; in raid1_sync_request()
2788 ok = rdev_set_badblocks(rdev, sector_nr, in raid1_sync_request()
2825 max_sector = sector_nr + min_bad; in raid1_sync_request()
2826 rv = max_sector - sector_nr; in raid1_sync_request()
2834 if (max_sector > sector_nr + good_sectors) in raid1_sync_request()
2835 max_sector = sector_nr + good_sectors; in raid1_sync_request()
2841 if (sector_nr + (len>>9) > max_sector) in raid1_sync_request()
2842 len = (max_sector - sector_nr) << 9; in raid1_sync_request()
2846 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid1_sync_request()
2871 sector_nr += len>>9; in raid1_sync_request()
2878 conf->cluster_sync_high < sector_nr + nr_sectors) { in raid1_sync_request()
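Every barrier helper in the listing (raise_barrier(), lower_barrier(), wait_barrier(), wait_read_barrier(), allow_barrier()) starts by converting sector_nr into a bucket index with sector_to_idx(), so regular I/O and resync only serialise against each other within the same barrier unit. The following is a minimal, standalone sketch of that mapping: the barrier-unit shift mirrors raid1's scheme, but the hash function and bucket count are illustrative stand-ins, not the kernel's hash_long()-based implementation.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define BARRIER_UNIT_SECTOR_BITS 17          /* one barrier unit = 2^17 sectors (64 MiB) */
#define BARRIER_BUCKETS_NR       (1 << 10)   /* illustrative bucket count */

/* Map a sector to the barrier bucket that serialises I/O against resync. */
static int sector_to_idx(sector_t sector)
{
	/* All sectors in the same barrier unit land in the same bucket. */
	uint64_t unit = sector >> BARRIER_UNIT_SECTOR_BITS;

	/* Cheap multiplicative hash standing in for the kernel's hash_long(). */
	return (int)((unit * 0x9E3779B97F4A7C15ULL) >> 54) & (BARRIER_BUCKETS_NR - 1);
}

int main(void)
{
	sector_t sector_nr = 123456789;

	printf("sector %llu -> bucket %d\n",
	       (unsigned long long)sector_nr, sector_to_idx(sector_nr));
	return 0;
}
```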
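raid1_sync_request() (the entries at 2614-2878 above) also clips each resync chunk so it never crosses a barrier-unit boundary; that is what the align_to_barrier_unit_end(sector_nr, good_sectors) call at 2711 is for. Below is a hedged sketch of that clipping under the same 2^17-sector unit size assumed above; the helper name comes from the listing, but this body is an illustration, not the kernel source.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define BARRIER_UNIT_SECTOR_BITS 17
#define BARRIER_UNIT_SECTOR_SIZE ((sector_t)1 << BARRIER_UNIT_SECTOR_BITS)

/*
 * Limit "sectors" so that [start_sector, start_sector + result) stays inside
 * the barrier unit containing start_sector.
 */
static sector_t align_to_barrier_unit_end(sector_t start_sector, sector_t sectors)
{
	/* Distance from start_sector to the end of its barrier unit. */
	sector_t len = BARRIER_UNIT_SECTOR_SIZE -
		       (start_sector & (BARRIER_UNIT_SECTOR_SIZE - 1));

	return sectors < len ? sectors : len;
}

int main(void)
{
	/* 100 sectors before a unit boundary: a 4096-sector request is clipped to 100. */
	sector_t start = BARRIER_UNIT_SECTOR_SIZE - 100;

	printf("clipped to %llu sectors\n",
	       (unsigned long long)align_to_barrier_unit_end(start, 4096));
	return 0;
}
```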