Lines matching refs:sector (uses of the identifier sector in drivers/block/drbd/drbd_worker.c; the number at the start of each entry is the source line)
127 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final()
157 drbd_rs_complete_io(device, i.sector); in drbd_endio_write_sec_final()
183 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
361 sector_t sector = peer_req->i.sector; in w_e_send_csum() local
372 err = drbd_send_drequest_csum(peer_device, sector, size, in w_e_send_csum()
392 static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size) in read_for_csum() argument
402 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, in read_for_csum()
593 sector_t sector; in make_resync_request() local
659 sector = BM_BIT_TO_SECT(bit); in make_resync_request()
661 if (drbd_try_rs_begin_io(device, sector)) { in make_resync_request()
668 drbd_rs_complete_io(device, sector); in make_resync_request()
686 if (sector & ((1<<(align+3))-1)) in make_resync_request()
715 if (sector + (size>>9) > capacity) in make_resync_request()
716 size = (capacity-sector)<<9; in make_resync_request()
719 switch (read_for_csum(peer_device, sector, size)) { in make_resync_request()
724 drbd_rs_complete_io(device, sector); in make_resync_request()
725 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in make_resync_request()
740 sector, size, ID_SYNCER); in make_resync_request()
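
The make_resync_request() hits above show how a dirty bitmap bit becomes a resync request: the bit is converted to a starting sector and the request size is clamped so it never runs past the end of the device. A minimal standalone sketch of that arithmetic, assuming DRBD's 4 KiB-per-bitmap-bit granularity; BM_BLOCK_SHIFT and BM_BIT_TO_SECT are redefined locally for illustration rather than taken from the kernel headers.

#include <stdio.h>

typedef unsigned long long sector_t;

#define BM_BLOCK_SHIFT    12                               /* assumed: 4 KiB per bitmap bit */
#define BM_BIT_TO_SECT(b) ((sector_t)(b) << (BM_BLOCK_SHIFT - 9))

int main(void)
{
	sector_t capacity = 2097152;    /* example device size: 1 GiB in 512-byte sectors */
	unsigned long bit = 262140;     /* dirty bitmap bit close to the end of the device */
	int size = 128 << 10;           /* candidate request size in bytes (128 KiB) */

	sector_t sector = BM_BIT_TO_SECT(bit);

	/* Same clamp as in make_resync_request(): trim the last request so
	 * sector + size does not exceed the device capacity. */
	if (sector + (size >> 9) > capacity)
		size = (capacity - sector) << 9;

	printf("resync request: sector %llu, %d bytes\n", sector, size);
	return 0;
}
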
771 sector_t sector; in make_ov_request() local
780 sector = device->ov_position; in make_ov_request()
782 if (sector >= capacity) in make_ov_request()
790 && sector >= device->ov_stop_sector; in make_ov_request()
796 if (drbd_try_rs_begin_io(device, sector)) { in make_ov_request()
797 device->ov_position = sector; in make_ov_request()
801 if (sector + (size>>9) > capacity) in make_ov_request()
802 size = (capacity-sector)<<9; in make_ov_request()
805 if (drbd_send_ov_request(first_peer_device(device), sector, size)) { in make_ov_request()
809 sector += BM_SECT_PER_BIT; in make_ov_request()
811 device->ov_position = sector; in make_ov_request()
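
make_ov_request() above advances an online-verify cursor (device->ov_position) across the device one bitmap bit at a time, stopping at the capacity or at ov_stop_sector and clamping the final request at the device end. A hedged sketch of that cursor loop; BM_SECT_PER_BIT is redefined locally and send_ov_request() is a stand-in for drbd_send_ov_request(), not the kernel function.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

#define BM_SECT_PER_BIT 8    /* assumed: 4 KiB per bitmap bit = 8 sectors of 512 bytes */

/* Stand-in for drbd_send_ov_request(); always "succeeds" here. */
static bool send_ov_request(sector_t sector, int size)
{
	printf("OV request: sector %llu, %d bytes\n", sector, size);
	return true;
}

int main(void)
{
	sector_t capacity = 2048;        /* example device: 1 MiB in 512-byte sectors */
	sector_t ov_stop_sector = capacity;
	sector_t ov_position = 2032;     /* verify cursor resumes here */

	for (int i = 0; i < 4; i++) {    /* up to four requests in this work cycle */
		sector_t sector = ov_position;
		int size = BM_SECT_PER_BIT << 9;   /* one bitmap bit's worth of data */

		if (sector >= capacity || sector >= ov_stop_sector)
			break;

		/* Same end-of-device clamp as in make_ov_request(). */
		if (sector + (size >> 9) > capacity)
			size = (capacity - sector) << 9;

		if (!send_ov_request(sector, size))
			break;

		/* Advance the cursor by one bitmap bit. */
		ov_position = sector + BM_SECT_PER_BIT;
	}
	return 0;
}
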
1072 (unsigned long long)peer_req->i.sector); in w_e_end_data_req()
1129 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_rsdata_req()
1151 (unsigned long long)peer_req->i.sector); in w_e_end_rsdata_req()
1156 drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size); in w_e_end_rsdata_req()
1185 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_csum_rs_req()
1207 drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size); in w_e_end_csum_rs_req()
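
The w_e_send_csum()/w_e_end_csum_rs_req() hits above belong to checksum-based resync: a digest of the block is exchanged with the peer, and a matching digest lets the block be marked in sync (the drbd_set_in_sync() call on line 1207) without retransmitting the data. A toy sketch of that decision only; toy_digest() is an invented XOR checksum standing in for the crypto digest DRBD actually uses.

#include <stdio.h>
#include <string.h>

typedef unsigned long long sector_t;

/* Invented stand-in for the real crypto digest; illustration only. */
static unsigned char toy_digest(const unsigned char *buf, int len)
{
	unsigned char d = 0;
	for (int i = 0; i < len; i++)
		d ^= buf[i];
	return d;
}

int main(void)
{
	unsigned char local[4096], peer[4096];
	sector_t sector = 2048;

	memset(local, 0xab, sizeof(local));
	memset(peer,  0xab, sizeof(peer));    /* change to 0xac to simulate divergence */

	if (toy_digest(local, sizeof(local)) == toy_digest(peer, sizeof(peer)))
		/* Digests match: the kernel calls drbd_set_in_sync() here. */
		printf("sector %llu in sync, no data transfer needed\n", sector);
	else
		/* Mismatch: the full block must be transferred and written. */
		printf("sector %llu differs, request full data\n", sector);

	return 0;
}
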
1237 sector_t sector = peer_req->i.sector; in w_e_end_ov_req() local
1266 err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY); in w_e_end_ov_req()
1278 void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size) in drbd_ov_out_of_sync_found() argument
1280 if (device->ov_last_oos_start + device->ov_last_oos_size == sector) { in drbd_ov_out_of_sync_found()
1283 device->ov_last_oos_start = sector; in drbd_ov_out_of_sync_found()
1286 drbd_set_out_of_sync(device, sector, size); in drbd_ov_out_of_sync_found()
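
drbd_ov_out_of_sync_found() coalesces adjacent online-verify failures into one run: when the newly failed extent starts exactly where the previous run ended, the run is extended, otherwise a new run begins, and the extent is marked out of sync either way. A self-contained sketch of that run-merging logic, with the device state reduced to two file-scope variables and drbd_set_out_of_sync() replaced by a printf.

#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t ov_last_oos_start;
static sector_t ov_last_oos_size;    /* length of the current run, in 512-byte sectors */

static void ov_out_of_sync_found(sector_t sector, int size)
{
	if (ov_last_oos_start + ov_last_oos_size == sector) {
		/* Contiguous with the previous bad range: extend it. */
		ov_last_oos_size += size >> 9;
	} else {
		/* There was a gap: start a new run. */
		ov_last_oos_start = sector;
		ov_last_oos_size = size >> 9;
	}
	/* Stand-in for drbd_set_out_of_sync(device, sector, size). */
	printf("out of sync: sector %llu, %d bytes (run %llu+%llu sectors)\n",
	       sector, size, ov_last_oos_start, ov_last_oos_size);
}

int main(void)
{
	ov_out_of_sync_found(1024, 4096);   /* starts a run at sector 1024 */
	ov_out_of_sync_found(1032, 4096);   /* contiguous: the run grows to 16 sectors */
	ov_out_of_sync_found(2048, 4096);   /* gap: a new run starts at sector 2048 */
	return 0;
}
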
1296 sector_t sector = peer_req->i.sector; in w_e_end_ov_reply() local
1311 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_ov_reply()
1336 drbd_ov_out_of_sync_found(device, sector, size); in w_e_end_ov_reply()
1340 err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, in w_e_end_ov_reply()
1352 (sector + (size>>9)) >= device->ov_stop_sector; in w_e_end_ov_reply()
1507 err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size, in w_send_read_req()