Lines matching refs:sector (cross-reference listing: each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark the symbol's role on that line)
177 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in drbd_alloc_peer_req() argument
208 peer_req->i.sector = sector; in drbd_alloc_peer_req()
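Every peer request carries an interval keyed by the sector passed in here; the conflict detection and lookups further down all run on that key. A minimal sketch of the shape, assuming only what the listing shows (sector in 512-byte units, size in bytes); the real struct drbd_interval has more fields:

```c
#include <stdint.h>

typedef uint64_t sector_t;

/* Illustrative stand-in for the interval embedded in each peer request. */
struct interval {
	sector_t sector;	/* start, in 512-byte sectors */
	unsigned int size;	/* length in bytes */
	unsigned int local:1;	/* set when a local request backs it */
};
```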
1414 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, in drbd_issue_peer_discard_or_zero_out()
1452 sector_t sector = peer_req->i.sector; in drbd_submit_peer_request() local
1506 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request()
1519 sector += len >> 9; in drbd_submit_peer_request()
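drbd_submit_peer_request() may need several bios for one request, and each chunk advances the start by len >> 9: a block-layer sector is 512 bytes, so a byte length becomes a sector count by shifting right 9. A compilable userspace sketch of that arithmetic (function and names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Split a byte range into bio-sized chunks, advancing the start sector
 * the same way the submit path does: one sector per 512 bytes. */
static void submit_chunks(sector_t sector, unsigned int data_size,
			  unsigned int max_bio_bytes)
{
	while (data_size) {
		unsigned int len =
			data_size < max_bio_bytes ? data_size : max_bio_bytes;

		printf("bio at sector %llu, %u bytes\n",
		       (unsigned long long)sector, len);
		sector += len >> 9;	/* bytes -> 512-byte sectors */
		data_size -= len;
	}
}

int main(void)
{
	submit_chunks(2048, 16384, 4096);	/* emits four 4 KiB bios */
	return 0;
}
```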
1659 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in read_in_block() argument
1709 if (sector + (ds>>9) > capacity) { in read_in_block()
1713 (unsigned long long)sector, ds); in read_in_block()
1720 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO); in read_in_block()
1757 (unsigned long long)sector, data_size); in read_in_block()
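read_in_block() rejects a request that would run past the device: capacity is counted in sectors, the payload length ds in bytes, so the bound is sector + (ds >> 9) against capacity. Stand-alone form of that check (names are illustrative):

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* True if ds bytes starting at `sector` fit on a device of `capacity`
 * 512-byte sectors; mirrors the bound tested in read_in_block(). */
static bool request_in_bounds(sector_t sector, unsigned int ds,
			      sector_t capacity)
{
	return sector + (ds >> 9) <= capacity;
}
```

On failure the kernel logs the offending sector and size, as the lines above show, and rejects the request.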
1795 sector_t sector, int data_size) in recv_dless_read() argument
1818 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); in recv_dless_read()
1852 sector_t sector = peer_req->i.sector; in e_end_resync_block() local
1858 drbd_set_in_sync(peer_device, sector, peer_req->i.size); in e_end_resync_block()
1862 drbd_rs_failed_io(peer_device, sector, peer_req->i.size); in e_end_resync_block()
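e_end_resync_block() is the completion hook for a resync write: on success the range is marked in sync, on I/O error it is recorded as failed resync I/O so the bitmap keeps it dirty. A condensed sketch of that branch, with hypothetical helpers standing in for drbd_set_in_sync() and drbd_rs_failed_io():

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Hypothetical stand-ins for the bitmap bookkeeping calls. */
static void mark_in_sync(sector_t sector, unsigned int size)
{ (void)sector; (void)size; /* range becomes clean in the bitmap */ }
static void mark_resync_failed(sector_t sector, unsigned int size)
{ (void)sector; (void)size; /* range stays out of sync */ }

static void end_resync_block(sector_t sector, unsigned int size, bool io_ok)
{
	if (io_ok)
		mark_in_sync(sector, size);
	else
		mark_resync_failed(sector, size);
}
```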
1871 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, in recv_resync_read() argument
1877 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi); in recv_resync_read()
1913 sector_t sector, bool missing_ok, const char *func) in find_request() argument
1919 if (drbd_contains_interval(root, sector, &req->i) && req->i.local) in find_request()
1923 (unsigned long)id, (unsigned long long)sector); in find_request()
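find_request() distrusts the peer: the block_id it receives is a request pointer that the peer merely echoes back, so before dereferencing it the receiver verifies that this exact object sits in the interval tree at the advertised sector. The simplified stand-in below uses a plain binary search tree and assumes unique start sectors; the kernel uses an augmented rbtree via drbd_contains_interval():

```c
#include <stdbool.h>
#include <stdint.h>

struct node {
	uint64_t sector;
	struct node *left, *right;
};

/* True only if `candidate` itself is the node stored for `sector`;
 * a stale or forged pointer fails the identity comparison. */
static bool tree_contains(const struct node *root, uint64_t sector,
			  const struct node *candidate)
{
	while (root) {
		if (sector < root->sector)
			root = root->left;
		else if (sector > root->sector)
			root = root->right;
		else
			return root == candidate;
	}
	return false;
}
```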
1933 sector_t sector; in receive_DataReply() local
1942 sector = be64_to_cpu(p->sector); in receive_DataReply()
1945 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__); in receive_DataReply()
1950 err = recv_dless_read(peer_device, req, sector, pi->size); in receive_DataReply()
1964 sector_t sector; in receive_RSDataReply() local
1973 sector = be64_to_cpu(p->sector); in receive_RSDataReply()
1980 err = recv_resync_read(peer_device, sector, pi); in receive_RSDataReply()
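Both reply handlers begin by decoding big-endian wire fields with be64_to_cpu(); block_id stays opaque and is compared verbatim. A userspace equivalent using glibc's <endian.h>; the struct layout here is a plausible stand-in for the on-wire header, not a verified copy of it:

```c
#include <endian.h>
#include <stdint.h>

struct p_data_wire {
	uint64_t sector;	/* big-endian on the wire */
	uint64_t block_id;	/* opaque cookie, echoed back unconverted */
	uint32_t seq_num;
	uint32_t dp_flags;
} __attribute__((packed));

static uint64_t wire_sector(const struct p_data_wire *p)
{
	return be64toh(p->sector);	/* userspace spelling of be64_to_cpu() */
}
```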
1996 sector_t sector, int size) in restart_conflicting_writes() argument
2001 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in restart_conflicting_writes()
2023 sector_t sector = peer_req->i.sector; in e_end_block() local
2034 drbd_set_in_sync(peer_device, sector, peer_req->i.size); in e_end_block()
2050 restart_conflicting_writes(device, sector, peer_req->i.size); in e_end_block()
2132 if (overlaps(peer_req->i.sector, peer_req->i.size, in overlapping_resync_write()
2133 rs_req->i.sector, rs_req->i.size)) { in overlapping_resync_write()
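overlapping_resync_write() walks the in-flight resync writes and tests each against the incoming peer write. The listing shows only the call; below is a plausible formulation of the overlaps() predicate, assuming byte sizes and half-open sector intervals (an assumption, not the kernel macro verbatim):

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* [s1, s1 + size1/512) and [s2, s2 + size2/512) overlap iff each
 * starts before the other ends. */
static bool intervals_overlap(sector_t s1, unsigned int size1,
			      sector_t s2, unsigned int size2)
{
	return s1 < s2 + (size2 >> 9) && s2 < s1 + (size1 >> 9);
}
```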
2231 static void fail_postponed_requests(struct drbd_device *device, sector_t sector, in fail_postponed_requests() argument
2238 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in fail_postponed_requests()
2262 sector_t sector = peer_req->i.sector; in handle_write_conflicts() local
2275 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in handle_write_conflicts()
2293 equal = i->sector == sector && i->size == size; in handle_write_conflicts()
2301 bool superseded = i->sector <= sector && i->sector + in handle_write_conflicts()
2302 (i->size >> 9) >= sector + (size >> 9); in handle_write_conflicts()
2308 (unsigned long long)i->sector, i->size, in handle_write_conflicts()
2309 (unsigned long long)sector, size, in handle_write_conflicts()
2330 (unsigned long long)i->sector, i->size, in handle_write_conflicts()
2331 (unsigned long long)sector, size); in handle_write_conflicts()
2349 fail_postponed_requests(device, sector, size); in handle_write_conflicts()
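The superseded test in handle_write_conflicts() is a full-containment check: an existing conflicting write i supersedes the incoming one only if it covers it completely. Stand-alone form with a worked case (sizes in bytes, sectors in 512-byte units):

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

static bool fully_covers(sector_t i_sector, unsigned int i_size,
			 sector_t sector, unsigned int size)
{
	return i_sector <= sector &&
	       i_sector + (i_size >> 9) >= sector + (size >> 9);
}

/* Example: an 8 KiB write at sector 64 spans 16 sectors and ends at 80;
 * it covers a 4 KiB write at sector 68 (ends at 76): 64 <= 68 && 80 >= 76. */
```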
2375 sector_t sector; in receive_Data() local
2405 sector = be64_to_cpu(p->sector); in receive_Data()
2406 peer_req = read_in_block(peer_device, p->block_id, sector, pi); in receive_Data()
2509 drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size); in receive_Data()
2548 bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector, in drbd_rs_should_slow_down() argument
2559 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); in drbd_rs_should_slow_down()
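drbd_rs_should_slow_down() maps the sector to its resync extent and asks the LRU cache (lc_find()) whether application I/O touched that extent recently. Assuming DRBD's usual 16 MiB resync extents (BM_EXT_SHIFT == 24) and 512-byte sectors, BM_SECT_TO_EXT() reduces to a shift:

```c
#include <stdint.h>

typedef uint64_t sector_t;

#define BM_EXT_SHIFT	24	/* 16 MiB per resync extent (assumed) */

/* Counterpart of BM_SECT_TO_EXT(): one extent spans 2^(24 - 9) = 32768
 * sectors, so the extent number is a plain right shift. */
static unsigned long sect_to_ext(sector_t sector)
{
	return sector >> (BM_EXT_SHIFT - 9);
}
```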
2622 sector_t sector; in receive_DataRequest() local
2635 sector = be64_to_cpu(p->sector); in receive_DataRequest()
2640 (unsigned long long)sector, size); in receive_DataRequest()
2643 if (sector + (size>>9) > capacity) { in receive_DataRequest()
2645 (unsigned long long)sector, size); in receive_DataRequest()
2664 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC); in receive_DataRequest()
2680 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, in receive_DataRequest()
2705 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in receive_DataRequest()
2727 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in receive_DataRequest()
2746 device->ov_start_sector = sector; in receive_DataRequest()
2747 device->ov_position = sector; in receive_DataRequest()
2748 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector); in receive_DataRequest()
2755 (unsigned long long)sector); in receive_DataRequest()
2797 && drbd_rs_should_slow_down(peer_device, sector, false)) in receive_DataRequest()
2800 if (drbd_rs_begin_io(device, sector)) in receive_DataRequest()
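receive_DataRequest() repeatedly translates the sector into bitmap coordinates: bm_resync_fo (the resync scan offset) and ov_left are both counted in bitmap bits. Assuming DRBD's usual 4 KiB-per-bit granularity (BM_BLOCK_SHIFT == 12), BM_SECT_TO_BIT() is again a shift, and ov_left is the distance from the sector's bit to the end of the bitmap:

```c
#include <stdint.h>

typedef uint64_t sector_t;

#define BM_BLOCK_SHIFT	12	/* 4 KiB per bitmap bit (assumed) */

/* Counterpart of BM_SECT_TO_BIT(): one bit covers 2^(12 - 9) = 8 sectors. */
static unsigned long sect_to_bit(sector_t sector)
{
	return sector >> (BM_BLOCK_SHIFT - 9);
}

/* Bits left to verify from `sector` to the end, as in the ov_left line. */
static unsigned long bits_left(unsigned long total_bits, sector_t sector)
{
	return total_bits - sect_to_bit(sector);
}
```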
4756 drbd_set_out_of_sync(peer_device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); in receive_out_of_sync()
4766 sector_t sector; in receive_rs_deallocated() local
4774 sector = be64_to_cpu(p->sector); in receive_rs_deallocated()
4782 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, in receive_rs_deallocated()
4818 drbd_rs_complete_io(device, sector); in receive_rs_deallocated()
4819 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER); in receive_rs_deallocated()
5445 sector_t sector = be64_to_cpu(p->sector); in got_IsInSync() local
5458 drbd_rs_complete_io(device, sector); in got_IsInSync()
5459 drbd_set_in_sync(peer_device, sector, blksize); in got_IsInSync()
5471 validate_req_change_req_state(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in validate_req_change_req_state() argument
5480 req = find_request(device, root, id, sector, missing_ok, func); in validate_req_change_req_state()
5498 sector_t sector = be64_to_cpu(p->sector); in got_BlockAck() local
5510 drbd_set_in_sync(peer_device, sector, blksize); in got_BlockAck()
5534 return validate_req_change_req_state(peer_device, p->block_id, sector, in got_BlockAck()
5544 sector_t sector = be64_to_cpu(p->sector); in got_NegAck() local
5557 drbd_rs_failed_io(peer_device, sector, size); in got_NegAck()
5561 err = validate_req_change_req_state(peer_device, p->block_id, sector, in got_NegAck()
5570 drbd_set_out_of_sync(peer_device, sector, size); in got_NegAck()
5580 sector_t sector = be64_to_cpu(p->sector); in got_NegDReply() local
5590 (unsigned long long)sector, be32_to_cpu(p->blksize)); in got_NegDReply()
5592 return validate_req_change_req_state(peer_device, p->block_id, sector, in got_NegDReply()
5601 sector_t sector; in got_NegRSDReply() local
5610 sector = be64_to_cpu(p->sector); in got_NegRSDReply()
5618 drbd_rs_complete_io(device, sector); in got_NegRSDReply()
5621 drbd_rs_failed_io(peer_device, sector, size); in got_NegRSDReply()
5664 sector_t sector; in got_OVResult() local
5672 sector = be64_to_cpu(p->sector); in got_OVResult()
5678 drbd_ov_out_of_sync_found(peer_device, sector, size); in got_OVResult()
5685 drbd_rs_complete_io(device, sector); in got_OVResult()
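All the got_*() handlers above share one shape: decode the big-endian sector and block size, update resync accounting, then either treat ID_SYNCER traffic directly or look up and transition the matching request. A condensed, compilable sketch; the ack layout, the ID_SYNCER value, and validate_and_complete() (standing in for validate_req_change_req_state()) are assumptions, not verified kernel definitions:

```c
#include <endian.h>
#include <stdint.h>

#define ID_SYNCER	((uint64_t)-1)	/* assumed sentinel for resync traffic */

struct p_block_ack {
	uint64_t sector;	/* big-endian */
	uint64_t block_id;	/* opaque, echoed verbatim */
	uint32_t blksize;	/* big-endian */
	uint32_t seq_num;
} __attribute__((packed));

/* Hypothetical stand-ins for the bookkeeping and request transition. */
static void set_in_sync(uint64_t sector, unsigned int size)
{ (void)sector; (void)size; }
static int validate_and_complete(uint64_t id, uint64_t sector, unsigned int size)
{ (void)id; (void)sector; (void)size; return 0; }

static int handle_block_ack(const struct p_block_ack *p)
{
	uint64_t sector = be64toh(p->sector);
	unsigned int size = be32toh(p->blksize);

	if (p->block_id == ID_SYNCER) {	/* resync write: no request to find */
		set_in_sync(sector, size);
		return 0;
	}
	return validate_and_complete(p->block_id, sector, size);
}
```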