Lines matching refs: rinfo
225 struct blkfront_ring_info *rinfo; member
270 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
275 for ((ptr) = (info)->rinfo, (idx) = 0; \
283 return (void *)info->rinfo + i * info->rinfo_size; in get_rinfo()
286 static int get_id_from_freelist(struct blkfront_ring_info *rinfo) in get_id_from_freelist() argument
288 unsigned long free = rinfo->shadow_free; in get_id_from_freelist()
290 BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info)); in get_id_from_freelist()
291 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id; in get_id_from_freelist()
292 rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ in get_id_from_freelist()
296 static int add_id_to_freelist(struct blkfront_ring_info *rinfo, in add_id_to_freelist() argument
299 if (rinfo->shadow[id].req.u.rw.id != id) in add_id_to_freelist()
301 if (rinfo->shadow[id].request == NULL) in add_id_to_freelist()
303 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free; in add_id_to_freelist()
304 rinfo->shadow[id].request = NULL; in add_id_to_freelist()
305 rinfo->shadow_free = id; in add_id_to_freelist()
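
The two helpers above (lines 286-305) implement a freelist threaded through the shadow array itself: each free slot's req.u.rw.id field holds the index of the next free slot, so no separate free stack is needed, and talk_to_blkback() (lines 1971-1972 below) seeds the thread. A minimal userspace sketch of the same technique; the names and ring size are illustrative, not the kernel's:

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 8

    struct shadow {
        unsigned long id;       /* doubles as the freelist link when free */
        void *request;
    };

    static struct shadow shadow[RING_SIZE];
    static unsigned long shadow_free;   /* index of the first free slot */

    static void freelist_init(void)
    {
        /* Thread the freelist through the id fields; talk_to_blkback()
         * does the same over shadow[], with terminator 0x0fffffff. */
        for (unsigned long j = 0; j < RING_SIZE - 1; j++)
            shadow[j].id = j + 1;
        shadow[RING_SIZE - 1].id = 0x0fffffff;
        shadow_free = 0;
    }

    static unsigned long get_id(void)
    {
        unsigned long free = shadow_free;

        assert(free < RING_SIZE);       /* callers guarantee a free slot */
        shadow_free = shadow[free].id;  /* pop: follow the thread */
        shadow[free].id = free;         /* in-use marker; the kernel first
                                         * poisons with 0x0fffffee, then
                                         * stores the id when queuing */
        return free;
    }

    static int put_id(unsigned long id)
    {
        if (shadow[id].id != id)        /* reject double/bogus frees */
            return -1;
        shadow[id].id = shadow_free;    /* push back onto the thread */
        shadow[id].request = NULL;
        shadow_free = id;
        return 0;
    }

    int main(void)
    {
        freelist_init();
        unsigned long a = get_id(), b = get_id();
        put_id(b);
        put_id(a);
        printf("slots %lu and %lu recycled\n", a, b);
        return 0;
    }
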
309 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) in fill_grant_buffer() argument
311 struct blkfront_info *info = rinfo->dev_info; in fill_grant_buffer()
331 list_add(&gnt_list_entry->node, &rinfo->grants); in fill_grant_buffer()
339 &rinfo->grants, node) { in fill_grant_buffer()
350 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo) in get_free_grant() argument
354 BUG_ON(list_empty(&rinfo->grants)); in get_free_grant()
355 gnt_list_entry = list_first_entry(&rinfo->grants, struct grant, in get_free_grant()
360 rinfo->persistent_gnts_c--; in get_free_grant()
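
fill_grant_buffer() pre-populates a per-ring cache of grant entries and get_free_grant() serves from it, with persistent_gnts_c counting how many cached entries still hold a live mapping. A hedged sketch of that cache discipline, using a hand-rolled singly linked list in place of the kernel's list_head (so the kernel's head-vs-tail placement is only noted in a comment):

    #include <assert.h>
    #include <stdlib.h>

    #define GREF_INVALID (-1)

    struct grant {
        int gref;                  /* -1 until granted to the backend */
        struct grant *next;
    };

    static struct grant *cache;    /* free grants, persistent ones first */
    static int persistent_cnt;     /* cached entries still mapped */

    static int fill_grant_buffer(int num)
    {
        for (int i = 0; i < num; i++) {
            struct grant *g = calloc(1, sizeof(*g));
            if (!g)
                return -1;         /* the kernel unwinds the partial list */
            g->gref = GREF_INVALID;
            g->next = cache;
            cache = g;
        }
        return 0;
    }

    static struct grant *get_free_grant(void)
    {
        struct grant *g = cache;

        assert(g);                     /* caller guaranteed availability */
        cache = g->next;
        if (g->gref != GREF_INVALID)   /* reusing a persistent mapping */
            persistent_cnt--;
        return g;
    }

    /* Completion path: persistent grants return hot for reuse; the kernel
     * uses list_add_tail for non-persistent ones so stale entries drift
     * to the back of the cache. */
    static void put_grant(struct grant *g, int still_persistent)
    {
        if (still_persistent)
            persistent_cnt++;
        else
            g->gref = GREF_INVALID;
        g->next = cache;
        cache = g;
    }

    int main(void)
    {
        if (fill_grant_buffer(4))
            return 1;
        struct grant *g = get_free_grant();
        put_grant(g, 0);
        return 0;
    }
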
376 struct blkfront_ring_info *rinfo) in get_grant() argument
378 struct grant *gnt_list_entry = get_free_grant(rinfo); in get_grant()
379 struct blkfront_info *info = rinfo->dev_info; in get_grant()
400 struct blkfront_ring_info *rinfo) in get_indirect_grant() argument
402 struct grant *gnt_list_entry = get_free_grant(rinfo); in get_indirect_grant()
403 struct blkfront_info *info = rinfo->dev_info; in get_indirect_grant()
415 BUG_ON(list_empty(&rinfo->indirect_pages)); in get_indirect_grant()
416 indirect_page = list_first_entry(&rinfo->indirect_pages, in get_indirect_grant()
492 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg; in blkif_restart_queue_callback() local
493 schedule_work(&rinfo->work); in blkif_restart_queue_callback()
545 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo, in blkif_ring_get_request() argument
551 *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt); in blkif_ring_get_request()
552 rinfo->ring.req_prod_pvt++; in blkif_ring_get_request()
554 id = get_id_from_freelist(rinfo); in blkif_ring_get_request()
555 rinfo->shadow[id].request = req; in blkif_ring_get_request()
556 rinfo->shadow[id].status = REQ_PROCESSING; in blkif_ring_get_request()
557 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID; in blkif_ring_get_request()
559 rinfo->shadow[id].req.u.rw.id = id; in blkif_ring_get_request()
564 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_discard_req() argument
566 struct blkfront_info *info = rinfo->dev_info; in blkif_queue_discard_req()
571 id = blkif_ring_get_request(rinfo, req, &final_ring_req); in blkif_queue_discard_req()
572 ring_req = &rinfo->shadow[id].req; in blkif_queue_discard_req()
585 rinfo->shadow[id].status = REQ_WAITING; in blkif_queue_discard_req()
593 struct blkfront_ring_info *rinfo; member
616 struct blkfront_ring_info *rinfo = setup->rinfo; in blkif_setup_rw_req_grant() local
623 struct blk_shadow *shadow = &rinfo->shadow[setup->id]; in blkif_setup_rw_req_grant()
641 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo); in blkif_setup_rw_req_grant()
647 gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo); in blkif_setup_rw_req_grant()
714 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_rw_req() argument
716 struct blkfront_info *info = rinfo->dev_info; in blkif_queue_rw_req()
725 .rinfo = rinfo, in blkif_queue_rw_req()
747 if (rinfo->persistent_gnts_c < max_grefs) { in blkif_queue_rw_req()
751 max_grefs - rinfo->persistent_gnts_c, in blkif_queue_rw_req()
754 &rinfo->callback, in blkif_queue_rw_req()
756 rinfo, in blkif_queue_rw_req()
757 max_grefs - rinfo->persistent_gnts_c); in blkif_queue_rw_req()
763 id = blkif_ring_get_request(rinfo, req, &final_ring_req); in blkif_queue_rw_req()
764 ring_req = &rinfo->shadow[id].req; in blkif_queue_rw_req()
766 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg); in blkif_queue_rw_req()
769 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) in blkif_queue_rw_req()
776 rinfo->shadow[id].num_sg = num_sg; in blkif_queue_rw_req()
814 extra_id = blkif_ring_get_request(rinfo, req, in blkif_queue_rw_req()
816 extra_ring_req = &rinfo->shadow[extra_id].req; in blkif_queue_rw_req()
822 rinfo->shadow[extra_id].num_sg = 0; in blkif_queue_rw_req()
827 rinfo->shadow[extra_id].associated_id = id; in blkif_queue_rw_req()
828 rinfo->shadow[id].associated_id = extra_id; in blkif_queue_rw_req()
839 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) { in blkif_queue_rw_req()
861 rinfo->shadow[id].status = REQ_WAITING; in blkif_queue_rw_req()
864 rinfo->shadow[extra_id].status = REQ_WAITING; in blkif_queue_rw_req()
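
For requests whose segment count needs two ring slots (lines 814-828 above), the two shadow entries are cross-linked through associated_id so that either completion can find its partner, and both are flipped to REQ_WAITING only after the slots are fully written. A toy illustration of the cross-linking; the array size and helper name are hypothetical:

    #include <stdio.h>

    #define NO_ASSOCIATED_ID (~0UL)

    struct blk_shadow {
        unsigned long associated_id;
        int status;                 /* 0 = processing, 1 = waiting */
    };

    static struct blk_shadow shadow[8];

    /* Queue a request spanning two ring slots: cross-link the shadows so
     * whichever response arrives first can locate its partner. */
    static void queue_two_slot_req(unsigned long id, unsigned long extra_id)
    {
        shadow[id].associated_id = extra_id;
        shadow[extra_id].associated_id = id;
        /* Flip both to "waiting" only once both slots are filled in;
         * until then a spurious response is detectably invalid. */
        shadow[id].status = 1;
        shadow[extra_id].status = 1;
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            shadow[i].associated_id = NO_ASSOCIATED_ID;
        queue_two_slot_req(2, 5);
        printf("slot 2 partners slot %lu\n", shadow[2].associated_id);
        return 0;
    }
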
879 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_request() argument
881 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED)) in blkif_queue_request()
886 return blkif_queue_discard_req(req, rinfo); in blkif_queue_request()
888 return blkif_queue_rw_req(req, rinfo); in blkif_queue_request()
891 static inline void flush_requests(struct blkfront_ring_info *rinfo) in flush_requests() argument
895 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify); in flush_requests()
898 notify_remote_via_irq(rinfo->irq); in flush_requests()
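
flush_requests() is the producer half of the Xen shared-ring protocol: requests are staged at a private producer index (req_prod_pvt, bumped in blkif_ring_get_request() at line 552), published to the shared page in one step, and the backend is notified only if its event threshold was crossed. A simplified single-threaded sketch of that publish-then-maybe-notify pattern; this is not the real ring.h macro, and barrier placement is only noted in comments:

    #include <stdio.h>

    #define RING_SIZE 8

    struct sring {                      /* the page shared with the backend */
        unsigned int req_prod;          /* published producer index */
        unsigned int req_event;         /* notify when req_prod passes this */
        int ring[RING_SIZE];
    };

    static struct sring sring;
    static unsigned int req_prod_pvt;   /* frontend-private staging index */

    static void stage_request(int req)
    {
        sring.ring[req_prod_pvt % RING_SIZE] = req;
        req_prod_pvt++;                 /* not yet visible to the backend */
    }

    static int push_requests(void)      /* returns 1 if a notify is needed */
    {
        unsigned int old = sring.req_prod;

        /* A real frontend issues a write barrier here so the slots are
         * globally visible before the producer index moves. */
        sring.req_prod = req_prod_pvt;
        /* Notify only if the consumer's event threshold was crossed. */
        return (unsigned int)(req_prod_pvt - sring.req_event) <
               (unsigned int)(req_prod_pvt - old);
    }

    int main(void)
    {
        sring.req_event = 1;            /* backend wants a kick at once */
        stage_request(42);
        if (push_requests())
            printf("notify backend\n"); /* notify_remote_via_irq() stand-in */
        return 0;
    }
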
917 struct blkfront_ring_info *rinfo = NULL; in blkif_queue_rq() local
919 rinfo = get_rinfo(info, qid); in blkif_queue_rq()
921 spin_lock_irqsave(&rinfo->ring_lock, flags); in blkif_queue_rq()
922 if (RING_FULL(&rinfo->ring)) in blkif_queue_rq()
925 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info)) in blkif_queue_rq()
928 if (blkif_queue_request(qd->rq, rinfo)) in blkif_queue_rq()
931 flush_requests(rinfo); in blkif_queue_rq()
932 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
936 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
941 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
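
blkif_queue_rq() brackets the whole submit path in the per-ring lock: if the ring is full it bails out so blk-mq can stop the hardware queue and retry later, otherwise it queues and flushes while still holding the lock. A shape-only sketch, with a pthread mutex standing in for the ring spinlock:

    #include <pthread.h>
    #include <stdio.h>

    #define RING_SIZE 8

    enum { QUEUE_OK, QUEUE_BUSY };

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
    static int ring[RING_SIZE];
    static unsigned int req_prod_pvt, rsp_cons;

    static int queue_rq(int req)
    {
        int ret = QUEUE_OK;

        pthread_mutex_lock(&ring_lock);
        if (req_prod_pvt - rsp_cons >= RING_SIZE) {
            /* RING_FULL(): report busy; blk-mq stops the hw queue */
            ret = QUEUE_BUSY;
            goto out;
        }
        ring[req_prod_pvt++ % RING_SIZE] = req;
        /* flush_requests() runs here, still under the ring lock */
    out:
        pthread_mutex_unlock(&ring_lock);
        return ret;
    }

    int main(void)
    {
        for (int i = 0; i < RING_SIZE + 1; i++)
            if (queue_rq(i) == QUEUE_BUSY)
                printf("ring full at request %d\n", i);
        return 0;
    }
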
1218 struct blkfront_ring_info *rinfo; in xlvbd_release_gendisk() local
1226 for_each_rinfo(info, rinfo, i) { in xlvbd_release_gendisk()
1228 gnttab_cancel_free_callback(&rinfo->callback); in xlvbd_release_gendisk()
1231 flush_work(&rinfo->work); in xlvbd_release_gendisk()
1249 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo) in kick_pending_request_queues_locked() argument
1251 if (!RING_FULL(&rinfo->ring)) in kick_pending_request_queues_locked()
1252 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true); in kick_pending_request_queues_locked()
1255 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo) in kick_pending_request_queues() argument
1259 spin_lock_irqsave(&rinfo->ring_lock, flags); in kick_pending_request_queues()
1260 kick_pending_request_queues_locked(rinfo); in kick_pending_request_queues()
1261 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in kick_pending_request_queues()
1266 struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work); in blkif_restart_queue() local
1268 if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED) in blkif_restart_queue()
1269 kick_pending_request_queues(rinfo); in blkif_restart_queue()
1272 static void blkif_free_ring(struct blkfront_ring_info *rinfo) in blkif_free_ring() argument
1275 struct blkfront_info *info = rinfo->dev_info; in blkif_free_ring()
1282 if (!list_empty(&rinfo->indirect_pages)) { in blkif_free_ring()
1286 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { in blkif_free_ring()
1293 if (!list_empty(&rinfo->grants)) { in blkif_free_ring()
1295 &rinfo->grants, node) { in blkif_free_ring()
1300 rinfo->persistent_gnts_c--; in blkif_free_ring()
1307 BUG_ON(rinfo->persistent_gnts_c != 0); in blkif_free_ring()
1314 if (!rinfo->shadow[i].request) in blkif_free_ring()
1317 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ? in blkif_free_ring()
1318 rinfo->shadow[i].req.u.indirect.nr_segments : in blkif_free_ring()
1319 rinfo->shadow[i].req.u.rw.nr_segments; in blkif_free_ring()
1321 persistent_gnt = rinfo->shadow[i].grants_used[j]; in blkif_free_ring()
1328 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT) in blkif_free_ring()
1336 persistent_gnt = rinfo->shadow[i].indirect_grants[j]; in blkif_free_ring()
1343 kvfree(rinfo->shadow[i].grants_used); in blkif_free_ring()
1344 rinfo->shadow[i].grants_used = NULL; in blkif_free_ring()
1345 kvfree(rinfo->shadow[i].indirect_grants); in blkif_free_ring()
1346 rinfo->shadow[i].indirect_grants = NULL; in blkif_free_ring()
1347 kvfree(rinfo->shadow[i].sg); in blkif_free_ring()
1348 rinfo->shadow[i].sg = NULL; in blkif_free_ring()
1352 gnttab_cancel_free_callback(&rinfo->callback); in blkif_free_ring()
1355 flush_work(&rinfo->work); in blkif_free_ring()
1359 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) { in blkif_free_ring()
1360 gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0); in blkif_free_ring()
1361 rinfo->ring_ref[i] = GRANT_INVALID_REF; in blkif_free_ring()
1364 free_pages_exact(rinfo->ring.sring, in blkif_free_ring()
1366 rinfo->ring.sring = NULL; in blkif_free_ring()
1368 if (rinfo->irq) in blkif_free_ring()
1369 unbind_from_irqhandler(rinfo->irq, rinfo); in blkif_free_ring()
1370 rinfo->evtchn = rinfo->irq = 0; in blkif_free_ring()
1376 struct blkfront_ring_info *rinfo; in blkif_free() local
1385 for_each_rinfo(info, rinfo, i) in blkif_free()
1386 blkif_free_ring(rinfo); in blkif_free()
1388 kvfree(info->rinfo); in blkif_free()
1389 info->rinfo = NULL; in blkif_free()
1456 struct blkfront_ring_info *rinfo, in blkif_completion() argument
1462 struct blkfront_info *info = rinfo->dev_info; in blkif_completion()
1463 struct blk_shadow *s = &rinfo->shadow[*id]; in blkif_completion()
1473 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id]; in blkif_completion()
1505 if (add_id_to_freelist(rinfo, s->associated_id)) in blkif_completion()
1543 list_add(&s->grants_used[i]->node, &rinfo->grants); in blkif_completion()
1544 rinfo->persistent_gnts_c++; in blkif_completion()
1552 list_add_tail(&s->grants_used[i]->node, &rinfo->grants); in blkif_completion()
1563 list_add(&s->indirect_grants[i]->node, &rinfo->grants); in blkif_completion()
1564 rinfo->persistent_gnts_c++; in blkif_completion()
1574 list_add(&indirect_page->lru, &rinfo->indirect_pages); in blkif_completion()
1577 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants); in blkif_completion()
1591 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id; in blkif_interrupt() local
1592 struct blkfront_info *info = rinfo->dev_info; in blkif_interrupt()
1600 spin_lock_irqsave(&rinfo->ring_lock, flags); in blkif_interrupt()
1602 rp = READ_ONCE(rinfo->ring.sring->rsp_prod); in blkif_interrupt()
1604 if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) { in blkif_interrupt()
1606 info->gd->disk_name, rp - rinfo->ring.rsp_cons); in blkif_interrupt()
1610 for (i = rinfo->ring.rsp_cons; i != rp; i++) { in blkif_interrupt()
1616 RING_COPY_RESPONSE(&rinfo->ring, i, &bret); in blkif_interrupt()
1629 if (rinfo->shadow[id].status != REQ_WAITING) { in blkif_interrupt()
1635 rinfo->shadow[id].status = REQ_PROCESSING; in blkif_interrupt()
1636 req = rinfo->shadow[id].request; in blkif_interrupt()
1638 op = rinfo->shadow[id].req.operation; in blkif_interrupt()
1640 op = rinfo->shadow[id].req.u.indirect.indirect_op; in blkif_interrupt()
1654 ret = blkif_completion(&id, rinfo, &bret); in blkif_interrupt()
1661 if (add_id_to_freelist(rinfo, id)) { in blkif_interrupt()
1694 rinfo->shadow[id].req.u.rw.nr_segments == 0)) { in blkif_interrupt()
1723 rinfo->ring.rsp_cons = i; in blkif_interrupt()
1725 if (i != rinfo->ring.req_prod_pvt) { in blkif_interrupt()
1727 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do); in blkif_interrupt()
1731 rinfo->ring.sring->rsp_event = i + 1; in blkif_interrupt()
1733 kick_pending_request_queues_locked(rinfo); in blkif_interrupt()
1735 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_interrupt()
1744 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_interrupt()
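
blkif_interrupt() is the matching consumer half: snapshot the producer index once, consume responses from rsp_cons up to it, then set rsp_event just past the consumed index and re-check so a response published in the race window is not lost. A sketch of that loop shape (memory barriers elided; the kernel also validates each response's id and shadow state, lines 1629-1636):

    #include <stdio.h>

    #define RING_SIZE 8

    struct sring {
        unsigned int rsp_prod;      /* written by the backend */
        unsigned int rsp_event;     /* backend notifies when passing this */
        int ring[RING_SIZE];
    };

    static struct sring sring;
    static unsigned int rsp_cons;   /* frontend-private consumer index */

    static void consume_responses(void)
    {
        unsigned int rp, i;
        int more;

        do {
            rp = sring.rsp_prod;    /* READ_ONCE() plus rmb() in the kernel */
            for (i = rsp_cons; i != rp; i++)
                printf("response %d\n", sring.ring[i % RING_SIZE]);
            rsp_cons = i;

            /* Ask for an event past what we consumed, then close the
             * race: anything published meanwhile gets another pass. */
            sring.rsp_event = i + 1;
            more = (sring.rsp_prod != i);
        } while (more);
    }

    int main(void)
    {
        sring.ring[0] = 7;
        sring.rsp_prod = 1;
        consume_responses();
        return 0;
    }
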
1754 struct blkfront_ring_info *rinfo) in setup_blkring() argument
1758 struct blkfront_info *info = rinfo->dev_info; in setup_blkring()
1763 rinfo->ring_ref[i] = GRANT_INVALID_REF; in setup_blkring()
1771 FRONT_RING_INIT(&rinfo->ring, sring, ring_size); in setup_blkring()
1773 err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref); in setup_blkring()
1776 rinfo->ring.sring = NULL; in setup_blkring()
1780 rinfo->ring_ref[i] = gref[i]; in setup_blkring()
1782 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn); in setup_blkring()
1786 err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt, in setup_blkring()
1787 0, "blkif", rinfo); in setup_blkring()
1793 rinfo->irq = err; in setup_blkring()
1806 struct blkfront_ring_info *rinfo, const char *dir) in write_per_ring_nodes() argument
1811 struct blkfront_info *info = rinfo->dev_info; in write_per_ring_nodes()
1814 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]); in write_per_ring_nodes()
1825 "%u", rinfo->ring_ref[i]); in write_per_ring_nodes()
1833 err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn); in write_per_ring_nodes()
1870 struct blkfront_ring_info *rinfo; in talk_to_blkback() local
1888 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1890 err = setup_blkring(dev, rinfo); in talk_to_blkback()
1913 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename); in talk_to_blkback()
1935 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1938 err = write_per_ring_nodes(xbt, rinfo, path); in talk_to_blkback()
1967 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1971 rinfo->shadow[j].req.u.rw.id = j + 1; in talk_to_blkback()
1972 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; in talk_to_blkback()
1998 struct blkfront_ring_info *rinfo; in negotiate_mq() local
2010 info->rinfo_size = struct_size(info->rinfo, shadow, in negotiate_mq()
2012 info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL); in negotiate_mq()
2013 if (!info->rinfo) { in negotiate_mq()
2019 for_each_rinfo(info, rinfo, i) { in negotiate_mq()
2020 INIT_LIST_HEAD(&rinfo->indirect_pages); in negotiate_mq()
2021 INIT_LIST_HEAD(&rinfo->grants); in negotiate_mq()
2022 rinfo->dev_info = info; in negotiate_mq()
2023 INIT_WORK(&rinfo->work, blkif_restart_queue); in negotiate_mq()
2024 spin_lock_init(&rinfo->ring_lock); in negotiate_mq()
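
negotiate_mq() sizes each blkfront_ring_info at runtime, with struct_size() accounting for the flexible shadow[] array, and kvcalloc() lays out nr_rings such records back to back; get_rinfo() (line 283 above) therefore indexes by byte offset rather than by pointer arithmetic on the struct type. A standalone sketch of this variable-stride array, with hypothetical field names:

    #include <stdio.h>
    #include <stdlib.h>

    struct shadow { unsigned long id; };

    struct ring_info {
        int ring_no;
        struct shadow shadow[];     /* flexible array, sized at runtime */
    };

    int main(void)
    {
        unsigned int nr_rings = 4, ring_size = 32;

        /* struct_size(info->rinfo, shadow, ring_size) in the kernel */
        size_t stride = sizeof(struct ring_info)
                      + ring_size * sizeof(struct shadow);
        void *base = calloc(nr_rings, stride);
        if (!base)
            return 1;

        for (unsigned int i = 0; i < nr_rings; i++) {
            /* get_rinfo(): index by byte offset, since the element size
             * is not the (fixed) size of the struct type */
            struct ring_info *r = (void *)((char *)base + i * stride);
            r->ring_no = i;
        }

        struct ring_info *r2 = (void *)((char *)base + 2 * stride);
        printf("ring 2 is %d\n", r2->ring_no);
        free(base);
        return 0;
    }
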
2113 struct blkfront_ring_info *rinfo; in blkif_recover() local
2121 for_each_rinfo(info, rinfo, r_index) { in blkif_recover()
2122 rc = blkfront_setup_indirect(rinfo); in blkif_recover()
2131 for_each_rinfo(info, rinfo, r_index) { in blkif_recover()
2133 kick_pending_request_queues(rinfo); in blkif_recover()
2164 struct blkfront_ring_info *rinfo; in blkfront_resume() local
2170 for_each_rinfo(info, rinfo, i) { in blkfront_resume()
2172 struct blk_shadow *shadow = rinfo->shadow; in blkfront_resume()
2269 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) in blkfront_setup_indirect() argument
2273 struct blkfront_info *info = rinfo->dev_info; in blkfront_setup_indirect()
2293 err = fill_grant_buffer(rinfo, in blkfront_setup_indirect()
2306 BUG_ON(!list_empty(&rinfo->indirect_pages)); in blkfront_setup_indirect()
2312 list_add(&indirect_page->lru, &rinfo->indirect_pages); in blkfront_setup_indirect()
2317 rinfo->shadow[i].grants_used = in blkfront_setup_indirect()
2319 sizeof(rinfo->shadow[i].grants_used[0]), in blkfront_setup_indirect()
2321 rinfo->shadow[i].sg = kvcalloc(psegs, in blkfront_setup_indirect()
2322 sizeof(rinfo->shadow[i].sg[0]), in blkfront_setup_indirect()
2325 rinfo->shadow[i].indirect_grants = in blkfront_setup_indirect()
2327 sizeof(rinfo->shadow[i].indirect_grants[0]), in blkfront_setup_indirect()
2329 if ((rinfo->shadow[i].grants_used == NULL) || in blkfront_setup_indirect()
2330 (rinfo->shadow[i].sg == NULL) || in blkfront_setup_indirect()
2332 (rinfo->shadow[i].indirect_grants == NULL))) in blkfront_setup_indirect()
2334 sg_init_table(rinfo->shadow[i].sg, psegs); in blkfront_setup_indirect()
2343 kvfree(rinfo->shadow[i].grants_used); in blkfront_setup_indirect()
2344 rinfo->shadow[i].grants_used = NULL; in blkfront_setup_indirect()
2345 kvfree(rinfo->shadow[i].sg); in blkfront_setup_indirect()
2346 rinfo->shadow[i].sg = NULL; in blkfront_setup_indirect()
2347 kvfree(rinfo->shadow[i].indirect_grants); in blkfront_setup_indirect()
2348 rinfo->shadow[i].indirect_grants = NULL; in blkfront_setup_indirect()
2350 if (!list_empty(&rinfo->indirect_pages)) { in blkfront_setup_indirect()
2352 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { in blkfront_setup_indirect()
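
blkfront_setup_indirect() follows the allocate-everything-or-unwind-everything discipline: three kvcalloc()'d arrays per shadow entry, and on any failure every entry touched so far is freed and NULLed so the teardown is safe to repeat. A generic sketch of the pattern:

    #include <stdlib.h>

    struct slot { void *a, *b, *c; };

    static void free_slot(struct slot *s)
    {
        free(s->a); s->a = NULL;    /* kvfree() then NULL in the kernel: */
        free(s->b); s->b = NULL;    /* NULLing makes cleanup idempotent */
        free(s->c); s->c = NULL;
    }

    static int setup_slots(struct slot *slots, int n, size_t segs)
    {
        for (int i = 0; i < n; i++) {
            slots[i].a = calloc(segs, sizeof(long));
            slots[i].b = calloc(segs, sizeof(long));
            slots[i].c = calloc(segs, sizeof(long));
            if (!slots[i].a || !slots[i].b || !slots[i].c)
                goto out_of_memory;
        }
        return 0;

    out_of_memory:
        /* unwind every slot touched so far, including the partial one */
        for (int i = 0; i < n; i++)
            free_slot(&slots[i]);
        return -1;
    }

    int main(void)
    {
        static struct slot slots[4];
        return setup_slots(slots, 4, 16) ? 1 : 0;
    }
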
2431 struct blkfront_ring_info *rinfo; in blkfront_connect() local
2486 for_each_rinfo(info, rinfo, i) { in blkfront_connect()
2487 err = blkfront_setup_indirect(rinfo); in blkfront_connect()
2508 for_each_rinfo(info, rinfo, i) in blkfront_connect()
2509 kick_pending_request_queues(rinfo); in blkfront_connect()
2744 struct blkfront_ring_info *rinfo; in purge_persistent_grants() local
2746 for_each_rinfo(info, rinfo, i) { in purge_persistent_grants()
2749 spin_lock_irqsave(&rinfo->ring_lock, flags); in purge_persistent_grants()
2751 if (rinfo->persistent_gnts_c == 0) { in purge_persistent_grants()
2752 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in purge_persistent_grants()
2756 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, in purge_persistent_grants()
2763 rinfo->persistent_gnts_c--; in purge_persistent_grants()
2765 list_add_tail(&gnt_list_entry->node, &rinfo->grants); in purge_persistent_grants()
2768 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in purge_persistent_grants()
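
purge_persistent_grants() walks each ring's grant cache under the ring lock, skips rings with nothing mapped, revokes the mappings of idle cached grants, and re-queues them as plain grants. A sketch under the same assumptions as the earlier grant-cache sketch; the mutex stands in for the ring spinlock and the revocation is a stub:

    #include <pthread.h>
    #include <stdio.h>

    #define GREF_INVALID (-1)

    struct grant { int gref; struct grant *next; };

    static struct grant *cache;
    static int persistent_cnt;
    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

    static void purge_persistent_grants(void)
    {
        pthread_mutex_lock(&ring_lock);
        if (persistent_cnt == 0) {          /* nothing mapped: skip ring */
            pthread_mutex_unlock(&ring_lock);
            return;
        }
        for (struct grant *g = cache; g; g = g->next) {
            if (g->gref == GREF_INVALID)
                continue;                   /* never granted: skip */
            /* gnttab_end_foreign_access() in the kernel; the entry is
             * then moved to the tail so live persistent grants stay up
             * front in the cache */
            g->gref = GREF_INVALID;
            persistent_cnt--;
        }
        pthread_mutex_unlock(&ring_lock);
    }

    int main(void)
    {
        struct grant a = { 3, NULL }, b = { GREF_INVALID, &a };

        cache = &b;
        persistent_cnt = 1;
        purge_persistent_grants();
        printf("persistent grants left: %d\n", persistent_cnt);
        return 0;
    }
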