Lines Matching refs:rinfo (identifier cross-reference through the Xen blkfront driver, drivers/block/xen-blkfront.c; each entry gives the source line number, the matching code, and either the enclosing function or the declaration's role: member, argument, local)

227 	struct blkfront_ring_info *rinfo;  member
270 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
275 for ((ptr) = (info)->rinfo, (idx) = 0; \
283 return (void *)info->rinfo + i * info->rinfo_size; in get_rinfo()
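
The get_rinfo() helper above (line 283) indexes info->rinfo by byte offset instead of ordinary array subscripting: each blkfront_ring_info ends in a flexible shadow[] array sized by the ring, so the element stride (info->rinfo_size) is only known at runtime. A minimal userspace sketch of the same technique, with illustrative struct names and sizes (the kernel line relies on GNU void-pointer arithmetic; portable C casts through char *):

    #include <stdio.h>
    #include <stdlib.h>

    struct ring_info {              /* stand-in for blkfront_ring_info */
        int id;
        long shadow[];              /* flexible array sized at runtime */
    };

    int main(void)
    {
        size_t ring_size = 32;      /* runtime value, cf. BLK_RING_SIZE() */
        size_t elem_size = sizeof(struct ring_info)
                           + ring_size * sizeof(long);
        size_t nr_rings = 4;
        char *base = calloc(nr_rings, elem_size);  /* cf. kvcalloc() */

        if (!base)
            return 1;
        for (size_t i = 0; i < nr_rings; i++) {
            /* same arithmetic as get_rinfo(): base + i * element size */
            struct ring_info *r = (struct ring_info *)(base + i * elem_size);
            r->id = (int)i;
        }
        printf("ring 2 has id %d\n",
               ((struct ring_info *)(base + 2 * elem_size))->id);
        free(base);
        return 0;
    }
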
286 static int get_id_from_freelist(struct blkfront_ring_info *rinfo) in get_id_from_freelist() argument
288 unsigned long free = rinfo->shadow_free; in get_id_from_freelist()
290 BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info)); in get_id_from_freelist()
291 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id; in get_id_from_freelist()
292 rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ in get_id_from_freelist()
296 static int add_id_to_freelist(struct blkfront_ring_info *rinfo, in add_id_to_freelist() argument
299 if (rinfo->shadow[id].req.u.rw.id != id) in add_id_to_freelist()
301 if (rinfo->shadow[id].request == NULL) in add_id_to_freelist()
303 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free; in add_id_to_freelist()
304 rinfo->shadow[id].request = NULL; in add_id_to_freelist()
305 rinfo->shadow_free = id; in add_id_to_freelist()
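
get_id_from_freelist() and add_id_to_freelist() (lines 286-305) keep the free shadow slots on an intrusive list threaded through the very field the ring request will later carry: while a slot is free, req.u.rw.id holds the index of the next free slot; in flight, it holds the slot's own index; 0x0fffffee temporarily poisons it to catch misuse. talk_to_blkback() (lines 1878-1879) seeds the chain as j + 1 with an out-of-range sentinel on the last entry. A self-contained sketch of the scheme, with illustrative names:

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 8
    #define POISON_ID 0x0fffffeeUL

    struct shadow {
        unsigned long id;           /* cf. shadow[i].req.u.rw.id */
        void *request;
    };

    static struct shadow shadow[RING_SIZE];
    static unsigned long shadow_free;

    static void freelist_init(void)
    {
        /* the driver seeds the last entry with 0x0fffffff; any
         * out-of-range value trips the exhaustion check below */
        for (unsigned long j = 0; j < RING_SIZE; j++)
            shadow[j].id = j + 1;
        shadow_free = 0;
    }

    static unsigned long take_id(void)      /* cf. get_id_from_freelist() */
    {
        unsigned long free = shadow_free;

        assert(free < RING_SIZE);           /* cf. BUG_ON(free >= ...) */
        shadow_free = shadow[free].id;      /* next free index lives in
                                             * the id field itself */
        shadow[free].id = POISON_ID;        /* debug poison */
        return free;
    }

    static int put_id(unsigned long id)     /* cf. add_id_to_freelist() */
    {
        if (shadow[id].id != id)            /* in flight means id == index */
            return -1;
        if (shadow[id].request == NULL)
            return -1;
        shadow[id].id = shadow_free;        /* relink at the head */
        shadow[id].request = NULL;
        shadow_free = id;
        return 0;
    }

    int main(void)
    {
        int dummy;
        unsigned long id;

        freelist_init();
        id = take_id();
        shadow[id].request = &dummy;        /* cf. blkif_ring_get_request() */
        shadow[id].id = id;                 /* in-flight marker */
        printf("took %lu, put -> %d\n", id, put_id(id));
        return 0;
    }
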
309 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) in fill_grant_buffer() argument
311 struct blkfront_info *info = rinfo->dev_info; in fill_grant_buffer()
331 list_add(&gnt_list_entry->node, &rinfo->grants); in fill_grant_buffer()
339 &rinfo->grants, node) { in fill_grant_buffer()
350 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo) in get_free_grant() argument
354 BUG_ON(list_empty(&rinfo->grants)); in get_free_grant()
355 gnt_list_entry = list_first_entry(&rinfo->grants, struct grant, in get_free_grant()
360 rinfo->persistent_gnts_c--; in get_free_grant()
376 struct blkfront_ring_info *rinfo) in get_grant() argument
378 struct grant *gnt_list_entry = get_free_grant(rinfo); in get_grant()
379 struct blkfront_info *info = rinfo->dev_info; in get_grant()
400 struct blkfront_ring_info *rinfo) in get_indirect_grant() argument
402 struct grant *gnt_list_entry = get_free_grant(rinfo); in get_indirect_grant()
403 struct blkfront_info *info = rinfo->dev_info; in get_indirect_grant()
415 BUG_ON(list_empty(&rinfo->indirect_pages)); in get_indirect_grant()
416 indirect_page = list_first_entry(&rinfo->indirect_pages, in get_indirect_grant()
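
fill_grant_buffer() pre-populates rinfo->grants with empty entries, and get_free_grant() pops the head; a popped entry that still carries a valid grant reference is a cached persistent grant, so persistent_gnts_c drops when it is taken (blkif_completion(), lines 1470-1491, re-increments it when grants are returned). A sketch of that cache using a plain singly linked list in place of the kernel's list_head; GREF_INVALID is an illustrative sentinel:

    #include <stdio.h>
    #include <stdlib.h>

    #define GREF_INVALID 0u         /* illustrative "no grant ref" marker */

    struct grant {                  /* stand-in for the driver's struct grant */
        unsigned int gref;
        struct grant *next;
    };

    static struct grant *grants;    /* cf. rinfo->grants */
    static unsigned int persistent_gnts_c;

    static int fill_grant_buffer(int num)     /* cf. fill_grant_buffer() */
    {
        while (num--) {
            struct grant *g = calloc(1, sizeof(*g));

            if (!g)
                return -1;          /* the driver unwinds the list here */
            g->gref = GREF_INVALID;
            g->next = grants;       /* cf. list_add(): push at the head */
            grants = g;
        }
        return 0;
    }

    static struct grant *get_free_grant(void) /* cf. get_free_grant() */
    {
        struct grant *g = grants;   /* the driver BUG()s on an empty list */

        grants = g->next;
        if (g->gref != GREF_INVALID)
            persistent_gnts_c--;    /* reusing a cached persistent grant */
        return g;
    }

    int main(void)
    {
        struct grant *g;

        if (fill_grant_buffer(4))
            return 1;
        g = get_free_grant();       /* fresh entry, no gref attached yet */
        g->gref = 42;               /* pretend it was granted persistently */
        g->next = grants;           /* returned for reuse, cf. completion */
        grants = g;
        persistent_gnts_c++;
        g = get_free_grant();       /* reuse drops the counter again */
        printf("cached persistent grants: %u\n", persistent_gnts_c);
        return 0;
    }
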
492 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg; in blkif_restart_queue_callback() local
493 schedule_work(&rinfo->work); in blkif_restart_queue_callback()
533 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo, in blkif_ring_get_request() argument
539 *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt); in blkif_ring_get_request()
540 rinfo->ring.req_prod_pvt++; in blkif_ring_get_request()
542 id = get_id_from_freelist(rinfo); in blkif_ring_get_request()
543 rinfo->shadow[id].request = req; in blkif_ring_get_request()
544 rinfo->shadow[id].status = REQ_PROCESSING; in blkif_ring_get_request()
545 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID; in blkif_ring_get_request()
547 rinfo->shadow[id].req.u.rw.id = id; in blkif_ring_get_request()
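
blkif_ring_get_request() (lines 533-547) binds two allocations together: the next private producer slot in the shared ring (req_prod_pvt is not yet visible to the backend) and a shadow slot off the free list, stamping the shadow index into the request's id so the eventual response can find the local bookkeeping. A compact standalone sketch of the pairing, with a trivial id allocator standing in for the free list and illustrative types throughout:

    #include <stdio.h>

    #define RING_SIZE 8             /* power of two, cf. BLK_RING_SIZE() */

    enum { REQ_PROCESSING, REQ_WAITING };

    struct req { unsigned long id; };
    struct shadow { unsigned long id; void *request; int status; };

    static struct req ring[RING_SIZE];
    static unsigned int req_prod_pvt;   /* private producer index */
    static struct shadow shadow[RING_SIZE];
    static unsigned long next_free;     /* stand-in for the free list */

    static unsigned long ring_get_request(void *blk_req, struct req **ring_req)
    {
        unsigned long id;

        /* claim the next ring slot; indices wrap modulo the ring size */
        *ring_req = &ring[req_prod_pvt++ & (RING_SIZE - 1)];

        id = next_free++ & (RING_SIZE - 1);
        shadow[id].request = blk_req;
        shadow[id].status = REQ_PROCESSING; /* flipped to REQ_WAITING only
                                             * once the request is built */
        shadow[id].id = id;                 /* cf. req.u.rw.id = id */
        (*ring_req)->id = id;
        return id;
    }

    int main(void)
    {
        int blk_req;
        struct req *r;
        unsigned long id = ring_get_request(&blk_req, &r);

        printf("ring slot %u carries shadow id %lu\n", req_prod_pvt - 1, id);
        return 0;
    }
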
552 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_discard_req() argument
554 struct blkfront_info *info = rinfo->dev_info; in blkif_queue_discard_req()
559 id = blkif_ring_get_request(rinfo, req, &final_ring_req); in blkif_queue_discard_req()
560 ring_req = &rinfo->shadow[id].req; in blkif_queue_discard_req()
573 rinfo->shadow[id].status = REQ_WAITING; in blkif_queue_discard_req()
581 struct blkfront_ring_info *rinfo; member
604 struct blkfront_ring_info *rinfo = setup->rinfo; in blkif_setup_rw_req_grant() local
611 struct blk_shadow *shadow = &rinfo->shadow[setup->id]; in blkif_setup_rw_req_grant()
629 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo); in blkif_setup_rw_req_grant()
635 gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo); in blkif_setup_rw_req_grant()
702 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_rw_req() argument
704 struct blkfront_info *info = rinfo->dev_info; in blkif_queue_rw_req()
713 .rinfo = rinfo, in blkif_queue_rw_req()
735 if (rinfo->persistent_gnts_c < max_grefs) { in blkif_queue_rw_req()
739 max_grefs - rinfo->persistent_gnts_c, in blkif_queue_rw_req()
742 &rinfo->callback, in blkif_queue_rw_req()
744 rinfo, in blkif_queue_rw_req()
745 max_grefs - rinfo->persistent_gnts_c); in blkif_queue_rw_req()
751 id = blkif_ring_get_request(rinfo, req, &final_ring_req); in blkif_queue_rw_req()
752 ring_req = &rinfo->shadow[id].req; in blkif_queue_rw_req()
754 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg); in blkif_queue_rw_req()
757 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) in blkif_queue_rw_req()
764 rinfo->shadow[id].num_sg = num_sg; in blkif_queue_rw_req()
802 extra_id = blkif_ring_get_request(rinfo, req, in blkif_queue_rw_req()
804 extra_ring_req = &rinfo->shadow[extra_id].req; in blkif_queue_rw_req()
810 rinfo->shadow[extra_id].num_sg = 0; in blkif_queue_rw_req()
815 rinfo->shadow[extra_id].associated_id = id; in blkif_queue_rw_req()
816 rinfo->shadow[id].associated_id = extra_id; in blkif_queue_rw_req()
827 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) { in blkif_queue_rw_req()
849 rinfo->shadow[id].status = REQ_WAITING; in blkif_queue_rw_req()
852 rinfo->shadow[extra_id].status = REQ_WAITING; in blkif_queue_rw_req()
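
When one block-layer request needs more segments than a single ring request can describe, blkif_queue_rw_req() takes an extra ring slot (lines 802-816) and cross-links the two shadow entries via associated_id; blkif_completion() (lines 1383-1432) then treats the pair as a unit and only releases the partner id once both halves have completed. A self-contained sketch of just that two-phase bookkeeping; the real completion path also merges grant state between the halves, and the value of NO_ASSOCIATED_ID is an assumption here:

    #include <stdio.h>

    #define RING_SIZE 8
    #define NO_ASSOCIATED_ID (~0UL) /* assumed sentinel, not in the listing */

    enum { REQ_PROCESSING, REQ_WAITING, REQ_DONE };

    struct shadow {
        unsigned long associated_id;
        int status;
    };

    static struct shadow shadow[RING_SIZE];

    static void link_pair(unsigned long id, unsigned long extra_id)
    {
        shadow[extra_id].associated_id = id;  /* cf. lines 815-816 */
        shadow[id].associated_id = extra_id;
        shadow[id].status = REQ_WAITING;
        shadow[extra_id].status = REQ_WAITING;
    }

    /* returns 1 only when the whole (possibly split) request is finished */
    static int complete_one(unsigned long id)
    {
        shadow[id].status = REQ_DONE;
        if (shadow[id].associated_id == NO_ASSOCIATED_ID)
            return 1;               /* unsplit request: done immediately */
        return shadow[shadow[id].associated_id].status == REQ_DONE;
    }

    int main(void)
    {
        link_pair(0, 1);
        printf("%d %d\n", complete_one(0), complete_one(1)); /* 0 then 1 */
        return 0;
    }
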
867 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo) in blkif_queue_request() argument
869 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED)) in blkif_queue_request()
874 return blkif_queue_discard_req(req, rinfo); in blkif_queue_request()
876 return blkif_queue_rw_req(req, rinfo); in blkif_queue_request()
879 static inline void flush_requests(struct blkfront_ring_info *rinfo) in flush_requests() argument
883 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify); in flush_requests()
886 notify_remote_via_irq(rinfo->irq); in flush_requests()
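
flush_requests() publishes the privately built requests and raises the event-channel interrupt only if the backend asked for one: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY tests whether the backend's req_event threshold fell inside the window of newly published requests. A sketch paraphrasing that macro from Xen's io/ring.h, with the memory barriers reduced to comments and plain variables in place of the shared ring:

    typedef unsigned int RING_IDX;

    struct sring_sketch {
        RING_IDX req_prod;      /* producer index the backend can see */
        RING_IDX req_event;     /* backend: "notify me at/after this" */
    };

    /* returns nonzero when notify_remote_via_irq() should be called */
    static int push_requests_and_check_notify(struct sring_sketch *sring,
                                              RING_IDX req_prod_pvt)
    {
        RING_IDX old = sring->req_prod;
        RING_IDX new = req_prod_pvt;

        /* wmb(): request contents must be visible before the index */
        sring->req_prod = new;
        /* mb(): index update must be visible before reading req_event */

        /* unsigned wraparound keeps this correct across index overflow:
         * notify iff req_event lies in the (old, new] window */
        return (RING_IDX)(new - sring->req_event) < (RING_IDX)(new - old);
    }

    int main(void)
    {
        struct sring_sketch s = { .req_prod = 0, .req_event = 1 };

        /* publishing requests 1..3 crosses the threshold: notify (exit 0) */
        return !push_requests_and_check_notify(&s, 3);
    }
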
905 struct blkfront_ring_info *rinfo = NULL; in blkif_queue_rq() local
907 rinfo = get_rinfo(info, qid); in blkif_queue_rq()
909 spin_lock_irqsave(&rinfo->ring_lock, flags); in blkif_queue_rq()
910 if (RING_FULL(&rinfo->ring)) in blkif_queue_rq()
913 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info)) in blkif_queue_rq()
916 if (blkif_queue_request(qd->rq, rinfo)) in blkif_queue_rq()
919 flush_requests(rinfo); in blkif_queue_rq()
920 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
924 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
929 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_queue_rq()
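
blkif_queue_rq() (lines 905-929) shows the driver's back-pressure pattern: take the per-ring lock, bail out if the ring is full or the request is an invalid flush, otherwise queue and flush; the jump targets at lines 924 and 929 are the stop-queue and busy paths, and kick_pending_request_queues_locked() below restarts stopped queues once RING_FULL() clears. A sketch of the stop/kick dance with a counter standing in for the ring and a flag for blk-mq queue state (locking omitted; all names illustrative):

    #include <stdio.h>

    #define RING_SLOTS 4

    static int in_flight;       /* requests occupying ring slots */
    static int queue_stopped;   /* cf. blk_mq_stop_hw_queue() state */

    static int ring_full(void) { return in_flight == RING_SLOTS; }

    static int queue_rq(void)   /* cf. blkif_queue_rq() */
    {
        if (ring_full()) {
            queue_stopped = 1;  /* tell blk-mq to hold further requests */
            return -1;
        }
        in_flight++;            /* queue + flush_requests() */
        return 0;
    }

    static void complete_rq(void) /* cf. kick_pending_request_queues_locked() */
    {
        in_flight--;
        if (!ring_full() && queue_stopped)
            queue_stopped = 0;  /* blk_mq_start_stopped_hw_queues() */
    }

    int main(void)
    {
        for (int i = 0; i < RING_SLOTS + 1; i++)
            if (queue_rq())
                printf("request %d deferred, queue stopped\n", i);
        complete_rq();          /* a completion frees a slot and restarts */
        printf("stopped=%d\n", queue_stopped);
        return 0;
    }
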
1183 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo) in kick_pending_request_queues_locked() argument
1185 if (!RING_FULL(&rinfo->ring)) in kick_pending_request_queues_locked()
1186 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true); in kick_pending_request_queues_locked()
1189 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo) in kick_pending_request_queues() argument
1193 spin_lock_irqsave(&rinfo->ring_lock, flags); in kick_pending_request_queues()
1194 kick_pending_request_queues_locked(rinfo); in kick_pending_request_queues()
1195 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in kick_pending_request_queues()
1200 struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work); in blkif_restart_queue() local
1202 if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED) in blkif_restart_queue()
1203 kick_pending_request_queues(rinfo); in blkif_restart_queue()
1206 static void blkif_free_ring(struct blkfront_ring_info *rinfo) in blkif_free_ring() argument
1209 struct blkfront_info *info = rinfo->dev_info; in blkif_free_ring()
1216 if (!list_empty(&rinfo->indirect_pages)) { in blkif_free_ring()
1220 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { in blkif_free_ring()
1227 if (!list_empty(&rinfo->grants)) { in blkif_free_ring()
1229 &rinfo->grants, node) { in blkif_free_ring()
1234 rinfo->persistent_gnts_c--; in blkif_free_ring()
1241 BUG_ON(rinfo->persistent_gnts_c != 0); in blkif_free_ring()
1248 if (!rinfo->shadow[i].request) in blkif_free_ring()
1251 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ? in blkif_free_ring()
1252 rinfo->shadow[i].req.u.indirect.nr_segments : in blkif_free_ring()
1253 rinfo->shadow[i].req.u.rw.nr_segments; in blkif_free_ring()
1255 persistent_gnt = rinfo->shadow[i].grants_used[j]; in blkif_free_ring()
1262 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT) in blkif_free_ring()
1270 persistent_gnt = rinfo->shadow[i].indirect_grants[j]; in blkif_free_ring()
1277 kvfree(rinfo->shadow[i].grants_used); in blkif_free_ring()
1278 rinfo->shadow[i].grants_used = NULL; in blkif_free_ring()
1279 kvfree(rinfo->shadow[i].indirect_grants); in blkif_free_ring()
1280 rinfo->shadow[i].indirect_grants = NULL; in blkif_free_ring()
1281 kvfree(rinfo->shadow[i].sg); in blkif_free_ring()
1282 rinfo->shadow[i].sg = NULL; in blkif_free_ring()
1286 gnttab_cancel_free_callback(&rinfo->callback); in blkif_free_ring()
1289 flush_work(&rinfo->work); in blkif_free_ring()
1292 xenbus_teardown_ring((void **)&rinfo->ring.sring, info->nr_ring_pages, in blkif_free_ring()
1293 rinfo->ring_ref); in blkif_free_ring()
1295 if (rinfo->irq) in blkif_free_ring()
1296 unbind_from_irqhandler(rinfo->irq, rinfo); in blkif_free_ring()
1297 rinfo->evtchn = rinfo->irq = 0; in blkif_free_ring()
1303 struct blkfront_ring_info *rinfo; in blkif_free() local
1312 for_each_rinfo(info, rinfo, i) in blkif_free()
1313 blkif_free_ring(rinfo); in blkif_free()
1315 kvfree(info->rinfo); in blkif_free()
1316 info->rinfo = NULL; in blkif_free()
1383 struct blkfront_ring_info *rinfo, in blkif_completion() argument
1389 struct blkfront_info *info = rinfo->dev_info; in blkif_completion()
1390 struct blk_shadow *s = &rinfo->shadow[*id]; in blkif_completion()
1400 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id]; in blkif_completion()
1432 if (add_id_to_freelist(rinfo, s->associated_id)) in blkif_completion()
1470 list_add(&s->grants_used[i]->node, &rinfo->grants); in blkif_completion()
1471 rinfo->persistent_gnts_c++; in blkif_completion()
1479 list_add_tail(&s->grants_used[i]->node, &rinfo->grants); in blkif_completion()
1490 list_add(&s->indirect_grants[i]->node, &rinfo->grants); in blkif_completion()
1491 rinfo->persistent_gnts_c++; in blkif_completion()
1501 list_add(&indirect_page->lru, &rinfo->indirect_pages); in blkif_completion()
1504 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants); in blkif_completion()
1518 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id; in blkif_interrupt() local
1519 struct blkfront_info *info = rinfo->dev_info; in blkif_interrupt()
1527 spin_lock_irqsave(&rinfo->ring_lock, flags); in blkif_interrupt()
1529 rp = READ_ONCE(rinfo->ring.sring->rsp_prod); in blkif_interrupt()
1531 if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) { in blkif_interrupt()
1533 info->gd->disk_name, rp - rinfo->ring.rsp_cons); in blkif_interrupt()
1537 for (i = rinfo->ring.rsp_cons; i != rp; i++) { in blkif_interrupt()
1543 RING_COPY_RESPONSE(&rinfo->ring, i, &bret); in blkif_interrupt()
1556 if (rinfo->shadow[id].status != REQ_WAITING) { in blkif_interrupt()
1562 rinfo->shadow[id].status = REQ_PROCESSING; in blkif_interrupt()
1563 req = rinfo->shadow[id].request; in blkif_interrupt()
1565 op = rinfo->shadow[id].req.operation; in blkif_interrupt()
1567 op = rinfo->shadow[id].req.u.indirect.indirect_op; in blkif_interrupt()
1581 ret = blkif_completion(&id, rinfo, &bret); in blkif_interrupt()
1588 if (add_id_to_freelist(rinfo, id)) { in blkif_interrupt()
1621 rinfo->shadow[id].req.u.rw.nr_segments == 0)) { in blkif_interrupt()
1650 rinfo->ring.rsp_cons = i; in blkif_interrupt()
1652 if (i != rinfo->ring.req_prod_pvt) { in blkif_interrupt()
1654 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do); in blkif_interrupt()
1658 rinfo->ring.sring->rsp_event = i + 1; in blkif_interrupt()
1660 kick_pending_request_queues_locked(rinfo); in blkif_interrupt()
1662 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_interrupt()
1671 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in blkif_interrupt()
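
blkif_interrupt() consumes responses from rsp_cons up to a snapshot of rsp_prod, then either re-arms the next interrupt by setting rsp_event = i + 1 (line 1658) or, if requests are still outstanding, uses RING_FINAL_CHECK_FOR_RESPONSES (line 1654) to close the race with responses that arrived after the loop. A simplified sketch of that consumer pattern; barriers are comments and a "response" is just its index:

    #include <stdio.h>

    typedef unsigned int RING_IDX;

    static RING_IDX rsp_prod;   /* written by the backend */
    static RING_IDX rsp_event;  /* "interrupt me at/after this index" */
    static RING_IDX rsp_cons;   /* frontend consumer index */

    static void interrupt_handler(void)
    {
        RING_IDX i, rp;

        for (;;) {
            rp = rsp_prod;      /* READ_ONCE() + rmb() in the driver */

            for (i = rsp_cons; i != rp; i++)
                printf("consume response %u\n", i);
            rsp_cons = i;

            /* re-arm: request an event at the next response ... */
            rsp_event = i + 1;
            /* mb(); ... then re-check for responses that raced in,
             * cf. RING_FINAL_CHECK_FOR_RESPONSES() */
            if (rsp_prod == rsp_cons)
                break;          /* drained; wait for the next interrupt */
        }
    }

    int main(void)
    {
        rsp_prod = 3;           /* pretend the backend produced three */
        interrupt_handler();
        return 0;
    }
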
1681 struct blkfront_ring_info *rinfo) in setup_blkring() argument
1685 struct blkfront_info *info = rinfo->dev_info; in setup_blkring()
1689 info->nr_ring_pages, rinfo->ring_ref); in setup_blkring()
1693 XEN_FRONT_RING_INIT(&rinfo->ring, sring, ring_size); in setup_blkring()
1695 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn); in setup_blkring()
1699 err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt, in setup_blkring()
1700 0, "blkif", rinfo); in setup_blkring()
1706 rinfo->irq = err; in setup_blkring()
1719 struct blkfront_ring_info *rinfo, const char *dir) in write_per_ring_nodes() argument
1724 struct blkfront_info *info = rinfo->dev_info; in write_per_ring_nodes()
1727 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]); in write_per_ring_nodes()
1738 "%u", rinfo->ring_ref[i]); in write_per_ring_nodes()
1746 err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn); in write_per_ring_nodes()
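
write_per_ring_nodes() advertises one ring to the backend through xenstore: the grant reference(s) for the ring pages plus the event channel, written under the directory it is given (dev->nodename for a single queue, a per-queue subdirectory for multiqueue, see line 1845). The single-page case uses a bare "ring-ref" key (line 1727); the multi-page loop at line 1738 formats indexed key names. A sketch of just the key-name construction; the indexed "ring-ref%u" format, the "queue-%u" directory format, and the example node path are conventional blkfront naming supplied here as assumptions, since the listing itself does not show them:

    #include <stdio.h>

    int main(void)
    {
        char path[64], key[32];
        const char *nodename = "device/vbd/51712";  /* example node */
        unsigned int nr_rings = 2, nr_ring_pages = 2;

        for (unsigned int q = 0; q < nr_rings; q++) {
            /* per-queue directory, cf. talk_to_blkback() */
            snprintf(path, sizeof(path), "%s/queue-%u", nodename, q);
            for (unsigned int i = 0; i < nr_ring_pages; i++) {
                /* indexed keys for multi-page rings, cf. line 1738 */
                snprintf(key, sizeof(key), "ring-ref%u", i);
                printf("%s/%s\n", path, key);
            }
            printf("%s/event-channel\n", path);     /* cf. line 1746 */
        }
        return 0;
    }
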
1777 struct blkfront_ring_info *rinfo; in talk_to_blkback() local
1795 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1797 err = setup_blkring(dev, rinfo); in talk_to_blkback()
1820 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename); in talk_to_blkback()
1842 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1845 err = write_per_ring_nodes(xbt, rinfo, path); in talk_to_blkback()
1874 for_each_rinfo(info, rinfo, i) { in talk_to_blkback()
1878 rinfo->shadow[j].req.u.rw.id = j + 1; in talk_to_blkback()
1879 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; in talk_to_blkback()
1898 struct blkfront_ring_info *rinfo; in negotiate_mq() local
1910 info->rinfo_size = struct_size(info->rinfo, shadow, in negotiate_mq()
1912 info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL); in negotiate_mq()
1913 if (!info->rinfo) { in negotiate_mq()
1919 for_each_rinfo(info, rinfo, i) { in negotiate_mq()
1920 INIT_LIST_HEAD(&rinfo->indirect_pages); in negotiate_mq()
1921 INIT_LIST_HEAD(&rinfo->grants); in negotiate_mq()
1922 rinfo->dev_info = info; in negotiate_mq()
1923 INIT_WORK(&rinfo->work, blkif_restart_queue); in negotiate_mq()
1924 spin_lock_init(&rinfo->ring_lock); in negotiate_mq()
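
negotiate_mq() sizes one array element as struct_size(info->rinfo, shadow, BLK_RING_SIZE(info)), i.e. the blkfront_ring_info header plus a ring-sized flexible shadow[] array, then kvcalloc()s nr_rings of them; struct_size() performs the multiply-and-add with overflow saturation so an absurd ring size fails the allocation instead of silently wrapping. A portable sketch of that overflow-checked size computation (stand-in types only):

    #include <stdint.h>
    #include <stdio.h>

    struct blk_shadow_sk { char buf[64]; };     /* stand-in sizes */
    struct rinfo_sk {
        int header;
        struct blk_shadow_sk shadow[];          /* flexible array */
    };

    /* cf. struct_size(): header + n * elem, saturating on overflow */
    static size_t struct_size_sk(size_t n)
    {
        size_t elem = sizeof(struct blk_shadow_sk);

        if (n > (SIZE_MAX - sizeof(struct rinfo_sk)) / elem)
            return SIZE_MAX;    /* calloc(SIZE_MAX) fails -> clean -ENOMEM */
        return sizeof(struct rinfo_sk) + n * elem;
    }

    int main(void)
    {
        printf("element size for 32 shadows: %zu\n", struct_size_sk(32));
        return 0;
    }
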
2013 struct blkfront_ring_info *rinfo; in blkif_recover() local
2021 for_each_rinfo(info, rinfo, r_index) { in blkif_recover()
2022 rc = blkfront_setup_indirect(rinfo); in blkif_recover()
2031 for_each_rinfo(info, rinfo, r_index) { in blkif_recover()
2033 kick_pending_request_queues(rinfo); in blkif_recover()
2064 struct blkfront_ring_info *rinfo; in blkfront_resume() local
2070 for_each_rinfo(info, rinfo, i) { in blkfront_resume()
2072 struct blk_shadow *shadow = rinfo->shadow; in blkfront_resume()
2122 struct blkfront_ring_info *rinfo; in blkfront_closing() local
2134 for_each_rinfo(info, rinfo, i) { in blkfront_closing()
2136 gnttab_cancel_free_callback(&rinfo->callback); in blkfront_closing()
2139 flush_work(&rinfo->work); in blkfront_closing()
2158 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) in blkfront_setup_indirect() argument
2162 struct blkfront_info *info = rinfo->dev_info; in blkfront_setup_indirect()
2182 err = fill_grant_buffer(rinfo, in blkfront_setup_indirect()
2195 BUG_ON(!list_empty(&rinfo->indirect_pages)); in blkfront_setup_indirect()
2201 list_add(&indirect_page->lru, &rinfo->indirect_pages); in blkfront_setup_indirect()
2206 rinfo->shadow[i].grants_used = in blkfront_setup_indirect()
2208 sizeof(rinfo->shadow[i].grants_used[0]), in blkfront_setup_indirect()
2210 rinfo->shadow[i].sg = kvcalloc(psegs, in blkfront_setup_indirect()
2211 sizeof(rinfo->shadow[i].sg[0]), in blkfront_setup_indirect()
2214 rinfo->shadow[i].indirect_grants = in blkfront_setup_indirect()
2216 sizeof(rinfo->shadow[i].indirect_grants[0]), in blkfront_setup_indirect()
2218 if ((rinfo->shadow[i].grants_used == NULL) || in blkfront_setup_indirect()
2219 (rinfo->shadow[i].sg == NULL) || in blkfront_setup_indirect()
2221 (rinfo->shadow[i].indirect_grants == NULL))) in blkfront_setup_indirect()
2223 sg_init_table(rinfo->shadow[i].sg, psegs); in blkfront_setup_indirect()
2232 kvfree(rinfo->shadow[i].grants_used); in blkfront_setup_indirect()
2233 rinfo->shadow[i].grants_used = NULL; in blkfront_setup_indirect()
2234 kvfree(rinfo->shadow[i].sg); in blkfront_setup_indirect()
2235 rinfo->shadow[i].sg = NULL; in blkfront_setup_indirect()
2236 kvfree(rinfo->shadow[i].indirect_grants); in blkfront_setup_indirect()
2237 rinfo->shadow[i].indirect_grants = NULL; in blkfront_setup_indirect()
2239 if (!list_empty(&rinfo->indirect_pages)) { in blkfront_setup_indirect()
2241 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { in blkfront_setup_indirect()
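
blkfront_setup_indirect() allocates three arrays per shadow entry (grants_used, sg, indirect_grants); if any kvcalloc() fails, the out-of-memory path at lines 2232-2241 walks every entry, frees whatever was allocated, and NULLs the pointers so a later blkif_free_ring() pass is a harmless no-op rather than a double free. A sketch of that all-or-nothing unwind idiom:

    #include <stdlib.h>

    #define N 4

    struct entry { int *a, *b, *c; };   /* three per-entry allocations */

    static int setup(struct entry *e, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            e[i].a = calloc(8, sizeof(int));
            e[i].b = calloc(8, sizeof(int));
            e[i].c = calloc(8, sizeof(int));
            if (!e[i].a || !e[i].b || !e[i].c)
                goto out_of_memory;
        }
        return 0;

    out_of_memory:
        /* free every entry, not just the failing one, and NULL the
         * pointers so later teardown passes see nothing to free;
         * e[] must start zeroed (the driver's shadow[] comes from
         * kvcalloc) so never-reached entries free(NULL) harmlessly */
        for (size_t i = 0; i < n; i++) {
            free(e[i].a); e[i].a = NULL;
            free(e[i].b); e[i].b = NULL;
            free(e[i].c); e[i].c = NULL;
        }
        return -1;
    }

    int main(void)
    {
        struct entry e[N] = { 0 };

        return setup(e, N) ? 1 : 0;
    }
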
2319 struct blkfront_ring_info *rinfo; in blkfront_connect() local
2374 for_each_rinfo(info, rinfo, i) { in blkfront_connect()
2375 err = blkfront_setup_indirect(rinfo); in blkfront_connect()
2396 for_each_rinfo(info, rinfo, i) in blkfront_connect()
2397 kick_pending_request_queues(rinfo); in blkfront_connect()
2527 struct blkfront_ring_info *rinfo; in purge_persistent_grants() local
2529 for_each_rinfo(info, rinfo, i) { in purge_persistent_grants()
2533 spin_lock_irqsave(&rinfo->ring_lock, flags); in purge_persistent_grants()
2535 if (rinfo->persistent_gnts_c == 0) { in purge_persistent_grants()
2536 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in purge_persistent_grants()
2540 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, in purge_persistent_grants()
2547 rinfo->persistent_gnts_c--; in purge_persistent_grants()
2552 list_splice_tail(&grants, &rinfo->grants); in purge_persistent_grants()
2554 spin_unlock_irqrestore(&rinfo->ring_lock, flags); in purge_persistent_grants()
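
purge_persistent_grants() filters the grant cache in place under the ring lock: entries whose grant reference can be safely revoked are unlinked, their reference invalidated, and collected on a local list, which is then spliced back onto rinfo->grants at the tail (line 2552) so they can be re-granted on next use. A sketch with a plain singly linked list; try_end_foreign_access() is a hypothetical stub for the gnttab call, and GREF_INVALID an illustrative sentinel:

    #include <stdio.h>
    #include <stdlib.h>

    #define GREF_INVALID 0u

    struct grant { unsigned int gref; struct grant *next; };

    static struct grant *grants;    /* cf. rinfo->grants */
    static unsigned int persistent_gnts_c;

    /* stub: pretend every live grant can be revoked right now */
    static int try_end_foreign_access(unsigned int gref) { return gref != 0; }

    static void purge(void)
    {
        struct grant **pp = &grants, *purged = NULL, **tail = &purged;

        while (*pp) {
            struct grant *g = *pp;

            if (g->gref == GREF_INVALID || !try_end_foreign_access(g->gref)) {
                pp = &g->next;      /* keep: already dead or still in use */
                continue;
            }
            *pp = g->next;          /* unlink from the cache */
            persistent_gnts_c--;
            g->gref = GREF_INVALID; /* reference handed back to gnttab */
            g->next = NULL;
            *tail = g;              /* append to the local purged list */
            tail = &g->next;
        }
        *pp = purged;               /* cf. list_splice_tail() */
    }

    int main(void)
    {
        for (unsigned int i = 1; i <= 3; i++) {
            struct grant *g = malloc(sizeof(*g));

            if (!g)
                return 1;
            g->gref = i;
            g->next = grants;
            grants = g;
            persistent_gnts_c++;
        }
        purge();
        printf("cached persistent grants left: %u\n", persistent_gnts_c);
        return 0;
    }
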