Searched refs:pages_to_map (Results 1 – 4 of 4) sorted by relevance
/linux-6.3-rc2/drivers/infiniband/hw/mlx5/umr.c
   697  size_t pages_to_map = 0;                                        in mlx5r_umr_update_xlt() (local)
   719  pages_to_map = ALIGN(npages, page_align);                       in mlx5r_umr_update_xlt()
   732  pages_to_map = min_t(size_t, pages_to_map, max_pages);          in mlx5r_umr_update_xlt()
   740  pages_mapped < pages_to_map && !err;                            in mlx5r_umr_update_xlt()
   742  npages = min_t(int, pages_iter, pages_to_map - pages_mapped);   in mlx5r_umr_update_xlt()
   751  if (pages_mapped + pages_iter >= pages_to_map)                  in mlx5r_umr_update_xlt()
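
The umr.c hits above outline one chunking loop: mlx5r_umr_update_xlt() aligns the page count, clamps it to a device maximum, then walks the range pages_iter entries at a time until pages_mapped reaches pages_to_map or an error occurs. Below is a minimal standalone C sketch of that loop shape only; the inputs, the ALIGN/MIN macros, and process_chunk() are made-up stand-ins, not the mlx5 code.

/*
 * Sketch of the chunked-update loop traced by the umr.c hits: the
 * total work is aligned, clamped, then consumed in fixed-size steps,
 * with the final step shortened to whatever remains.
 */
#include <stddef.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define MIN(a, b)   ((a) < (b) ? (a) : (b))

/* Hypothetical stand-in for posting one UMR work request. */
static int process_chunk(size_t first, size_t npages)
{
	printf("chunk: pages %zu..%zu\n", first, first + npages - 1);
	return 0; /* 0 == success, mirroring kernel error conventions */
}

int main(void)
{
	size_t npages = 1000, page_align = 8; /* made-up inputs */
	size_t max_pages = 1 << 20;           /* hypothetical device cap */
	size_t pages_iter = 64;               /* entries per chunk */
	size_t pages_to_map, pages_mapped;
	int err = 0;

	pages_to_map = ALIGN(npages, page_align);
	pages_to_map = MIN(pages_to_map, max_pages);

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter) {
		size_t n = MIN(pages_iter, pages_to_map - pages_mapped);
		err = process_chunk(pages_mapped, n);
	}
	return err;
}

This clamp-then-iterate shape keeps each step's payload bounded, and it is why the hit at line 751 can detect the final chunk with a simple pages_mapped + pages_iter >= pages_to_map test.
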
/linux-6.3-rc2/drivers/net/xen-netback/common.h
   173  struct page *pages_to_map[MAX_PENDING_REQS];   (member)

/linux-6.3-rc2/drivers/net/xen-netback/netback.c
   349  queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];   in xenvif_tx_create_map_op()
  1368  queue->pages_to_map,                                                           in xenvif_tx_action()
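
Taken together, the common.h and netback.c hits show the pattern: each time a grant-map operation is created, the backing page is recorded in pages_to_map[] at the index derived from the op's position in the ops array (mop - queue->tx_map_ops), so a single later call can map the whole batch. The standalone C sketch below mirrors that indexing; MAX_PENDING_REQS is reused as a name only, and struct map_op, create_map_op() and map_batch() are simplified stand-ins for the gnttab structures and gnttab_map_refs(), not the kernel definitions.

/*
 * Sketch of the xen-netback pattern: pages are recorded at the same
 * index as their map op, so ops[] and pages_to_map[] stay paired and
 * one batch call can consume both arrays.
 */
#include <stddef.h>
#include <stdio.h>

#define MAX_PENDING_REQS 256

struct map_op { unsigned int ref; };   /* stand-in for a gnttab op */

struct queue {
	struct map_op tx_map_ops[MAX_PENDING_REQS];
	void *mmap_pages[MAX_PENDING_REQS];    /* preallocated "pages" */
	void *pages_to_map[MAX_PENDING_REQS];  /* batch handed to the mapper */
};

/* Record one op; the pages_to_map index mirrors the op's array slot. */
static struct map_op *create_map_op(struct queue *queue, struct map_op *mop,
				    unsigned int ref, unsigned int pending_idx)
{
	mop->ref = ref;
	queue->pages_to_map[mop - queue->tx_map_ops] =
		queue->mmap_pages[pending_idx];
	return mop + 1;
}

/* Stand-in for gnttab_map_refs(): consume every op in one batch. */
static void map_batch(struct map_op *ops, void **pages, size_t nr)
{
	for (size_t i = 0; i < nr; i++)
		printf("map ref %u -> page %p\n", ops[i].ref, pages[i]);
}

int main(void)
{
	struct queue q = { .mmap_pages = { [0] = &q, [1] = &q } };
	struct map_op *mop = q.tx_map_ops;

	mop = create_map_op(&q, mop, 11, 0);
	mop = create_map_op(&q, mop, 42, 1);
	map_batch(q.tx_map_ops, q.pages_to_map, mop - q.tx_map_ops);
	return 0;
}

Keeping the two arrays index-aligned means the batch consumer needs no per-op lookup: ops[i] and pages[i] always describe the same request.
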
/linux-6.3-rc2/net/ipv4/tcp.c
  2128  unsigned int pages_to_map,                                    in tcp_zerocopy_vm_insert_batch() (argument)
  2135  unsigned long pages_remaining = pages_to_map;                 in tcp_zerocopy_vm_insert_batch()
  2141  pages_mapped = pages_to_map - (unsigned int)pages_remaining;  in tcp_zerocopy_vm_insert_batch()
  2196  unsigned int pages_to_map = 0;                                in tcp_zerocopy_receive() (local)
  2281  pages[pages_to_map++] = page;                                 in tcp_zerocopy_receive()
  2285  if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE ||          in tcp_zerocopy_receive()
  2291  pages_to_map,                                                 in tcp_zerocopy_receive()
  2297  pages_to_map = 0;                                             in tcp_zerocopy_receive()
  2300  if (pages_to_map) {                                           in tcp_zerocopy_receive()
  2301  ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map,  in tcp_zerocopy_receive()
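
The tcp.c hits trace a flush-when-full batch: tcp_zerocopy_receive() collects pages into a fixed array, flushes through tcp_zerocopy_vm_insert_batch() whenever the batch reaches TCP_ZEROCOPY_PAGE_BATCH_SIZE, and flushes any remainder at the end; the helper reports partial progress by counting down pages_remaining. The standalone sketch below reproduces that control flow; insert_batch(), the dummy "pages", the 4096-byte page size, and the batch size of 8 are illustrative stand-ins, not the kernel values.

/*
 * Sketch of the tcp_zerocopy_receive() batching: fill a fixed array,
 * flush when it is full, then flush the final partial batch.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_BATCH_SIZE 8   /* stand-in for TCP_ZEROCOPY_PAGE_BATCH_SIZE */

/* Stand-in for tcp_zerocopy_vm_insert_batch(): returns 0 on success and
 * accounts mapped bytes from how far pages_remaining was driven down. */
static int insert_batch(void **pages, unsigned int pages_to_map,
			unsigned int *length)
{
	unsigned long pages_remaining = pages_to_map;

	for (unsigned int i = 0; i < pages_to_map; i++) {
		printf("insert page %p\n", pages[i]);
		pages_remaining--;
	}
	*length += (pages_to_map - (unsigned int)pages_remaining) * 4096;
	return 0;
}

int main(void)
{
	void *pages[PAGE_BATCH_SIZE];
	unsigned int pages_to_map = 0, length = 0;
	int dummy[20];   /* 20 fake "pages" to receive */
	int ret = 0;

	for (int i = 0; i < 20 && !ret; i++) {
		pages[pages_to_map++] = &dummy[i];
		if (pages_to_map == PAGE_BATCH_SIZE) {
			ret = insert_batch(pages, pages_to_map, &length);
			pages_to_map = 0;
		}
	}
	if (!ret && pages_to_map)   /* final partial batch, as at line 2300 */
		ret = insert_batch(pages, pages_to_map, &length);

	printf("mapped %u bytes\n", length);
	return ret;
}

Resetting pages_to_map to zero after each flush (the hit at line 2297) is what lets the same fixed array be reused for every batch.
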
Completed in 18 milliseconds