Searched refs:page_cache (Results 1 – 11 of 11) sorted by relevance
554  page = select_victim_in_cache(&volume->page_cache);  in process_entry()
602  struct page_cache *cache = &volume->page_cache;  in release_queued_requests()
700  page = select_victim_in_cache(&volume->page_cache);  in read_page_locked()
745  make_page_most_recent(&volume->page_cache, page);  in get_volume_page_locked()
762  make_page_most_recent(&volume->page_cache, page);  in get_volume_page_protected()
1057 invalidate_page(&volume->page_cache, first_page + i);  in uds_forget_chapter()
1074 page = select_victim_in_cache(&volume->page_cache);  in donate_index_page_locked()
1479 volume->page_cache.index[i] = volume->page_cache.cache_slots;  in uds_replace_volume_storage()
1480 for (i = 0; i < volume->page_cache.cache_slots; i++)  in uds_replace_volume_storage()
1481 clear_cache_page(&volume->page_cache, &volume->page_cache.cache[i]);  in uds_replace_volume_storage()
[all …]
65   struct page_cache {  struct
111  struct page_cache page_cache;  argument
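The matches above show a victim-selection cache: select_victim_in_cache() picks a slot to evict and make_page_most_recent() bumps a page on access. Below is a minimal sketch of that LRU pattern under assumed names and a logical-clock scheme of my own; it is illustrative, not the actual dm-vdo/UDS implementation.

/* Minimal LRU victim-selection sketch (names and clock scheme assumed). */
#include <stdint.h>
#include <stddef.h>

#define CACHE_SLOTS 16

struct cached_page {
	uint32_t physical_page;   /* which on-disk page this slot holds */
	uint64_t last_used;       /* logical clock value at last access */
	_Bool valid;
};

struct page_cache {
	struct cached_page cache[CACHE_SLOTS];
	uint64_t clock;           /* monotonically increasing access clock */
};

static void make_page_most_recent(struct page_cache *pc, struct cached_page *p)
{
	p->last_used = ++pc->clock;
}

/* Prefer an empty slot; otherwise evict the slot with the oldest clock. */
static struct cached_page *select_victim_in_cache(struct page_cache *pc)
{
	struct cached_page *victim = &pc->cache[0];
	size_t i;

	for (i = 0; i < CACHE_SLOTS; i++) {
		struct cached_page *p = &pc->cache[i];

		if (!p->valid)
			return p;
		if (p->last_used < victim->last_used)
			victim = p;
	}
	victim->valid = 0;	/* caller refills this slot with new contents */
	return victim;
}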
144  if (likely(ring->page_cache.index > 0)) {  in mlx4_en_prepare_rx_desc()
147  ring->page_cache.index--;  in mlx4_en_prepare_rx_desc()
148  frags->page = ring->page_cache.buf[ring->page_cache.index].page;  in mlx4_en_prepare_rx_desc()
149  frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;  in mlx4_en_prepare_rx_desc()
421  struct mlx4_en_page_cache *cache = &ring->page_cache;  in mlx4_en_rx_recycle()
458  for (i = 0; i < ring->page_cache.index; i++) {  in mlx4_en_deactivate_rx_ring()
459  dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,  in mlx4_en_deactivate_rx_ring()
461  put_page(ring->page_cache.buf[i].page);  in mlx4_en_deactivate_rx_ring()
463  ring->page_cache.index = 0;  in mlx4_en_deactivate_rx_ring()
345 struct mlx4_en_page_cache page_cache; member
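These mlx4 hits outline a LIFO recycle cache of {page, dma} pairs: RX refill pops an entry when index > 0, the recycle path pushes one back if there is room, and ring teardown unmaps and frees whatever remains. A self-contained sketch of that stack discipline follows; the type names, sizes, and helpers are assumptions, not the mlx4 code.

/* Fixed-size page recycle stack sketch (names and sizes assumed). */
#include <stdint.h>
#include <stdbool.h>

#define PAGE_CACHE_SIZE 32

struct page_cache_entry {
	void *page;      /* stand-in for struct page * */
	uint64_t dma;    /* stand-in for dma_addr_t */
};

struct rx_page_cache {
	struct page_cache_entry buf[PAGE_CACHE_SIZE];
	unsigned int index;	/* number of cached entries; top of stack */
};

/* Recycle path: keep the page if there is room, else caller frees it. */
static bool page_cache_put(struct rx_page_cache *c, void *page, uint64_t dma)
{
	if (c->index >= PAGE_CACHE_SIZE)
		return false;
	c->buf[c->index].page = page;
	c->buf[c->index].dma = dma;
	c->index++;
	return true;
}

/* Refill path: pop the most recently recycled page, if any. */
static bool page_cache_get(struct rx_page_cache *c, void **page, uint64_t *dma)
{
	if (c->index == 0)
		return false;
	c->index--;
	*page = c->buf[c->index].page;
	*dma = c->buf[c->index].dma;
	return true;
}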
23 page_cache
630  (zone->page_cache.outstanding_reads == 0) &&  in check_for_drain_complete()
631  (zone->page_cache.outstanding_writes == 0)) {  in check_for_drain_complete()
1215 struct vdo_page_cache *cache = &zone->page_cache;  in vdo_get_page()
2029 save_pages(&zone->page_cache);  in write_expired_elements()
2759 zone->page_cache.zone = zone;  in initialize_block_map_zone()
2760 zone->page_cache.vdo = vdo;  in initialize_block_map_zone()
2762 zone->page_cache.stats.free_pages = zone->page_cache.page_count;  in initialize_block_map_zone()
2769 INIT_LIST_HEAD(&zone->page_cache.lru_list);  in initialize_block_map_zone()
2770 INIT_LIST_HEAD(&zone->page_cache.outgoing_list);  in initialize_block_map_zone()
2823 struct vdo_page_cache *cache = &zone->page_cache;  in uninitialize_block_map_zone()
[all …]
240 struct vdo_page_cache page_cache; member
242  repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false;  in free_repair_completion()
641  struct vdo_page_cache *cache = &vdo->block_map->zones[0].page_cache;  in rebuild_reference_counts()
1106 vdo->block_map->zones[0].page_cache.rebuilding =  in recover_block_map()
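The check_for_drain_complete() hits above show the vdo page cache's quiesce test: a drain only finishes once both outstanding_reads and outstanding_writes hit zero. A stripped-down sketch of that counter-based drain check follows; the struct fields beyond the two counters and the callback are assumptions, not the vdo block-map code.

/* Drain-completion check sketch (callback and draining flag assumed). */
#include <stdbool.h>

struct page_cache_state {
	unsigned int outstanding_reads;
	unsigned int outstanding_writes;
	bool draining;
	void (*drain_done)(struct page_cache_state *cache);
};

/* Called whenever an outstanding read or write completes. */
static void check_for_drain_complete(struct page_cache_state *cache)
{
	if (cache->draining &&
	    (cache->outstanding_reads == 0) &&
	    (cache->outstanding_writes == 0)) {
		cache->draining = false;
		cache->drain_done(cache);
	}
}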
313 struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */ member
732 frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC); in gve_xdp_redirect()
1272 page_frag_cache_drain(&priv->rx[i].page_cache); in gve_drain_page_cache()
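The gve hits use the kernel's page_frag_cache API: page_frag_alloc() carves variable-sized frames out of cached pages, and page_frag_cache_drain() drops the cache's backing page at teardown. A minimal kernel-style sketch follows; the ring structure and helper names are assumptions (the declarations also live in different headers depending on kernel version, linux/gfp.h vs. linux/page_frag_cache.h).

/* page_frag_cache usage sketch (ring struct and helpers assumed). */
#include <linux/gfp.h>
#include <linux/mm_types.h>

struct xdp_rx_ring {
	struct page_frag_cache page_cache;	/* zero-initialized at ring setup */
};

static void *ring_alloc_frame(struct xdp_rx_ring *rx, unsigned int total_len)
{
	/* GFP_ATOMIC: the XDP redirect path runs in softirq context. */
	return page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
}

static void ring_teardown(struct xdp_rx_ring *rx)
{
	/* Release the reference the cache holds on its current page. */
	page_frag_cache_drain(&rx->page_cache);
}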
Completed in 39 milliseconds