Lines matching refs:netmem — cross-reference hits for the netmem identifier in net/core/page_pool.c. Each hit shows the source line number, the matching code, and the enclosing function.
368 static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
373 netmem_ref netmem; in page_pool_refill_alloc_cache() local
394 netmem = (__force netmem_ref)__ptr_ring_consume(r); in page_pool_refill_alloc_cache()
395 if (unlikely(!netmem)) in page_pool_refill_alloc_cache()
398 if (likely(netmem_is_pref_nid(netmem, pref_nid))) { in page_pool_refill_alloc_cache()
399 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_refill_alloc_cache()
406 page_pool_return_page(pool, netmem); in page_pool_refill_alloc_cache()
408 netmem = 0; in page_pool_refill_alloc_cache()
415 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
419 return netmem; in page_pool_refill_alloc_cache()
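
The hits at 373-419 trace page_pool_refill_alloc_cache(): recycled buffers are consumed from the pool's ptr_ring into the lockless alloc cache, but only while their NUMA node matches the preferred node; on a mismatch the buffer is returned to the system and the refill stops. A minimal sketch of that loop, reconstructed around the listed lines (the PP_ALLOC_CACHE_REFILL bound is an assumption not visible in this listing):

	/* Refill alloc array, but only while the NUMA node matches */
	do {
		netmem = (__force netmem_ref)__ptr_ring_consume(r);
		if (unlikely(!netmem))
			break;

		if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
			pool->alloc.cache[pool->alloc.count++] = netmem;
		} else {
			/* NUMA mismatch: release this buffer and fall
			 * through to the page allocator instead of
			 * caching remote memory.
			 */
			page_pool_return_page(pool, netmem);
			netmem = 0;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Hand the last refilled entry straight back to the caller */
	if (likely(pool->alloc.count > 0))
		netmem = pool->alloc.cache[--pool->alloc.count];

	return netmem;
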
425 netmem_ref netmem; in __page_pool_get_cached() local
430 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
433 netmem = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
436 return netmem; in __page_pool_get_cached()
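
__page_pool_get_cached() at 425-436 is the allocation fast path: pop from the per-pool array when it has entries, otherwise refill from the ring as above. The array needs no locking because it is only touched from the softirq/NAPI context that owns the pool. A sketch of the shape implied by the listed lines:

	static netmem_ref __page_pool_get_cached(struct page_pool *pool)
	{
		netmem_ref netmem;

		if (likely(pool->alloc.count)) {
			/* Fast path: lockless per-pool array */
			netmem = pool->alloc.cache[--pool->alloc.count];
		} else {
			/* Slower path: batch-consume from the ptr_ring */
			netmem = page_pool_refill_alloc_cache(pool);
		}

		return netmem;
	}
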
440 netmem_ref netmem, in __page_pool_dma_sync_for_device() argument
444 dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem); in __page_pool_dma_sync_for_device()
454 netmem_ref netmem, in page_pool_dma_sync_for_device() argument
458 __page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); in page_pool_dma_sync_for_device()
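
The sync helpers at 440-458 push only the region a driver will actually touch back to the device, clamped to pool->p.max_len. A sketch of the inner helper; dma_sync_single_range_for_device() is the standard DMA API call for a sub-range sync, though the exact variant used here is an assumption:

	dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem);

	/* Never sync more than the configured payload area */
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
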
461 static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem) in page_pool_dma_map() argument
470 dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0, in page_pool_dma_map()
477 if (page_pool_set_dma_addr_netmem(netmem, dma)) in page_pool_dma_map()
480 page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len); in page_pool_dma_map()
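
page_pool_dma_map() at 461-480 maps the whole (possibly compound) page once; the mapping then lives for the page's entire stay in the pool, so per-packet map/unmap cost disappears. A sketch around the listed lines; the dma_mapping_error() check and the failure handling for the address store are assumptions filled in from standard DMA API usage:

	dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
				 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
				 DMA_ATTR_SKIP_CPU_SYNC |
					DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	/* Store the address in the netmem; nonzero means it did not fit,
	 * in which case the mapping must be undone before failing.
	 */
	if (page_pool_set_dma_addr_netmem(netmem, dma))
		return false;

	page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
	return true;
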
524 netmem_ref netmem; in __page_pool_alloc_pages_slow() local
548 netmem = pool->alloc.cache[i]; in __page_pool_alloc_pages_slow()
549 if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) { in __page_pool_alloc_pages_slow()
550 put_page(netmem_to_page(netmem)); in __page_pool_alloc_pages_slow()
554 page_pool_set_pp_info(pool, netmem); in __page_pool_alloc_pages_slow()
555 pool->alloc.cache[pool->alloc.count++] = netmem; in __page_pool_alloc_pages_slow()
558 trace_page_pool_state_hold(pool, netmem, in __page_pool_alloc_pages_slow()
564 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
567 netmem = 0; in __page_pool_alloc_pages_slow()
571 return netmem; in __page_pool_alloc_pages_slow()
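
__page_pool_alloc_pages_slow() at 524-571 bulk-allocates order-0 pages directly into alloc.cache[], then walks them: each page is DMA-mapped (when the pool owns the mapping), tagged via page_pool_set_pp_info(), and traced as held in-flight; pages that fail to map are dropped with put_page(). A sketch of that loop; the bulk allocation call that precedes it is not visible in this listing and is assumed:

	for (i = 0; i < nr_pages; i++) {
		netmem = pool->alloc.cache[i];
		if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) {
			put_page(netmem_to_page(netmem));
			continue;
		}

		page_pool_set_pp_info(pool, netmem);
		pool->alloc.cache[pool->alloc.count++] = netmem;
		/* Track how many pages the pool holds in-flight */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, netmem,
					   pool->pages_state_hold_cnt);
	}

	/* Return the last page; 0 when the bulk alloc produced nothing */
	if (likely(pool->alloc.count > 0))
		netmem = pool->alloc.cache[--pool->alloc.count];
	else
		netmem = 0;

	return netmem;
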
579 netmem_ref netmem; in page_pool_alloc_netmem() local
582 netmem = __page_pool_get_cached(pool); in page_pool_alloc_netmem()
583 if (netmem) in page_pool_alloc_netmem()
584 return netmem; in page_pool_alloc_netmem()
588 netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp); in page_pool_alloc_netmem()
590 netmem = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_netmem()
591 return netmem; in page_pool_alloc_netmem()
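
page_pool_alloc_netmem() at 579-591 is the public entry point: try the cache first, then either the devmem memory provider or the page allocator slow path. A sketch; the exact condition gating the provider path (the real code uses a static branch plus the pool's mp state) is an assumption:

	netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
	{
		netmem_ref netmem;

		/* Fast path: recycled buffer from cache or ring */
		netmem = __page_pool_get_cached(pool);
		if (netmem)
			return netmem;

		/* Slow path: a memory provider (e.g. dmabuf devmem) if one
		 * is bound to this pool, otherwise the page allocator.
		 */
		if (pool->mp_priv)
			netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp);
		else
			netmem = __page_pool_alloc_pages_slow(pool, gfp);
		return netmem;
	}
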
626 void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem) in page_pool_set_pp_info() argument
628 netmem_set_pp(netmem, pool); in page_pool_set_pp_info()
629 netmem_or_pp_magic(netmem, PP_SIGNATURE); in page_pool_set_pp_info()
637 page_pool_fragment_netmem(netmem, 1); in page_pool_set_pp_info()
639 pool->slow.init_callback(netmem, pool->slow.init_arg); in page_pool_set_pp_info()
642 void page_pool_clear_pp_info(netmem_ref netmem) in page_pool_clear_pp_info() argument
644 netmem_clear_pp_magic(netmem); in page_pool_clear_pp_info()
645 netmem_set_pp(netmem, NULL); in page_pool_clear_pp_info()
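
page_pool_set_pp_info() and page_pool_clear_pp_info() at 626-645 bracket a buffer's life in the pool: on entry the netmem is stamped with its owning pool and the PP_SIGNATURE magic (so the stack can later recognize pool-owned memory for recycling) and initialized as a single fragment; on exit both marks are cleared. A sketch assembled from the listed lines, with the guard on the optional init callback assumed:

	void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
	{
		netmem_set_pp(netmem, pool);
		netmem_or_pp_magic(netmem, PP_SIGNATURE);

		/* Every buffer starts life as a single fragment */
		page_pool_fragment_netmem(netmem, 1);
		if (pool->slow.init_callback)
			pool->slow.init_callback(netmem, pool->slow.init_arg);
	}

	void page_pool_clear_pp_info(netmem_ref netmem)
	{
		netmem_clear_pp_magic(netmem);
		netmem_set_pp(netmem, NULL);
	}
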
649 netmem_ref netmem) in __page_pool_release_page_dma() argument
659 dma = page_pool_get_dma_addr_netmem(netmem); in __page_pool_release_page_dma()
665 page_pool_set_dma_addr_netmem(netmem, 0); in __page_pool_release_page_dma()
673 void page_pool_return_page(struct page_pool *pool, netmem_ref netmem) in page_pool_return_page() argument
680 put = mp_dmabuf_devmem_release_page(pool, netmem); in page_pool_return_page()
682 __page_pool_release_page_dma(pool, netmem); in page_pool_return_page()
688 trace_page_pool_state_release(pool, netmem, count); in page_pool_return_page()
691 page_pool_clear_pp_info(netmem); in page_pool_return_page()
692 put_page(netmem_to_page(netmem)); in page_pool_return_page()
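
page_pool_return_page() at 673-692 is the terminal path back to the system: release the memory-provider or DMA state, count the release for in-flight accounting, then clear the pp marks and drop the final page reference. A sketch of that ordering; the provider-vs-DMA dispatch condition is an assumption:

	put = true;
	if (pool->mp_priv)
		put = mp_dmabuf_devmem_release_page(pool, netmem);
	else
		__page_pool_release_page_dma(pool, netmem);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to dereference pool after this point.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, netmem, count);

	if (put) {
		page_pool_clear_pp_info(netmem);
		put_page(netmem_to_page(netmem));
	}
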
700 static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem) in page_pool_recycle_in_ring() argument
705 ret = ptr_ring_produce(&pool->ring, (__force void *)netmem); in page_pool_recycle_in_ring()
707 ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem); in page_pool_recycle_in_ring()
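
page_pool_recycle_in_ring() at 700-707 picks the ptr_ring producer variant by context: the plain produce when already running in softirq (BH protection is implicit there), the _bh variant otherwise. A sketch, assuming in_softirq() as the discriminator:

	if (in_softirq())
		ret = ptr_ring_produce(&pool->ring, (__force void *)netmem);
	else
		ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem);

	return ret == 0;
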
722 static bool page_pool_recycle_in_cache(netmem_ref netmem, in page_pool_recycle_in_cache() argument
731 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_recycle_in_cache()
736 static bool __page_pool_page_can_be_recycled(netmem_ref netmem) in __page_pool_page_can_be_recycled() argument
738 return netmem_is_net_iov(netmem) || in __page_pool_page_can_be_recycled()
739 (page_ref_count(netmem_to_page(netmem)) == 1 && in __page_pool_page_can_be_recycled()
740 !page_is_pfmemalloc(netmem_to_page(netmem))); in __page_pool_page_can_be_recycled()
750 __page_pool_put_page(struct page_pool *pool, netmem_ref netmem, in __page_pool_put_page() argument
764 if (likely(__page_pool_page_can_be_recycled(netmem))) { in __page_pool_put_page()
767 page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); in __page_pool_put_page()
769 if (allow_direct && page_pool_recycle_in_cache(netmem, pool)) in __page_pool_put_page()
773 return netmem; in __page_pool_put_page()
790 page_pool_return_page(pool, netmem); in __page_pool_put_page()
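
__page_pool_put_page() at 750-790 implements the recycle decision: a buffer is recyclable if it is a net_iov or an exclusively-owned, non-pfmemalloc page (the predicate at 736-740). Recyclable buffers are synced for the device and placed in the direct cache when the caller allows it; otherwise the netmem is handed back so the caller can try the ring. Non-recyclable buffers go straight back to the system. A sketch around the listed lines:

	if (likely(__page_pool_page_can_be_recycled(netmem))) {
		page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);

		/* Direct recycle into the lockless alloc cache */
		if (allow_direct && page_pool_recycle_in_cache(netmem, pool))
			return 0;

		/* Candidate for ring recycling; caller takes over */
		return netmem;
	}

	/* Refcount elevated or page is pfmemalloc: do not recycle */
	page_pool_return_page(pool, netmem);
	return 0;
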
818 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem, in page_pool_put_unrefed_netmem() argument
824 netmem = in page_pool_put_unrefed_netmem()
825 __page_pool_put_page(pool, netmem, dma_sync_size, allow_direct); in page_pool_put_unrefed_netmem()
826 if (netmem && !page_pool_recycle_in_ring(pool, netmem)) { in page_pool_put_unrefed_netmem()
829 page_pool_return_page(pool, netmem); in page_pool_put_unrefed_netmem()
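
page_pool_put_unrefed_netmem() at 818-829 layers the fallbacks: try the direct cache via __page_pool_put_page(), then the ptr_ring, and only then release to the system. A sketch of the tail of the function:

	netmem = __page_pool_put_page(pool, netmem, dma_sync_size,
				      allow_direct);
	if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
		/* Ring full: fall back to freeing the page */
		page_pool_return_page(pool, netmem);
	}
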
867 netmem_ref netmem = page_to_netmem(virt_to_head_page(data[i])); in page_pool_put_page_bulk() local
870 if (!page_pool_is_last_ref(netmem)) in page_pool_put_page_bulk()
873 netmem = __page_pool_put_page(pool, netmem, -1, allow_direct); in page_pool_put_page_bulk()
875 if (netmem) in page_pool_put_page_bulk()
876 data[bulk_len++] = (__force void *)netmem; in page_pool_put_page_bulk()
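
page_pool_put_page_bulk() at 867-876 batches the same logic over an array of virtual addresses: each is converted back to a netmem via its head page, frag users that are not the last reference are skipped, and survivors of __page_pool_put_page() are compacted in place for a single bulk ptr_ring insert afterwards. A sketch of the filter loop:

	for (i = 0; i < count; i++) {
		netmem_ref netmem = page_to_netmem(virt_to_head_page(data[i]));

		/* Not the last user of this frag: nothing to recycle yet */
		if (!page_pool_is_last_ref(netmem))
			continue;

		netmem = __page_pool_put_page(pool, netmem, -1, allow_direct);
		/* Approved for bulk recycling into the ptr_ring */
		if (netmem)
			data[bulk_len++] = (__force void *)netmem;
	}
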
907 netmem_ref netmem) in page_pool_drain_frag() argument
912 if (likely(page_pool_unref_netmem(netmem, drain_count))) in page_pool_drain_frag()
915 if (__page_pool_page_can_be_recycled(netmem)) { in page_pool_drain_frag()
916 page_pool_dma_sync_for_device(pool, netmem, -1); in page_pool_drain_frag()
917 return netmem; in page_pool_drain_frag()
920 page_pool_return_page(pool, netmem); in page_pool_drain_frag()
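
page_pool_drain_frag() at 907-920 settles the remaining bias on the pool's current frag page: if other users still hold references the page stays out of reach; if this was the last reference and the page is still recyclable it is resynced and reused as a fresh frag page, otherwise it is returned. A sketch, with the drain_count derivation from BIAS_MAX assumed:

	long drain_count = BIAS_MAX - pool->frag_users;

	/* Someone else still uses the frag page */
	if (likely(page_pool_unref_netmem(netmem, drain_count)))
		return 0;

	if (__page_pool_page_can_be_recycled(netmem)) {
		page_pool_dma_sync_for_device(pool, netmem, -1);
		return netmem;	/* reuse as the next frag page */
	}

	page_pool_return_page(pool, netmem);
	return 0;
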
927 netmem_ref netmem = pool->frag_page; in page_pool_free_frag() local
931 if (!netmem || page_pool_unref_netmem(netmem, drain_count)) in page_pool_free_frag()
934 page_pool_return_page(pool, netmem); in page_pool_free_frag()
942 netmem_ref netmem = pool->frag_page; in page_pool_alloc_frag_netmem() local
950 if (netmem && *offset + size > max_size) { in page_pool_alloc_frag_netmem()
951 netmem = page_pool_drain_frag(pool, netmem); in page_pool_alloc_frag_netmem()
952 if (netmem) { in page_pool_alloc_frag_netmem()
958 if (!netmem) { in page_pool_alloc_frag_netmem()
959 netmem = page_pool_alloc_netmem(pool, gfp); in page_pool_alloc_frag_netmem()
960 if (unlikely(!netmem)) { in page_pool_alloc_frag_netmem()
965 pool->frag_page = netmem; in page_pool_alloc_frag_netmem()
971 page_pool_fragment_netmem(netmem, BIAS_MAX); in page_pool_alloc_frag_netmem()
972 return netmem; in page_pool_alloc_frag_netmem()
978 return netmem; in page_pool_alloc_frag_netmem()
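
page_pool_alloc_frag_netmem() at 942-978 carves sub-page buffers out of one netmem: when the pending frag page cannot fit the request it is drained (and possibly reused), a fresh page is allocated on demand, and every new frag page is pre-biased with BIAS_MAX references so that handing out fragments only adjusts a local counter. A sketch of the core, with the offset bookkeeping reconstructed around the listed lines:

	if (netmem && *offset + size > max_size) {
		netmem = page_pool_drain_frag(pool, netmem);
		if (netmem)
			goto frag_reset;	/* drained page is reusable */
	}

	if (!netmem) {
		netmem = page_pool_alloc_netmem(pool, gfp);
		if (unlikely(!netmem)) {
			pool->frag_page = 0;
			return 0;
		}

		pool->frag_page = netmem;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_netmem(netmem, BIAS_MAX);
		return netmem;
	}

	/* The pending page still has room: hand out the next slice */
	pool->frag_users++;
	pool->frag_offset = *offset + size;
	return netmem;
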
992 netmem_ref netmem; in page_pool_empty_ring() local
995 while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
997 if (!(netmem_ref_count(netmem) == 1)) in page_pool_empty_ring()
999 __func__, netmem_ref_count(netmem)); in page_pool_empty_ring()
1001 page_pool_return_page(pool, netmem); in page_pool_empty_ring()
1023 netmem_ref netmem; in page_pool_empty_alloc_cache_once() local
1033 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
1034 page_pool_return_page(pool, netmem); in page_pool_empty_alloc_cache_once()
1141 netmem_ref netmem; in page_pool_update_nid() local
1148 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
1149 page_pool_return_page(pool, netmem); in page_pool_update_nid()
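
page_pool_update_nid() at 1141-1149 reacts to a NUMA node change by flushing the alloc cache: the refill path above filters by node, so cached buffers from the old node are simply returned and repopulation happens naturally on the new node. A sketch of the flush:

	pool->p.nid = new_nid;

	/* Flush the alloc cache; refill re-checks the NUMA node */
	while (pool->alloc.count) {
		netmem = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, netmem);
	}
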