Lines matching refs: netmem (net/core/page_pool.c)
374 static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);
379 netmem_ref netmem; in page_pool_refill_alloc_cache() local
400 netmem = (__force netmem_ref)__ptr_ring_consume(r); in page_pool_refill_alloc_cache()
401 if (unlikely(!netmem)) in page_pool_refill_alloc_cache()
404 if (likely(netmem_is_pref_nid(netmem, pref_nid))) { in page_pool_refill_alloc_cache()
405 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_refill_alloc_cache()
412 page_pool_return_netmem(pool, netmem); in page_pool_refill_alloc_cache()
414 netmem = 0; in page_pool_refill_alloc_cache()
421 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
425 return netmem; in page_pool_refill_alloc_cache()
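
The page_pool_refill_alloc_cache() lines above form the ring-to-cache refill path: recycled netmem refs are consumed from the pool's ptr_ring and kept in the alloc cache only when their NUMA node matches the preferred node; mismatched entries are released so the cache stays node-local, and one cached entry is handed straight back to the caller. Below is a minimal user-space sketch of that loop; the types and helpers (ring_consume(), netmem_nid(), return_netmem()) are simplified stand-ins, not the kernel API.

    /* Simplified model of the refill loop: consume recycled entries from a
     * shared ring into a small array cache, discarding wrong-node entries. */
    #include <stddef.h>
    #include <stdint.h>

    #define PP_ALLOC_CACHE_REFILL 64

    typedef uintptr_t netmem_ref;               /* stand-in for the kernel type */

    struct pool {
        netmem_ref cache[PP_ALLOC_CACHE_REFILL];
        size_t count;
    };

    /* Hypothetical helpers standing in for the ptr_ring consumer, the
     * page's NUMA-node lookup and the release path. */
    netmem_ref ring_consume(struct pool *p);
    int netmem_nid(netmem_ref nm);
    void return_netmem(struct pool *p, netmem_ref nm);

    static netmem_ref refill_alloc_cache(struct pool *p, int pref_nid)
    {
        netmem_ref nm = 0;

        do {
            nm = ring_consume(p);
            if (!nm)
                break;                          /* ring is empty */

            if (netmem_nid(nm) == pref_nid) {
                p->cache[p->count++] = nm;      /* right node: keep it */
            } else {
                return_netmem(p, nm);           /* wrong node: release it */
                nm = 0;
            }
        } while (p->count < PP_ALLOC_CACHE_REFILL);

        if (p->count)                           /* hand one back directly */
            nm = p->cache[--p->count];

        return nm;
    }
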
431 netmem_ref netmem; in __page_pool_get_cached() local
436 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
439 netmem = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
442 return netmem; in __page_pool_get_cached()
446 netmem_ref netmem, in __page_pool_dma_sync_for_device() argument
450 dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem); in __page_pool_dma_sync_for_device()
460 netmem_ref netmem, in page_pool_dma_sync_for_device() argument
467 __page_pool_dma_sync_for_device(pool, netmem, in page_pool_dma_sync_for_device()
473 static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp) in page_pool_dma_map() argument
484 dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0, in page_pool_dma_map()
491 if (page_pool_set_dma_addr_netmem(netmem, dma)) { in page_pool_dma_map()
497 err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem), in page_pool_dma_map()
500 err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem), in page_pool_dma_map()
507 netmem_set_dma_index(netmem, id); in page_pool_dma_map()
508 page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len); in page_pool_dma_map()
513 page_pool_set_dma_addr_netmem(netmem, 0); in page_pool_dma_map()
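
page_pool_dma_map() above maps the page for device DMA, records the DMA address (plus, in this kernel, an xarray tracking index) in the netmem, and pre-syncs up to max_len for the device, unwinding the stored address if the bookkeeping fails. A compact sketch of that ordering follows; map_for_device(), store_dma_addr(), track_mapping() and sync_for_device() are hypothetical stand-ins.

    /* Simplified model of the map path: map, record the address, then sync
     * the buffer for device use; undo the record if bookkeeping fails. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t netmem_ref;
    typedef uint64_t  dma_addr_t;

    dma_addr_t map_for_device(netmem_ref nm);
    bool store_dma_addr(netmem_ref nm, dma_addr_t dma); /* true if it can't be stored */
    bool track_mapping(netmem_ref nm);                  /* id allocation, may fail */
    void sync_for_device(netmem_ref nm, dma_addr_t dma, size_t len);

    static bool dma_map_netmem(netmem_ref nm, size_t max_len)
    {
        dma_addr_t dma = map_for_device(nm);
        if (!dma)
            return false;                       /* mapping error */

        if (store_dma_addr(nm, dma))
            goto unmap;                         /* address does not fit */

        if (!track_mapping(nm))
            goto clear;                         /* bookkeeping failed */

        /* Make the whole usable area visible to the device up front. */
        sync_for_device(nm, dma, max_len);
        return true;

    clear:
        store_dma_addr(nm, 0);
    unmap:
        /* a real implementation would also undo the DMA mapping here */
        return false;
    }
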
553 netmem_ref netmem; in __page_pool_alloc_netmems_slow() local
576 netmem = pool->alloc.cache[i]; in __page_pool_alloc_netmems_slow()
577 if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) { in __page_pool_alloc_netmems_slow()
578 put_page(netmem_to_page(netmem)); in __page_pool_alloc_netmems_slow()
582 page_pool_set_pp_info(pool, netmem); in __page_pool_alloc_netmems_slow()
583 pool->alloc.cache[pool->alloc.count++] = netmem; in __page_pool_alloc_netmems_slow()
586 trace_page_pool_state_hold(pool, netmem, in __page_pool_alloc_netmems_slow()
592 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_netmems_slow()
595 netmem = 0; in __page_pool_alloc_netmems_slow()
599 return netmem; in __page_pool_alloc_netmems_slow()
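
__page_pool_alloc_netmems_slow() above is the bulk slow path: a batch of pages is allocated, each one is DMA-mapped when the pool requires it (dropping any that fail), tagged with pool-ownership info, and stashed in the alloc cache; the last survivor is popped for the caller. A rough model of that loop, again with hypothetical helpers:

    /* Simplified slow path: bulk-allocate, map and tag buffers, cache the
     * survivors, and return one of them to the caller. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t netmem_ref;

    #define BULK 16

    struct pool {
        netmem_ref cache[64];
        size_t count;
        bool dma_map;                           /* "pool handles DMA mapping" */
    };

    /* Stand-ins for the bulk page allocator, the DMA-map step, the
     * ownership stamping and the page free. */
    size_t bulk_alloc(netmem_ref *out, size_t n);
    bool   map_netmem(struct pool *p, netmem_ref nm);
    void   set_pp_info(struct pool *p, netmem_ref nm);
    void   drop_netmem(netmem_ref nm);

    static netmem_ref alloc_netmems_slow(struct pool *p)
    {
        netmem_ref batch[BULK];
        size_t n = bulk_alloc(batch, BULK);

        for (size_t i = 0; i < n; i++) {
            netmem_ref nm = batch[i];

            if (p->dma_map && !map_netmem(p, nm)) {
                drop_netmem(nm);                /* mapping failed: free it */
                continue;
            }
            set_pp_info(p, nm);                 /* mark pool ownership */
            p->cache[p->count++] = nm;
        }

        return p->count ? p->cache[--p->count] : 0;
    }
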
607 netmem_ref netmem; in page_pool_alloc_netmems() local
610 netmem = __page_pool_get_cached(pool); in page_pool_alloc_netmems()
611 if (netmem) in page_pool_alloc_netmems()
612 return netmem; in page_pool_alloc_netmems()
616 netmem = pool->mp_ops->alloc_netmems(pool, gfp); in page_pool_alloc_netmems()
618 netmem = __page_pool_alloc_netmems_slow(pool, gfp); in page_pool_alloc_netmems()
619 return netmem; in page_pool_alloc_netmems()
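
page_pool_alloc_netmems() is the top-level allocation entry point: it first tries the lockless per-pool cache (with the ring refill behind it), and only on a miss falls through either to an attached memory provider's alloc hook or to the page-allocator slow path. A tiny dispatch sketch, with illustrative names only:

    /* Simplified dispatch: cache hit first, then memory provider, then the
     * ordinary slow path. */
    #include <stdint.h>

    typedef uintptr_t netmem_ref;

    struct mem_provider {
        netmem_ref (*alloc)(void *ctx);         /* e.g. devmem or io_uring backed */
    };

    struct pool {
        struct mem_provider *mp;
        void *mp_ctx;
    };

    netmem_ref get_cached(struct pool *p);      /* fast path + ring refill */
    netmem_ref alloc_slow(struct pool *p);      /* page allocator path */

    static netmem_ref alloc_netmem(struct pool *p)
    {
        netmem_ref nm = get_cached(p);
        if (nm)
            return nm;                          /* fast path hit */

        if (p->mp)
            return p->mp->alloc(p->mp_ctx);     /* provider-backed pool */

        return alloc_slow(p);                   /* plain pages */
    }
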
654 void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem) in page_pool_set_pp_info() argument
656 netmem_set_pp(netmem, pool); in page_pool_set_pp_info()
657 netmem_or_pp_magic(netmem, PP_SIGNATURE); in page_pool_set_pp_info()
665 page_pool_fragment_netmem(netmem, 1); in page_pool_set_pp_info()
667 pool->slow.init_callback(netmem, pool->slow.init_arg); in page_pool_set_pp_info()
670 void page_pool_clear_pp_info(netmem_ref netmem) in page_pool_clear_pp_info() argument
672 netmem_clear_pp_magic(netmem); in page_pool_clear_pp_info()
673 netmem_set_pp(netmem, NULL); in page_pool_clear_pp_info()
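
page_pool_set_pp_info() / page_pool_clear_pp_info() stamp and unstamp pool ownership: the netmem gets a back-pointer to its pool, PP_SIGNATURE is OR-ed into the magic field so the skb and free paths can recognise pool memory, and the fragment refcount starts at 1. A small model of that pairing; the struct layout and the signature value below are illustrative, not the kernel's:

    /* Simplified ownership stamping: pool back-pointer plus a magic marker,
     * set on acquisition and cleared before the buffer leaves the pool. */
    #include <stddef.h>

    #define PP_SIGNATURE 0x40                   /* illustrative value only */

    struct pool;

    struct netmem {                             /* toy stand-in, not struct page */
        struct pool  *pp;
        unsigned long pp_magic;
        long          pp_ref_count;
    };

    static void set_pp_info(struct pool *p, struct netmem *nm)
    {
        nm->pp        = p;
        nm->pp_magic |= PP_SIGNATURE;           /* "this belongs to a page_pool" */
        nm->pp_ref_count = 1;                   /* single user until fragmented */
    }

    static void clear_pp_info(struct netmem *nm)
    {
        nm->pp_magic = 0;
        nm->pp       = NULL;
    }
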
677 netmem_ref netmem) in __page_pool_release_netmem_dma() argument
679 struct page *old, *page = netmem_to_page(netmem); in __page_pool_release_netmem_dma()
689 id = netmem_get_dma_index(netmem); in __page_pool_release_netmem_dma()
700 dma = page_pool_get_dma_addr_netmem(netmem); in __page_pool_release_netmem_dma()
706 page_pool_set_dma_addr_netmem(netmem, 0); in __page_pool_release_netmem_dma()
707 netmem_set_dma_index(netmem, 0); in __page_pool_release_netmem_dma()
715 static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem) in page_pool_return_netmem() argument
722 put = pool->mp_ops->release_netmem(pool, netmem); in page_pool_return_netmem()
724 __page_pool_release_netmem_dma(pool, netmem); in page_pool_return_netmem()
730 trace_page_pool_state_release(pool, netmem, count); in page_pool_return_netmem()
733 page_pool_clear_pp_info(netmem); in page_pool_return_netmem()
734 put_page(netmem_to_page(netmem)); in page_pool_return_netmem()
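
page_pool_return_netmem() is the terminal release path for memory the pool will not recycle: a provider-backed netmem is handed to the provider's release hook, otherwise the DMA mapping is torn down; in either case the pool's ownership info is cleared and, for ordinary pages, the final page reference is dropped. A sketch of that shape, with stand-in helpers:

    /* Simplified release path: provider hook or DMA teardown, then clear
     * ownership and drop the final reference. */
    #include <stdbool.h>
    #include <stdint.h>

    typedef uintptr_t netmem_ref;

    struct pool {
        bool (*mp_release)(struct pool *p, netmem_ref nm); /* provider hook */
    };

    void unmap_dma(struct pool *p, netmem_ref nm);
    void clear_pp_info(netmem_ref nm);
    void drop_page(netmem_ref nm);               /* put_page() equivalent */

    static void return_netmem(struct pool *p, netmem_ref nm)
    {
        bool put = true;

        if (p->mp_release)
            put = p->mp_release(p, nm);          /* provider may keep it alive */
        else
            unmap_dma(p, nm);                    /* plain page: undo DMA map */

        if (put) {
            clear_pp_info(nm);
            drop_page(nm);                       /* last reference goes away */
        }
    }
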
742 static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem) in page_pool_recycle_in_ring() argument
748 ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem); in page_pool_recycle_in_ring()
761 static bool page_pool_recycle_in_cache(netmem_ref netmem, in page_pool_recycle_in_cache() argument
770 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_recycle_in_cache()
775 static bool __page_pool_page_can_be_recycled(netmem_ref netmem) in __page_pool_page_can_be_recycled() argument
777 return netmem_is_net_iov(netmem) || in __page_pool_page_can_be_recycled()
778 (page_ref_count(netmem_to_page(netmem)) == 1 && in __page_pool_page_can_be_recycled()
779 !page_is_pfmemalloc(netmem_to_page(netmem))); in __page_pool_page_can_be_recycled()
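
__page_pool_page_can_be_recycled() encodes the recycling rule: net_iov memory (provider-backed) is always recyclable, while an ordinary page may only be reused when the pool holds the sole reference and the page did not come from pfmemalloc emergency reserves. The same predicate spelled out over a toy struct:

    /* Simplified recyclability test: sole owner and not emergency memory. */
    #include <stdbool.h>

    struct buf {
        bool is_net_iov;       /* provider-backed, refcount managed elsewhere */
        int  refcount;         /* page refcount stand-in */
        bool pfmemalloc;       /* came from emergency reserves */
    };

    static bool can_recycle(const struct buf *b)
    {
        if (b->is_net_iov)
            return true;
        /* Only reuse a page nobody else still references, and never keep
         * pfmemalloc pages away from the reclaim path. */
        return b->refcount == 1 && !b->pfmemalloc;
    }
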
789 __page_pool_put_page(struct page_pool *pool, netmem_ref netmem, in __page_pool_put_page() argument
803 if (likely(__page_pool_page_can_be_recycled(netmem))) { in __page_pool_put_page()
806 page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); in __page_pool_put_page()
808 if (allow_direct && page_pool_recycle_in_cache(netmem, pool)) in __page_pool_put_page()
812 return netmem; in __page_pool_put_page()
829 page_pool_return_netmem(pool, netmem); in __page_pool_put_page()
861 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem, in page_pool_put_unrefed_netmem() argument
867 netmem = __page_pool_put_page(pool, netmem, dma_sync_size, in page_pool_put_unrefed_netmem()
869 if (netmem && !page_pool_recycle_in_ring(pool, netmem)) { in page_pool_put_unrefed_netmem()
872 page_pool_return_netmem(pool, netmem); in page_pool_put_unrefed_netmem()
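
__page_pool_put_page() and page_pool_put_unrefed_netmem() above form the recycle-on-free path: a recyclable netmem is optionally re-synced for the device, then placed either in the lockless per-CPU cache (only when the caller runs in the pool's NAPI context) or in the ptr_ring; if the ring is full, or the netmem is not recyclable at all, it takes the terminal release path. A compact sketch of that decision tree, with illustrative helper names:

    /* Simplified recycle-on-free decision: direct cache, then ring, then
     * full release. */
    #include <stdbool.h>
    #include <stdint.h>

    typedef uintptr_t netmem_ref;
    struct pool;

    bool can_recycle(netmem_ref nm);
    void sync_for_device(struct pool *p, netmem_ref nm, int len);
    bool recycle_in_cache(struct pool *p, netmem_ref nm);  /* NAPI-only fast path */
    bool recycle_in_ring(struct pool *p, netmem_ref nm);   /* ptr_ring producer */
    void return_netmem(struct pool *p, netmem_ref nm);     /* terminal release */

    static void put_netmem(struct pool *p, netmem_ref nm,
                           int dma_sync_size, bool allow_direct)
    {
        if (can_recycle(nm)) {
            sync_for_device(p, nm, dma_sync_size);

            if (allow_direct && recycle_in_cache(p, nm))
                return;                          /* stayed in the CPU cache */

            if (recycle_in_ring(p, nm))
                return;                          /* parked for later reuse */
        }

        /* Not recyclable, or the ring was full: give it back for real. */
        return_netmem(p, nm);
    }
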
937 netmem_ref netmem = netmem_compound_head(data[i]); in page_pool_put_netmem_bulk() local
939 if (page_pool_unref_and_test(netmem)) in page_pool_put_netmem_bulk()
940 data[bulk_len++] = netmem; in page_pool_put_netmem_bulk()
954 netmem_ref netmem = data[i]; in page_pool_put_netmem_bulk() local
956 netmem_pp = netmem_get_pp(netmem); in page_pool_put_netmem_bulk()
965 data[foreign++] = netmem; in page_pool_put_netmem_bulk()
969 netmem = __page_pool_put_page(pool, netmem, -1, in page_pool_put_netmem_bulk()
972 if (netmem) in page_pool_put_netmem_bulk()
973 bulk[bulk_len++] = netmem; in page_pool_put_netmem_bulk()
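
page_pool_put_netmem_bulk() batches frees: compound heads are resolved and unreferenced first, then the surviving netmems are grouped by owning pool so that everything belonging to the current pool can be pushed into its ring in one locked pass, while "foreign" netmems (owned by another pool) are compacted to the front of the array for a later round. A rough sketch of the grouping pass; the helpers and the fixed batch size are assumptions of the model:

    /* Simplified bulk recycle: keep same-pool entries for one ring insert,
     * compact foreign entries for another round. */
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t netmem_ref;
    struct pool;

    struct pool *owning_pool(netmem_ref nm);
    netmem_ref   try_recycle(struct pool *p, netmem_ref nm); /* 0 if consumed */
    void         ring_insert_many(struct pool *p, netmem_ref *v, size_t n);

    static size_t bulk_put(struct pool *p, netmem_ref *data, size_t n)
    {
        netmem_ref keep[64];
        size_t kept = 0, foreign = 0;

        for (size_t i = 0; i < n && kept < 64; i++) {
            netmem_ref nm = data[i];

            if (owning_pool(nm) != p) {
                data[foreign++] = nm;            /* not ours: defer it */
                continue;
            }

            nm = try_recycle(p, nm);             /* cache hit consumes it */
            if (nm)
                keep[kept++] = nm;
        }

        ring_insert_many(p, keep, kept);         /* one locked ring operation */
        return foreign;                          /* caller loops over the rest */
    }
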
985 netmem_ref netmem) in page_pool_drain_frag() argument
990 if (likely(page_pool_unref_netmem(netmem, drain_count))) in page_pool_drain_frag()
993 if (__page_pool_page_can_be_recycled(netmem)) { in page_pool_drain_frag()
994 page_pool_dma_sync_for_device(pool, netmem, -1); in page_pool_drain_frag()
995 return netmem; in page_pool_drain_frag()
998 page_pool_return_netmem(pool, netmem); in page_pool_drain_frag()
1005 netmem_ref netmem = pool->frag_page; in page_pool_free_frag() local
1009 if (!netmem || page_pool_unref_netmem(netmem, drain_count)) in page_pool_free_frag()
1012 page_pool_return_netmem(pool, netmem); in page_pool_free_frag()
1020 netmem_ref netmem = pool->frag_page; in page_pool_alloc_frag_netmem() local
1028 if (netmem && *offset + size > max_size) { in page_pool_alloc_frag_netmem()
1029 netmem = page_pool_drain_frag(pool, netmem); in page_pool_alloc_frag_netmem()
1030 if (netmem) { in page_pool_alloc_frag_netmem()
1037 if (!netmem) { in page_pool_alloc_frag_netmem()
1038 netmem = page_pool_alloc_netmems(pool, gfp); in page_pool_alloc_frag_netmem()
1039 if (unlikely(!netmem)) { in page_pool_alloc_frag_netmem()
1044 pool->frag_page = netmem; in page_pool_alloc_frag_netmem()
1050 page_pool_fragment_netmem(netmem, BIAS_MAX); in page_pool_alloc_frag_netmem()
1051 return netmem; in page_pool_alloc_frag_netmem()
1056 return netmem; in page_pool_alloc_frag_netmem()
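
page_pool_alloc_frag_netmem() carves sub-page fragments out of one cached netmem by advancing an offset. When the next fragment would not fit, the current frag page is drained (its outstanding bias is settled and it is either kept for reuse or released) and a fresh netmem is allocated and pre-biased with a large fragment count (BIAS_MAX) so later frees are cheap decrements. A reduced model of that bookkeeping; the field names and the bias value are illustrative:

    /* Simplified fragment carving: one backing buffer, a moving offset, and
     * a large up-front reference bias amortised over the fragments. */
    #include <stddef.h>

    #define BIAS_MAX 32768                      /* illustrative bias value */

    struct frag_pool {
        void  *frag;                            /* current backing buffer */
        size_t frag_offset;
        size_t frag_users;                      /* fragments handed out */
        size_t max_size;                        /* usable bytes per buffer */
    };

    void *alloc_buffer(struct frag_pool *p);    /* pre-biased with BIAS_MAX refs */
    void  drain_buffer(struct frag_pool *p, void *buf, size_t unused_bias);

    static void *alloc_frag(struct frag_pool *p, size_t size, size_t *offset)
    {
        if (p->frag && p->frag_offset + size > p->max_size) {
            /* Buffer exhausted: settle the unused part of the bias, drop it. */
            drain_buffer(p, p->frag, BIAS_MAX - p->frag_users);
            p->frag = NULL;
        }

        if (!p->frag) {
            p->frag = alloc_buffer(p);
            if (!p->frag)
                return NULL;
            p->frag_offset = 0;
            p->frag_users  = 0;
        }

        *offset = p->frag_offset;
        p->frag_offset += size;
        p->frag_users++;                        /* one bias unit per fragment */
        return p->frag;
    }
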
1070 netmem_ref netmem; in page_pool_empty_ring() local
1073 while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
1075 if (!(netmem_ref_count(netmem) == 1)) in page_pool_empty_ring()
1077 __func__, netmem_ref_count(netmem)); in page_pool_empty_ring()
1079 page_pool_return_netmem(pool, netmem); in page_pool_empty_ring()
1101 netmem_ref netmem; in page_pool_empty_alloc_cache_once() local
1111 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
1112 page_pool_return_netmem(pool, netmem); in page_pool_empty_alloc_cache_once()
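
page_pool_empty_ring() and page_pool_empty_alloc_cache_once() above are the teardown drains: everything still parked in the ptr_ring or the alloc cache is pushed through the same page_pool_return_netmem() release path (the ring drain also warns when an entry turns up with an unexpected extra reference). The loop shape is simply the following; ring_consume() and return_netmem() are stand-ins as before:

    /* Simplified teardown: flush both holding areas through the release path. */
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t netmem_ref;

    struct pool {
        netmem_ref cache[64];
        size_t count;
    };

    netmem_ref ring_consume(struct pool *p);
    void return_netmem(struct pool *p, netmem_ref nm);

    static void drain_pool(struct pool *p)
    {
        netmem_ref nm;

        while ((nm = ring_consume(p)))           /* empty the shared ring */
            return_netmem(p, nm);

        while (p->count)                         /* empty the CPU cache */
            return_netmem(p, p->cache[--p->count]);
    }
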
1277 netmem_ref netmem; in page_pool_update_nid() local
1284 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
1285 page_pool_return_netmem(pool, netmem); in page_pool_update_nid()
1300 netmem_ref netmem = net_iov_to_netmem(niov); in net_mp_niov_set_page_pool() local
1302 page_pool_set_pp_info(pool, netmem); in net_mp_niov_set_page_pool()
1305 trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt); in net_mp_niov_set_page_pool()
1313 netmem_ref netmem = net_iov_to_netmem(niov); in net_mp_niov_clear_page_pool() local
1315 page_pool_clear_pp_info(netmem); in net_mp_niov_clear_page_pool()