/include/linux/
numa.h
    49: return NUMA_NO_NODE;  in numa_nearest_node()
    54: return NUMA_NO_NODE;  in nearest_node_nodemask()
|
nodemask_types.h
    15: #define NUMA_NO_NODE (-1)  (macro definition)
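
For context, a minimal sketch of how the (-1) sentinel is typically consumed; this is not taken from any of the files listed here, and buf_alloc_on() is a hypothetical helper. Callers pass or test NUMA_NO_NODE to mean "no node preference / no node information":

```c
#include <linux/numa.h>
#include <linux/slab.h>

/* Hypothetical helper: NUMA_NO_NODE means "let the allocator choose". */
static void *buf_alloc_on(int nid, size_t size)
{
	if (nid == NUMA_NO_NODE)
		return kmalloc(size, GFP_KERNEL);	/* no placement hint */
	return kmalloc_node(size, GFP_KERNEL, nid);	/* prefer the given node */
}
```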
|
memblock.h
    134: return __memblock_reserve(base, size, NUMA_NO_NODE, 0);  in memblock_reserve()
    139: return __memblock_reserve(base, size, NUMA_NO_NODE, MEMBLOCK_RSRV_KERN);  in memblock_reserve_kern()
    183: __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,  in __next_physmem_range()
    248: __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
    260: __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
    274: __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
    449: MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);  in memblock_alloc()
    463: NUMA_NO_NODE);  in memblock_alloc_raw()
    471: MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);  in memblock_alloc_from()
    478: ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);  in memblock_alloc_low()
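
The memblock hits above all follow the same pattern: the node-agnostic entry points forward NUMA_NO_NODE to the node-aware internals. A minimal early-boot sketch, assuming a hypothetical early_table_alloc() setup helper:

```c
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* Hypothetical __init helper: memblock_alloc() passes NUMA_NO_NODE down,
 * so the allocation may land on any node; the node-aware memblock
 * allocators take an explicit nid instead. */
static void * __init early_table_alloc(void)
{
	return memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
}
```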
|
async.h
    54: return async_schedule_node(func, data, NUMA_NO_NODE);  in async_schedule()
    72: return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain);  in async_schedule_domain()
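
async_schedule() and async_schedule_domain() are thin wrappers that supply NUMA_NO_NODE, i.e. no preference for which node runs the work. A sketch, with hypothetical demo_async_probe() and kick_off_probe() names:

```c
#include <linux/async.h>

/* Hypothetical async callback; real users put sleepable init work here. */
static void demo_async_probe(void *data, async_cookie_t cookie)
{
}

static void kick_off_probe(void *data)
{
	/* Expands to async_schedule_node(..., NUMA_NO_NODE). */
	async_schedule(demo_async_probe, data);
}
```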
|
memory-tiers.h
    62: return NUMA_NO_NODE;  in next_demotion_node()
    106: return NUMA_NO_NODE;  in next_demotion_node()
|
slab.h
    914: alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
    917: …alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE…
    1006: #define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
    1009: kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
    1049: #define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE)
    1050: #define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
    1055: alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
    1068: #define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
    1070: #define kvcalloc_noprof(...) kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
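
These slab.h macros encode one rule: the plain helpers are their _node counterparts with NUMA_NO_NODE filled in. A small sketch (slab_node_example() is hypothetical) showing the equivalence for kvmalloc():

```c
#include <linux/slab.h>

static void slab_node_example(void)
{
	/* Per the kvmalloc() macro above, these two requests are identical. */
	void *a = kvmalloc(4096, GFP_KERNEL);
	void *b = kvmalloc_node(4096, GFP_KERNEL, NUMA_NO_NODE);

	kvfree(a);
	kvfree(b);
}
```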
|
dmapool.h
    60: NUMA_NO_NODE);  in dma_pool_create()
|
kthread.h
    28: kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
    196: kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__);
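
kthread_create() and kthread_create_worker() fill in NUMA_NO_NODE, so the new task's stack and data may be placed on any node. A sketch using hypothetical demo_fn()/demo_start() names:

```c
#include <linux/delay.h>
#include <linux/kthread.h>

static int demo_fn(void *data)
{
	/* Idle until someone calls kthread_stop(). */
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static struct task_struct *demo_start(void)
{
	/* kthread_run() -> kthread_create() -> kthread_create_on_node(..., NUMA_NO_NODE, ...) */
	return kthread_run(demo_fn, NULL, "numa-demo");
}
```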
|
cma.h
    41: order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);  in cma_declare_contiguous()
|
mempool.h
    61: GFP_KERNEL, NUMA_NO_NODE)
|
topology.h
    326: mask = (node != NUMA_NO_NODE || __hops) ? \
|
gfp.h
    251: if (nid == NUMA_NO_NODE)  in alloc_pages_bulk_node_noprof()
    308: if (nid == NUMA_NO_NODE)  in alloc_pages_node_noprof()
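
Both gfp.h hits are the fallback check: when a caller passes NUMA_NO_NODE, the page allocator substitutes a local node on its own. A sketch of a caller relying on that (grab_page() is hypothetical):

```c
#include <linux/gfp.h>

/* Hypothetical caller: nid may legitimately be NUMA_NO_NODE, in which
 * case alloc_pages_node() falls back to the local node internally. */
static struct page *grab_page(int nid)
{
	return alloc_pages_node(nid, GFP_KERNEL, 0);
}
```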
|
vmalloc.h
    185: return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);  in vmalloc_huge()
|
nodemask.h
    497: return node < MAX_NUMNODES ? node : NUMA_NO_NODE;  in node_random()
|
cpumask.h
    1032: return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);  in alloc_cpumask_var()
|
device.h
    776: return NUMA_NO_NODE;  in dev_to_node()
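
dev_to_node() reports NUMA_NO_NODE when the device carries no node information (for example with !CONFIG_NUMA). A common driver-side sketch, with a hypothetical dev_local_buf() helper, that works either way because kmalloc_node() accepts the sentinel:

```c
#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate near the device when its node is known,
 * fall back transparently when dev_to_node() returns NUMA_NO_NODE. */
static void *dev_local_buf(struct device *dev, size_t size)
{
	return kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
}
```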
|
skbuff.h
    1336: return __alloc_skb(size, priority, 0, NUMA_NO_NODE);  in alloc_skb()
    1386: return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);  in alloc_skb_fclone()
    3517: return alloc_pages_node_noprof(NUMA_NO_NODE, gfp_mask, order);  in __dev_alloc_pages_noprof()
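
alloc_skb() and alloc_skb_fclone() hand NUMA_NO_NODE to __alloc_skb(), so the skb memory may come from any node; callers that need locality use __alloc_skb() with an explicit node. A sketch with a hypothetical make_probe_skb() helper:

```c
#include <linux/skbuff.h>

static struct sk_buff *make_probe_skb(unsigned int len)
{
	/* NUMA_NO_NODE under the hood: no placement preference. */
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		return NULL;
	skb_put(skb, len);	/* claim len bytes of payload at the tail */
	return skb;
}
```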
|
of.h
    937: return NUMA_NO_NODE;  in of_node_to_nid()
|
bpf.h
    2751: attr->numa_node : NUMA_NO_NODE;  in bpf_map_attr_numa_node()
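
bpf_map_attr_numa_node() only honours attr->numa_node when BPF_F_NUMA_NODE was set; otherwise it returns NUMA_NO_NODE and the map allocation is unconstrained. A sketch of a consumer (map_storage_alloc() is hypothetical):

```c
#include <linux/bpf.h>

static void *map_storage_alloc(const union bpf_attr *attr, u64 size)
{
	/* NUMA_NO_NODE unless userspace explicitly requested a node. */
	int numa_node = bpf_map_attr_numa_node(attr);

	return bpf_map_area_alloc(size, numa_node);
}
```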
|
netdevice.h
    760: return NUMA_NO_NODE;  in netdev_queue_numa_node_read()
|
/include/asm-generic/
numa.h
    26: if (node == NUMA_NO_NODE)  in cpumask_of_node()
|
mshyperv.h
    97: if (node != NUMA_NO_NODE) {  in hv_numa_node_to_pxm_info()
|
/include/linux/device/
devres.h
    24: __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
|
/include/rdma/
ib_verbs.h
    4895: return NUMA_NO_NODE;  in ibdev_to_node()
|