/linux-6.3-rc2/lib/
  group_cpus.c
    50: masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);  [in alloc_node_to_cpumask()]
    54: for (node = 0; node < nr_node_ids; node++) {  [in alloc_node_to_cpumask()]
    72: for (node = 0; node < nr_node_ids; node++)  [in free_node_to_cpumask()]
    139: for (n = 0; n < nr_node_ids; n++) {  [in alloc_nodes_groups()]
    158: sort(node_groups, nr_node_ids, sizeof(node_groups[0]),  [in alloc_nodes_groups()]
    229: for (n = 0; n < nr_node_ids; n++) {  [in alloc_nodes_groups()]
    280: node_groups = kcalloc(nr_node_ids,  [in __group_cpus_evenly()]
    289: for (i = 0; i < nr_node_ids; i++) {  [in __group_cpus_evenly()]

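The group_cpus.c hits above follow the most common nr_node_ids pattern in this index: size a per-node array with kcalloc(), then loop node ids from 0 to nr_node_ids - 1. A minimal sketch of that pattern, with a hypothetical helper name and error handling added for completeness (not code from the tree):

#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

/* Hypothetical helper: one cpumask per possible NUMA node. */
static cpumask_var_t *alloc_node_masks(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL)) {
			while (--node >= 0)
				free_cpumask_var(masks[node]);
			kfree(masks);
			return NULL;
		}
	}
	return masks;
}
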
/linux-6.3-rc2/drivers/base/
  arch_numa.c
    52: if (WARN_ON(node < 0 || node >= nr_node_ids))  [in cpumask_of_node()]
    105: if (nr_node_ids == MAX_NUMNODES)  [in setup_node_to_cpumask_map()]
    109: for (node = 0; node < nr_node_ids; node++) {  [in setup_node_to_cpumask_map()]
    115: pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);  [in setup_node_to_cpumask_map()]
    278: size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);  [in numa_alloc_distance()]
    283: numa_distance_cnt = nr_node_ids;  [in numa_alloc_distance()]

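The numa_alloc_distance() hits size a square table of nr_node_ids * nr_node_ids entries; the natural way to read such a table is as a flattened two-dimensional matrix indexed by (from, to). A hedged illustration of that layout (the variable and function names here are invented, only the sizing and indexing scheme are taken from the listing):

#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/types.h>

/* Assumed flattened distance matrix with nr_node_ids * nr_node_ids entries. */
static u8 *node_dist_table;

static int example_node_distance(unsigned int from, unsigned int to)
{
	if (from >= nr_node_ids || to >= nr_node_ids)
		return -EINVAL;
	/* Row-major layout: row = source node, column = destination node. */
	return node_dist_table[from * nr_node_ids + to];
}
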
/linux-6.3-rc2/arch/x86/mm/
  numa.c
    114: if (nr_node_ids == MAX_NUMNODES)  [in setup_node_to_cpumask_map()]
    118: for (node = 0; node < nr_node_ids; node++)  [in setup_node_to_cpumask_map()]
    122: pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);  [in setup_node_to_cpumask_map()]
    909: if ((unsigned)node >= nr_node_ids) {  [in cpumask_of_node()]
    912: node, nr_node_ids);  [in cpumask_of_node()]

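The cpumask_of_node() hit at line 909 shows the usual guard before indexing a per-node map: the node id is cast to unsigned so that negative values also fail the single >= nr_node_ids comparison. A small hypothetical lookup using the same guard (the map name and the fallback return value are assumptions, not copied from the file):

#include <linux/cpumask.h>
#include <linux/nodemask.h>

/* Assumed per-node map, nr_node_ids entries long. */
static struct cpumask *example_node_to_cpumask_map;

static const struct cpumask *example_cpumask_of_node(int node)
{
	/* The unsigned cast makes negative node ids fail the range check too. */
	if ((unsigned int)node >= nr_node_ids)
		return cpu_none_mask;
	return &example_node_to_cpumask_map[node];
}
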
/linux-6.3-rc2/mm/
  shrinker_debug.c
    54: count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);  [in shrinker_debugfs_count_show()]
    130: if (nid < 0 || nid >= nr_node_ids)  [in shrinker_debugfs_scan_write()]

  list_lru.c
    343: mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);  [in memcg_init_list_lru_one()]
    571: lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);  [in __list_lru_init()]

  memory-tiers.c
    638: node_demotion = kcalloc(nr_node_ids, sizeof(struct demotion_nodes),  [in memory_tier_init()]

  ksm.c
    3010: buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),  [in merge_across_nodes_store()]
    3017: root_unstable_tree = buf + nr_node_ids;  [in merge_across_nodes_store()]
    3024: ksm_nr_node_ids = knob ? 1 : nr_node_ids;  [in merge_across_nodes_store()]

  mempolicy.c
    1427: unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);  [in copy_nodes_to_user()]
    1431: nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);  [in copy_nodes_to_user()]
    1439: maxnode = nr_node_ids;  [in copy_nodes_to_user()]
    1695: if (nmask != NULL && maxnode < nr_node_ids)  [in kernel_get_mempolicy()]

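The copy_nodes_to_user() hits size the user-visible node bitmap from nr_node_ids: BITS_TO_LONGS() rounds the bit count up to whole longs (the compat path does the same with compat longs). A tiny sketch of the native-case arithmetic, with a hypothetical helper name and the actual copy_to_user() call omitted:

#include <linux/bitops.h>
#include <linux/nodemask.h>

/* Bytes needed for a node bitmap covering ids 0 .. nr_node_ids - 1. */
static unsigned int nodemask_user_bytes(void)
{
	return BITS_TO_LONGS(nr_node_ids) * sizeof(long);
}
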
  slab.h
    824: for (__node = 0; __node < nr_node_ids; __node++) \

  hugetlb_cgroup.c
    142: h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),  [in hugetlb_cgroup_css_alloc()]

  slub.c
    5033: nr_node_ids * sizeof(struct kmem_cache_node *),  [in kmem_cache_init()]
    5052: nr_cpu_ids, nr_node_ids);  [in kmem_cache_init()]
    5393: nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);  [in show_slab_objects()]
    5485: for (node = 0; node < nr_node_ids; node++) {  [in show_slab_objects()]

/linux-6.3-rc2/arch/loongarch/kernel/
  numa.c
    112: if (nr_node_ids >= 8)  [in setup_per_cpu_areas()]
    441: loongson_sysconf.nr_nodes = nr_node_ids;  [in init_numa_memory()]

/linux-6.3-rc2/arch/x86/kernel/
  setup_percpu.c
    123: NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);  [in setup_per_cpu_areas()]

/linux-6.3-rc2/include/linux/
  nodemask.h
    455: extern unsigned int nr_node_ids;
    497: #define nr_node_ids 1U  (macro)

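The two nodemask.h hits are the declaration and its !NUMA fallback: on kernels where MAX_NUMNODES is 1, nr_node_ids is simply the constant 1U, so every per-node allocation and loop in this index collapses to a single element. A hypothetical helper showing why callers do not need to special-case that configuration:

#include <linux/nodemask.h>
#include <linux/slab.h>

/*
 * Hypothetical helper: one counter per possible node. On !NUMA builds
 * nr_node_ids is the 1U macro, so this allocates exactly one element;
 * on NUMA builds it is the boot-time number of possible node ids.
 */
static unsigned long *alloc_per_node_counters(void)
{
	return kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
}
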
/linux-6.3-rc2/arch/powerpc/mm/
  numa.c
    78: if (nr_node_ids == MAX_NUMNODES)  [in setup_node_to_cpumask_map()]
    86: pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);  [in setup_node_to_cpumask_map()]
    183: if (nid == 0xffff || nid >= nr_node_ids)  [in __associativity_to_nid()]

/linux-6.3-rc2/kernel/sched/
  topology.c
    1707: for (i = 0; i < nr_node_ids; i++) {  [in sched_numa_warn()]
    1709: for (j = 0; j < nr_node_ids; j++) {  [in sched_numa_warn()]
    1886: masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);  [in sched_init_numa()]
    2017: for (j = 0; j < nr_node_ids; j++) {  [in sched_domains_numa_masks_set()]
    2033: for (j = 0; j < nr_node_ids; j++) {  [in sched_domains_numa_masks_clear()]
    2154: if (node >= nr_node_ids || hops >= sched_domains_numa_levels)  [in sched_numa_hop_mask()]

  fair.c
    1278: return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;  [in task_faults_idx()]
    2700: nr_node_ids * sizeof(unsigned long);  [in task_numa_group()]
    2712: for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)  [in task_numa_group()]
    2770: for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {  [in task_numa_group()]
    2813: for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)  [in task_numa_free()]
    2828: for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)  [in task_numa_free()]
    2864: NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;  [in task_numa_fault()]

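The task_faults_idx() hit flattens three coordinates (statistic kind, node id, private vs. shared) into a single array index, which is why the later loops run over NR_NUMA_HINT_FAULT_STATS * nr_node_ids entries. A hedged restatement of that indexing with one example value worked out; the constant 2 reflects the private/shared split as I read fair.c, and the helper name is invented:

/* Mirrors the formula from the task_faults_idx() hit above. */
#define EXAMPLE_NR_FAULT_TYPES 2	/* private and shared hinting faults */

static inline int example_faults_idx(int stat, int nid, int priv,
				     unsigned int nr_nodes)
{
	return EXAMPLE_NR_FAULT_TYPES * (stat * nr_nodes + nid) + priv;
}

/* e.g. nr_nodes = 4, stat = 1, nid = 2, priv = 1  ->  2 * (1 * 4 + 2) + 1 = 13 */
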
/linux-6.3-rc2/drivers/hv/
  hv.c
    133: hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),  [in hv_synic_alloc()]

  channel_mgmt.c
    761: if (numa_node == nr_node_ids) {  [in init_vp_index()]

/linux-6.3-rc2/drivers/infiniband/sw/siw/
  siw_main.c
    138: int i, num_nodes = nr_node_ids;  [in siw_init_cpulist()]

/linux-6.3-rc2/arch/powerpc/platforms/pseries/
  hotplug-cpu.c
    241: if (rc && nr_node_ids > 1) {  [in pseries_add_processor()]

/linux-6.3-rc2/arch/powerpc/sysdev/xive/
  common.c
    1144: ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,  [in xive_init_ipis()]
    1149: xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);  [in xive_init_ipis()]

/linux-6.3-rc2/io_uring/
  io-wq.c
    1151: wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);  [in io_wq_create()]

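Several hits (io-wq.c above, and workqueue.c, list_lru.c, hugetlb_cgroup.c elsewhere in this index) size a structure whose trailing flexible array has one slot per possible node, using struct_size() for overflow-checked arithmetic. A hypothetical equivalent of that allocation (struct and field names are invented for illustration):

#include <linux/nodemask.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical container with one trailing slot per possible node. */
struct per_node_table {
	unsigned int nr;
	void *slot[];
};

static struct per_node_table *alloc_per_node_table(void)
{
	struct per_node_table *t;

	/* struct_size() == sizeof(*t) + nr_node_ids * sizeof(t->slot[0]), overflow-checked. */
	t = kzalloc(struct_size(t, slot, nr_node_ids), GFP_KERNEL);
	if (t)
		t->nr = nr_node_ids;
	return t;
}
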
/linux-6.3-rc2/net/sunrpc/
  svc.c
    217: unsigned int maxpools = nr_node_ids;  [in svc_pool_map_init_pernode()]

/linux-6.3-rc2/kernel/
  workqueue.c
    4077: ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);  [in apply_wqattrs_prepare()]
    4427: tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);  [in alloc_workqueue()]
    6110: tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);  [in wq_numa_init()]