// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};
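
/*
 * Note (illustrative, node ids are examples): node_subsys backs the "node"
 * subsystem registered in node_dev_init() below, so each node device added
 * by register_node() appears as /sys/devices/system/node/nodeN, where N is
 * the NUMA node id.
 */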

static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
				  struct bin_attribute *attr, char *buf,
				  loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);

static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_list_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
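
/*
 * Illustrative usage (node/CPU numbers are examples): the two binary
 * attributes above are exposed per node, e.g.
 *
 *   $ cat /sys/devices/system/node/node0/cpulist
 *   0-3
 *   $ cat /sys/devices/system/node/node0/cpumap
 *   0f
 *
 * Both report only the node's currently online CPUs, because the node mask
 * is ANDed with cpu_online_mask before printing.
 */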

/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev: Device for this memory access class
 * @list_node: List element in the node's access list
 * @access: The access class rank
 * @hmem_attrs: Heterogeneous memory performance attributes
 */
struct node_access_nodes {
	struct device dev;
	struct list_head list_node;
	unsigned int access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name = "initiators",
	.attrs = node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name = "targets",
	.attrs = node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};
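
/*
 * Illustrative layout (node/class numbers are examples): each access class
 * of a node is a child device carrying the two groups above, e.g.
 *
 *   /sys/devices/system/node/node0/access0/initiators/
 *   /sys/devices/system/node/node0/access0/targets/
 *
 * Symlinks and performance attributes are added under these directories as
 * relationships are registered.
 */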

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

static struct node_access_nodes *node_init_node_access(struct node *node,
							unsigned int access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(name)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, "%u\n",					\
			  to_access_nodes(dev)->hmem_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name)

ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned int access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}
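
/*
 * Illustrative result (ids and values are examples): after a provider such
 * as the ACPI HMAT parser calls node_set_perf_attrs(), the initiator view
 * of a target node exposes the four attributes above, e.g.
 *
 *   $ cat /sys/devices/system/node/node1/access0/initiators/read_bandwidth
 *   15000
 *   $ cat /sys/devices/system/node/node1/access0/initiators/read_latency
 *   100
 *
 * Per the struct node_hmem_attrs documentation, bandwidth is in MB/s and
 * latency in nanoseconds.
 */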

/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev: Device representing the cache level
 * @node: List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, fmt "\n",				\
			  to_cache_info(dev)->cache_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);

	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	device_initialize(dev);
	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto put_device;

	if (device_add(dev))
		goto put_device;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
put_device:
	put_device(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				 "attempt to add duplicate cache level:%d\n",
				 cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	device_initialize(dev);
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto put_device;

	info->cache_attrs = *cache_attrs;
	if (device_add(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto put_device;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
put_device:
	put_device(dev);
}
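
/*
 * Illustrative layout (node id and level are examples): each reported
 * memory-side cache level becomes a child of the node's "memory_side_cache"
 * device created above, e.g.
 *
 *   /sys/devices/system/node/node0/memory_side_cache/index1/size
 *   /sys/devices/system/node/node0/memory_side_cache/index1/line_size
 *   /sys/devices/system/node/node0/memory_side_cache/index1/indexing
 *   /sys/devices/system/node/node0/memory_side_cache/index1/write_policy
 */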

static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int len = 0;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;
	unsigned long swapcached = 0;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
#endif
	len = sysfs_emit_at(buf, len,
			    "Node %d MemTotal:       %8lu kB\n"
			    "Node %d MemFree:        %8lu kB\n"
			    "Node %d MemUsed:        %8lu kB\n"
			    "Node %d SwapCached:     %8lu kB\n"
			    "Node %d Active:         %8lu kB\n"
			    "Node %d Inactive:       %8lu kB\n"
			    "Node %d Active(anon):   %8lu kB\n"
			    "Node %d Inactive(anon): %8lu kB\n"
			    "Node %d Active(file):   %8lu kB\n"
			    "Node %d Inactive(file): %8lu kB\n"
			    "Node %d Unevictable:    %8lu kB\n"
			    "Node %d Mlocked:        %8lu kB\n",
			    nid, K(i.totalram),
			    nid, K(i.freeram),
			    nid, K(i.totalram - i.freeram),
			    nid, K(swapcached),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				   node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				   node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	len += sysfs_emit_at(buf, len,
			     "Node %d HighTotal:      %8lu kB\n"
			     "Node %d HighFree:       %8lu kB\n"
			     "Node %d LowTotal:       %8lu kB\n"
			     "Node %d LowFree:        %8lu kB\n",
			     nid, K(i.totalhigh),
			     nid, K(i.freehigh),
			     nid, K(i.totalram - i.totalhigh),
			     nid, K(i.freeram - i.freehigh));
#endif
	len += sysfs_emit_at(buf, len,
			     "Node %d Dirty:          %8lu kB\n"
			     "Node %d Writeback:      %8lu kB\n"
			     "Node %d FilePages:      %8lu kB\n"
			     "Node %d Mapped:         %8lu kB\n"
			     "Node %d AnonPages:      %8lu kB\n"
			     "Node %d Shmem:          %8lu kB\n"
			     "Node %d KernelStack:    %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
			     "Node %d ShadowCallStack:%8lu kB\n"
#endif
			     "Node %d PageTables:     %8lu kB\n"
			     "Node %d SecPageTables:  %8lu kB\n"
			     "Node %d NFS_Unstable:   %8lu kB\n"
			     "Node %d Bounce:         %8lu kB\n"
			     "Node %d WritebackTmp:   %8lu kB\n"
			     "Node %d KReclaimable:   %8lu kB\n"
			     "Node %d Slab:           %8lu kB\n"
			     "Node %d SReclaimable:   %8lu kB\n"
			     "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     "Node %d AnonHugePages:  %8lu kB\n"
			     "Node %d ShmemHugePages: %8lu kB\n"
			     "Node %d ShmemPmdMapped: %8lu kB\n"
			     "Node %d FileHugePages:  %8lu kB\n"
			     "Node %d FilePmdMapped:  %8lu kB\n"
#endif
			     ,
			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
			     nid, K(i.sharedram),
			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			     nid, 0UL,
			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			     nid, K(sreclaimable +
				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
			     nid, K(sreclaimable + sunreclaimable),
			     nid, K(sreclaimable),
			     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     ,
			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
			    );
	len += hugetlb_report_node_meminfo(buf, len, nid);
	return len;
}

#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);
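
/*
 * Illustrative output (node id and values are examples, listing truncated):
 *
 *   $ cat /sys/devices/system/node/node0/meminfo
 *   Node 0 MemTotal:       16315128 kB
 *   Node 0 MemFree:         8114232 kB
 *   Node 0 MemUsed:         8200896 kB
 *   ...
 *
 * hugetlb_report_node_meminfo() appends the per-node HugePages_* lines.
 */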

static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	fold_vm_numa_events();
	return sysfs_emit(buf,
			  "numa_hit %lu\n"
			  "numa_miss %lu\n"
			  "numa_foreign %lu\n"
			  "interleave_hit %lu\n"
			  "local_node %lu\n"
			  "other_node %lu\n",
			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int len = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     zone_stat_name(i),
				     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	fold_vm_numa_events();
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     numa_stat_name(i),
				     sum_zone_numa_event_state(nid, i));

#endif
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		unsigned long pages = node_page_state_pages(pgdat, i);

		if (vmstat_item_print_in_thp(i))
			pages /= HPAGE_PMD_NR;
		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
				     pages);
	}

	return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i) {
		len += sysfs_emit_at(buf, len, "%s%d",
				     i ? " " : "", node_distance(nid, i));
	}

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
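
/*
 * Illustrative output for a two-node machine (values are examples):
 *
 *   $ cat /sys/devices/system/node/node0/distance
 *   10 21
 *
 * One column per online node, in node-id order, giving the relative NUMA
 * distance from this node (10 is the local distance).
 */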

static struct attribute *node_dev_attrs[] = {
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};

static struct bin_attribute *node_dev_bin_attrs[] = {
	&bin_attr_cpumap,
	&bin_attr_cpulist,
	NULL
};

static const struct attribute_group node_dev_group = {
	.attrs = node_dev_attrs,
	.bin_attrs = node_dev_bin_attrs
};

static const struct attribute_group *node_dev_groups[] = {
	&node_dev_group,
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
	&arch_node_dev_group,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	&memory_failure_attr_group,
#endif
	NULL
};

static void node_device_release(struct device *dev)
{
	kfree(to_node(dev));
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error) {
		put_device(&node->dev);
	} else {
		hugetlb_register_node(node);
		compaction_register_node(node);
	}

	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node. All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);
	compaction_unregister_node(node);
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}
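
/*
 * Illustrative result (cpu/node numbers are examples): the pair of symlinks
 * created above gives
 *
 *   /sys/devices/system/node/node0/cpu3 -> ../../cpu/cpu3
 *   /sys/devices/system/cpu/cpu3/node0  -> ../../node/node0
 *
 * i.e. a node lists its CPUs and each CPU points back at its node.
 */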

/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid: Memory node number
 * @cpu_nid: Cpu node number
 * @access: Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned int access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}
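
/*
 * Illustrative result (node ids and class are examples): for
 * register_memory_node_under_compute_node(1, 0, 0) the links added to the
 * group directories are
 *
 *   /sys/devices/system/node/node0/access0/targets/node1
 *   /sys/devices/system/node/node1/access0/initiators/node0
 *
 * so userspace can walk initiator/target relationships per access class.
 */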

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int __ref get_nid_for_pfn(unsigned long pfn)
{
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

static void do_register_memory_block_under_node(int nid,
						struct memory_block *mem_blk,
						enum meminit_context context)
{
	int ret;

	memory_block_add_nid(mem_blk, nid, context);

	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&node_devices[nid]->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&mem_blk->dev.kobj), ret);

	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				       &node_devices[nid]->dev.kobj,
				       kobject_name(&node_devices[nid]->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&mem_blk->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&node_devices[nid]->dev.kobj),
				    ret);
}
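
/*
 * Illustrative result (node id and block number are examples): the pair of
 * links created above looks like
 *
 *   /sys/devices/system/node/node0/memory42     -> ../../memory/memory42
 *   /sys/devices/system/memory/memory42/node0   -> ../../node/node0
 */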

/* register memory section under specified node if it spans that node */
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
					       void *arg)
{
	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
	int nid = *(int *)arg;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		int page_nid;

		/*
		 * The memory block could have several absent sections from
		 * the start. Skip the pfn range of any absent section.
		 */
		if (!pfn_in_present_section(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We need to check whether the page belongs to nid only in
		 * the boot case, because a node's ranges can be interleaved.
		 */
		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;

		do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
		return 0;
	}
	/* mem section does not span the specified node */
	return 0;
}

/*
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
	return 0;
}

/*
 * Unregister a memory block device under the node it spans. Memory blocks
 * with multiple nodes cannot be offlined and therefore also never be removed.
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	if (mem_blk->nid == NUMA_NO_NODE)
		return;

	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
			  kobject_name(&mem_blk->dev.kobj));
	sysfs_remove_link(&mem_blk->dev.kobj,
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
				       unsigned long end_pfn,
				       enum meminit_context context)
{
	walk_memory_blocks_func_t func;

	if (context == MEMINIT_HOTPLUG)
		func = register_mem_block_under_node_hotplug;
	else
		func = register_mem_block_under_node_early;

	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			   (void *)&nid, func);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return sysfs_emit(buf, "%*pbl\n",
			  nodemask_pr_args(&node_states[na->state]));
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
					   N_GENERIC_INITIATOR),
};
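
/*
 * Illustrative view (node ids are examples): these state attributes live at
 * the subsystem root rather than under an individual node, e.g.
 *
 *   $ cat /sys/devices/system/node/online
 *   0-1
 *   $ cat /sys/devices/system/node/has_cpu
 *   0
 *
 * Each file prints the corresponding nodemask as a node-id list.
 */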

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

void __init node_dev_init(void)
{
	int ret, i;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs) - 1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create all node devices, which will properly link the node
	 * to applicable memory block devices and already created cpu devices.
	 */
	for_each_online_node(i) {
		ret = register_one_node(i);
		if (ret)
			panic("%s() failed to add node: %d\n", __func__, ret);
	}
}