// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
				  struct bin_attribute *attr, char *buf,
				  loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);

static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_list_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);

/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev:	Device for this memory access class
 * @list_node:	List element in the node's access list
 * @access:	The access class rank
 * @hmem_attrs: Heterogeneous memory performance attributes
 */
struct node_access_nodes {
	struct device		dev;
	struct list_head	list_node;
	unsigned int		access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs	hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name	= "initiators",
	.attrs	= node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name	= "targets",
	.attrs	= node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

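/*
 * Look up the access class device for @access on @node, creating and
 * registering it on first use.  Returns NULL on allocation or
 * registration failure.
 */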
static struct node_access_nodes *node_init_node_access(struct node *node,
						       unsigned int access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

#ifdef CONFIG_HMEM_REPORTING
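/*
 * ACCESS_ATTR() expands to a read-only sysfs show routine that emits one
 * heterogeneous memory performance property for an access class device.
 */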
#define ACCESS_ATTR(property)						\
static ssize_t property##_show(struct device *dev,			\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, "%u\n",					\
			  to_access_nodes(dev)->hmem_attrs.property);	\
}									\
static DEVICE_ATTR_RO(property)

ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned int access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}

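/*
 * Usage sketch (values are illustrative, not taken from this file): a
 * platform driver such as the ACPI HMAT parser fills in firmware-reported
 * bandwidth (MB/s) and latency (nsec) values and publishes them for an
 * access class:
 *
 *	struct node_hmem_attrs attrs = {
 *		.read_bandwidth		= 1000,
 *		.write_bandwidth	= 1000,
 *		.read_latency		= 100,
 *		.write_latency		= 100,
 *	};
 *	node_set_perf_attrs(nid, &attrs, 0);
 */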

/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev:	Device representing the cache level
 * @node:	List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

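/*
 * CACHE_ATTR() generates a read-only sysfs show routine for one memory-side
 * cache attribute, formatted with the given printf specifier.
 */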
#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, fmt "\n",				\
			  to_cache_info(dev)->cache_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);

	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	device_initialize(dev);
	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto put_device;

	if (device_add(dev))
		goto put_device;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
put_device:
	put_device(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				 "attempt to add duplicate cache level:%d\n",
				 cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	device_initialize(dev);
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto put_device;

	info->cache_attrs = *cache_attrs;
	if (device_add(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto put_device;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
put_device:
	put_device(dev);
}

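/*
 * Usage sketch (values are illustrative): a caller that has parsed one level
 * of memory-side cache from firmware tables could publish it as below, with
 * the indexing and write_policy enum values as defined in <linux/node.h>:
 *
 *	struct node_cache_attrs cache = {
 *		.size		= SZ_1G,
 *		.line_size	= 64,
 *		.level		= 1,
 *		.indexing	= NODE_CACHE_DIRECT_MAP,
 *		.write_policy	= NODE_CACHE_WRITE_BACK,
 *	};
 *	node_add_cache(nid, &cache);
 */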

static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

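/* Convert a count of pages to a size in kilobytes. */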
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int len = 0;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;
	unsigned long swapcached = 0;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
#endif
	len = sysfs_emit_at(buf, len,
			    "Node %d MemTotal:       %8lu kB\n"
			    "Node %d MemFree:        %8lu kB\n"
			    "Node %d MemUsed:        %8lu kB\n"
			    "Node %d SwapCached:     %8lu kB\n"
			    "Node %d Active:         %8lu kB\n"
			    "Node %d Inactive:       %8lu kB\n"
			    "Node %d Active(anon):   %8lu kB\n"
			    "Node %d Inactive(anon): %8lu kB\n"
			    "Node %d Active(file):   %8lu kB\n"
			    "Node %d Inactive(file): %8lu kB\n"
			    "Node %d Unevictable:    %8lu kB\n"
			    "Node %d Mlocked:        %8lu kB\n",
			    nid, K(i.totalram),
			    nid, K(i.freeram),
			    nid, K(i.totalram - i.freeram),
			    nid, K(swapcached),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				   node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				   node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	len += sysfs_emit_at(buf, len,
			     "Node %d HighTotal:      %8lu kB\n"
			     "Node %d HighFree:       %8lu kB\n"
			     "Node %d LowTotal:       %8lu kB\n"
			     "Node %d LowFree:        %8lu kB\n",
			     nid, K(i.totalhigh),
			     nid, K(i.freehigh),
			     nid, K(i.totalram - i.totalhigh),
			     nid, K(i.freeram - i.freehigh));
#endif
	len += sysfs_emit_at(buf, len,
			     "Node %d Dirty:          %8lu kB\n"
			     "Node %d Writeback:      %8lu kB\n"
			     "Node %d FilePages:      %8lu kB\n"
			     "Node %d Mapped:         %8lu kB\n"
			     "Node %d AnonPages:      %8lu kB\n"
			     "Node %d Shmem:          %8lu kB\n"
			     "Node %d KernelStack:    %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
			     "Node %d ShadowCallStack:%8lu kB\n"
#endif
			     "Node %d PageTables:     %8lu kB\n"
			     "Node %d SecPageTables:  %8lu kB\n"
			     "Node %d NFS_Unstable:   %8lu kB\n"
			     "Node %d Bounce:         %8lu kB\n"
			     "Node %d WritebackTmp:   %8lu kB\n"
			     "Node %d KReclaimable:   %8lu kB\n"
			     "Node %d Slab:           %8lu kB\n"
			     "Node %d SReclaimable:   %8lu kB\n"
			     "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     "Node %d AnonHugePages:  %8lu kB\n"
			     "Node %d ShmemHugePages: %8lu kB\n"
			     "Node %d ShmemPmdMapped: %8lu kB\n"
			     "Node %d FileHugePages:  %8lu kB\n"
			     "Node %d FilePmdMapped:  %8lu kB\n"
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
			     "Node %d Unaccepted:     %8lu kB\n"
#endif
			     ,
			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
			     nid, K(i.sharedram),
			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			     nid, 0UL,	/* NFS_Unstable is always zero */
			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			     nid, K(sreclaimable +
				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
			     nid, K(sreclaimable + sunreclaimable),
			     nid, K(sreclaimable),
			     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     ,
			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
			     ,
			     nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED))
#endif
			    );
	len += hugetlb_report_node_meminfo(buf, len, nid);
	return len;
}

#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);

static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	fold_vm_numa_events();
	return sysfs_emit(buf,
			  "numa_hit %lu\n"
			  "numa_miss %lu\n"
			  "numa_foreign %lu\n"
			  "interleave_hit %lu\n"
			  "local_node %lu\n"
			  "other_node %lu\n",
			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int len = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     zone_stat_name(i),
				     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	fold_vm_numa_events();
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     numa_stat_name(i),
				     sum_zone_numa_event_state(nid, i));

#endif
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		unsigned long pages = node_page_state_pages(pgdat, i);

		if (vmstat_item_print_in_thp(i))
			pages /= HPAGE_PMD_NR;
		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
				     pages);
	}

	return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i) {
		len += sysfs_emit_at(buf, len, "%s%d",
				     i ? " " : "", node_distance(nid, i));
	}

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);

static struct attribute *node_dev_attrs[] = {
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};

static struct bin_attribute *node_dev_bin_attrs[] = {
	&bin_attr_cpumap,
	&bin_attr_cpulist,
	NULL
};

static const struct attribute_group node_dev_group = {
	.attrs = node_dev_attrs,
	.bin_attrs = node_dev_bin_attrs
};

static const struct attribute_group *node_dev_groups[] = {
	&node_dev_group,
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
	&arch_node_dev_group,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	&memory_failure_attr_group,
#endif
	NULL
};

static void node_device_release(struct device *dev)
{
	kfree(to_node(dev));
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error) {
		put_device(&node->dev);
	} else {
		hugetlb_register_node(node);
		compaction_register_node(node);
	}

	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);
	compaction_unregister_node(node);
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

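/*
 * One device per possible NUMA node, indexed by node id; an entry stays
 * NULL until __register_one_node() populates it.
 */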
struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}

/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	Cpu node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned int access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
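/*
 * With CONFIG_DEFERRED_STRUCT_PAGE_INIT, struct pages may not have been
 * initialized yet this early in boot, so fall back to the memblock-based
 * early_pfn_to_nid() until the system is fully up.
 */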
static int __ref get_nid_for_pfn(unsigned long pfn)
{
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

static void do_register_memory_block_under_node(int nid,
						struct memory_block *mem_blk,
						enum meminit_context context)
{
	int ret;

	memory_block_add_nid(mem_blk, nid, context);

	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&node_devices[nid]->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&mem_blk->dev.kobj), ret);

	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				       &node_devices[nid]->dev.kobj,
				       kobject_name(&node_devices[nid]->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&mem_blk->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&node_devices[nid]->dev.kobj),
				    ret);
}

/* register memory section under specified node if it spans that node */
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
					       void *arg)
{
	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
	int nid = *(int *)arg;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		int page_nid;

		/*
		 * The memory block may start with several absent sections;
		 * skip the pfn range of any absent section.
		 */
		if (!pfn_in_present_section(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We only need to verify which node a page belongs to during
		 * early boot, because nodes' memory ranges can be interleaved.
		 */
		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;

		do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
		return 0;
	}
	/* mem section does not span the specified node */
	return 0;
}

/*
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
	return 0;
}

/*
 * Unregister a memory block device under the node it spans. Memory blocks
 * with multiple nodes cannot be offlined and can therefore never be removed.
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	if (mem_blk->nid == NUMA_NO_NODE)
		return;

	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
			  kobject_name(&mem_blk->dev.kobj));
	sysfs_remove_link(&mem_blk->dev.kobj,
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
				       unsigned long end_pfn,
				       enum meminit_context context)
{
	walk_memory_blocks_func_t func;

	if (context == MEMINIT_HOTPLUG)
		func = register_mem_block_under_node_hotplug;
	else
		func = register_mem_block_under_node_early;

	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			   (void *)&nid, func);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return sysfs_emit(buf, "%*pbl\n",
			  nodemask_pr_args(&node_states[na->state]));
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
					   N_GENERIC_INITIATOR),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

void __init node_dev_init(void)
{
	int ret, i;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs) - 1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create all node devices, which will properly link the node
	 * to applicable memory block devices and already created cpu devices.
	 */
	for_each_online_node(i) {
		ret = register_one_node(i);
		if (ret)
			panic("%s() failed to add node: %d\n", __func__, ret);
	}
}