1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/slab.h> |
3 | #include <linux/lockdep.h> |
4 | #include <linux/sysfs.h> |
5 | #include <linux/kobject.h> |
6 | #include <linux/memory.h> |
7 | #include <linux/memory-tiers.h> |
8 | #include <linux/notifier.h> |
9 | |
10 | #include "internal.h" |
11 | |
12 | struct memory_tier { |
13 | /* hierarchy of memory tiers */ |
14 | struct list_head list; |
15 | /* list of all memory types part of this tier */ |
16 | struct list_head memory_types; |
17 | /* |
18 | * start value of abstract distance. memory tier maps |
19 | * an abstract distance range, |
20 | * adistance_start .. adistance_start + MEMTIER_CHUNK_SIZE |
21 | */ |
22 | int adistance_start; |
23 | struct device dev; |
24 | /* All the nodes that are part of all the lower memory tiers. */ |
25 | nodemask_t lower_tier_mask; |
26 | }; |
27 | |
28 | struct demotion_nodes { |
29 | nodemask_t preferred; |
30 | }; |
31 | |
32 | struct node_memory_type_map { |
33 | struct memory_dev_type *memtype; |
34 | int map_count; |
35 | }; |
36 | |
37 | static DEFINE_MUTEX(memory_tier_lock); |
38 | static LIST_HEAD(memory_tiers); |
39 | static struct node_memory_type_map node_memory_types[MAX_NUMNODES]; |
40 | struct memory_dev_type *default_dram_type; |
41 | |
static const struct bus_type memory_tier_subsys = {
	.name = "memory_tiering",
	.dev_name = "memory_tier",
};
46 | |
47 | #ifdef CONFIG_MIGRATION |
48 | static int top_tier_adistance; |
49 | /* |
50 | * node_demotion[] examples: |
51 | * |
52 | * Example 1: |
53 | * |
54 | * Node 0 & 1 are CPU + DRAM nodes, node 2 & 3 are PMEM nodes. |
55 | * |
56 | * node distances: |
57 | * node 0 1 2 3 |
58 | * 0 10 20 30 40 |
59 | * 1 20 10 40 30 |
60 | * 2 30 40 10 40 |
61 | * 3 40 30 40 10 |
62 | * |
63 | * memory_tiers0 = 0-1 |
64 | * memory_tiers1 = 2-3 |
65 | * |
66 | * node_demotion[0].preferred = 2 |
67 | * node_demotion[1].preferred = 3 |
68 | * node_demotion[2].preferred = <empty> |
69 | * node_demotion[3].preferred = <empty> |
70 | * |
71 | * Example 2: |
72 | * |
73 | * Node 0 & 1 are CPU + DRAM nodes, node 2 is memory-only DRAM node. |
74 | * |
75 | * node distances: |
76 | * node 0 1 2 |
77 | * 0 10 20 30 |
78 | * 1 20 10 30 |
79 | * 2 30 30 10 |
80 | * |
81 | * memory_tiers0 = 0-2 |
82 | * |
83 | * node_demotion[0].preferred = <empty> |
84 | * node_demotion[1].preferred = <empty> |
85 | * node_demotion[2].preferred = <empty> |
86 | * |
87 | * Example 3: |
88 | * |
 * Node 0 is a CPU + DRAM node, node 1 is an HBM node and node 2 is a PMEM node.
90 | * |
91 | * node distances: |
92 | * node 0 1 2 |
93 | * 0 10 20 30 |
94 | * 1 20 10 40 |
95 | * 2 30 40 10 |
96 | * |
97 | * memory_tiers0 = 1 |
98 | * memory_tiers1 = 0 |
99 | * memory_tiers2 = 2 |
100 | * |
101 | * node_demotion[0].preferred = 2 |
102 | * node_demotion[1].preferred = 0 |
103 | * node_demotion[2].preferred = <empty> |
104 | * |
105 | */ |
106 | static struct demotion_nodes *node_demotion __read_mostly; |
107 | #endif /* CONFIG_MIGRATION */ |
108 | |
109 | static BLOCKING_NOTIFIER_HEAD(mt_adistance_algorithms); |
110 | |
111 | static bool default_dram_perf_error; |
112 | static struct access_coordinate default_dram_perf; |
113 | static int default_dram_perf_ref_nid = NUMA_NO_NODE; |
114 | static const char *default_dram_perf_ref_source; |
115 | |
116 | static inline struct memory_tier *to_memory_tier(struct device *device) |
117 | { |
118 | return container_of(device, struct memory_tier, dev); |
119 | } |
120 | |
121 | static __always_inline nodemask_t get_memtier_nodemask(struct memory_tier *memtier) |
122 | { |
123 | nodemask_t nodes = NODE_MASK_NONE; |
124 | struct memory_dev_type *memtype; |
125 | |
126 | list_for_each_entry(memtype, &memtier->memory_types, tier_sibling) |
127 | nodes_or(nodes, nodes, memtype->nodes); |
128 | |
129 | return nodes; |
130 | } |
131 | |
132 | static void memory_tier_device_release(struct device *dev) |
133 | { |
	struct memory_tier *tier = to_memory_tier(dev);
	/*
	 * synchronize_rcu in clear_node_memory_tier makes sure
	 * we don't have rcu access to this memory tier.
	 */
	kfree(tier);
140 | } |
141 | |
142 | static ssize_t nodelist_show(struct device *dev, |
143 | struct device_attribute *attr, char *buf) |
144 | { |
145 | int ret; |
146 | nodemask_t nmask; |
147 | |
148 | mutex_lock(&memory_tier_lock); |
	nmask = get_memtier_nodemask(to_memory_tier(dev));
	ret = sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&nmask));
	mutex_unlock(&memory_tier_lock);
152 | return ret; |
153 | } |
154 | static DEVICE_ATTR_RO(nodelist); |
155 | |
156 | static struct attribute *memtier_dev_attrs[] = { |
157 | &dev_attr_nodelist.attr, |
158 | NULL |
159 | }; |
160 | |
161 | static const struct attribute_group memtier_dev_group = { |
162 | .attrs = memtier_dev_attrs, |
163 | }; |
164 | |
165 | static const struct attribute_group *memtier_dev_groups[] = { |
166 | &memtier_dev_group, |
167 | NULL |
168 | }; |
169 | |
170 | static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memtype) |
171 | { |
172 | int ret; |
173 | bool found_slot = false; |
174 | struct memory_tier *memtier, *new_memtier; |
175 | int adistance = memtype->adistance; |
176 | unsigned int memtier_adistance_chunk_size = MEMTIER_CHUNK_SIZE; |
177 | |
178 | lockdep_assert_held_once(&memory_tier_lock); |
179 | |
180 | adistance = round_down(adistance, memtier_adistance_chunk_size); |
181 | /* |
182 | * If the memtype is already part of a memory tier, |
183 | * just return that. |
184 | */ |
	if (!list_empty(&memtype->tier_sibling)) {
186 | list_for_each_entry(memtier, &memory_tiers, list) { |
187 | if (adistance == memtier->adistance_start) |
188 | return memtier; |
189 | } |
190 | WARN_ON(1); |
		return ERR_PTR(-EINVAL);
192 | } |
193 | |
194 | list_for_each_entry(memtier, &memory_tiers, list) { |
195 | if (adistance == memtier->adistance_start) { |
196 | goto link_memtype; |
197 | } else if (adistance < memtier->adistance_start) { |
198 | found_slot = true; |
199 | break; |
200 | } |
201 | } |
202 | |
	new_memtier = kzalloc(sizeof(struct memory_tier), GFP_KERNEL);
	if (!new_memtier)
		return ERR_PTR(-ENOMEM);
206 | |
207 | new_memtier->adistance_start = adistance; |
	INIT_LIST_HEAD(&new_memtier->list);
	INIT_LIST_HEAD(&new_memtier->memory_types);
	if (found_slot)
		list_add_tail(&new_memtier->list, &memtier->list);
	else
		list_add_tail(&new_memtier->list, &memory_tiers);
214 | |
215 | new_memtier->dev.id = adistance >> MEMTIER_CHUNK_BITS; |
216 | new_memtier->dev.bus = &memory_tier_subsys; |
217 | new_memtier->dev.release = memory_tier_device_release; |
218 | new_memtier->dev.groups = memtier_dev_groups; |
219 | |
	ret = device_register(&new_memtier->dev);
	if (ret) {
		list_del(&new_memtier->list);
		put_device(&new_memtier->dev);
		return ERR_PTR(ret);
225 | } |
226 | memtier = new_memtier; |
227 | |
228 | link_memtype: |
	list_add(&memtype->tier_sibling, &memtier->memory_types);
230 | return memtier; |
231 | } |
232 | |
233 | static struct memory_tier *__node_get_memory_tier(int node) |
234 | { |
235 | pg_data_t *pgdat; |
236 | |
237 | pgdat = NODE_DATA(node); |
238 | if (!pgdat) |
239 | return NULL; |
240 | /* |
241 | * Since we hold memory_tier_lock, we can avoid |
242 | * RCU read locks when accessing the details. No |
243 | * parallel updates are possible here. |
244 | */ |
245 | return rcu_dereference_check(pgdat->memtier, |
246 | lockdep_is_held(&memory_tier_lock)); |
247 | } |
248 | |
249 | #ifdef CONFIG_MIGRATION |
250 | bool node_is_toptier(int node) |
251 | { |
252 | bool toptier; |
253 | pg_data_t *pgdat; |
254 | struct memory_tier *memtier; |
255 | |
256 | pgdat = NODE_DATA(node); |
257 | if (!pgdat) |
258 | return false; |
259 | |
260 | rcu_read_lock(); |
261 | memtier = rcu_dereference(pgdat->memtier); |
262 | if (!memtier) { |
263 | toptier = true; |
264 | goto out; |
265 | } |
266 | if (memtier->adistance_start <= top_tier_adistance) |
267 | toptier = true; |
268 | else |
269 | toptier = false; |
270 | out: |
271 | rcu_read_unlock(); |
272 | return toptier; |
273 | } |
274 | |
275 | void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets) |
276 | { |
277 | struct memory_tier *memtier; |
278 | |
279 | /* |
	 * pg_data_t.memtier updates include a synchronize_rcu()
	 * which ensures that we either find NULL or a valid memtier
	 * in NODE_DATA. Protect the access via rcu_read_lock().
283 | */ |
284 | rcu_read_lock(); |
285 | memtier = rcu_dereference(pgdat->memtier); |
286 | if (memtier) |
287 | *targets = memtier->lower_tier_mask; |
288 | else |
289 | *targets = NODE_MASK_NONE; |
290 | rcu_read_unlock(); |
291 | } |
292 | |
293 | /** |
294 | * next_demotion_node() - Get the next node in the demotion path |
295 | * @node: The starting node to lookup the next node |
296 | * |
297 | * Return: node id for next memory node in the demotion path hierarchy |
298 | * from @node; NUMA_NO_NODE if @node is terminal. This does not keep |
299 | * @node online or guarantee that it *continues* to be the next demotion |
300 | * target. |
301 | */ |
302 | int next_demotion_node(int node) |
303 | { |
304 | struct demotion_nodes *nd; |
305 | int target; |
306 | |
307 | if (!node_demotion) |
308 | return NUMA_NO_NODE; |
309 | |
310 | nd = &node_demotion[node]; |
311 | |
312 | /* |
313 | * node_demotion[] is updated without excluding this |
314 | * function from running. |
315 | * |
316 | * Make sure to use RCU over entire code blocks if |
317 | * node_demotion[] reads need to be consistent. |
318 | */ |
319 | rcu_read_lock(); |
320 | /* |
321 | * If there are multiple target nodes, just select one |
322 | * target node randomly. |
323 | * |
	 * We could instead select the target node round-robin, but
	 * that would require another variable in node_demotion[] to
	 * record the last selected target node, which may cause
	 * cache ping-pong as the last target keeps changing. Per-CPU
	 * data could avoid the caching issue but seems more
	 * complicated. So selecting a target node randomly seems
	 * better for now.
331 | */ |
	target = node_random(&nd->preferred);
333 | rcu_read_unlock(); |
334 | |
335 | return target; |
336 | } |
337 | |
338 | static void disable_all_demotion_targets(void) |
339 | { |
340 | struct memory_tier *memtier; |
341 | int node; |
342 | |
343 | for_each_node_state(node, N_MEMORY) { |
344 | node_demotion[node].preferred = NODE_MASK_NONE; |
345 | /* |
346 | * We are holding memory_tier_lock, it is safe |
		 * to access pgdat->memtier.
348 | */ |
349 | memtier = __node_get_memory_tier(node); |
350 | if (memtier) |
351 | memtier->lower_tier_mask = NODE_MASK_NONE; |
352 | } |
353 | /* |
354 | * Ensure that the "disable" is visible across the system. |
355 | * Readers will see either a combination of before+disable |
356 | * state or disable+after. They will never see before and |
357 | * after state together. |
358 | */ |
359 | synchronize_rcu(); |
360 | } |
361 | |
362 | static void dump_demotion_targets(void) |
363 | { |
364 | int node; |
365 | |
366 | for_each_node_state(node, N_MEMORY) { |
367 | struct memory_tier *memtier = __node_get_memory_tier(node); |
368 | nodemask_t preferred = node_demotion[node].preferred; |
369 | |
370 | if (!memtier) |
371 | continue; |
372 | |
373 | if (nodes_empty(preferred)) |
374 | pr_info("Demotion targets for Node %d: null\n" , node); |
375 | else |
376 | pr_info("Demotion targets for Node %d: preferred: %*pbl, fallback: %*pbl\n" , |
377 | node, nodemask_pr_args(&preferred), |
378 | nodemask_pr_args(&memtier->lower_tier_mask)); |
379 | } |
380 | } |
381 | |
382 | /* |
383 | * Find an automatic demotion target for all memory |
384 | * nodes. Failing here is OK. It might just indicate |
385 | * being at the end of a chain. |
386 | */ |
387 | static void establish_demotion_targets(void) |
388 | { |
389 | struct memory_tier *memtier; |
390 | struct demotion_nodes *nd; |
391 | int target = NUMA_NO_NODE, node; |
392 | int distance, best_distance; |
393 | nodemask_t tier_nodes, lower_tier; |
394 | |
395 | lockdep_assert_held_once(&memory_tier_lock); |
396 | |
397 | if (!node_demotion) |
398 | return; |
399 | |
400 | disable_all_demotion_targets(); |
401 | |
402 | for_each_node_state(node, N_MEMORY) { |
403 | best_distance = -1; |
404 | nd = &node_demotion[node]; |
405 | |
406 | memtier = __node_get_memory_tier(node); |
		if (!memtier || list_is_last(&memtier->list, &memory_tiers))
408 | continue; |
409 | /* |
410 | * Get the lower memtier to find the demotion node list. |
411 | */ |
412 | memtier = list_next_entry(memtier, list); |
413 | tier_nodes = get_memtier_nodemask(memtier); |
414 | /* |
		 * find_next_best_node() uses the 'used' nodemask as a
		 * skip list. Add all memory nodes except the selected
		 * memory tier nodelist to the skip list so that we find
		 * the best node from the memtier nodelist.
419 | */ |
420 | nodes_andnot(tier_nodes, node_states[N_MEMORY], tier_nodes); |
421 | |
422 | /* |
		 * Find all the nodes in the memory tier node list with the
		 * same best distance and add them to the preferred mask.
		 * We randomly select between nodes in the preferred mask
		 * when allocating pages during demotion.
426 | */ |
427 | do { |
			target = find_next_best_node(node, &tier_nodes);
429 | if (target == NUMA_NO_NODE) |
430 | break; |
431 | |
432 | distance = node_distance(node, target); |
433 | if (distance == best_distance || best_distance == -1) { |
434 | best_distance = distance; |
435 | node_set(target, nd->preferred); |
436 | } else { |
437 | break; |
438 | } |
439 | } while (1); |
440 | } |
441 | /* |
	 * Promotion is allowed from a memory tier to a higher
	 * memory tier only if the memory tier doesn't include
	 * compute. We want to skip promotion from a memory tier
	 * if any node that is part of that tier has CPUs.
	 * Once we detect such a memory tier, we consider it the
	 * top tier from which promotion is not allowed.
448 | */ |
449 | list_for_each_entry_reverse(memtier, &memory_tiers, list) { |
450 | tier_nodes = get_memtier_nodemask(memtier); |
451 | nodes_and(tier_nodes, node_states[N_CPU], tier_nodes); |
452 | if (!nodes_empty(tier_nodes)) { |
453 | /* |
454 | * abstract distance below the max value of this memtier |
455 | * is considered toptier. |
456 | */ |
457 | top_tier_adistance = memtier->adistance_start + |
458 | MEMTIER_CHUNK_SIZE - 1; |
459 | break; |
460 | } |
461 | } |
462 | /* |
	 * Now build the lower_tier mask for each node, collecting the
	 * node mask from all memory tiers below it. This allows us to
	 * fall back demotion page allocation to a set of nodes that is
	 * closer to the above selected preferred node.
467 | */ |
468 | lower_tier = node_states[N_MEMORY]; |
469 | list_for_each_entry(memtier, &memory_tiers, list) { |
470 | /* |
		 * Keep removing the current tier from the lower_tier nodes.
		 * This will remove all nodes in the current and above
		 * memory tiers from the lower_tier mask.
474 | */ |
475 | tier_nodes = get_memtier_nodemask(memtier); |
476 | nodes_andnot(lower_tier, lower_tier, tier_nodes); |
477 | memtier->lower_tier_mask = lower_tier; |
478 | } |
479 | |
480 | dump_demotion_targets(); |
481 | } |
482 | |
483 | #else |
484 | static inline void establish_demotion_targets(void) {} |
485 | #endif /* CONFIG_MIGRATION */ |
486 | |
487 | static inline void __init_node_memory_type(int node, struct memory_dev_type *memtype) |
488 | { |
489 | if (!node_memory_types[node].memtype) |
490 | node_memory_types[node].memtype = memtype; |
491 | /* |
492 | * for each device getting added in the same NUMA node |
493 | * with this specific memtype, bump the map count. We |
494 | * Only take memtype device reference once, so that |
495 | * changing a node memtype can be done by droping the |
496 | * only reference count taken here. |
497 | */ |
498 | |
499 | if (node_memory_types[node].memtype == memtype) { |
500 | if (!node_memory_types[node].map_count++) |
			kref_get(&memtype->kref);
502 | } |
503 | } |
504 | |
505 | static struct memory_tier *set_node_memory_tier(int node) |
506 | { |
507 | struct memory_tier *memtier; |
508 | struct memory_dev_type *memtype; |
	pg_data_t *pgdat = NODE_DATA(node);

	lockdep_assert_held_once(&memory_tier_lock);
513 | |
	if (!node_state(node, N_MEMORY))
		return ERR_PTR(-EINVAL);

	__init_node_memory_type(node, default_dram_type);
518 | |
519 | memtype = node_memory_types[node].memtype; |
520 | node_set(node, memtype->nodes); |
521 | memtier = find_create_memory_tier(memtype); |
	if (!IS_ERR(memtier))
523 | rcu_assign_pointer(pgdat->memtier, memtier); |
524 | return memtier; |
525 | } |
526 | |
527 | static void destroy_memory_tier(struct memory_tier *memtier) |
528 | { |
	list_del(&memtier->list);
	device_unregister(&memtier->dev);
531 | } |
532 | |
533 | static bool clear_node_memory_tier(int node) |
534 | { |
535 | bool cleared = false; |
536 | pg_data_t *pgdat; |
537 | struct memory_tier *memtier; |
538 | |
539 | pgdat = NODE_DATA(node); |
540 | if (!pgdat) |
541 | return false; |
542 | |
543 | /* |
544 | * Make sure that anybody looking at NODE_DATA who finds |
545 | * a valid memtier finds memory_dev_types with nodes still |
546 | * linked to the memtier. We achieve this by waiting for |
547 | * rcu read section to finish using synchronize_rcu. |
548 | * This also enables us to free the destroyed memory tier |
	 * with kfree instead of kfree_rcu.
550 | */ |
551 | memtier = __node_get_memory_tier(node); |
552 | if (memtier) { |
553 | struct memory_dev_type *memtype; |
554 | |
555 | rcu_assign_pointer(pgdat->memtier, NULL); |
556 | synchronize_rcu(); |
557 | memtype = node_memory_types[node].memtype; |
558 | node_clear(node, memtype->nodes); |
559 | if (nodes_empty(memtype->nodes)) { |
			list_del_init(&memtype->tier_sibling);
			if (list_empty(&memtier->memory_types))
562 | destroy_memory_tier(memtier); |
563 | } |
564 | cleared = true; |
565 | } |
566 | return cleared; |
567 | } |
568 | |
569 | static void release_memtype(struct kref *kref) |
570 | { |
571 | struct memory_dev_type *memtype; |
572 | |
573 | memtype = container_of(kref, struct memory_dev_type, kref); |
	kfree(memtype);
575 | } |
576 | |
577 | struct memory_dev_type *alloc_memory_type(int adistance) |
578 | { |
579 | struct memory_dev_type *memtype; |
580 | |
	memtype = kmalloc(sizeof(*memtype), GFP_KERNEL);
	if (!memtype)
		return ERR_PTR(-ENOMEM);

	memtype->adistance = adistance;
	INIT_LIST_HEAD(&memtype->tier_sibling);
	memtype->nodes = NODE_MASK_NONE;
	kref_init(&memtype->kref);
589 | return memtype; |
590 | } |
591 | EXPORT_SYMBOL_GPL(alloc_memory_type); |
592 | |
593 | void put_memory_type(struct memory_dev_type *memtype) |
594 | { |
	kref_put(&memtype->kref, release_memtype);
596 | } |
597 | EXPORT_SYMBOL_GPL(put_memory_type); |
598 | |
599 | void init_node_memory_type(int node, struct memory_dev_type *memtype) |
600 | { |
601 | |
602 | mutex_lock(&memory_tier_lock); |
603 | __init_node_memory_type(node, memtype); |
	mutex_unlock(&memory_tier_lock);
605 | } |
606 | EXPORT_SYMBOL_GPL(init_node_memory_type); |
607 | |
608 | void clear_node_memory_type(int node, struct memory_dev_type *memtype) |
609 | { |
610 | mutex_lock(&memory_tier_lock); |
611 | if (node_memory_types[node].memtype == memtype || !memtype) |
612 | node_memory_types[node].map_count--; |
613 | /* |
	 * If we unmapped all the attached devices to this node,
615 | * clear the node memory type. |
616 | */ |
617 | if (!node_memory_types[node].map_count) { |
618 | memtype = node_memory_types[node].memtype; |
619 | node_memory_types[node].memtype = NULL; |
620 | put_memory_type(memtype); |
621 | } |
	mutex_unlock(&memory_tier_lock);
623 | } |
624 | EXPORT_SYMBOL_GPL(clear_node_memory_type); |
625 | |
626 | static void dump_hmem_attrs(struct access_coordinate *coord, const char *prefix) |
627 | { |
628 | pr_info( |
629 | "%sread_latency: %u, write_latency: %u, read_bandwidth: %u, write_bandwidth: %u\n" , |
630 | prefix, coord->read_latency, coord->write_latency, |
631 | coord->read_bandwidth, coord->write_bandwidth); |
632 | } |
633 | |
634 | int mt_set_default_dram_perf(int nid, struct access_coordinate *perf, |
635 | const char *source) |
636 | { |
637 | int rc = 0; |
638 | |
639 | mutex_lock(&memory_tier_lock); |
640 | if (default_dram_perf_error) { |
641 | rc = -EIO; |
642 | goto out; |
643 | } |
644 | |
645 | if (perf->read_latency + perf->write_latency == 0 || |
646 | perf->read_bandwidth + perf->write_bandwidth == 0) { |
647 | rc = -EINVAL; |
648 | goto out; |
649 | } |
650 | |
651 | if (default_dram_perf_ref_nid == NUMA_NO_NODE) { |
652 | default_dram_perf = *perf; |
653 | default_dram_perf_ref_nid = nid; |
		default_dram_perf_ref_source = kstrdup(source, GFP_KERNEL);
655 | goto out; |
656 | } |
657 | |
658 | /* |
	 * The performance of all default DRAM nodes is expected to be
	 * the same (that is, the variation is less than 10%). And it
661 | * will be used as base to calculate the abstract distance of |
662 | * other memory nodes. |
663 | */ |
664 | if (abs(perf->read_latency - default_dram_perf.read_latency) * 10 > |
665 | default_dram_perf.read_latency || |
666 | abs(perf->write_latency - default_dram_perf.write_latency) * 10 > |
667 | default_dram_perf.write_latency || |
668 | abs(perf->read_bandwidth - default_dram_perf.read_bandwidth) * 10 > |
669 | default_dram_perf.read_bandwidth || |
670 | abs(perf->write_bandwidth - default_dram_perf.write_bandwidth) * 10 > |
671 | default_dram_perf.write_bandwidth) { |
		pr_info(
			"memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
			"DRAM node %d.\n", nid, default_dram_perf_ref_nid);
		pr_info("  performance of reference DRAM node %d:\n",
			default_dram_perf_ref_nid);
		dump_hmem_attrs(&default_dram_perf, "    ");
		pr_info("  performance of DRAM node %d:\n", nid);
		dump_hmem_attrs(perf, "    ");
		pr_info(
			"  disable default DRAM node performance based abstract distance algorithm.\n");
682 | default_dram_perf_error = true; |
683 | rc = -EINVAL; |
684 | } |
685 | |
686 | out: |
	mutex_unlock(&memory_tier_lock);
688 | return rc; |
689 | } |
690 | |
691 | int mt_perf_to_adistance(struct access_coordinate *perf, int *adist) |
692 | { |
693 | if (default_dram_perf_error) |
694 | return -EIO; |
695 | |
696 | if (default_dram_perf_ref_nid == NUMA_NO_NODE) |
697 | return -ENOENT; |
698 | |
699 | if (perf->read_latency + perf->write_latency == 0 || |
700 | perf->read_bandwidth + perf->write_bandwidth == 0) |
701 | return -EINVAL; |
702 | |
703 | mutex_lock(&memory_tier_lock); |
704 | /* |
705 | * The abstract distance of a memory node is in direct proportion to |
706 | * its memory latency (read + write) and inversely proportional to its |
707 | * memory bandwidth (read + write). The abstract distance, memory |
708 | * latency, and memory bandwidth of the default DRAM nodes are used as |
709 | * the base. |
710 | */ |
711 | *adist = MEMTIER_ADISTANCE_DRAM * |
712 | (perf->read_latency + perf->write_latency) / |
713 | (default_dram_perf.read_latency + default_dram_perf.write_latency) * |
714 | (default_dram_perf.read_bandwidth + default_dram_perf.write_bandwidth) / |
715 | (perf->read_bandwidth + perf->write_bandwidth); |
	mutex_unlock(&memory_tier_lock);
717 | |
718 | return 0; |
719 | } |
720 | EXPORT_SYMBOL_GPL(mt_perf_to_adistance); |
721 | |
722 | /** |
723 | * register_mt_adistance_algorithm() - Register memory tiering abstract distance algorithm |
724 | * @nb: The notifier block which describe the algorithm |
725 | * |
726 | * Return: 0 on success, errno on error. |
727 | * |
728 | * Every memory tiering abstract distance algorithm provider needs to |
729 | * register the algorithm with register_mt_adistance_algorithm(). To |
730 | * calculate the abstract distance for a specified memory node, the |
731 | * notifier function will be called unless some high priority |
732 | * algorithm has provided result. The prototype of the notifier |
733 | * function is as follows, |
734 | * |
735 | * int (*algorithm_notifier)(struct notifier_block *nb, |
736 | * unsigned long nid, void *data); |
737 | * |
738 | * Where "nid" specifies the memory node, "data" is the pointer to the |
739 | * returned abstract distance (that is, "int *adist"). If the |
740 | * algorithm provides the result, NOTIFY_STOP should be returned. |
741 | * Otherwise, return_value & %NOTIFY_STOP_MASK == 0 to allow the next |
742 | * algorithm in the chain to provide the result. |
743 | */ |
744 | int register_mt_adistance_algorithm(struct notifier_block *nb) |
745 | { |
	return blocking_notifier_chain_register(&mt_adistance_algorithms, nb);
747 | } |
748 | EXPORT_SYMBOL_GPL(register_mt_adistance_algorithm); |
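
/*
 * Example provider, sketched for illustration (the names here are
 * hypothetical; the ACPI HMAT code in drivers/acpi/numa/hmat.c
 * registers a notifier of this shape):
 *
 *	static int example_adist_cb(struct notifier_block *nb,
 *				    unsigned long nid, void *data)
 *	{
 *		int *adist = data;
 *
 *		*adist = MEMTIER_ADISTANCE_DRAM;
 *		return NOTIFY_STOP;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_adist_cb,
 *	};
 *
 *	register_mt_adistance_algorithm(&example_nb);
 */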
749 | |
750 | /** |
751 | * unregister_mt_adistance_algorithm() - Unregister memory tiering abstract distance algorithm |
752 | * @nb: the notifier block which describe the algorithm |
753 | * |
754 | * Return: 0 on success, errno on error. |
755 | */ |
756 | int unregister_mt_adistance_algorithm(struct notifier_block *nb) |
757 | { |
	return blocking_notifier_chain_unregister(&mt_adistance_algorithms, nb);
759 | } |
760 | EXPORT_SYMBOL_GPL(unregister_mt_adistance_algorithm); |
761 | |
762 | /** |
763 | * mt_calc_adistance() - Calculate abstract distance with registered algorithms |
764 | * @node: the node to calculate abstract distance for |
765 | * @adist: the returned abstract distance |
766 | * |
767 | * Return: if return_value & %NOTIFY_STOP_MASK != 0, then some |
768 | * abstract distance algorithm provides the result, and return it via |
769 | * @adist. Otherwise, no algorithm can provide the result and @adist |
770 | * will be kept as it is. |
771 | */ |
772 | int mt_calc_adistance(int node, int *adist) |
773 | { |
	return blocking_notifier_call_chain(&mt_adistance_algorithms, node, adist);
775 | } |
776 | EXPORT_SYMBOL_GPL(mt_calc_adistance); |
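
/*
 * Sketch of a caller (for illustration; drivers/dax/kmem.c uses this
 * pattern to place a hotplugged device in a tier):
 *
 *	int adist = MEMTIER_DEFAULT_DAX_ADISTANCE;
 *
 *	mt_calc_adistance(numa_node, &adist);
 *
 * If an algorithm answered, @adist now holds its result; otherwise it
 * keeps the caller's default.
 */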
777 | |
778 | static int __meminit memtier_hotplug_callback(struct notifier_block *self, |
779 | unsigned long action, void *_arg) |
780 | { |
781 | struct memory_tier *memtier; |
782 | struct memory_notify *arg = _arg; |
783 | |
784 | /* |
785 | * Only update the node migration order when a node is |
786 | * changing status, like online->offline. |
787 | */ |
788 | if (arg->status_change_nid < 0) |
		return notifier_from_errno(0);
790 | |
791 | switch (action) { |
792 | case MEM_OFFLINE: |
793 | mutex_lock(&memory_tier_lock); |
		if (clear_node_memory_tier(arg->status_change_nid))
			establish_demotion_targets();
		mutex_unlock(&memory_tier_lock);
797 | break; |
798 | case MEM_ONLINE: |
799 | mutex_lock(&memory_tier_lock); |
800 | memtier = set_node_memory_tier(arg->status_change_nid); |
		if (!IS_ERR(memtier))
			establish_demotion_targets();
		mutex_unlock(&memory_tier_lock);
804 | break; |
805 | } |
806 | |
	return notifier_from_errno(0);
808 | } |
809 | |
810 | static int __init memory_tier_init(void) |
811 | { |
812 | int ret, node; |
813 | struct memory_tier *memtier; |
814 | |
	ret = subsys_virtual_register(&memory_tier_subsys, NULL);
	if (ret)
		panic("%s() failed to register memory tier subsystem\n", __func__);
818 | |
819 | #ifdef CONFIG_MIGRATION |
	node_demotion = kcalloc(nr_node_ids, sizeof(struct demotion_nodes),
				GFP_KERNEL);
822 | WARN_ON(!node_demotion); |
823 | #endif |
824 | mutex_lock(&memory_tier_lock); |
825 | /* |
826 | * For now we can have 4 faster memory tiers with smaller adistance |
	 * than the default DRAM tier.
828 | */ |
829 | default_dram_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM); |
	if (IS_ERR(default_dram_type))
		panic("%s() failed to allocate default DRAM tier\n", __func__);
832 | |
833 | /* |
	 * Look at all the existing N_MEMORY nodes and add them to the
	 * default memory tier, or to another tier if we already have
	 * memory types assigned.
837 | */ |
838 | for_each_node_state(node, N_MEMORY) { |
839 | memtier = set_node_memory_tier(node); |
		if (IS_ERR(memtier))
			/*
			 * Continue with the memtiers we were able to set up.
			 */
			break;
845 | } |
846 | establish_demotion_targets(); |
	mutex_unlock(&memory_tier_lock);
848 | |
849 | hotplug_memory_notifier(memtier_hotplug_callback, MEMTIER_HOTPLUG_PRI); |
850 | return 0; |
851 | } |
852 | subsys_initcall(memory_tier_init); |
853 | |
854 | bool numa_demotion_enabled = false; |
855 | |
856 | #ifdef CONFIG_MIGRATION |
857 | #ifdef CONFIG_SYSFS |
858 | static ssize_t demotion_enabled_show(struct kobject *kobj, |
859 | struct kobj_attribute *attr, char *buf) |
860 | { |
	return sysfs_emit(buf, "%s\n",
			  numa_demotion_enabled ? "true" : "false");
863 | } |
864 | |
865 | static ssize_t demotion_enabled_store(struct kobject *kobj, |
866 | struct kobj_attribute *attr, |
867 | const char *buf, size_t count) |
868 | { |
869 | ssize_t ret; |
870 | |
	ret = kstrtobool(buf, &numa_demotion_enabled);
872 | if (ret) |
873 | return ret; |
874 | |
875 | return count; |
876 | } |
877 | |
878 | static struct kobj_attribute numa_demotion_enabled_attr = |
879 | __ATTR_RW(demotion_enabled); |
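
/*
 * The attribute defined above lands at
 * /sys/kernel/mm/numa/demotion_enabled once numa_init_sysfs() below
 * has created the "numa" kobject under mm_kobj; writing "true" or
 * "false" (kstrtobool() also accepts 1/0 and y/n) toggles demotion
 * during reclaim.
 */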
880 | |
881 | static struct attribute *numa_attrs[] = { |
882 | &numa_demotion_enabled_attr.attr, |
883 | NULL, |
884 | }; |
885 | |
886 | static const struct attribute_group numa_attr_group = { |
887 | .attrs = numa_attrs, |
888 | }; |
889 | |
890 | static int __init numa_init_sysfs(void) |
891 | { |
892 | int err; |
893 | struct kobject *numa_kobj; |
894 | |
	numa_kobj = kobject_create_and_add("numa", mm_kobj);
	if (!numa_kobj) {
		pr_err("failed to create numa kobject\n");
898 | return -ENOMEM; |
899 | } |
	err = sysfs_create_group(numa_kobj, &numa_attr_group);
	if (err) {
		pr_err("failed to register numa group\n");
903 | goto delete_obj; |
904 | } |
905 | return 0; |
906 | |
907 | delete_obj: |
	kobject_put(numa_kobj);
909 | return err; |
910 | } |
911 | subsys_initcall(numa_init_sysfs); |
912 | #endif /* CONFIG_SYSFS */ |
913 | #endif |
914 | |