1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Data Access Monitor |
4 | * |
5 | * Author: SeongJae Park <sj@kernel.org> |
6 | */ |
7 | |
8 | #define pr_fmt(fmt) "damon: " fmt |
9 | |
10 | #include <linux/damon.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/kthread.h> |
13 | #include <linux/mm.h> |
14 | #include <linux/psi.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/string.h> |
17 | #include <linux/string_choices.h> |
18 | |
19 | #define CREATE_TRACE_POINTS |
20 | #include <trace/events/damon.h> |
21 | |
22 | #ifdef CONFIG_DAMON_KUNIT_TEST |
23 | #undef DAMON_MIN_REGION |
24 | #define DAMON_MIN_REGION 1 |
25 | #endif |
26 | |
27 | static DEFINE_MUTEX(damon_lock); |
28 | static int nr_running_ctxs; |
29 | static bool running_exclusive_ctxs; |
30 | |
31 | static DEFINE_MUTEX(damon_ops_lock); |
32 | static struct damon_operations damon_registered_ops[NR_DAMON_OPS]; |
33 | |
34 | static struct kmem_cache *damon_region_cache __ro_after_init; |
35 | |
36 | /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */ |
37 | static bool __damon_is_registered_ops(enum damon_ops_id id) |
38 | { |
39 | struct damon_operations empty_ops = {}; |
40 | |
41 | if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops))) |
42 | return false; |
43 | return true; |
44 | } |
45 | |
46 | /** |
47 | * damon_is_registered_ops() - Check if a given damon_operations is registered. |
48 | * @id: Id of the damon_operations to check if registered. |
49 | * |
50 | * Return: true if the ops is set, false otherwise. |
51 | */ |
52 | bool damon_is_registered_ops(enum damon_ops_id id) |
53 | { |
54 | bool registered; |
55 | |
56 | if (id >= NR_DAMON_OPS) |
57 | return false; |
58 | mutex_lock(&damon_ops_lock); |
59 | registered = __damon_is_registered_ops(id); |
60 | mutex_unlock(&damon_ops_lock); |
61 | return registered; |
62 | } |
63 | |
64 | /** |
65 | * damon_register_ops() - Register a monitoring operations set to DAMON. |
66 | * @ops: monitoring operations set to register. |
67 | * |
68 | * This function registers a monitoring operations set having a valid &struct |
69 | * damon_operations->id so that others can find and use it later. |
70 | * |
71 | * Return: 0 on success, negative error code otherwise. |
72 | */ |
73 | int damon_register_ops(struct damon_operations *ops) |
74 | { |
75 | int err = 0; |
76 | |
77 | if (ops->id >= NR_DAMON_OPS) |
78 | return -EINVAL; |
79 | |
80 | mutex_lock(&damon_ops_lock); |
81 | /* Fail for already registered ops */ |
82 | if (__damon_is_registered_ops(ops->id)) |
83 | err = -EINVAL; |
84 | else |
85 | damon_registered_ops[ops->id] = *ops; |
86 | mutex_unlock(&damon_ops_lock); |
87 | return err; |
88 | } |
89 | |
90 | /** |
91 | * damon_select_ops() - Select a monitoring operations to use with the context. |
92 | * @ctx: monitoring context to use the operations. |
93 | * @id: id of the registered monitoring operations to select. |
94 | * |
95 | * This function finds the registered monitoring operations set of @id and |
96 | * makes @ctx use it. |
97 | * |
98 | * Return: 0 on success, negative error code otherwise. |
99 | */ |
100 | int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id) |
101 | { |
102 | int err = 0; |
103 | |
104 | if (id >= NR_DAMON_OPS) |
105 | return -EINVAL; |
106 | |
107 | mutex_lock(&damon_ops_lock); |
108 | if (!__damon_is_registered_ops(id)) |
109 | err = -EINVAL; |
110 | else |
111 | ctx->ops = damon_registered_ops[id]; |
112 | mutex_unlock(&damon_ops_lock); |
113 | return err; |
114 | } |
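/*
 * Illustrative usage sketch (added annotation, not part of the original
 * source): bind a newly created context to an already registered operations
 * set.  It assumes an operations implementation has registered itself under
 * DAMON_OPS_VADDR; error handling is minimal.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_VADDR)) {
 *		damon_destroy_ctx(ctx);
 *		return -EINVAL;
 *	}
 */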
115 | |
116 | /* |
117 | * Construct a damon_region struct |
118 | * |
119 | * Returns the pointer to the new struct on success, or NULL otherwise |
120 | */ |
121 | struct damon_region *damon_new_region(unsigned long start, unsigned long end) |
122 | { |
123 | struct damon_region *region; |
124 | |
125 | region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL); |
126 | if (!region) |
127 | return NULL; |
128 | |
129 | region->ar.start = start; |
130 | region->ar.end = end; |
131 | region->nr_accesses = 0; |
132 | region->nr_accesses_bp = 0; |
133 | INIT_LIST_HEAD(&region->list); |
134 | |
135 | region->age = 0; |
136 | region->last_nr_accesses = 0; |
137 | |
138 | return region; |
139 | } |
140 | |
141 | void damon_add_region(struct damon_region *r, struct damon_target *t) |
142 | { |
143 | list_add_tail(&r->list, &t->regions_list); |
144 | t->nr_regions++; |
145 | } |
146 | |
147 | static void damon_del_region(struct damon_region *r, struct damon_target *t) |
148 | { |
149 | list_del(&r->list); |
150 | t->nr_regions--; |
151 | } |
152 | |
153 | static void damon_free_region(struct damon_region *r) |
154 | { |
155 | kmem_cache_free(damon_region_cache, r); |
156 | } |
157 | |
158 | void damon_destroy_region(struct damon_region *r, struct damon_target *t) |
159 | { |
160 | damon_del_region(r, t); |
161 | damon_free_region(r); |
162 | } |
163 | |
164 | /* |
165 | * Check whether a region is intersecting an address range |
166 | * |
167 | * Returns true if it is. |
168 | */ |
169 | static bool damon_intersect(struct damon_region *r, |
170 | struct damon_addr_range *re) |
171 | { |
172 | return !(r->ar.end <= re->start || re->end <= r->ar.start); |
173 | } |
174 | |
175 | /* |
176 | * Fill holes in regions with new regions. |
177 | */ |
178 | static int damon_fill_regions_holes(struct damon_region *first, |
179 | struct damon_region *last, struct damon_target *t) |
180 | { |
181 | struct damon_region *r = first; |
182 | |
183 | damon_for_each_region_from(r, t) { |
184 | struct damon_region *next, *newr; |
185 | |
186 | if (r == last) |
187 | break; |
188 | next = damon_next_region(r); |
189 | if (r->ar.end != next->ar.start) { |
190 | newr = damon_new_region(r->ar.end, next->ar.start); |
191 | if (!newr) |
192 | return -ENOMEM; |
193 | damon_insert_region(newr, r, next, t); |
194 | } |
195 | } |
196 | return 0; |
197 | } |
198 | |
199 | /* |
200 | * damon_set_regions() - Set regions of a target for given address ranges. |
201 | * @t: the given target. |
202 | * @ranges: array of new monitoring target ranges. |
203 | * @nr_ranges: length of @ranges. |
204 | * |
205 | * This function adds new regions to, or modifies existing regions of, a |
206 | * monitoring target so that they fit in the given ranges. |
207 | * |
208 | * Return: 0 on success, or negative error code otherwise. |
209 | */ |
210 | int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, |
211 | unsigned int nr_ranges) |
212 | { |
213 | struct damon_region *r, *next; |
214 | unsigned int i; |
215 | int err; |
216 | |
217 | /* Remove regions which are not in the new ranges */ |
218 | damon_for_each_region_safe(r, next, t) { |
219 | for (i = 0; i < nr_ranges; i++) { |
220 | if (damon_intersect(r, &ranges[i])) |
221 | break; |
222 | } |
223 | if (i == nr_ranges) |
224 | damon_destroy_region(r, t); |
225 | } |
226 | |
227 | r = damon_first_region(t); |
228 | /* Add new regions or resize existing regions to fit in the ranges */ |
229 | for (i = 0; i < nr_ranges; i++) { |
230 | struct damon_region *first = NULL, *last, *newr; |
231 | struct damon_addr_range *range; |
232 | |
233 | range = &ranges[i]; |
234 | /* Get the first/last regions intersecting with the range */ |
235 | damon_for_each_region_from(r, t) { |
236 | if (damon_intersect(r, range)) { |
237 | if (!first) |
238 | first = r; |
239 | last = r; |
240 | } |
241 | if (r->ar.start >= range->end) |
242 | break; |
243 | } |
244 | if (!first) { |
245 | /* no region intersects with this range */ |
246 | newr = damon_new_region( |
247 | ALIGN_DOWN(range->start, |
248 | DAMON_MIN_REGION), |
249 | ALIGN(range->end, DAMON_MIN_REGION)); |
250 | if (!newr) |
251 | return -ENOMEM; |
252 | damon_insert_region(newr, damon_prev_region(r), r, t); |
253 | } else { |
254 | /* resize intersecting regions to fit in this range */ |
255 | first->ar.start = ALIGN_DOWN(range->start, |
256 | DAMON_MIN_REGION); |
257 | last->ar.end = ALIGN(range->end, DAMON_MIN_REGION); |
258 | |
259 | /* fill possible holes in the range */ |
260 | err = damon_fill_regions_holes(first, last, t); |
261 | if (err) |
262 | return err; |
263 | } |
264 | } |
265 | return 0; |
266 | } |
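/*
 * Illustrative usage sketch (added annotation, not part of the original
 * source): set two monitoring ranges on a target.  The addresses are
 * arbitrary example values; damon_set_regions() aligns them to
 * DAMON_MIN_REGION internally.
 *
 *	struct damon_addr_range ranges[2] = {
 *		{ .start = 0x1000000, .end = 0x2000000 },
 *		{ .start = 0x4000000, .end = 0x4400000 },
 *	};
 *	int err;
 *
 *	err = damon_set_regions(t, ranges, 2);
 */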
267 | |
268 | struct damos_filter *damos_new_filter(enum damos_filter_type type, |
269 | bool matching, bool allow) |
270 | { |
271 | struct damos_filter *filter; |
272 | |
273 | filter = kmalloc(sizeof(*filter), GFP_KERNEL); |
274 | if (!filter) |
275 | return NULL; |
276 | filter->type = type; |
277 | filter->matching = matching; |
278 | filter->allow = allow; |
279 | INIT_LIST_HEAD(&filter->list); |
280 | return filter; |
281 | } |
282 | |
283 | /** |
284 | * damos_filter_for_ops() - Return whether the filter is an ops-handled one. |
285 | * @type: type of the filter. |
286 | * |
287 | * Return: true if the filter of @type needs to be handled by ops layer, false |
288 | * otherwise. |
289 | */ |
290 | bool damos_filter_for_ops(enum damos_filter_type type) |
291 | { |
292 | switch (type) { |
293 | case DAMOS_FILTER_TYPE_ADDR: |
294 | case DAMOS_FILTER_TYPE_TARGET: |
295 | return false; |
296 | default: |
297 | break; |
298 | } |
299 | return true; |
300 | } |
301 | |
302 | void damos_add_filter(struct damos *s, struct damos_filter *f) |
303 | { |
304 | if (damos_filter_for_ops(f->type)) |
305 | list_add_tail(&f->list, &s->ops_filters); |
306 | else |
307 | list_add_tail(&f->list, &s->filters); |
308 | } |
309 | |
310 | static void damos_del_filter(struct damos_filter *f) |
311 | { |
312 | list_del(&f->list); |
313 | } |
314 | |
315 | static void damos_free_filter(struct damos_filter *f) |
316 | { |
317 | kfree(f); |
318 | } |
319 | |
320 | void damos_destroy_filter(struct damos_filter *f) |
321 | { |
322 | damos_del_filter(f); |
323 | damos_free_filter(f); |
324 | } |
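/*
 * Illustrative usage sketch (added annotation, not part of the original
 * source): build a filter that rejects anonymous pages and attach it to a
 * scheme.  With matching == true and allow == false, memory matching the
 * filter type is rejected.  DAMOS_FILTER_TYPE_ANON is not ADDR or TARGET, so
 * damos_filter_for_ops() is true and damos_add_filter() links it to
 * &damos->ops_filters.  'scheme' stands for a caller-owned struct damos.
 *
 *	struct damos_filter *f;
 *
 *	f = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
 *	if (!f)
 *		return -ENOMEM;
 *	damos_add_filter(scheme, f);
 */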
325 | |
326 | struct damos_quota_goal *damos_new_quota_goal( |
327 | enum damos_quota_goal_metric metric, |
328 | unsigned long target_value) |
329 | { |
330 | struct damos_quota_goal *goal; |
331 | |
332 | goal = kmalloc(sizeof(*goal), GFP_KERNEL); |
333 | if (!goal) |
334 | return NULL; |
335 | goal->metric = metric; |
336 | goal->target_value = target_value; |
337 | INIT_LIST_HEAD(&goal->list); |
338 | return goal; |
339 | } |
340 | |
341 | void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g) |
342 | { |
343 | list_add_tail(&g->list, &q->goals); |
344 | } |
345 | |
346 | static void damos_del_quota_goal(struct damos_quota_goal *g) |
347 | { |
348 | list_del(&g->list); |
349 | } |
350 | |
351 | static void damos_free_quota_goal(struct damos_quota_goal *g) |
352 | { |
353 | kfree(g); |
354 | } |
355 | |
356 | void damos_destroy_quota_goal(struct damos_quota_goal *g) |
357 | { |
358 | damos_del_quota_goal(g); |
359 | damos_free_quota_goal(g); |
360 | } |
361 | |
362 | /* initialize fields of @quota that normally API users wouldn't set */ |
363 | static struct damos_quota *damos_quota_init(struct damos_quota *quota) |
364 | { |
365 | quota->esz = 0; |
366 | quota->total_charged_sz = 0; |
367 | quota->total_charged_ns = 0; |
368 | quota->charged_sz = 0; |
369 | quota->charged_from = 0; |
370 | quota->charge_target_from = NULL; |
371 | quota->charge_addr_from = 0; |
372 | quota->esz_bp = 0; |
373 | return quota; |
374 | } |
375 | |
376 | struct damos *damon_new_scheme(struct damos_access_pattern *pattern, |
377 | enum damos_action action, |
378 | unsigned long apply_interval_us, |
379 | struct damos_quota *quota, |
380 | struct damos_watermarks *wmarks, |
381 | int target_nid) |
382 | { |
383 | struct damos *scheme; |
384 | |
385 | scheme = kmalloc(sizeof(*scheme), GFP_KERNEL); |
386 | if (!scheme) |
387 | return NULL; |
388 | scheme->pattern = *pattern; |
389 | scheme->action = action; |
390 | scheme->apply_interval_us = apply_interval_us; |
391 | /* |
392 | * next_apply_sis will be set when kdamond starts. While kdamond is |
393 | * running, it will also be updated when the scheme is added to the DAMON |
394 | * context, or when damon_attrs are updated. |
395 | */ |
396 | scheme->next_apply_sis = 0; |
397 | scheme->walk_completed = false; |
398 | INIT_LIST_HEAD(&scheme->filters); |
399 | INIT_LIST_HEAD(&scheme->ops_filters); |
400 | scheme->stat = (struct damos_stat){}; |
401 | INIT_LIST_HEAD(&scheme->list); |
402 |  |
403 | scheme->quota = *(damos_quota_init(quota)); |
404 | /* quota.goals should be separately set by caller */ |
405 | INIT_LIST_HEAD(&scheme->quota.goals); |
406 | |
407 | scheme->wmarks = *wmarks; |
408 | scheme->wmarks.activated = true; |
409 | |
410 | scheme->target_nid = target_nid; |
411 | |
412 | return scheme; |
413 | } |
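/*
 * Illustrative usage sketch (added annotation, not part of the original
 * source): create a scheme that pages out regions of any size that saw no
 * access at all for at least 100 aggregation intervals.  Quota and
 * watermarks are left at their zeroed "no limit/no watermark" defaults only
 * for brevity; apply_interval_us == 0 means the aggregation interval is used.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 0,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = 0,
 *		.min_age_region = 100,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = {};
 *	struct damos *scheme;
 *
 *	scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota,
 *			&wmarks, NUMA_NO_NODE);
 */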
414 | |
415 | static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx) |
416 | { |
417 | unsigned long sample_interval = ctx->attrs.sample_interval ? |
418 | ctx->attrs.sample_interval : 1; |
419 | unsigned long apply_interval = s->apply_interval_us ? |
420 | s->apply_interval_us : ctx->attrs.aggr_interval; |
421 | |
422 | s->next_apply_sis = ctx->passed_sample_intervals + |
423 | apply_interval / sample_interval; |
424 | } |
425 | |
426 | void damon_add_scheme(struct damon_ctx *ctx, struct damos *s) |
427 | { |
428 | list_add_tail(&s->list, &ctx->schemes); |
429 | damos_set_next_apply_sis(s, ctx); |
430 | } |
431 | |
432 | static void damon_del_scheme(struct damos *s) |
433 | { |
434 | list_del(&s->list); |
435 | } |
436 | |
437 | static void damon_free_scheme(struct damos *s) |
438 | { |
439 | kfree(s); |
440 | } |
441 | |
442 | void damon_destroy_scheme(struct damos *s) |
443 | { |
444 | struct damos_quota_goal *g, *g_next; |
445 | struct damos_filter *f, *next; |
446 | |
447 | damos_for_each_quota_goal_safe(g, g_next, &s->quota) |
448 | damos_destroy_quota_goal(g); |
449 | |
450 | damos_for_each_filter_safe(f, next, s) |
451 | damos_destroy_filter(f); |
452 | damon_del_scheme(s); |
453 | damon_free_scheme(s); |
454 | } |
455 | |
456 | /* |
457 | * Construct a damon_target struct |
458 | * |
459 | * Returns the pointer to the new struct on success, or NULL otherwise |
460 | */ |
461 | struct damon_target *damon_new_target(void) |
462 | { |
463 | struct damon_target *t; |
464 | |
465 | t = kmalloc(sizeof(*t), GFP_KERNEL); |
466 | if (!t) |
467 | return NULL; |
468 | |
469 | t->pid = NULL; |
470 | t->nr_regions = 0; |
471 | INIT_LIST_HEAD(&t->regions_list); |
472 | INIT_LIST_HEAD(&t->list); |
473 | |
474 | return t; |
475 | } |
476 | |
477 | void damon_add_target(struct damon_ctx *ctx, struct damon_target *t) |
478 | { |
479 | list_add_tail(&t->list, &ctx->adaptive_targets); |
480 | } |
481 | |
482 | bool damon_targets_empty(struct damon_ctx *ctx) |
483 | { |
484 | return list_empty(&ctx->adaptive_targets); |
485 | } |
486 | |
487 | static void damon_del_target(struct damon_target *t) |
488 | { |
489 | list_del(&t->list); |
490 | } |
491 | |
492 | void damon_free_target(struct damon_target *t) |
493 | { |
494 | struct damon_region *r, *next; |
495 | |
496 | damon_for_each_region_safe(r, next, t) |
497 | damon_free_region(r); |
498 | kfree(t); |
499 | } |
500 | |
501 | void damon_destroy_target(struct damon_target *t) |
502 | { |
503 | damon_del_target(t); |
504 | damon_free_target(t); |
505 | } |
506 | |
507 | unsigned int damon_nr_regions(struct damon_target *t) |
508 | { |
509 | return t->nr_regions; |
510 | } |
511 | |
512 | struct damon_ctx *damon_new_ctx(void) |
513 | { |
514 | struct damon_ctx *ctx; |
515 | |
516 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
517 | if (!ctx) |
518 | return NULL; |
519 | |
520 | init_completion(&ctx->kdamond_started); |
521 | |
522 | ctx->attrs.sample_interval = 5 * 1000; |
523 | ctx->attrs.aggr_interval = 100 * 1000; |
524 | ctx->attrs.ops_update_interval = 60 * 1000 * 1000; |
525 | |
526 | ctx->passed_sample_intervals = 0; |
527 | /* These will be set from kdamond_init_ctx() */ |
528 | ctx->next_aggregation_sis = 0; |
529 | ctx->next_ops_update_sis = 0; |
530 | |
531 | mutex_init(&ctx->kdamond_lock); |
532 | mutex_init(&ctx->call_control_lock); |
533 | mutex_init(&ctx->walk_control_lock); |
534 | |
535 | ctx->attrs.min_nr_regions = 10; |
536 | ctx->attrs.max_nr_regions = 1000; |
537 | |
538 | INIT_LIST_HEAD(&ctx->adaptive_targets); |
539 | INIT_LIST_HEAD(&ctx->schemes); |
540 | |
541 | return ctx; |
542 | } |
543 | |
544 | static void damon_destroy_targets(struct damon_ctx *ctx) |
545 | { |
546 | struct damon_target *t, *next_t; |
547 | |
548 | if (ctx->ops.cleanup) { |
549 | ctx->ops.cleanup(ctx); |
550 | return; |
551 | } |
552 | |
553 | damon_for_each_target_safe(t, next_t, ctx) |
554 | damon_destroy_target(t); |
555 | } |
556 | |
557 | void damon_destroy_ctx(struct damon_ctx *ctx) |
558 | { |
559 | struct damos *s, *next_s; |
560 | |
561 | damon_destroy_targets(ctx); |
562 | |
563 | damon_for_each_scheme_safe(s, next_s, ctx) |
564 | damon_destroy_scheme(s); |
565 | |
566 | kfree(ctx); |
567 | } |
568 | |
569 | static unsigned int damon_age_for_new_attrs(unsigned int age, |
570 | struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) |
571 | { |
572 | return age * old_attrs->aggr_interval / new_attrs->aggr_interval; |
573 | } |
574 | |
575 | /* convert access ratio in bp (per 10,000) to nr_accesses */ |
576 | static unsigned int damon_accesses_bp_to_nr_accesses( |
577 | unsigned int accesses_bp, struct damon_attrs *attrs) |
578 | { |
579 | return accesses_bp * damon_max_nr_accesses(attrs) / 10000; |
580 | } |
581 | |
582 | /* |
583 | * Convert nr_accesses to access ratio in bp (per 10,000). |
584 | * |
585 | * Callers should ensure attrs.aggr_interval is not zero, like |
586 | * damon_update_monitoring_results() does. Otherwise, a divide-by-zero would |
587 | * happen. |
588 | */ |
589 | static unsigned int damon_nr_accesses_to_accesses_bp( |
590 | unsigned int nr_accesses, struct damon_attrs *attrs) |
591 | { |
592 | return nr_accesses * 10000 / damon_max_nr_accesses(attrs); |
593 | } |
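/*
 * Worked example (added annotation, not part of the original source),
 * assuming damon_max_nr_accesses() is aggr_interval / sample_interval as
 * declared in include/linux/damon.h: with the default attrs set in
 * damon_new_ctx() (5 ms sampling, 100 ms aggregation), the maximum
 * nr_accesses is 100000 / 5000 = 20.  Then nr_accesses == 5 converts to
 * 5 * 10000 / 20 = 2500 bp (25%), and 2500 bp converts back to
 * 2500 * 20 / 10000 = 5 accesses.
 */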
594 | |
595 | static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses, |
596 | struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) |
597 | { |
598 | return damon_accesses_bp_to_nr_accesses( |
599 | damon_nr_accesses_to_accesses_bp( |
600 | nr_accesses, old_attrs), |
601 | new_attrs); |
602 | } |
603 | |
604 | static void damon_update_monitoring_result(struct damon_region *r, |
605 | struct damon_attrs *old_attrs, struct damon_attrs *new_attrs, |
606 | bool aggregating) |
607 | { |
608 | if (!aggregating) { |
609 | r->nr_accesses = damon_nr_accesses_for_new_attrs( |
610 | r->nr_accesses, old_attrs, new_attrs); |
611 | r->nr_accesses_bp = r->nr_accesses * 10000; |
612 | } else { |
613 | /* |
614 | * if this is called in the middle of the aggregation, reset |
615 | * the aggregations we made so far for this aggregation |
616 | * interval. In other words, make the status like |
617 | * kdamond_reset_aggregated() is called. |
618 | */ |
619 | r->last_nr_accesses = damon_nr_accesses_for_new_attrs( |
620 | r->last_nr_accesses, old_attrs, new_attrs); |
621 | r->nr_accesses_bp = r->last_nr_accesses * 10000; |
622 | r->nr_accesses = 0; |
623 | } |
624 | r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs); |
625 | } |
626 | |
627 | /* |
628 | * region->nr_accesses is the number of sampling intervals in the last |
629 | * aggregation interval in which accesses to the region were found, and |
630 | * region->age is the number of aggregation intervals for which its access |
631 | * pattern has been maintained. Hence, the real meaning of the two fields |
632 | * depends on the current sampling and aggregation intervals. This function |
633 | * updates ->nr_accesses and ->age of the given damon_ctx's regions for new damon_attrs. |
634 | */ |
635 | static void damon_update_monitoring_results(struct damon_ctx *ctx, |
636 | struct damon_attrs *new_attrs, bool aggregating) |
637 | { |
638 | struct damon_attrs *old_attrs = &ctx->attrs; |
639 | struct damon_target *t; |
640 | struct damon_region *r; |
641 | |
642 | /* if any interval is zero, simply skip the conversion */ |
643 | if (!old_attrs->sample_interval || !old_attrs->aggr_interval || |
644 | !new_attrs->sample_interval || |
645 | !new_attrs->aggr_interval) |
646 | return; |
647 | |
648 | damon_for_each_target(t, ctx) |
649 | damon_for_each_region(r, t) |
650 | damon_update_monitoring_result( |
651 | r, old_attrs, new_attrs, aggregating); |
652 | } |
653 | |
654 | /* |
655 | * damon_valid_intervals_goal() - Return whether the intervals goal of @attrs |
656 | * is valid. |
657 | */ |
658 | static bool damon_valid_intervals_goal(struct damon_attrs *attrs) |
659 | { |
660 | struct damon_intervals_goal *goal = &attrs->intervals_goal; |
661 | |
662 | /* tuning is disabled */ |
663 | if (!goal->aggrs) |
664 | return true; |
665 | if (goal->min_sample_us > goal->max_sample_us) |
666 | return false; |
667 | if (attrs->sample_interval < goal->min_sample_us || |
668 | goal->max_sample_us < attrs->sample_interval) |
669 | return false; |
670 | return true; |
671 | } |
672 | |
673 | /** |
674 | * damon_set_attrs() - Set attributes for the monitoring. |
675 | * @ctx: monitoring context |
676 | * @attrs: monitoring attributes |
677 | * |
678 | * This function should be called while the kdamond is not running, while an |
679 | * access check results aggregation is not ongoing (e.g., from &struct |
680 | * damon_callback->after_aggregation or &struct |
681 | * damon_callback->after_wmarks_check callbacks), or from damon_call(). |
682 | * |
683 | * Every time interval is in micro-seconds. |
684 | * |
685 | * Return: 0 on success, negative error code otherwise. |
686 | */ |
687 | int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) |
688 | { |
689 | unsigned long sample_interval = attrs->sample_interval ? |
690 | attrs->sample_interval : 1; |
691 | struct damos *s; |
692 | bool aggregating = ctx->passed_sample_intervals < |
693 | ctx->next_aggregation_sis; |
694 | |
695 | if (!damon_valid_intervals_goal(attrs)) |
696 | return -EINVAL; |
697 | |
698 | if (attrs->min_nr_regions < 3) |
699 | return -EINVAL; |
700 | if (attrs->min_nr_regions > attrs->max_nr_regions) |
701 | return -EINVAL; |
702 | if (attrs->sample_interval > attrs->aggr_interval) |
703 | return -EINVAL; |
704 | |
705 | /* Calls from outside the core don't set this. */ |
706 | if (!attrs->aggr_samples) |
707 | attrs->aggr_samples = attrs->aggr_interval / sample_interval; |
708 | |
709 | ctx->next_aggregation_sis = ctx->passed_sample_intervals + |
710 | attrs->aggr_interval / sample_interval; |
711 | ctx->next_ops_update_sis = ctx->passed_sample_intervals + |
712 | attrs->ops_update_interval / sample_interval; |
713 | |
714 | damon_update_monitoring_results(ctx, attrs, aggregating); |
715 | ctx->attrs = *attrs; |
716 | |
717 | damon_for_each_scheme(s, ctx) |
718 | damos_set_next_apply_sis(s, ctx); |
719 | |
720 | return 0; |
721 | } |
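/*
 * Illustrative usage sketch (added annotation, not part of the original
 * source): switch a context to 10 ms sampling and 200 ms aggregation.  All
 * intervals are in microseconds; for a running context, this should be done
 * from a parameters-update safe path such as damon_call(), as the kernel-doc
 * above notes.
 *
 *	struct damon_attrs attrs = ctx->attrs;
 *	int err;
 *
 *	attrs.sample_interval = 10 * 1000;
 *	attrs.aggr_interval = 200 * 1000;
 *	err = damon_set_attrs(ctx, &attrs);
 */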
722 | |
723 | /** |
724 | * damon_set_schemes() - Set data access monitoring based operation schemes. |
725 | * @ctx: monitoring context |
726 | * @schemes: array of the schemes |
727 | * @nr_schemes: number of entries in @schemes |
728 | * |
729 | * This function should not be called while the kdamond of the context is |
730 | * running. |
731 | */ |
732 | void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, |
733 | ssize_t nr_schemes) |
734 | { |
735 | struct damos *s, *next; |
736 | ssize_t i; |
737 | |
738 | damon_for_each_scheme_safe(s, next, ctx) |
739 | damon_destroy_scheme(s); |
740 | for (i = 0; i < nr_schemes; i++) |
741 | damon_add_scheme(ctx, schemes[i]); |
742 | } |
743 | |
744 | static struct damos_quota_goal *damos_nth_quota_goal( |
745 | int n, struct damos_quota *q) |
746 | { |
747 | struct damos_quota_goal *goal; |
748 | int i = 0; |
749 | |
750 | damos_for_each_quota_goal(goal, q) { |
751 | if (i++ == n) |
752 | return goal; |
753 | } |
754 | return NULL; |
755 | } |
756 | |
757 | static void damos_commit_quota_goal( |
758 | struct damos_quota_goal *dst, struct damos_quota_goal *src) |
759 | { |
760 | dst->metric = src->metric; |
761 | dst->target_value = src->target_value; |
762 | if (dst->metric == DAMOS_QUOTA_USER_INPUT) |
763 | dst->current_value = src->current_value; |
764 | /* keep last_psi_total as is, since it will be updated in next cycle */ |
765 | } |
766 | |
767 | /** |
768 | * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota. |
769 | * @dst: The commit destination DAMOS quota. |
770 | * @src: The commit source DAMOS quota. |
771 | * |
772 | * Copies user-specified parameters for quota goals from @src to @dst. Users |
773 | * should use this function to update quota goal-level parameters of running |
774 | * DAMON contexts, instead of manual in-place updates. |
775 | * |
776 | * This function should be called from parameters-update safe context, like |
777 | * DAMON callbacks. |
778 | */ |
779 | int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src) |
780 | { |
781 | struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal; |
782 | int i = 0, j = 0; |
783 | |
784 | damos_for_each_quota_goal_safe(dst_goal, next, dst) { |
785 | src_goal = damos_nth_quota_goal(i++, src); |
786 | if (src_goal) |
787 | damos_commit_quota_goal(dst_goal, src_goal); |
788 | else |
789 | damos_destroy_quota_goal(dst_goal); |
790 | } |
791 | damos_for_each_quota_goal_safe(src_goal, next, src) { |
792 | if (j++ < i) |
793 | continue; |
794 | new_goal = damos_new_quota_goal( |
795 | src_goal->metric, src_goal->target_value); |
796 | if (!new_goal) |
797 | return -ENOMEM; |
798 | damos_add_quota_goal(dst, new_goal); |
799 | } |
800 | return 0; |
801 | } |
802 | |
803 | static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src) |
804 | { |
805 | int err; |
806 | |
807 | dst->reset_interval = src->reset_interval; |
808 | dst->ms = src->ms; |
809 | dst->sz = src->sz; |
810 | err = damos_commit_quota_goals(dst, src); |
811 | if (err) |
812 | return err; |
813 | dst->weight_sz = src->weight_sz; |
814 | dst->weight_nr_accesses = src->weight_nr_accesses; |
815 | dst->weight_age = src->weight_age; |
816 | return 0; |
817 | } |
818 | |
819 | static struct damos_filter *damos_nth_filter(int n, struct damos *s) |
820 | { |
821 | struct damos_filter *filter; |
822 | int i = 0; |
823 | |
824 | damos_for_each_filter(filter, s) { |
825 | if (i++ == n) |
826 | return filter; |
827 | } |
828 | return NULL; |
829 | } |
830 | |
831 | static void damos_commit_filter_arg( |
832 | struct damos_filter *dst, struct damos_filter *src) |
833 | { |
834 | switch (dst->type) { |
835 | case DAMOS_FILTER_TYPE_MEMCG: |
836 | dst->memcg_id = src->memcg_id; |
837 | break; |
838 | case DAMOS_FILTER_TYPE_ADDR: |
839 | dst->addr_range = src->addr_range; |
840 | break; |
841 | case DAMOS_FILTER_TYPE_TARGET: |
842 | dst->target_idx = src->target_idx; |
843 | break; |
844 | case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: |
845 | dst->sz_range = src->sz_range; |
846 | break; |
847 | default: |
848 | break; |
849 | } |
850 | } |
851 | |
852 | static void damos_commit_filter( |
853 | struct damos_filter *dst, struct damos_filter *src) |
854 | { |
855 | dst->type = src->type; |
856 | dst->matching = src->matching; |
857 | damos_commit_filter_arg(dst, src); |
858 | } |
859 | |
860 | static int damos_commit_core_filters(struct damos *dst, struct damos *src) |
861 | { |
862 | struct damos_filter *dst_filter, *next, *src_filter, *new_filter; |
863 | int i = 0, j = 0; |
864 | |
865 | damos_for_each_filter_safe(dst_filter, next, dst) { |
866 | src_filter = damos_nth_filter(i++, src); |
867 | if (src_filter) |
868 | damos_commit_filter(dst_filter, src_filter); |
869 | else |
870 | damos_destroy_filter(dst_filter); |
871 | } |
872 | |
873 | damos_for_each_filter_safe(src_filter, next, src) { |
874 | if (j++ < i) |
875 | continue; |
876 | |
877 | new_filter = damos_new_filter( |
878 | src_filter->type, src_filter->matching, |
879 | src_filter->allow); |
880 | if (!new_filter) |
881 | return -ENOMEM; |
882 | damos_commit_filter_arg(new_filter, src_filter); |
883 | damos_add_filter(dst, new_filter); |
884 | } |
885 | return 0; |
886 | } |
887 | |
888 | static int damos_commit_ops_filters(struct damos *dst, struct damos *src) |
889 | { |
890 | struct damos_filter *dst_filter, *next, *src_filter, *new_filter; |
891 | int i = 0, j = 0; |
892 | |
893 | damos_for_each_ops_filter_safe(dst_filter, next, dst) { |
894 | src_filter = damos_nth_filter(i++, src); |
895 | if (src_filter) |
896 | damos_commit_filter(dst_filter, src_filter); |
897 | else |
898 | damos_destroy_filter(dst_filter); |
899 | } |
900 | |
901 | damos_for_each_ops_filter_safe(src_filter, next, src) { |
902 | if (j++ < i) |
903 | continue; |
904 | |
905 | new_filter = damos_new_filter( |
906 | src_filter->type, src_filter->matching, |
907 | src_filter->allow); |
908 | if (!new_filter) |
909 | return -ENOMEM; |
910 | damos_commit_filter_arg(new_filter, src_filter); |
911 | damos_add_filter(dst, new_filter); |
912 | } |
913 | return 0; |
914 | } |
915 | |
916 | /** |
917 | * damos_filters_default_reject() - Decide whether to reject memory that did |
918 | * not match any of the given filters. |
919 | * @filters: Given DAMOS filters of a group. |
920 | */ |
921 | static bool damos_filters_default_reject(struct list_head *filters) |
922 | { |
923 | struct damos_filter *last_filter; |
924 | |
925 | if (list_empty(filters)) |
926 | return false; |
927 | last_filter = list_last_entry(filters, struct damos_filter, list); |
928 | return last_filter->allow; |
929 | } |
930 | |
931 | static void damos_set_filters_default_reject(struct damos *s) |
932 | { |
933 | if (!list_empty(&s->ops_filters)) |
934 | s->core_filters_default_reject = false; |
935 | else |
936 | s->core_filters_default_reject = |
937 | damos_filters_default_reject(&s->filters); |
938 | s->ops_filters_default_reject = |
939 | damos_filters_default_reject(&s->ops_filters); |
940 | } |
941 | |
942 | static int damos_commit_filters(struct damos *dst, struct damos *src) |
943 | { |
944 | int err; |
945 | |
946 | err = damos_commit_core_filters(dst, src); |
947 | if (err) |
948 | return err; |
949 | err = damos_commit_ops_filters(dst, src); |
950 | if (err) |
951 | return err; |
952 | damos_set_filters_default_reject(dst); |
953 | return 0; |
954 | } |
955 | |
956 | static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx) |
957 | { |
958 | struct damos *s; |
959 | int i = 0; |
960 | |
961 | damon_for_each_scheme(s, ctx) { |
962 | if (i++ == n) |
963 | return s; |
964 | } |
965 | return NULL; |
966 | } |
967 | |
968 | static int damos_commit(struct damos *dst, struct damos *src) |
969 | { |
970 | int err; |
971 | |
972 | dst->pattern = src->pattern; |
973 | dst->action = src->action; |
974 | dst->apply_interval_us = src->apply_interval_us; |
975 | |
976 | err = damos_commit_quota(&dst->quota, &src->quota); |
977 | if (err) |
978 | return err; |
979 | |
980 | dst->wmarks = src->wmarks; |
981 | |
982 | err = damos_commit_filters(dst, src); |
983 | return err; |
984 | } |
985 | |
986 | static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src) |
987 | { |
988 | struct damos *dst_scheme, *next, *src_scheme, *new_scheme; |
989 | int i = 0, j = 0, err; |
990 | |
991 | damon_for_each_scheme_safe(dst_scheme, next, dst) { |
992 | src_scheme = damon_nth_scheme(i++, src); |
993 | if (src_scheme) { |
994 | err = damos_commit(dst_scheme, src_scheme); |
995 | if (err) |
996 | return err; |
997 | } else { |
998 | damon_destroy_scheme(dst_scheme); |
999 | } |
1000 | } |
1001 | |
1002 | damon_for_each_scheme_safe(src_scheme, next, src) { |
1003 | if (j++ < i) |
1004 | continue; |
1005 | new_scheme = damon_new_scheme(&src_scheme->pattern, |
1006 | src_scheme->action, |
1007 | src_scheme->apply_interval_us, |
1008 | &src_scheme->quota, &src_scheme->wmarks, |
1009 | NUMA_NO_NODE); |
1010 | if (!new_scheme) |
1011 | return -ENOMEM; |
1012 | err = damos_commit(new_scheme, src_scheme); |
1013 | if (err) { |
1014 | damon_destroy_scheme(new_scheme); |
1015 | return err; |
1016 | } |
1017 | damon_add_scheme(dst, new_scheme); |
1018 | } |
1019 | return 0; |
1020 | } |
1021 | |
1022 | static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx) |
1023 | { |
1024 | struct damon_target *t; |
1025 | int i = 0; |
1026 | |
1027 | damon_for_each_target(t, ctx) { |
1028 | if (i++ == n) |
1029 | return t; |
1030 | } |
1031 | return NULL; |
1032 | } |
1033 | |
1034 | /* |
1035 | * The caller should ensure the regions of @src are |
1036 | * 1. valid (end >= start) and |
1037 | * 2. sorted by starting address. |
1038 | * |
1039 | * If @src has no region, @dst keeps current regions. |
1040 | */ |
1041 | static int damon_commit_target_regions( |
1042 | struct damon_target *dst, struct damon_target *src) |
1043 | { |
1044 | struct damon_region *src_region; |
1045 | struct damon_addr_range *ranges; |
1046 | int i = 0, err; |
1047 | |
1048 | damon_for_each_region(src_region, src) |
1049 | i++; |
1050 | if (!i) |
1051 | return 0; |
1052 | |
1053 | ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); |
1054 | if (!ranges) |
1055 | return -ENOMEM; |
1056 | i = 0; |
1057 | damon_for_each_region(src_region, src) |
1058 | ranges[i++] = src_region->ar; |
1059 | err = damon_set_regions(dst, ranges, i); |
1060 | kfree(ranges); |
1061 | return err; |
1062 | } |
1063 | |
1064 | static int damon_commit_target( |
1065 | struct damon_target *dst, bool dst_has_pid, |
1066 | struct damon_target *src, bool src_has_pid) |
1067 | { |
1068 | int err; |
1069 | |
1070 | err = damon_commit_target_regions(dst, src); |
1071 | if (err) |
1072 | return err; |
1073 | if (dst_has_pid) |
1074 | put_pid(dst->pid); |
1075 | if (src_has_pid) |
1076 | get_pid(src->pid); |
1077 | dst->pid = src->pid; |
1078 | return 0; |
1079 | } |
1080 | |
1081 | static int damon_commit_targets( |
1082 | struct damon_ctx *dst, struct damon_ctx *src) |
1083 | { |
1084 | struct damon_target *dst_target, *next, *src_target, *new_target; |
1085 | int i = 0, j = 0, err; |
1086 | |
1087 | damon_for_each_target_safe(dst_target, next, dst) { |
1088 | src_target = damon_nth_target(i++, src); |
1089 | if (src_target) { |
1090 | err = damon_commit_target( |
1091 | dst_target, damon_target_has_pid(dst), |
1092 | src_target, damon_target_has_pid(src)); |
1093 | if (err) |
1094 | return err; |
1095 | } else { |
1096 | struct damos *s; |
1097 | |
1098 | if (damon_target_has_pid(dst)) |
1099 | put_pid(dst_target->pid); |
1100 | damon_destroy_target(dst_target); |
1101 | damon_for_each_scheme(s, dst) { |
1102 | if (s->quota.charge_target_from == dst_target) { |
1103 | s->quota.charge_target_from = NULL; |
1104 | s->quota.charge_addr_from = 0; |
1105 | } |
1106 | } |
1107 | } |
1108 | } |
1109 | |
1110 | damon_for_each_target_safe(src_target, next, src) { |
1111 | if (j++ < i) |
1112 | continue; |
1113 | new_target = damon_new_target(); |
1114 | if (!new_target) |
1115 | return -ENOMEM; |
1116 | err = damon_commit_target(new_target, false, |
1117 | src_target, damon_target_has_pid(src)); |
1118 | if (err) { |
1119 | damon_destroy_target(new_target); |
1120 | return err; |
1121 | } |
1122 | damon_add_target(dst, new_target); |
1123 | } |
1124 | return 0; |
1125 | } |
1126 | |
1127 | /** |
1128 | * damon_commit_ctx() - Commit parameters of a DAMON context to another. |
1129 | * @dst: The commit destination DAMON context. |
1130 | * @src: The commit source DAMON context. |
1131 | * |
1132 | * This function copies user-specified parameters from @src to @dst and updates |
1133 | * the internal status and results accordingly. Users should use this function |
1134 | * for context-level parameters update of running context, instead of manual |
1135 | * in-place updates. |
1136 | * |
1137 | * This function should be called from parameters-update safe context, like |
1138 | * DAMON callbacks. |
1139 | */ |
1140 | int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src) |
1141 | { |
1142 | int err; |
1143 | |
1144 | err = damon_commit_schemes(dst, src); |
1145 | if (err) |
1146 | return err; |
1147 | err = damon_commit_targets(dst, src); |
1148 | if (err) |
1149 | return err; |
1150 | /* |
1151 | * schemes and targets should be updated first, since |
1152 | * 1. damon_set_attrs() updates monitoring results of targets and |
1153 | * next_apply_sis of schemes, and |
1154 | * 2. ops update should be done after pid handling is done (target |
1155 | * committing require putting pids). |
1156 | */ |
1157 | err = damon_set_attrs(dst, &src->attrs); |
1158 | if (err) |
1159 | return err; |
1160 | dst->ops = src->ops; |
1161 | |
1162 | return 0; |
1163 | } |
1164 | |
1165 | /** |
1166 | * damon_nr_running_ctxs() - Return number of currently running contexts. |
1167 | */ |
1168 | int damon_nr_running_ctxs(void) |
1169 | { |
1170 | int nr_ctxs; |
1171 | |
1172 | mutex_lock(&damon_lock); |
1173 | nr_ctxs = nr_running_ctxs; |
1174 | mutex_unlock(&damon_lock); |
1175 | |
1176 | return nr_ctxs; |
1177 | } |
1178 | |
1179 | /* Returns the size upper limit for each monitoring region */ |
1180 | static unsigned long damon_region_sz_limit(struct damon_ctx *ctx) |
1181 | { |
1182 | struct damon_target *t; |
1183 | struct damon_region *r; |
1184 | unsigned long sz = 0; |
1185 | |
1186 | damon_for_each_target(t, ctx) { |
1187 | damon_for_each_region(r, t) |
1188 | sz += damon_sz_region(r); |
1189 | } |
1190 | |
1191 | if (ctx->attrs.min_nr_regions) |
1192 | sz /= ctx->attrs.min_nr_regions; |
1193 | if (sz < DAMON_MIN_REGION) |
1194 | sz = DAMON_MIN_REGION; |
1195 | |
1196 | return sz; |
1197 | } |
1198 | |
1199 | static int kdamond_fn(void *data); |
1200 | |
1201 | /* |
1202 | * __damon_start() - Starts monitoring with given context. |
1203 | * @ctx: monitoring context |
1204 | * |
1205 | * This function should be called while damon_lock is held. |
1206 | * |
1207 | * Return: 0 on success, negative error code otherwise. |
1208 | */ |
1209 | static int __damon_start(struct damon_ctx *ctx) |
1210 | { |
1211 | int err = -EBUSY; |
1212 | |
1213 | mutex_lock(&ctx->kdamond_lock); |
1214 | if (!ctx->kdamond) { |
1215 | err = 0; |
1216 | reinit_completion(&ctx->kdamond_started); |
1217 | ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", |
1218 | nr_running_ctxs); |
1219 | if (IS_ERR(ctx->kdamond)) { |
1220 | err = PTR_ERR(ctx->kdamond); |
1221 | ctx->kdamond = NULL; |
1222 | } else { |
1223 | wait_for_completion(&ctx->kdamond_started); |
1224 | } |
1225 | } |
1226 | mutex_unlock(&ctx->kdamond_lock); |
1227 | |
1228 | return err; |
1229 | } |
1230 | |
1231 | /** |
1232 | * damon_start() - Starts monitoring for a given group of contexts. |
1233 | * @ctxs: an array of the pointers for contexts to start monitoring |
1234 | * @nr_ctxs: size of @ctxs |
1235 | * @exclusive: exclusiveness of this contexts group |
1236 | * |
1237 | * This function starts a group of monitoring threads for a group of monitoring |
1238 | * contexts. One thread per each context is created and run in parallel. The |
1239 | * caller should handle synchronization between the threads by itself. If |
1240 | * @exclusive is true and a group of threads created by another |
1241 | * 'damon_start()' call is currently running, this function does nothing but |
1242 | * returns -EBUSY. |
1243 | * |
1244 | * Return: 0 on success, negative error code otherwise. |
1245 | */ |
1246 | int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive) |
1247 | { |
1248 | int i; |
1249 | int err = 0; |
1250 | |
1251 | mutex_lock(&damon_lock); |
1252 | if ((exclusive && nr_running_ctxs) || |
1253 | (!exclusive && running_exclusive_ctxs)) { |
1254 | mutex_unlock(&damon_lock); |
1255 | return -EBUSY; |
1256 | } |
1257 | |
1258 | for (i = 0; i < nr_ctxs; i++) { |
1259 | err = __damon_start(ctxs[i]); |
1260 | if (err) |
1261 | break; |
1262 | nr_running_ctxs++; |
1263 | } |
1264 | if (exclusive && nr_running_ctxs) |
1265 | running_exclusive_ctxs = true; |
1266 | mutex_unlock(&damon_lock); |
1267 | |
1268 | return err; |
1269 | } |
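/*
 * Illustrative usage sketch (added annotation, not part of the original
 * source): run a single, fully configured context exclusively, and stop it
 * later with damon_stop().
 *
 *	struct damon_ctx *ctxs[1] = { ctx };
 *	int err;
 *
 *	err = damon_start(ctxs, 1, true);
 *	...
 *	err = damon_stop(ctxs, 1);
 */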
1270 | |
1271 | /* |
1272 | * __damon_stop() - Stops monitoring of a given context. |
1273 | * @ctx: monitoring context |
1274 | * |
1275 | * Return: 0 on success, negative error code otherwise. |
1276 | */ |
1277 | static int __damon_stop(struct damon_ctx *ctx) |
1278 | { |
1279 | struct task_struct *tsk; |
1280 | |
1281 | mutex_lock(&ctx->kdamond_lock); |
1282 | tsk = ctx->kdamond; |
1283 | if (tsk) { |
1284 | get_task_struct(tsk); |
1285 | mutex_unlock(&ctx->kdamond_lock); |
1286 | kthread_stop_put(tsk); |
1287 | return 0; |
1288 | } |
1289 | mutex_unlock(&ctx->kdamond_lock); |
1290 | |
1291 | return -EPERM; |
1292 | } |
1293 | |
1294 | /** |
1295 | * damon_stop() - Stops monitoring for a given group of contexts. |
1296 | * @ctxs: an array of the pointers for contexts to stop monitoring |
1297 | * @nr_ctxs: size of @ctxs |
1298 | * |
1299 | * Return: 0 on success, negative error code otherwise. |
1300 | */ |
1301 | int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) |
1302 | { |
1303 | int i, err = 0; |
1304 | |
1305 | for (i = 0; i < nr_ctxs; i++) { |
1306 | /* nr_running_ctxs is decremented in kdamond_fn */ |
1307 | err = __damon_stop(ctxs[i]); |
1308 | if (err) |
1309 | break; |
1310 | } |
1311 | return err; |
1312 | } |
1313 | |
1314 | static bool damon_is_running(struct damon_ctx *ctx) |
1315 | { |
1316 | bool running; |
1317 | |
1318 | mutex_lock(&ctx->kdamond_lock); |
1319 | running = ctx->kdamond != NULL; |
1320 | mutex_unlock(&ctx->kdamond_lock); |
1321 | return running; |
1322 | } |
1323 | |
1324 | /** |
1325 | * damon_call() - Invoke a given function on DAMON worker thread (kdamond). |
1326 | * @ctx: DAMON context to call the function for. |
1327 | * @control: Control variable of the call request. |
1328 | * |
1329 | * Ask DAMON worker thread (kdamond) of @ctx to call a function with an |
1330 | * argument, which are respectively passed via &damon_call_control->fn and |
1331 | * &damon_call_control->data of @control, and wait until the kdamond finishes |
1332 | * handling of the request. |
1333 | * |
1334 | * The kdamond executes the function with the argument in the main loop, just |
1335 | * after a sampling of the iteration is finished. The function can hence |
1336 | * safely access the internal data of the &struct damon_ctx without additional |
1337 | * synchronization. The return value of the function will be saved in |
1338 | * &damon_call_control->return_code. |
1339 | * |
1340 | * Return: 0 on success, negative error code otherwise. |
1341 | */ |
1342 | int damon_call(struct damon_ctx *ctx, struct damon_call_control *control) |
1343 | { |
1344 | init_completion(&control->completion); |
1345 | control->canceled = false; |
1346 | |
1347 | mutex_lock(&ctx->call_control_lock); |
1348 | if (ctx->call_control) { |
1349 | mutex_unlock(&ctx->call_control_lock); |
1350 | return -EBUSY; |
1351 | } |
1352 | ctx->call_control = control; |
1353 | mutex_unlock(&ctx->call_control_lock); |
1354 | if (!damon_is_running(ctx)) |
1355 | return -EINVAL; |
1356 | wait_for_completion(&control->completion); |
1357 | if (control->canceled) |
1358 | return -ECANCELED; |
1359 | return 0; |
1360 | } |
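/*
 * Illustrative usage sketch (added annotation, not part of the original
 * source): ask the kdamond of a running context to run a callback that can
 * safely read the context internals.  The callback signature
 * (int (*)(void *)) and the on-stack &struct damon_call_control fields are
 * an assumption here; check include/linux/damon.h for the authoritative
 * definitions.
 *
 *	static int dump_nr_regions(void *data)
 *	{
 *		struct damon_ctx *ctx = data;
 *		struct damon_target *t;
 *
 *		damon_for_each_target(t, ctx)
 *			pr_info("%u regions\n", damon_nr_regions(t));
 *		return 0;
 *	}
 *	...
 *	struct damon_call_control control = {
 *		.fn = dump_nr_regions,
 *		.data = ctx,
 *	};
 *	int err;
 *
 *	err = damon_call(ctx, &control);
 */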
1361 | |
1362 | /** |
1363 | * damos_walk() - Invoke a given function while DAMOS walks regions. |
1364 | * @ctx: DAMON context to call the functions for. |
1365 | * @control: Control variable of the walk request. |
1366 | * |
1367 | * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region |
1368 | * that the kdamond will apply DAMOS action to, and wait until the kdamond |
1369 | * finishes handling of the request. |
1370 | * |
1371 | * The kdamond executes the given function in the main loop, for each region |
1372 | * just after it applied any DAMOS actions of @ctx to it. The invocation is |
1373 | * made only within one &damos->apply_interval_us since damos_walk() |
1374 | * invocation, for each scheme. The given callback function can hence safely |
1375 | * access the internal data of &struct damon_ctx and &struct damon_region that |
1376 | * each of the scheme will apply the action for next interval, without |
1377 | * additional synchronizations against the kdamond. If every scheme of @ctx |
1378 | * passed at least one &damos->apply_interval_us, kdamond marks the request as |
1379 | * completed so that damos_walk() can wake up and return. |
1380 | * |
1381 | * Return: 0 on success, negative error code otherwise. |
1382 | */ |
1383 | int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control) |
1384 | { |
1385 | init_completion(&control->completion); |
1386 | control->canceled = false; |
1387 | mutex_lock(&ctx->walk_control_lock); |
1388 | if (ctx->walk_control) { |
1389 | mutex_unlock(&ctx->walk_control_lock); |
1390 | return -EBUSY; |
1391 | } |
1392 | ctx->walk_control = control; |
1393 | mutex_unlock(&ctx->walk_control_lock); |
1394 | if (!damon_is_running(ctx)) |
1395 | return -EINVAL; |
1396 | wait_for_completion(&control->completion); |
1397 | if (control->canceled) |
1398 | return -ECANCELED; |
1399 | return 0; |
1400 | } |
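/*
 * Illustrative usage sketch (added annotation, not part of the original
 * source): walk the regions that the schemes of a running context will
 * apply their actions to, accumulating their total size.  The walk_fn
 * signature mirrors the control->walk_fn() invocation in
 * damos_walk_call_walk() below.
 *
 *	static void sum_region_sz(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		*(unsigned long *)data += damon_sz_region(r);
 *	}
 *	...
 *	unsigned long total_sz = 0;
 *	struct damos_walk_control control = {
 *		.walk_fn = sum_region_sz,
 *		.data = &total_sz,
 *	};
 *	int err;
 *
 *	err = damos_walk(ctx, &control);
 */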
1401 | |
1402 | /* |
1403 | * Warn about and fix corrupted ->nr_accesses[_bp] to aid investigation and to |
1404 | * prevent the problem from propagating. |
1405 | */ |
1406 | static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r) |
1407 | { |
1408 | if (r->nr_accesses_bp == r->nr_accesses * 10000) |
1409 | return; |
1410 | WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n", |
1411 | r->nr_accesses_bp, r->nr_accesses); |
1412 | r->nr_accesses_bp = r->nr_accesses * 10000; |
1413 | } |
1414 | |
1415 | /* |
1416 | * Reset the aggregated monitoring results ('nr_accesses' of each region). |
1417 | */ |
1418 | static void kdamond_reset_aggregated(struct damon_ctx *c) |
1419 | { |
1420 | struct damon_target *t; |
1421 | unsigned int ti = 0; /* target's index */ |
1422 | |
1423 | damon_for_each_target(t, c) { |
1424 | struct damon_region *r; |
1425 | |
1426 | damon_for_each_region(r, t) { |
1427 | trace_damon_aggregated(ti, r, damon_nr_regions(t)); |
1428 | damon_warn_fix_nr_accesses_corruption(r); |
1429 | r->last_nr_accesses = r->nr_accesses; |
1430 | r->nr_accesses = 0; |
1431 | } |
1432 | ti++; |
1433 | } |
1434 | } |
1435 | |
1436 | static unsigned long damon_get_intervals_score(struct damon_ctx *c) |
1437 | { |
1438 | struct damon_target *t; |
1439 | struct damon_region *r; |
1440 | unsigned long sz_region, max_access_events = 0, access_events = 0; |
1441 | unsigned long target_access_events; |
1442 | unsigned long goal_bp = c->attrs.intervals_goal.access_bp; |
1443 | |
1444 | damon_for_each_target(t, c) { |
1445 | damon_for_each_region(r, t) { |
1446 | sz_region = damon_sz_region(r); |
1447 | max_access_events += sz_region * c->attrs.aggr_samples; |
1448 | access_events += sz_region * r->nr_accesses; |
1449 | } |
1450 | } |
1451 | target_access_events = max_access_events * goal_bp / 10000; |
1452 | return access_events * 10000 / target_access_events; |
1453 | } |
1454 | |
1455 | static unsigned long damon_feed_loop_next_input(unsigned long last_input, |
1456 | unsigned long score); |
1457 | |
1458 | static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c) |
1459 | { |
1460 | unsigned long score_bp, adaptation_bp; |
1461 | |
1462 | score_bp = damon_get_intervals_score(c); |
1463 | adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) / |
1464 | 10000; |
1465 | /* |
1466 | * adaptation_bp ranges from 1 to 20,000. Avoid too rapid reduction of |
1467 | * the intervals by rescaling [1, 10,000] to [5,000, 10,000]. |
1468 | */ |
1469 | if (adaptation_bp <= 10000) |
1470 | adaptation_bp = 5000 + adaptation_bp / 2; |
1471 | return adaptation_bp; |
1472 | } |
1473 | |
1474 | static void kdamond_tune_intervals(struct damon_ctx *c) |
1475 | { |
1476 | unsigned long adaptation_bp; |
1477 | struct damon_attrs new_attrs; |
1478 | struct damon_intervals_goal *goal; |
1479 | |
1480 | adaptation_bp = damon_get_intervals_adaptation_bp(c); |
1481 | if (adaptation_bp == 10000) |
1482 | return; |
1483 | |
1484 | new_attrs = c->attrs; |
1485 | goal = &c->attrs.intervals_goal; |
1486 | new_attrs.sample_interval = min(goal->max_sample_us, |
1487 | c->attrs.sample_interval * adaptation_bp / 10000); |
1488 | new_attrs.sample_interval = max(goal->min_sample_us, |
1489 | new_attrs.sample_interval); |
1490 | new_attrs.aggr_interval = new_attrs.sample_interval * |
1491 | c->attrs.aggr_samples; |
1492 | damon_set_attrs(c, &new_attrs); |
1493 | } |
1494 | |
1495 | static void damon_split_region_at(struct damon_target *t, |
1496 | struct damon_region *r, unsigned long sz_r); |
1497 | |
1498 | static bool __damos_valid_target(struct damon_region *r, struct damos *s) |
1499 | { |
1500 | unsigned long sz; |
1501 | unsigned int nr_accesses = r->nr_accesses_bp / 10000; |
1502 | |
1503 | sz = damon_sz_region(r); |
1504 | return s->pattern.min_sz_region <= sz && |
1505 | sz <= s->pattern.max_sz_region && |
1506 | s->pattern.min_nr_accesses <= nr_accesses && |
1507 | nr_accesses <= s->pattern.max_nr_accesses && |
1508 | s->pattern.min_age_region <= r->age && |
1509 | r->age <= s->pattern.max_age_region; |
1510 | } |
1511 | |
1512 | static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, |
1513 | struct damon_region *r, struct damos *s) |
1514 | { |
1515 | bool ret = __damos_valid_target(r, s); |
1516 | |
1517 | if (!ret || !s->quota.esz || !c->ops.get_scheme_score) |
1518 | return ret; |
1519 | |
1520 | return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score; |
1521 | } |
1522 | |
1523 | /* |
1524 | * damos_skip_charged_region() - Check if the given region or starting part of |
1525 | * it is already charged for the DAMOS quota. |
1526 | * @t: The target of the region. |
1527 | * @rp: The pointer to the region. |
1528 | * @s: The scheme to be applied. |
1529 | * |
1530 | * If the quota of a scheme has been exceeded in a quota charge window, the |
1531 | * scheme's action would be applied to only a part of the regions fulfilling |
1532 | * the target access pattern. To avoid applying the scheme action only to |
1533 | * already applied regions, DAMON skips applying the scheme action to the |
1534 | * regions that were charged in the previous charge window. |
1535 | * |
1536 | * This function checks if a given region should be skipped or not for the |
1537 | * reason. If only the starting part of the region has previously been charged, |
1538 | * this function splits the region into two so that the second one covers the |
1539 | * area that was not charged in the previous charge window, saves the second |
1540 | * region in *rp, and returns false, so that the caller can apply the DAMON action |
1541 | * to the second one. |
1542 | * |
1543 | * Return: true if the region should be entirely skipped, false otherwise. |
1544 | */ |
1545 | static bool damos_skip_charged_region(struct damon_target *t, |
1546 | struct damon_region **rp, struct damos *s) |
1547 | { |
1548 | struct damon_region *r = *rp; |
1549 | struct damos_quota *quota = &s->quota; |
1550 | unsigned long sz_to_skip; |
1551 | |
1552 | /* Skip previously charged regions */ |
1553 | if (quota->charge_target_from) { |
1554 | if (t != quota->charge_target_from) |
1555 | return true; |
1556 | if (r == damon_last_region(t)) { |
1557 | quota->charge_target_from = NULL; |
1558 | quota->charge_addr_from = 0; |
1559 | return true; |
1560 | } |
1561 | if (quota->charge_addr_from && |
1562 | r->ar.end <= quota->charge_addr_from) |
1563 | return true; |
1564 | |
1565 | if (quota->charge_addr_from && r->ar.start < |
1566 | quota->charge_addr_from) { |
1567 | sz_to_skip = ALIGN_DOWN(quota->charge_addr_from - |
1568 | r->ar.start, DAMON_MIN_REGION); |
1569 | if (!sz_to_skip) { |
1570 | if (damon_sz_region(r) <= DAMON_MIN_REGION) |
1571 | return true; |
1572 | sz_to_skip = DAMON_MIN_REGION; |
1573 | } |
1574 | damon_split_region_at(t, r, sz_to_skip); |
1575 | r = damon_next_region(r); |
1576 | *rp = r; |
1577 | } |
1578 | quota->charge_target_from = NULL; |
1579 | quota->charge_addr_from = 0; |
1580 | } |
1581 | return false; |
1582 | } |
1583 | |
1584 | static void damos_update_stat(struct damos *s, |
1585 | unsigned long sz_tried, unsigned long sz_applied, |
1586 | unsigned long sz_ops_filter_passed) |
1587 | { |
1588 | s->stat.nr_tried++; |
1589 | s->stat.sz_tried += sz_tried; |
1590 | if (sz_applied) |
1591 | s->stat.nr_applied++; |
1592 | s->stat.sz_applied += sz_applied; |
1593 | s->stat.sz_ops_filter_passed += sz_ops_filter_passed; |
1594 | } |
1595 | |
1596 | static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, |
1597 | struct damon_region *r, struct damos_filter *filter) |
1598 | { |
1599 | bool matched = false; |
1600 | struct damon_target *ti; |
1601 | int target_idx = 0; |
1602 | unsigned long start, end; |
1603 | |
1604 | switch (filter->type) { |
1605 | case DAMOS_FILTER_TYPE_TARGET: |
1606 | damon_for_each_target(ti, ctx) { |
1607 | if (ti == t) |
1608 | break; |
1609 | target_idx++; |
1610 | } |
1611 | matched = target_idx == filter->target_idx; |
1612 | break; |
1613 | case DAMOS_FILTER_TYPE_ADDR: |
1614 | start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION); |
1615 | end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION); |
1616 | |
1617 | /* inside the range */ |
1618 | if (start <= r->ar.start && r->ar.end <= end) { |
1619 | matched = true; |
1620 | break; |
1621 | } |
1622 | /* outside of the range */ |
1623 | if (r->ar.end <= start || end <= r->ar.start) { |
1624 | matched = false; |
1625 | break; |
1626 | } |
1627 | /* start before the range and overlap */ |
1628 | if (r->ar.start < start) { |
1629 | damon_split_region_at(t, r, start - r->ar.start); |
1630 | matched = false; |
1631 | break; |
1632 | } |
1633 | /* start inside the range */ |
1634 | damon_split_region_at(t, r, end - r->ar.start); |
1635 | matched = true; |
1636 | break; |
1637 | default: |
1638 | return false; |
1639 | } |
1640 | |
1641 | return matched == filter->matching; |
1642 | } |
1643 | |
1644 | static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, |
1645 | struct damon_region *r, struct damos *s) |
1646 | { |
1647 | struct damos_filter *filter; |
1648 | |
1649 | s->core_filters_allowed = false; |
1650 | damos_for_each_filter(filter, s) { |
1651 | if (damos_filter_match(ctx, t, r, filter)) { |
1652 | if (filter->allow) |
1653 | s->core_filters_allowed = true; |
1654 | return !filter->allow; |
1655 | } |
1656 | } |
1657 | return s->core_filters_default_reject; |
1658 | } |
1659 | |
1660 | /* |
1661 | * damos_walk_call_walk() - Call &damos_walk_control->walk_fn. |
1662 | * @ctx: The context of &damon_ctx->walk_control. |
1663 | * @t: The monitoring target of @r that @s will be applied. |
1664 | * @r: The region of @t that @s will be applied. |
1665 | * @s: The scheme of @ctx that will be applied to @r. |
1666 | * |
1667 | * This function is called from kdamond whenever it asked the operation set to |
1668 | * apply a DAMOS scheme action to a region. If a DAMOS walk request is |
1669 | * installed by damos_walk() and not yet uninstalled, invoke it. |
1670 | */ |
1671 | static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, |
1672 | struct damon_region *r, struct damos *s, |
1673 | unsigned long sz_filter_passed) |
1674 | { |
1675 | struct damos_walk_control *control; |
1676 | |
1677 | if (s->walk_completed) |
1678 | return; |
1679 | |
1680 | control = ctx->walk_control; |
1681 | if (!control) |
1682 | return; |
1683 | |
1684 | control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed); |
1685 | } |
1686 | |
1687 | /* |
1688 | * damos_walk_complete() - Complete DAMOS walk request if all walks are done. |
1689 | * @ctx: The context of &damon_ctx->walk_control. |
1690 | * @s: A scheme of @ctx that all walks are now done. |
1691 | * |
1692 | * This function is called when kdamond has finished applying the action of a
1693 | * DAMOS scheme to all regions that are eligible for the given
1694 | * &damos->apply_interval_us. If every scheme of @ctx including @s has now
1695 | * finished walking for at least one &damos->apply_interval_us, this function
1696 | * marks the given DAMOS walk request as done, so that damos_walk() can wake up and return.
1697 | */ |
1698 | static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) |
1699 | { |
1700 | struct damos *siter; |
1701 | struct damos_walk_control *control; |
1702 | |
1703 | control = ctx->walk_control; |
1704 | if (!control) |
1705 | return; |
1706 | |
1707 | s->walk_completed = true; |
1708 | /* if all schemes completed, signal completion to walker */ |
1709 | damon_for_each_scheme(siter, ctx) { |
1710 | if (!siter->walk_completed) |
1711 | return; |
1712 | } |
1713 | damon_for_each_scheme(siter, ctx) |
1714 | siter->walk_completed = false; |
1715 | |
1716 | complete(&control->completion); |
1717 | ctx->walk_control = NULL; |
1718 | } |
1719 | |
1720 | /* |
1721 | * damos_walk_cancel() - Cancel the current DAMOS walk request. |
1722 | * @ctx: The context of &damon_ctx->walk_control. |
1723 | * |
1724 | * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS |
1725 | * walk is requested but there is no DAMOS scheme to walk for, or the kdamond |
1726 | * is already out of the main loop and therefore going to be terminated, and hence
1727 | * cannot continue the walks. This function therefore marks the walk request |
1728 | * as canceled, so that damos_walk() can wake up and return. |
1729 | */ |
1730 | static void damos_walk_cancel(struct damon_ctx *ctx) |
1731 | { |
1732 | struct damos_walk_control *control; |
1733 | |
1734 | mutex_lock(&ctx->walk_control_lock); |
1735 | control = ctx->walk_control; |
1736 | mutex_unlock(&ctx->walk_control_lock);
1737 | |
1738 | if (!control) |
1739 | return; |
1740 | control->canceled = true; |
1741 | complete(&control->completion); |
1742 | mutex_lock(&ctx->walk_control_lock); |
1743 | ctx->walk_control = NULL; |
1744 | mutex_unlock(&ctx->walk_control_lock);
1745 | } |
1746 | |
1747 | static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, |
1748 | struct damon_region *r, struct damos *s) |
1749 | { |
1750 | struct damos_quota *quota = &s->quota; |
1751 | unsigned long sz = damon_sz_region(r); |
1752 | struct timespec64 begin, end; |
1753 | unsigned long sz_applied = 0; |
1754 | unsigned long sz_ops_filter_passed = 0; |
1755 | /* |
1756 | * We plan to support multiple contexts per kdamond, as the DAMON sysfs
1757 | * interface implies with its 'nr_contexts' file. Nevertheless, only a single
1758 | * context per kdamond is supported for now. So, we can simply use the '0'
1759 | * context index here.
1760 | */ |
1761 | unsigned int cidx = 0; |
1762 | struct damos *siter; /* schemes iterator */ |
1763 | unsigned int sidx = 0; |
1764 | struct damon_target *titer; /* targets iterator */ |
1765 | unsigned int tidx = 0; |
1766 | bool do_trace = false; |
1767 | |
1768 | /* get indices for trace_damos_before_apply() */ |
1769 | if (trace_damos_before_apply_enabled()) { |
1770 | damon_for_each_scheme(siter, c) { |
1771 | if (siter == s) |
1772 | break; |
1773 | sidx++; |
1774 | } |
1775 | damon_for_each_target(titer, c) { |
1776 | if (titer == t) |
1777 | break; |
1778 | tidx++; |
1779 | } |
1780 | do_trace = true; |
1781 | } |
1782 | |
1783 | if (c->ops.apply_scheme) { |
1784 | if (quota->esz && quota->charged_sz + sz > quota->esz) { |
1785 | sz = ALIGN_DOWN(quota->esz - quota->charged_sz, |
1786 | DAMON_MIN_REGION); |
1787 | if (!sz) |
1788 | goto update_stat; |
1789 | damon_split_region_at(t, r, sz);
1790 | }
1791 | if (damos_filter_out(c, t, r, s))
1792 | return;
1793 | ktime_get_coarse_ts64(&begin);
1794 | trace_damos_before_apply(cidx, sidx, tidx, r,
1795 | damon_nr_regions(t), do_trace);
1796 | sz_applied = c->ops.apply_scheme(c, t, r, s,
1797 | &sz_ops_filter_passed);
1798 | damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
1799 | ktime_get_coarse_ts64(&end);
1800 | quota->total_charged_ns += timespec64_to_ns(&end) -
1801 | timespec64_to_ns(&begin);
1802 | quota->charged_sz += sz; |
1803 | if (quota->esz && quota->charged_sz >= quota->esz) { |
1804 | quota->charge_target_from = t; |
1805 | quota->charge_addr_from = r->ar.end + 1; |
1806 | } |
1807 | } |
1808 | if (s->action != DAMOS_STAT) |
1809 | r->age = 0; |
1810 | |
1811 | update_stat: |
1812 | damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
1813 | } |
1814 | |
1815 | static void damon_do_apply_schemes(struct damon_ctx *c, |
1816 | struct damon_target *t, |
1817 | struct damon_region *r) |
1818 | { |
1819 | struct damos *s; |
1820 | |
1821 | damon_for_each_scheme(s, c) { |
1822 | struct damos_quota *quota = &s->quota; |
1823 | |
1824 | if (c->passed_sample_intervals < s->next_apply_sis) |
1825 | continue; |
1826 | |
1827 | if (!s->wmarks.activated) |
1828 | continue; |
1829 | |
1830 | /* Check the quota */ |
1831 | if (quota->esz && quota->charged_sz >= quota->esz) |
1832 | continue; |
1833 | |
1834 | if (damos_skip_charged_region(t, &r, s))
1835 | continue; |
1836 | |
1837 | if (!damos_valid_target(c, t, r, s)) |
1838 | continue; |
1839 | |
1840 | damos_apply_scheme(c, t, r, s); |
1841 | } |
1842 | } |
1843 | |
1844 | /* |
1845 | * damon_feed_loop_next_input() - get next input to achieve a target score. |
1846 | * @last_input: The last input.
1847 | * @score: Current score that was made with @last_input.
1848 | * |
1849 | * Calculate next input to achieve the target score, based on the last input |
1850 | * and current score. Assuming the input and the score are positively |
1851 | * proportional, calculate how much compensation should be added to or |
1852 | * subtracted from the last input as a proportion of the last input. To keep
1853 | * the next input from collapsing to zero, it is always set non-zero. In short form
1854 | * (assuming support of float and signed calculations), the algorithm is as |
1855 | * below. |
1856 | * |
1857 | * next_input = max(last_input * ((goal - current) / goal + 1), 1) |
1858 | * |
1859 | * For simple implementation, we assume the target score is always 10,000. The |
1860 | * caller should adjust @score for this. |
1861 | * |
1862 | * Return: Next input that is assumed to achieve the target score.
1863 | */ |
1864 | static unsigned long damon_feed_loop_next_input(unsigned long last_input, |
1865 | unsigned long score) |
1866 | { |
1867 | const unsigned long goal = 10000; |
1868 | /* Set minimum input as 10000 to keep the compensation from becoming zero */
1869 | const unsigned long min_input = 10000; |
1870 | unsigned long score_goal_diff, compensation; |
1871 | bool over_achieving = score > goal; |
1872 | |
1873 | if (score == goal) |
1874 | return last_input; |
1875 | if (score >= goal * 2) |
1876 | return min_input; |
1877 | |
1878 | if (over_achieving) |
1879 | score_goal_diff = score - goal; |
1880 | else |
1881 | score_goal_diff = goal - score; |
1882 | |
1883 | if (last_input < ULONG_MAX / score_goal_diff) |
1884 | compensation = last_input * score_goal_diff / goal; |
1885 | else |
1886 | compensation = last_input / goal * score_goal_diff; |
1887 | |
1888 | if (over_achieving) |
1889 | return max(last_input - compensation, min_input); |
1890 | if (last_input < ULONG_MAX - compensation) |
1891 | return last_input + compensation; |
1892 | return ULONG_MAX; |
1893 | } |
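|
| /*
|  * Illustrative numbers for the feedback loop above (hypothetical values,
|  * only for explanation): with a last_input of 1,000,000 and a score of
|  * 5,000 against the fixed 10,000 goal, score_goal_diff is 5,000 and the
|  * compensation is 1,000,000 * 5,000 / 10,000 = 500,000.  Since the goal
|  * is under-achieved, the next input becomes 1,500,000.  With a score of
|  * 15,000 the same compensation is instead subtracted, giving 500,000.
|  */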
1894 | |
1895 | #ifdef CONFIG_PSI |
1896 | |
1897 | static u64 damos_get_some_mem_psi_total(void) |
1898 | { |
1899 | if (static_branch_likely(&psi_disabled)) |
1900 | return 0; |
1901 | return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
1902 | NSEC_PER_USEC); |
1903 | } |
1904 | |
1905 | #else /* CONFIG_PSI */ |
1906 | |
1907 | static inline u64 damos_get_some_mem_psi_total(void) |
1908 | { |
1909 | return 0; |
1910 | }
1911 | |
1912 | #endif /* CONFIG_PSI */ |
1913 | |
1914 | #ifdef CONFIG_NUMA |
1915 | static __kernel_ulong_t damos_get_node_mem_bp( |
1916 | struct damos_quota_goal *goal) |
1917 | { |
1918 | struct sysinfo i; |
1919 | __kernel_ulong_t numerator; |
1920 | |
1921 | si_meminfo_node(&i, goal->nid);
1922 | if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP) |
1923 | numerator = i.totalram - i.freeram; |
1924 | else /* DAMOS_QUOTA_NODE_MEM_FREE_BP */ |
1925 | numerator = i.freeram; |
1926 | return numerator * 10000 / i.totalram; |
1927 | } |
1928 | #else |
1929 | static __kernel_ulong_t damos_get_node_mem_bp( |
1930 | struct damos_quota_goal *goal) |
1931 | { |
1932 | return 0; |
1933 | } |
1934 | #endif |
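|
| /*
|  * For example (hypothetical numbers), on a node with i.totalram of
|  * 1,000,000 pages of which 250,000 are free, DAMOS_QUOTA_NODE_MEM_USED_BP
|  * evaluates to 750,000 * 10000 / 1,000,000 = 7500 (75.00%), while
|  * DAMOS_QUOTA_NODE_MEM_FREE_BP evaluates to 2500.
|  */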
1935 | |
1936 | |
1937 | static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal) |
1938 | { |
1939 | u64 now_psi_total; |
1940 | |
1941 | switch (goal->metric) { |
1942 | case DAMOS_QUOTA_USER_INPUT: |
1943 | /* User should already set goal->current_value */ |
1944 | break; |
1945 | case DAMOS_QUOTA_SOME_MEM_PSI_US: |
1946 | now_psi_total = damos_get_some_mem_psi_total(); |
1947 | goal->current_value = now_psi_total - goal->last_psi_total; |
1948 | goal->last_psi_total = now_psi_total; |
1949 | break; |
1950 | case DAMOS_QUOTA_NODE_MEM_USED_BP: |
1951 | case DAMOS_QUOTA_NODE_MEM_FREE_BP: |
1952 | goal->current_value = damos_get_node_mem_bp(goal); |
1953 | break; |
1954 | default: |
1955 | break; |
1956 | } |
1957 | } |
1958 | |
1959 | /* Return the highest score since it makes schemes least aggressive */ |
1960 | static unsigned long damos_quota_score(struct damos_quota *quota) |
1961 | { |
1962 | struct damos_quota_goal *goal; |
1963 | unsigned long highest_score = 0; |
1964 | |
1965 | damos_for_each_quota_goal(goal, quota) { |
1966 | damos_set_quota_goal_current_value(goal); |
1967 | highest_score = max(highest_score, |
1968 | goal->current_value * 10000 / |
1969 | goal->target_value); |
1970 | } |
1971 | |
1972 | return highest_score; |
1973 | } |
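|
| /*
|  * For instance (assumed numbers), a goal with target_value 1,000 and
|  * current_value 1,500 scores 15,000, while one with target_value 2,000
|  * and current_value 1,000 scores 5,000.  The returned highest score,
|  * 15,000, is what drives the feedback loop, keeping the schemes least
|  * aggressive.
|  */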
1974 | |
1975 | /* |
1976 | * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
1977 | */ |
1978 | static void damos_set_effective_quota(struct damos_quota *quota) |
1979 | { |
1980 | unsigned long throughput; |
1981 | unsigned long esz = ULONG_MAX; |
1982 | |
1983 | if (!quota->ms && list_empty(&quota->goals)) {
1984 | quota->esz = quota->sz; |
1985 | return; |
1986 | } |
1987 | |
1988 | if (!list_empty(&quota->goals)) {
1989 | unsigned long score = damos_quota_score(quota); |
1990 | |
1991 | quota->esz_bp = damon_feed_loop_next_input( |
1992 | max(quota->esz_bp, 10000UL), |
1993 | score); |
1994 | esz = quota->esz_bp / 10000; |
1995 | } |
1996 | |
1997 | if (quota->ms) { |
1998 | if (quota->total_charged_ns) |
1999 | throughput = quota->total_charged_sz * 1000000 / |
2000 | quota->total_charged_ns; |
2001 | else |
2002 | throughput = PAGE_SIZE * 1024; |
2003 | esz = min(throughput * quota->ms, esz); |
2004 | } |
2005 | |
2006 | if (quota->sz && quota->sz < esz) |
2007 | esz = quota->sz; |
2008 | |
2009 | quota->esz = esz; |
2010 | } |
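|
| /*
|  * A rough sketch of how the limits above combine, using made-up numbers:
|  * if the feedback loop suggests an esz of 300 MiB, a quota->ms of 100
|  * with a measured throughput of 1 MiB per millisecond caps it at 100 MiB,
|  * and a quota->sz of 50 MiB finally lowers it to 50 MiB, which becomes
|  * quota->esz for the next charge window.
|  */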
2011 | |
2012 | static void damos_adjust_quota(struct damon_ctx *c, struct damos *s) |
2013 | { |
2014 | struct damos_quota *quota = &s->quota; |
2015 | struct damon_target *t; |
2016 | struct damon_region *r; |
2017 | unsigned long cumulated_sz; |
2018 | unsigned int score, max_score = 0; |
2019 | |
2020 | if (!quota->ms && !quota->sz && list_empty(&quota->goals))
2021 | return; |
2022 | |
2023 | /* New charge window starts */ |
2024 | if (time_after_eq(jiffies, quota->charged_from + |
2025 | msecs_to_jiffies(quota->reset_interval))) { |
2026 | if (quota->esz && quota->charged_sz >= quota->esz) |
2027 | s->stat.qt_exceeds++; |
2028 | quota->total_charged_sz += quota->charged_sz; |
2029 | quota->charged_from = jiffies; |
2030 | quota->charged_sz = 0; |
2031 | damos_set_effective_quota(quota); |
2032 | } |
2033 | |
2034 | if (!c->ops.get_scheme_score) |
2035 | return; |
2036 | |
2037 | /* Fill up the score histogram */ |
2038 | memset(c->regions_score_histogram, 0, |
2039 | sizeof(*c->regions_score_histogram) * |
2040 | (DAMOS_MAX_SCORE + 1)); |
2041 | damon_for_each_target(t, c) { |
2042 | damon_for_each_region(r, t) { |
2043 | if (!__damos_valid_target(r, s)) |
2044 | continue; |
2045 | score = c->ops.get_scheme_score(c, t, r, s); |
2046 | c->regions_score_histogram[score] += |
2047 | damon_sz_region(r); |
2048 | if (score > max_score) |
2049 | max_score = score; |
2050 | } |
2051 | } |
2052 | |
2053 | /* Set the min score limit */ |
2054 | for (cumulated_sz = 0, score = max_score; ; score--) { |
2055 | cumulated_sz += c->regions_score_histogram[score]; |
2056 | if (cumulated_sz >= quota->esz || !score) |
2057 | break; |
2058 | } |
2059 | quota->min_score = score; |
2060 | } |
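|
| /*
|  * Example of the min_score selection above (numbers are only
|  * illustrative): with a quota->esz of 100 MiB and regions whose
|  * operations-set scores are 90 (60 MiB), 80 (50 MiB) and 70 (30 MiB),
|  * walking the histogram down from the maximum score accumulates 60 MiB
|  * and then 110 MiB, so quota->min_score becomes 80 and lower-scored
|  * regions are skipped by damos_valid_target() until the next window.
|  */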
2061 | |
2062 | static void kdamond_apply_schemes(struct damon_ctx *c) |
2063 | { |
2064 | struct damon_target *t; |
2065 | struct damon_region *r, *next_r; |
2066 | struct damos *s; |
2067 | unsigned long sample_interval = c->attrs.sample_interval ? |
2068 | c->attrs.sample_interval : 1; |
2069 | bool has_schemes_to_apply = false; |
2070 | |
2071 | damon_for_each_scheme(s, c) { |
2072 | if (c->passed_sample_intervals < s->next_apply_sis) |
2073 | continue; |
2074 | |
2075 | if (!s->wmarks.activated) |
2076 | continue; |
2077 | |
2078 | has_schemes_to_apply = true; |
2079 | |
2080 | damos_adjust_quota(c, s); |
2081 | } |
2082 | |
2083 | if (!has_schemes_to_apply) |
2084 | return; |
2085 | |
2086 | mutex_lock(&c->walk_control_lock); |
2087 | damon_for_each_target(t, c) { |
2088 | damon_for_each_region_safe(r, next_r, t) |
2089 | damon_do_apply_schemes(c, t, r); |
2090 | } |
2091 | |
2092 | damon_for_each_scheme(s, c) { |
2093 | if (c->passed_sample_intervals < s->next_apply_sis) |
2094 | continue; |
2095 | damos_walk_complete(c, s);
2096 | s->next_apply_sis = c->passed_sample_intervals + |
2097 | (s->apply_interval_us ? s->apply_interval_us : |
2098 | c->attrs.aggr_interval) / sample_interval; |
2099 | s->last_applied = NULL; |
2100 | } |
2101 | mutex_unlock(&c->walk_control_lock);
2102 | } |
2103 | |
2104 | /* |
2105 | * Merge two adjacent regions into one region |
2106 | */ |
2107 | static void damon_merge_two_regions(struct damon_target *t, |
2108 | struct damon_region *l, struct damon_region *r) |
2109 | { |
2110 | unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
2111 | |
2112 | l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) / |
2113 | (sz_l + sz_r); |
2114 | l->nr_accesses_bp = l->nr_accesses * 10000; |
2115 | l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r); |
2116 | l->ar.end = r->ar.end; |
2117 | damon_destroy_region(r, t); |
2118 | } |
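|
| /*
|  * For example (illustrative numbers only), merging a 12 KiB region with
|  * nr_accesses 2 into a 4 KiB region with nr_accesses 10 yields
|  * (10 * 4096 + 2 * 12288) / 16384 = 4 as the size-weighted nr_accesses of
|  * the combined 16 KiB region.
|  */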
2119 | |
2120 | /* |
2121 | * Merge adjacent regions having similar access frequencies |
2122 | * |
2123 | * t target affected by this merge operation |
2124 | * thres '->nr_accesses' diff threshold for the merge |
2125 | * sz_limit size upper limit of each region |
2126 | */ |
2127 | static void damon_merge_regions_of(struct damon_target *t, unsigned int thres, |
2128 | unsigned long sz_limit) |
2129 | { |
2130 | struct damon_region *r, *prev = NULL, *next; |
2131 | |
2132 | damon_for_each_region_safe(r, next, t) { |
2133 | if (abs(r->nr_accesses - r->last_nr_accesses) > thres) |
2134 | r->age = 0; |
2135 | else |
2136 | r->age++; |
2137 | |
2138 | if (prev && prev->ar.end == r->ar.start && |
2139 | abs(prev->nr_accesses - r->nr_accesses) <= thres && |
2140 | damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
2141 | damon_merge_two_regions(t, prev, r);
2142 | else |
2143 | prev = r; |
2144 | } |
2145 | } |
2146 | |
2147 | /* |
2148 | * Merge adjacent regions having similar access frequencies |
2149 | * |
2150 | * threshold '->nr_accesses' diff threshold for the merge |
2151 | * sz_limit size upper limit of each region |
2152 | * |
2153 | * This function merges monitoring target regions which are adjacent and their |
2154 | * access frequencies are similar. This is for minimizing the monitoring |
2155 | * overhead under the dynamically changeable access pattern. If a merge was |
2156 | * unnecessarily made, later 'kdamond_split_regions()' will revert it. |
2157 | * |
2158 | * The total number of regions could be higher than the user-defined limit, |
2159 | * max_nr_regions in some cases. For example, the user can update
2160 | * max_nr_regions to a number lower than the current number of regions
2161 | * while DAMON is running. For such a case, repeat merging until the limit is
2162 | * met while increasing @threshold up to the possible maximum level.
2163 | */ |
2164 | static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold, |
2165 | unsigned long sz_limit) |
2166 | { |
2167 | struct damon_target *t; |
2168 | unsigned int nr_regions; |
2169 | unsigned int max_thres; |
2170 | |
2171 | max_thres = c->attrs.aggr_interval / |
2172 | (c->attrs.sample_interval ? c->attrs.sample_interval : 1); |
2173 | do { |
2174 | nr_regions = 0; |
2175 | damon_for_each_target(t, c) { |
2176 | damon_merge_regions_of(t, threshold, sz_limit);
2177 | nr_regions += damon_nr_regions(t); |
2178 | } |
2179 | threshold = max(1, threshold * 2); |
2180 | } while (nr_regions > c->attrs.max_nr_regions && |
2181 | threshold / 2 < max_thres); |
2182 | } |
2183 | |
2184 | /* |
2185 | * Split a region in two |
2186 | * |
2187 | * r the region to be split |
2188 | * sz_r size of the first sub-region that will be made |
2189 | */ |
2190 | static void damon_split_region_at(struct damon_target *t, |
2191 | struct damon_region *r, unsigned long sz_r) |
2192 | { |
2193 | struct damon_region *new; |
2194 | |
2195 | new = damon_new_region(r->ar.start + sz_r, r->ar.end);
2196 | if (!new) |
2197 | return; |
2198 | |
2199 | r->ar.end = new->ar.start; |
2200 | |
2201 | new->age = r->age; |
2202 | new->last_nr_accesses = r->last_nr_accesses; |
2203 | new->nr_accesses_bp = r->nr_accesses_bp; |
2204 | new->nr_accesses = r->nr_accesses; |
2205 | |
2206 | damon_insert_region(new, r, damon_next_region(r), t);
2207 | } |
2208 | |
2209 | /* Split every region in the given target into 'nr_subs' regions */ |
2210 | static void damon_split_regions_of(struct damon_target *t, int nr_subs) |
2211 | { |
2212 | struct damon_region *r, *next; |
2213 | unsigned long sz_region, sz_sub = 0; |
2214 | int i; |
2215 | |
2216 | damon_for_each_region_safe(r, next, t) { |
2217 | sz_region = damon_sz_region(r); |
2218 | |
2219 | for (i = 0; i < nr_subs - 1 && |
2220 | sz_region > 2 * DAMON_MIN_REGION; i++) { |
2221 | /* |
2222 | * Randomly select the size of the left sub-region to be at
2223 | * least 10% and at most 90% of the original region
2224 | */ |
2225 | sz_sub = ALIGN_DOWN(damon_rand(1, 10) * |
2226 | sz_region / 10, DAMON_MIN_REGION); |
2227 | /* Do not allow blank region */ |
2228 | if (sz_sub == 0 || sz_sub >= sz_region) |
2229 | continue; |
2230 | |
2231 | damon_split_region_at(t, r, sz_sub);
2232 | sz_region = sz_sub; |
2233 | } |
2234 | } |
2235 | } |
2236 | |
2237 | /* |
2238 | * Split every target region into randomly-sized small regions |
2239 | * |
2240 | * This function splits every target region into random-sized small regions if |
2241 | * the current total number of regions is equal to or smaller than half of the
2242 | * user-specified maximum number of regions. This is for maximizing the |
2243 | * monitoring accuracy under the dynamically changeable access patterns. If a |
2244 | * split was unnecessarily made, later 'kdamond_merge_regions()' will revert |
2245 | * it. |
2246 | */ |
2247 | static void kdamond_split_regions(struct damon_ctx *ctx) |
2248 | { |
2249 | struct damon_target *t; |
2250 | unsigned int nr_regions = 0; |
2251 | static unsigned int last_nr_regions; |
2252 | int nr_subregions = 2; |
2253 | |
2254 | damon_for_each_target(t, ctx) |
2255 | nr_regions += damon_nr_regions(t); |
2256 | |
2257 | if (nr_regions > ctx->attrs.max_nr_regions / 2) |
2258 | return; |
2259 | |
2260 | /* Maybe the middle of the region has different access frequency */ |
2261 | if (last_nr_regions == nr_regions && |
2262 | nr_regions < ctx->attrs.max_nr_regions / 3) |
2263 | nr_subregions = 3; |
2264 | |
2265 | damon_for_each_target(t, ctx) |
2266 | damon_split_regions_of(t, nr_subregions);
2267 | |
2268 | last_nr_regions = nr_regions; |
2269 | } |
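|
| /*
|  * For instance (assumed numbers), with max_nr_regions of 1,000 and 400
|  * current regions, every region is split in two, roughly doubling the
|  * count.  If the number of regions then stays the same across
|  * aggregations and is below a third of the maximum (333 here), three-way
|  * splits are tried instead, to expose access patterns hidden in the
|  * middle of large regions.
|  */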
2270 | |
2271 | /* |
2272 | * Check whether current monitoring should be stopped |
2273 | * |
2274 | * The monitoring is stopped when either the user requested to stop, or all |
2275 | * monitoring targets are invalid. |
2276 | * |
2277 | * Returns true if need to stop current monitoring. |
2278 | */ |
2279 | static bool kdamond_need_stop(struct damon_ctx *ctx) |
2280 | { |
2281 | struct damon_target *t; |
2282 | |
2283 | if (kthread_should_stop()) |
2284 | return true; |
2285 | |
2286 | if (!ctx->ops.target_valid) |
2287 | return false; |
2288 | |
2289 | damon_for_each_target(t, ctx) { |
2290 | if (ctx->ops.target_valid(t)) |
2291 | return false; |
2292 | } |
2293 | |
2294 | return true; |
2295 | } |
2296 | |
2297 | static int damos_get_wmark_metric_value(enum damos_wmark_metric metric, |
2298 | unsigned long *metric_value) |
2299 | { |
2300 | switch (metric) { |
2301 | case DAMOS_WMARK_FREE_MEM_RATE: |
2302 | *metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2303 | totalram_pages(); |
2304 | return 0; |
2305 | default: |
2306 | break; |
2307 | } |
2308 | return -EINVAL; |
2309 | } |
2310 | |
2311 | /* |
2312 | * Returns zero if the scheme is active. Else, returns time to wait for next |
2313 | * watermark check in microseconds.
2314 | */ |
2315 | static unsigned long damos_wmark_wait_us(struct damos *scheme) |
2316 | { |
2317 | unsigned long metric; |
2318 | |
2319 | if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2320 | return 0; |
2321 | |
2322 | /* higher than high watermark or lower than low watermark */ |
2323 | if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) { |
2324 | if (scheme->wmarks.activated) |
2325 | pr_debug("deactivate a scheme (%d) for %s wmark\n", |
2326 | scheme->action, |
2327 | str_high_low(metric > scheme->wmarks.high)); |
2328 | scheme->wmarks.activated = false; |
2329 | return scheme->wmarks.interval; |
2330 | } |
2331 | |
2332 | /* inactive and higher than middle watermark */ |
2333 | if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) && |
2334 | !scheme->wmarks.activated) |
2335 | return scheme->wmarks.interval; |
2336 | |
2337 | if (!scheme->wmarks.activated) |
2338 | pr_debug("activate a scheme (%d)\n", scheme->action); |
2339 | scheme->wmarks.activated = true; |
2340 | return 0; |
2341 | } |
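|
| /*
|  * Example with DAMOS_WMARK_FREE_MEM_RATE (a per-thousand value) and
|  * hypothetical watermarks high=500, mid=400, low=300: a metric of 600 or
|  * 250 deactivates the scheme, a metric of 450 keeps an inactive scheme
|  * waiting, and a metric of 350 (below mid but above low) activates it.
|  * Once active, the scheme stays active while the metric is within
|  * [300, 500].
|  */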
2342 | |
2343 | static void kdamond_usleep(unsigned long usecs) |
2344 | { |
2345 | if (usecs >= USLEEP_RANGE_UPPER_BOUND) |
2346 | schedule_timeout_idle(usecs_to_jiffies(usecs));
2347 | else
2348 | usleep_range_idle(usecs, usecs + 1);
2349 | } |
2350 | |
2351 | /* |
2352 | * kdamond_call() - handle damon_call_control. |
2353 | * @ctx: The &struct damon_ctx of the kdamond. |
2354 | * @cancel: Whether to cancel the invocation of the function. |
2355 | * |
2356 | * If there is a &struct damon_call_control request that was registered via
2357 | * &damon_call() on @ctx, do or cancel the invocation of the function depending |
2358 | * on @cancel. @cancel is set when the kdamond is deactivated by DAMOS |
2359 | * watermarks, or the kdamond is already out of the main loop and therefore |
2360 | * will be terminated. |
2361 | */ |
2362 | static void kdamond_call(struct damon_ctx *ctx, bool cancel) |
2363 | { |
2364 | struct damon_call_control *control; |
2365 | int ret = 0; |
2366 | |
2367 | mutex_lock(&ctx->call_control_lock); |
2368 | control = ctx->call_control; |
2369 | mutex_unlock(&ctx->call_control_lock);
2370 | if (!control) |
2371 | return; |
2372 | if (cancel) { |
2373 | control->canceled = true; |
2374 | } else { |
2375 | ret = control->fn(control->data); |
2376 | control->return_code = ret; |
2377 | } |
2378 | complete(&control->completion); |
2379 | mutex_lock(&ctx->call_control_lock); |
2380 | ctx->call_control = NULL; |
2381 | mutex_unlock(&ctx->call_control_lock);
2382 | } |
2383 | |
2384 | /* Returns negative error code if it's not activated but should return */ |
2385 | static int kdamond_wait_activation(struct damon_ctx *ctx) |
2386 | { |
2387 | struct damos *s; |
2388 | unsigned long wait_time; |
2389 | unsigned long min_wait_time = 0; |
2390 | bool init_wait_time = false; |
2391 | |
2392 | while (!kdamond_need_stop(ctx)) { |
2393 | damon_for_each_scheme(s, ctx) { |
2394 | wait_time = damos_wmark_wait_us(s);
2395 | if (!init_wait_time || wait_time < min_wait_time) { |
2396 | init_wait_time = true; |
2397 | min_wait_time = wait_time; |
2398 | } |
2399 | } |
2400 | if (!min_wait_time) |
2401 | return 0; |
2402 | |
2403 | kdamond_usleep(min_wait_time);
2404 | |
2405 | if (ctx->callback.after_wmarks_check && |
2406 | ctx->callback.after_wmarks_check(ctx)) |
2407 | break; |
2408 | kdamond_call(ctx, true);
2409 | damos_walk_cancel(ctx); |
2410 | } |
2411 | return -EBUSY; |
2412 | } |
2413 | |
2414 | static void kdamond_init_ctx(struct damon_ctx *ctx) |
2415 | { |
2416 | unsigned long sample_interval = ctx->attrs.sample_interval ? |
2417 | ctx->attrs.sample_interval : 1; |
2418 | unsigned long apply_interval; |
2419 | struct damos *scheme; |
2420 | |
2421 | ctx->passed_sample_intervals = 0; |
2422 | ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; |
2423 | ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / |
2424 | sample_interval; |
2425 | ctx->next_intervals_tune_sis = ctx->next_aggregation_sis * |
2426 | ctx->attrs.intervals_goal.aggrs; |
2427 | |
2428 | damon_for_each_scheme(scheme, ctx) { |
2429 | apply_interval = scheme->apply_interval_us ? |
2430 | scheme->apply_interval_us : ctx->attrs.aggr_interval; |
2431 | scheme->next_apply_sis = apply_interval / sample_interval; |
2432 | damos_set_filters_default_reject(scheme);
2433 | } |
2434 | } |
2435 | |
2436 | /* |
2437 | * The monitoring daemon that runs as a kernel thread |
2438 | */ |
2439 | static int kdamond_fn(void *data) |
2440 | { |
2441 | struct damon_ctx *ctx = data; |
2442 | struct damon_target *t; |
2443 | struct damon_region *r, *next; |
2444 | unsigned int max_nr_accesses = 0; |
2445 | unsigned long sz_limit = 0; |
2446 | |
2447 | pr_debug("kdamond (%d) starts\n", current->pid); |
2448 | |
2449 | complete(&ctx->kdamond_started); |
2450 | kdamond_init_ctx(ctx); |
2451 | |
2452 | if (ctx->ops.init) |
2453 | ctx->ops.init(ctx); |
2454 | ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1, |
2455 | sizeof(*ctx->regions_score_histogram), GFP_KERNEL); |
2456 | if (!ctx->regions_score_histogram) |
2457 | goto done; |
2458 | |
2459 | sz_limit = damon_region_sz_limit(ctx); |
2460 | |
2461 | while (!kdamond_need_stop(ctx)) { |
2462 | /* |
2463 | * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could |
2464 | * be changed from after_wmarks_check() or after_aggregation() |
2465 | * callbacks. Read the values here, and use those for this |
2466 | * iteration. That is, new values updated by damon_set_attrs()
2467 | * are respected from the next iteration.
2468 | */ |
2469 | unsigned long next_aggregation_sis = ctx->next_aggregation_sis; |
2470 | unsigned long next_ops_update_sis = ctx->next_ops_update_sis; |
2471 | unsigned long sample_interval = ctx->attrs.sample_interval; |
2472 | |
2473 | if (kdamond_wait_activation(ctx)) |
2474 | break; |
2475 | |
2476 | if (ctx->ops.prepare_access_checks) |
2477 | ctx->ops.prepare_access_checks(ctx); |
2478 | |
2479 | kdamond_usleep(sample_interval);
2480 | ctx->passed_sample_intervals++; |
2481 | |
2482 | if (ctx->ops.check_accesses) |
2483 | max_nr_accesses = ctx->ops.check_accesses(ctx); |
2484 | |
2485 | if (ctx->passed_sample_intervals >= next_aggregation_sis) { |
2486 | kdamond_merge_regions(ctx,
2487 | max_nr_accesses / 10,
2488 | sz_limit);
2489 | if (ctx->callback.after_aggregation && |
2490 | ctx->callback.after_aggregation(ctx)) |
2491 | break; |
2492 | } |
2493 | |
2494 | /* |
2495 | * do kdamond_call() and kdamond_apply_schemes() after |
2496 | * kdamond_merge_regions() if possible, to reduce overhead |
2497 | */ |
2498 | kdamond_call(ctx, false);
2499 | if (!list_empty(&ctx->schemes))
2500 | kdamond_apply_schemes(ctx);
2501 | else |
2502 | damos_walk_cancel(ctx); |
2503 | |
2504 | sample_interval = ctx->attrs.sample_interval ? |
2505 | ctx->attrs.sample_interval : 1; |
2506 | if (ctx->passed_sample_intervals >= next_aggregation_sis) { |
2507 | if (ctx->attrs.intervals_goal.aggrs && |
2508 | ctx->passed_sample_intervals >= |
2509 | ctx->next_intervals_tune_sis) { |
2510 | /* |
2511 | * ctx->next_aggregation_sis might be updated |
2512 | * from kdamond_call(). In that case,
2513 | * damon_set_attrs(), which will be called from
2514 | * kdamond_tune_intervals(), may wrongly think
2515 | * this is in the middle of the current
2516 | * aggregation, and reset the aggregation
2517 | * information for all regions. Then, the
2518 | * following kdamond_reset_aggregated() call
2519 | * will make the region information invalid,
2520 | * particularly for ->nr_accesses_bp.
2521 | *
2522 | * Reset ->next_aggregation_sis to avoid that.
2523 | * It will anyway be correctly updated after
2524 | * this if clause.
2525 | */ |
2526 | ctx->next_aggregation_sis = |
2527 | next_aggregation_sis; |
2528 | ctx->next_intervals_tune_sis += |
2529 | ctx->attrs.aggr_samples * |
2530 | ctx->attrs.intervals_goal.aggrs; |
2531 | kdamond_tune_intervals(ctx);
2532 | sample_interval = ctx->attrs.sample_interval ? |
2533 | ctx->attrs.sample_interval : 1; |
2534 | |
2535 | } |
2536 | ctx->next_aggregation_sis = next_aggregation_sis + |
2537 | ctx->attrs.aggr_interval / sample_interval; |
2538 | |
2539 | kdamond_reset_aggregated(ctx);
2540 | kdamond_split_regions(ctx); |
2541 | } |
2542 | |
2543 | if (ctx->passed_sample_intervals >= next_ops_update_sis) { |
2544 | ctx->next_ops_update_sis = next_ops_update_sis + |
2545 | ctx->attrs.ops_update_interval / |
2546 | sample_interval; |
2547 | if (ctx->ops.update) |
2548 | ctx->ops.update(ctx); |
2549 | sz_limit = damon_region_sz_limit(ctx); |
2550 | } |
2551 | } |
2552 | done: |
2553 | damon_for_each_target(t, ctx) { |
2554 | damon_for_each_region_safe(r, next, t) |
2555 | damon_destroy_region(r, t); |
2556 | } |
2557 | |
2558 | if (ctx->callback.before_terminate) |
2559 | ctx->callback.before_terminate(ctx); |
2560 | if (ctx->ops.cleanup) |
2561 | ctx->ops.cleanup(ctx); |
2562 | kfree(ctx->regions_score_histogram);
2563 | |
2564 | pr_debug("kdamond (%d) finishes\n", current->pid); |
2565 | mutex_lock(&ctx->kdamond_lock); |
2566 | ctx->kdamond = NULL; |
2567 | mutex_unlock(&ctx->kdamond_lock);
2568 | |
2569 | kdamond_call(ctx, true);
2570 | damos_walk_cancel(ctx); |
2571 | |
2572 | mutex_lock(&damon_lock); |
2573 | nr_running_ctxs--; |
2574 | if (!nr_running_ctxs && running_exclusive_ctxs) |
2575 | running_exclusive_ctxs = false; |
2576 | mutex_unlock(&damon_lock);
2577 | |
2578 | return 0; |
2579 | } |
2580 | |
2581 | /* |
2582 | * struct damon_system_ram_region - System RAM resource address region of |
2583 | * [@start, @end). |
2584 | * @start: Start address of the region (inclusive). |
2585 | * @end: End address of the region (exclusive). |
2586 | */ |
2587 | struct damon_system_ram_region { |
2588 | unsigned long start; |
2589 | unsigned long end; |
2590 | }; |
2591 | |
2592 | static int walk_system_ram(struct resource *res, void *arg) |
2593 | { |
2594 | struct damon_system_ram_region *a = arg; |
2595 | |
2596 | if (a->end - a->start < resource_size(res)) { |
2597 | a->start = res->start; |
2598 | a->end = res->end; |
2599 | } |
2600 | return 0; |
2601 | } |
2602 | |
2603 | /* |
2604 | * Find biggest 'System RAM' resource and store its start and end address in |
2605 | * @start and @end, respectively. If no System RAM is found, returns false. |
2606 | */ |
2607 | static bool damon_find_biggest_system_ram(unsigned long *start, |
2608 | unsigned long *end) |
2610 | { |
2611 | struct damon_system_ram_region arg = {}; |
2612 | |
2613 | walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2614 | if (arg.end <= arg.start) |
2615 | return false; |
2616 | |
2617 | *start = arg.start; |
2618 | *end = arg.end; |
2619 | return true; |
2620 | } |
2621 | |
2622 | /** |
2623 | * damon_set_region_biggest_system_ram_default() - Set the region of the given |
2624 | * monitoring target as requested, or biggest 'System RAM'. |
2625 | * @t: The monitoring target to set the region. |
2626 | * @start: The pointer to the start address of the region. |
2627 | * @end: The pointer to the end address of the region. |
2628 | * |
2629 | * This function sets the region of @t as requested by @start and @end. If the |
2630 | * values of @start and @end are zero, however, this function finds the biggest |
2631 | * 'System RAM' resource and sets the region to cover the resource. In the |
2632 | * latter case, this function saves the start and end addresses of the resource |
2633 | * in @start and @end, respectively. |
2634 | * |
2635 | * Return: 0 on success, negative error code otherwise. |
2636 | */ |
2637 | int damon_set_region_biggest_system_ram_default(struct damon_target *t, |
2638 | unsigned long *start, unsigned long *end) |
2639 | { |
2640 | struct damon_addr_range addr_range; |
2641 | |
2642 | if (*start > *end) |
2643 | return -EINVAL; |
2644 | |
2645 | if (!*start && !*end && |
2646 | !damon_find_biggest_system_ram(start, end)) |
2647 | return -EINVAL; |
2648 | |
2649 | addr_range.start = *start; |
2650 | addr_range.end = *end; |
2651 | return damon_set_regions(t, &addr_range, 1);
2652 | } |
2653 | |
2654 | /* |
2655 | * damon_moving_sum() - Calculate an inferred moving sum value. |
2656 | * @mvsum: Inferred sum of the last @len_window values. |
2657 | * @nomvsum: Non-moving sum of the last discrete @len_window window values. |
2658 | * @len_window: The number of last values to take care of. |
2659 | * @new_value: New value that will be added to the pseudo moving sum. |
2660 | * |
2661 | * Moving sum (moving average * window size) is good for handling noise, but |
2662 | * the cost of keeping past values can be high for arbitrary window size. This |
2663 | * function implements a lightweight pseudo moving sum function that doesn't |
2664 | * keep the past window values. |
2665 | * |
2666 | * It simply assumes there was no noise in the past, and gets the assumed
2667 | * no-noise past value to drop from @nomvsum and @len_window. @nomvsum is a
2668 | * non-moving sum of the last window. For example, if @len_window is 10 and we |
2669 | * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25 |
2670 | * values. Hence, this function simply drops @nomvsum / @len_window from |
2671 | * given @mvsum and add @new_value. |
2672 | * |
2673 | * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for |
2674 | * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For
2675 | * calculating the next moving sum with a new value, we should drop 0 from 50
2676 | * and add the new value. However, this function assumes it got value 5 for
2677 | * each of the last ten times. Based on that assumption, when the next value
2678 | * is measured, it drops the assumed past value (5) from the current sum and
2679 | * adds the new value to get the updated pseudo-moving sum.
2680 | * |
2681 | * This means the value could have errors, but the errors will disappear
2682 | * at every @len_window-aligned call. For example, if @len_window is 10, the
2683 | * pseudo moving sum with 11th value to 19th value would have an error. But |
2684 | * the sum with 20th value will not have the error. |
2685 | * |
2686 | * Return: Pseudo-moving sum after getting the @new_value.
2687 | */ |
2688 | static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum, |
2689 | unsigned int len_window, unsigned int new_value) |
2690 | { |
2691 | return mvsum - nomvsum / len_window + new_value; |
2692 | } |
2693 | |
2694 | /** |
2695 | * damon_update_region_access_rate() - Update the access rate of a region. |
2696 | * @r: The DAMON region to update for its access check result. |
2697 | * @accessed: Whether the region has accessed during last sampling interval. |
2698 | * @attrs: The damon_attrs of the DAMON context. |
2699 | * |
2700 | * Update the access rate of a region with the region's last sampling interval |
2701 | * access check result. |
2702 | * |
2703 | * Usually this will be called by &damon_operations->check_accesses callback. |
2704 | */ |
2705 | void damon_update_region_access_rate(struct damon_region *r, bool accessed, |
2706 | struct damon_attrs *attrs) |
2707 | { |
2708 | unsigned int len_window = 1; |
2709 | |
2710 | /* |
2711 | * sample_interval can be zero, but cannot be larger than |
2712 | * aggr_interval, owing to validation of damon_set_attrs(). |
2713 | */ |
2714 | if (attrs->sample_interval) |
2715 | len_window = damon_max_nr_accesses(attrs); |
2716 | r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2717 | r->last_nr_accesses * 10000, len_window,
2718 | accessed ? 10000 : 0);
2719 | |
2720 | if (accessed) |
2721 | r->nr_accesses++; |
2722 | } |
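|
| /*
|  * A small worked example (hypothetical attrs): with an aggregation
|  * interval 20 times the sampling interval, len_window is 20.  If
|  * last_nr_accesses is 10 (a nomvsum of 100,000) and nr_accesses_bp is
|  * currently 120,000, an accessed sample updates it to
|  * 120,000 - 100,000 / 20 + 10,000 = 125,000, i.e. 12.5 in nr_accesses
|  * terms.
|  */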
2723 | |
2724 | static int __init damon_init(void) |
2725 | { |
2726 | damon_region_cache = KMEM_CACHE(damon_region, 0); |
2727 | if (unlikely(!damon_region_cache)) { |
2728 | pr_err("creating damon_region_cache fails\n"); |
2729 | return -ENOMEM; |
2730 | } |
2731 | |
2732 | return 0; |
2733 | } |
2734 | |
2735 | subsys_initcall(damon_init); |
2736 | |
2737 | #include "tests/core-kunit.h" |
2738 |