// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

#include "gov.h"

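/*
 * BUCKETS is the number of correction-factor slots, indexed by the order of
 * magnitude of the expected idle duration (see which_bucket()).  INTERVALS
 * is the number of recent idle intervals tracked for repeating-pattern
 * detection.  RESOLUTION and DECAY set the fixed-point scale and the decay
 * rate of the correction factors, and measured idle times of MAX_INTERESTING
 * or more are treated as perfectly predicted when those factors are updated.
 */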
#define BUCKETS 6
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING (50000 * NSEC_PER_USEC)

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 2 decision factors for picking a C
 * state:
 *  1) Energy break even point
 *  2) Latency tolerance (from pmqos infrastructure)
 * These two factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we take the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, but it uses a set of
 * factors, not just a single factor. This stems from the realization that the
 * ratio is dependent on the order of magnitude of the expected duration; if we
 * expect 500 milliseconds of idle time the likelihood of getting an interrupt
 * very early is much higher than if we expect 50 microseconds of idle time.
 * For this reason, menu keeps an array of 6 independent factors that get
 * indexed based on the magnitude of the expected duration.
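 * (In the code these factors are fixed-point values scaled by
 * RESOLUTION * DECAY, so, for example, a steady ratio of 0.5 is stored as
 * 1024 * 8 / 2 = 4096.)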
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices like mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and use them to estimate the duration of the next one.
 */

struct menu_device {
        int             needs_update;
        int             tick_wakeup;

        u64             next_timer_ns;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
        int             interval_ptr;
};

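/*
 * Map an expected idle duration to one of the BUCKETS correction-factor
 * slots by order of magnitude: below 10 us, 10-100 us, 100 us - 1 ms,
 * 1 - 10 ms, 10 - 100 ms, and 100 ms or more.
 */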
static inline int which_bucket(u64 duration_ns)
{
        int bucket = 0;

        if (duration_ns < 10ULL * NSEC_PER_USEC)
                return bucket;
        if (duration_ns < 100ULL * NSEC_PER_USEC)
                return bucket + 1;
        if (duration_ns < 1000ULL * NSEC_PER_USEC)
                return bucket + 2;
        if (duration_ns < 10000ULL * NSEC_PER_USEC)
                return bucket + 3;
        if (duration_ns < 100000ULL * NSEC_PER_USEC)
                return bucket + 4;
        return bucket + 5;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
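 * For example, if the last 8 intervals are all within a few microseconds
 * of 1000 us, the variance check passes and their average (roughly
 * 1000 us) is returned as the prediction.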
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
        s64 value, min_thresh = -1, max_thresh = UINT_MAX;
        unsigned int max, min, divisor;
        u64 avg, variance, avg_sq;
        int i;

again:
        /* Compute the average and variance of past intervals. */
        max = 0;
        min = UINT_MAX;
        avg = 0;
        variance = 0;
        divisor = 0;
        for (i = 0; i < INTERVALS; i++) {
                value = data->intervals[i];
                /*
                 * Discard the samples outside the interval between the min and
                 * max thresholds.
                 */
                if (value <= min_thresh || value >= max_thresh)
                        continue;

                divisor++;

                avg += value;
                variance += value * value;

                if (value > max)
                        max = value;

                if (value < min)
                        min = value;
        }

        if (!max)
                return UINT_MAX;

        if (divisor == INTERVALS) {
                avg >>= INTERVAL_SHIFT;
                variance >>= INTERVAL_SHIFT;
        } else {
                do_div(avg, divisor);
                do_div(variance, divisor);
        }

        avg_sq = avg * avg;
        variance -= avg_sq;

        /*
         * The typical interval is obtained when standard deviation is
         * small (stddev <= 20 us, variance <= 400 us^2) or standard
         * deviation is small compared to the average interval (avg >
         * 6*stddev, avg^2 > 36*variance). The average is smaller than
         * UINT_MAX aka U32_MAX, so computing its square does not
         * overflow a u64. We simply reject this candidate average if
         * the standard deviation is greater than 715 s (which is
         * rather unlikely).
         *
         * Use this result only if there is no timer to wake us up sooner.
         */
        if (likely(variance <= U64_MAX/36)) {
                if ((avg_sq > variance * 36 && divisor * 4 >= INTERVALS * 3) ||
                    variance <= 400)
                        return avg;
        }

        /*
         * If there are outliers, discard them by setting thresholds to exclude
         * data points at a large enough distance from the average, then
         * calculate the average and standard deviation again. Once we get
         * down to the last 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         */
        if (divisor * 4 <= INTERVALS * 3) {
                /*
                 * If there are sufficiently many data points still under
                 * consideration after the outliers have been eliminated,
                 * returning without a prediction would be a mistake because it
                 * is likely that the next interval will not exceed the current
                 * maximum, so return the latter in that case.
                 */
                if (divisor >= INTERVALS / 2)
                        return max;

                return UINT_MAX;
        }

        /* Update the thresholds for the next round. */
        if (avg - min > max - avg)
                min_thresh = min;
        else
                max_thresh = max;

        goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                       bool *stop_tick)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
        u64 predicted_ns;
        ktime_t delta, delta_tick;
        int i, idx;

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        /* Find the shortest expected idle interval. */
        predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
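
        /*
         * Look up the time till the next timer event only if the interval
         * prediction exceeds RESIDENCY_THRESHOLD_NS; for shorter predictions
         * a shallow state is going to be selected anyway, so the (relatively
         * costly) nohz query is skipped and conservative defaults are used
         * instead.
         */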
        if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
                unsigned int timer_us;

                /* Determine the time till the closest timer. */
                delta = tick_nohz_get_sleep_length(&delta_tick);
                if (unlikely(delta < 0)) {
                        delta = 0;
                        delta_tick = 0;
                }

                data->next_timer_ns = delta;
                data->bucket = which_bucket(data->next_timer_ns);

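                /*
                 * The correction factor is a fixed-point ratio scaled by
                 * RESOLUTION * DECAY, so the division below yields
                 * next_timer_ns * (factor / (RESOLUTION * DECAY)) expressed
                 * in microseconds.
                 */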
                /* Round up the result for half microseconds. */
                timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
                                   data->next_timer_ns *
                                   data->correction_factor[data->bucket],
                                   RESOLUTION * DECAY * NSEC_PER_USEC);
                /* Use the lowest expected idle interval to pick the idle state. */
                predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);
        } else {
                /*
                 * Because the next timer event is not going to be determined
                 * in this case, assume that without the tick the closest timer
                 * will be in the distant future and that the closest tick will
                 * occur after 1/2 of the tick period.
                 */
                data->next_timer_ns = KTIME_MAX;
                delta_tick = TICK_NSEC / 2;
                data->bucket = BUCKETS - 1;
        }

        if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
            ((data->next_timer_ns < drv->states[1].target_residency_ns ||
              latency_req < drv->states[1].exit_latency_ns) &&
             !dev->states_usage[0].disable)) {
                /*
                 * In this case state[0] will be used no matter what, so return
                 * it right away and keep the tick running if state[0] is a
                 * polling one.
                 */
                *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
                return 0;
        }

        if (tick_nohz_tick_stopped()) {
                /*
                 * If the tick is already stopped, the cost of possible short
                 * idle duration misprediction is much higher, because the CPU
                 * may be stuck in a shallow idle state for a long time as a
                 * result of it. In that case say we might mispredict and use
                 * the known time till the closest timer event for the idle
                 * state selection.
                 */
                if (predicted_ns < TICK_NSEC)
                        predicted_ns = data->next_timer_ns;
        } else if (latency_req > predicted_ns) {
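                /*
                 * Limit the latency constraint to the predicted idle duration
                 * so that the exit latency of the selected state does not
                 * exceed the time the CPU is expected to spend idle.
                 */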
                latency_req = predicted_ns;
        }

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
        idx = -1;
        for (i = 0; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];

                if (dev->states_usage[i].disable)
                        continue;

                if (idx == -1)
                        idx = i; /* first enabled state */

                if (s->target_residency_ns > predicted_ns) {
                        /*
                         * Use a physical idle state, not busy polling, unless
                         * a timer is going to trigger soon enough.
                         */
                        if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
                            s->exit_latency_ns <= latency_req &&
                            s->target_residency_ns <= data->next_timer_ns) {
                                predicted_ns = s->target_residency_ns;
                                idx = i;
                                break;
                        }
                        if (predicted_ns < TICK_NSEC)
                                break;

                        if (!tick_nohz_tick_stopped()) {
                                /*
                                 * If the state selected so far is shallow,
                                 * waking up early won't hurt, so retain the
                                 * tick in that case and let the governor run
                                 * again in the next iteration of the loop.
                                 */
                                predicted_ns = drv->states[idx].target_residency_ns;
                                break;
                        }

                        /*
                         * If the state selected so far is shallow and this
                         * state's target residency matches the time till the
                         * closest timer event, select this one to avoid getting
                         * stuck in the shallow one for too long.
                         */
                        if (drv->states[idx].target_residency_ns < TICK_NSEC &&
                            s->target_residency_ns <= delta_tick)
                                idx = i;

                        return idx;
                }
                if (s->exit_latency_ns > latency_req)
                        break;

                idx = i;
        }

        if (idx == -1)
                idx = 0; /* No states enabled. Must use 0. */

        /*
         * Don't stop the tick if the selected state is a polling one or if the
         * expected idle duration is shorter than the tick period length.
         */
        if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
             predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
                *stop_tick = false;

                if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {
                        /*
                         * The tick is not going to be stopped and the target
                         * residency of the state to be returned is not within
                         * the time until the next timer event including the
                         * tick, so try to correct that.
                         */
                        for (i = idx - 1; i >= 0; i--) {
                                if (dev->states_usage[i].disable)
                                        continue;

                                idx = i;
                                if (drv->states[i].target_residency_ns <= delta_tick)
                                        break;
                        }
                }
        }

        return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);

        dev->last_state_idx = index;
        data->needs_update = 1;
        data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        int last_idx = dev->last_state_idx;
        struct cpuidle_state *target = &drv->states[last_idx];
        u64 measured_ns;
        unsigned int new_factor;

        /*
         * Try to figure out how much time passed between entry to low
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
         * we use them anyway if they are short, and if they are long,
         * truncate them to the whole expected time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup began, not when it
         * was completed, we must subtract the exit latency. However, if
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */

        if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
                /*
                 * The nohz code said that there wouldn't be any events within
                 * the tick boundary (if the tick was stopped), but the idle
                 * duration predictor had a differing opinion. Since the CPU
                 * was woken up by a tick (that wasn't stopped after all), the
                 * predictor was not quite right, so assume that the CPU could
                 * have been idle long (but not forever) to help the idle
                 * duration predictor do a better job next time.
                 */
                measured_ns = 9 * MAX_INTERESTING / 10;
        } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
                   dev->poll_time_limit) {
                /*
                 * The CPU exited the "polling" state due to a time limit, so
                 * the idle duration prediction leading to the selection of that
                 * state was inaccurate. If a better prediction had been made,
                 * the CPU might have been woken up from idle by the next timer.
                 * Assume that to be the case.
                 */
                measured_ns = data->next_timer_ns;
        } else {
                /* measured value */
                measured_ns = dev->last_residency_ns;

                /* Deduct exit latency */
                if (measured_ns > 2 * target->exit_latency_ns)
                        measured_ns -= target->exit_latency_ns;
                else
                        measured_ns /= 2;
        }

        /* Make sure our coefficients do not exceed unity */
        if (measured_ns > data->next_timer_ns)
                measured_ns = data->next_timer_ns;

        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;
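        /*
         * This is an exponentially decaying average: each update retains
         * (DECAY - 1) / DECAY of the previous factor and adds a new sample
         * scaled by RESOLUTION below, so a steady measured/next_timer ratio
         * of 0.5 converges to RESOLUTION * DECAY / 2 == 4096.
         */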

        if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
                new_factor += div64_u64(RESOLUTION * measured_ns,
                                        data->next_timer_ns);
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time. Fortunately, due to rounding,
         * new_factor will stay nonzero regardless of measured_ns values
         * and the compiler can eliminate this test as long as DECAY > 1.
         */
        if (DECAY == 1 && unlikely(new_factor == 0))
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
        int i;

        memset(data, 0, sizeof(struct menu_device));

        /*
         * If the correction factor is 0 (e.g. first time init or CPU hotplug),
         * we actually want to start out with a unity factor.
         */
        for (i = 0; i < BUCKETS; i++)
                data->correction_factor[i] = RESOLUTION * DECAY;

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);