// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c)
 */
#include <linux/cpuidle.h>
#include <linux/suspend.h>
#include <linux/livepatch.h>
#include "sched.h"
#include "smp.h"

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

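/*
 * When non-zero, the idle loop spins in cpu_idle_poll() instead of entering
 * an architecture or cpuidle sleep state.  Maintained as a nesting counter
 * by cpu_idle_poll_ctrl() and forced on by the "nohlt" boot parameter when
 * CONFIG_GENERIC_IDLE_POLL_SETUP is enabled.
 */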
static int __read_mostly cpu_idle_force_poll;

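/*
 * Runtime control of polling idle.  Calls nest: each cpu_idle_poll_ctrl(true)
 * is expected to be paired with a later cpu_idle_poll_ctrl(false); the
 * warning below catches unbalanced disables.
 */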
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif /* CONFIG_GENERIC_IDLE_POLL_SETUP */

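/*
 * Polling idle loop: spin with interrupts enabled until a reschedule is
 * needed, forced polling ends, or an expired tick broadcast is noticed,
 * keeping the same context-tracking, tracing and critical-timings
 * bookkeeping as a regular idle state.
 */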
static noinline int __cpuidle cpu_idle_poll(void)
{
	instrumentation_begin();
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	ct_cpuidle_enter();

	raw_local_irq_enable();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	raw_local_irq_disable();

	ct_cpuidle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	local_irq_enable();
	instrumentation_end();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak __noreturn arch_cpu_idle_dead(void) { while (1); }
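
/*
 * Default arch_cpu_idle(): an architecture that provides no low-power idle
 * routine falls back to permanent polling by latching cpu_idle_force_poll.
 */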
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}

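/*
 * Architectures whose local timer may stop ticking across arch_cpu_idle()
 * enable the static key below; default_idle_call() then brackets idle entry
 * with tick_broadcast_enter()/tick_broadcast_exit() so a broadcast
 * clockevent device can take over wakeups.
 */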
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE
DEFINE_STATIC_KEY_FALSE(arch_needs_tick_broadcast);

static inline void cond_tick_broadcast_enter(void)
{
	if (static_branch_unlikely(&arch_needs_tick_broadcast))
		tick_broadcast_enter();
}

static inline void cond_tick_broadcast_exit(void)
{
	if (static_branch_unlikely(&arch_needs_tick_broadcast))
		tick_broadcast_exit();
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE: */
static inline void cond_tick_broadcast_enter(void) { }
static inline void cond_tick_broadcast_exit(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE */

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	instrumentation_begin();
	if (!current_clr_polling_and_test()) {
		cond_tick_broadcast_enter();
		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		ct_cpuidle_enter();
		arch_cpu_idle();
		ct_cpuidle_exit();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
		cond_tick_broadcast_exit();
	}
	local_irq_enable();
	instrumentation_end();
}

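/*
 * Enter the deepest suspend-to-idle state within @max_latency_ns, unless a
 * reschedule is already pending, in which case bail out with -EBUSY.
 */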
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev,
			       u64 max_latency_ns)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev, max_latency_ns);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled; it is pointless to go idle.
	 * Just record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously selected by the governor. This
	 * call blocks until an interrupt occurs and takes care of
	 * re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local IRQ.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {
			max_latency_ns = cpu_wakeup_latency_qos_limit() *
						NSEC_PER_USEC;

			entered_state = call_cpuidle_s2idle(drv, dev,
							    max_latency_ns);
			if (entered_state > 0)
				goto exit_idle;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * Check if we need to update blocked load
	 */
	nohz_run_idle_balance(cpu);

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {

		/*
		 * Interrupts must not be re-enabled from this point on until
		 * the CPU sleeping instruction is reached. Otherwise an
		 * interrupt may fire and queue a timer that would be ignored
		 * until the CPU wakes from the sleeping instruction, and
		 * testing need_resched() alone does not reveal a pending
		 * timer reprogram.
		 *
		 * Several cases to consider:
		 *
		 * - SLEEP-UNTIL-PENDING-INTERRUPT based instructions such as
		 *   "wfi" or "mwait" are fine because they can be entered with
		 *   interrupts disabled.
		 *
		 * - The sti;mwait() pair is fine because interrupts are
		 *   re-enabled only upon the execution of mwait, leaving no gap
		 *   in-between.
		 *
		 * - ROLLBACK based idle handlers with the sleeping instruction
		 *   called with interrupts enabled are NOT fine. In this scheme
		 *   when the interrupt detects it has interrupted an idle handler,
		 *   it rolls back to its beginning which performs the
		 *   need_resched() check before re-executing the sleeping
		 *   instruction. This can leak a pending needed timer reprogram.
		 *   If such a scheme is really mandatory due to the lack of an
		 *   appropriate CPU sleeping instruction, then a FAST-FORWARD
		 *   must instead be applied: when the interrupt detects it has
		 *   interrupted an idle handler, it must resume to the end of
		 *   this idle handler so that the generic idle loop is iterated
		 *   again to reprogram the tick.
		 */
		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();
		rcu_nocb_flush_deferred_wakeup();

		/*
		 * In poll mode we re-enable interrupts and spin. Also, if we
		 * detected in the wakeup-from-idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_queue();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

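/*
 * Per-call timer used by play_idle_precise() to bound a forced-idle period:
 * when it fires it marks the injection done and sets need_resched so that
 * do_idle() breaks out of its loop.
 */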
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

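/*
 * Usage sketch (illustrative, not lifted from an in-tree caller): a
 * SCHED_FIFO kthread bound to one CPU and without an mm, e.g. an
 * idle-injection worker, might call
 *
 *	play_idle_precise(25 * NSEC_PER_MSEC, U64_MAX);
 *
 * once per period to force its CPU idle for 25ms with no wakeup-latency
 * limit, provided it satisfies the WARN_ON_ONCE() checks below.
 */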
void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_setup_on_stack(&it.timer, idle_inject_timer_fn, CLOCK_MONOTONIC,
			       HRTIMER_MODE_REL_HARD);
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);

void cpu_startup_entry(enum cpuhp_state state)
{
	current->flags |= PF_IDLE;
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void update_curr_idle(struct rq *rq);

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	update_curr_idle(rq);
	scx_update_idle(rq, false, true);
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	scx_update_idle(rq, true, true);
	schedstat_inc(rq->sched_goidle);
	next->se.exec_start = rq_clock_task(rq);

	/*
	 * rq is about to be idle, check if we need to update the
	 * lost_idle_time of clock_pelt
	 */
	update_idle_rq_clock_pelt(rq);
}

struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
{
	scx_update_idle(rq, true, false);
	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static bool
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
	return true;
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
	update_curr_idle(rq);
}

static void switching_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, u64 oldprio)
{
	if (p->prio == oldprio)
		return;

	BUG();
}

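/*
 * Account the time the CPU has just spent in the idle task (since
 * se->exec_start) and pass it to dl_server_update_idle() for this rq's
 * fair server.
 */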
static void update_curr_idle(struct rq *rq)
{
	struct sched_entity *se = &rq->idle->se;
	u64 now = rq_clock_task(rq);
	s64 delta_exec;

	delta_exec = now - se->exec_start;
	if (unlikely(delta_exec <= 0))
		return;

	se->exec_start = now;

	dl_server_update_idle(&rq->fair_server, delta_exec);
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	.queue_mask		= 0,

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.wakeup_preempt		= wakeup_preempt_idle,

	.pick_task		= pick_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

	.balance		= balance_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switching_to		= switching_to_idle,
	.update_curr		= update_curr_idle,
};