// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks (a sketch follows below).
   - Even if a softirq is serialized, only the local CPU is marked for
     execution. Hence, we get a sort of weak CPU binding. It is still
     not clear whether this results in better locality.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
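
/*
 * A minimal sketch of the "serialize itself" rule above, using hypothetical
 * names (my_lock, my_softirq_action and do_serialized_work are illustrative,
 * not part of this file): the softirq core provides no cross-CPU
 * serialization, so a handler that needs it takes its own lock:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		spin_lock(&my_lock);
 *		do_serialized_work();
 *		spin_unlock(&my_lock);
 *	}
 */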

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance the softirq
 * load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 *
 * This lets us distinguish between whether we are currently processing
 * a softirq and whether we just have bh disabled.
 */
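
/*
 * Illustrative sketch of the distinction above (nothing new, just how the
 * existing helpers read these offsets): in_softirq() tests the whole
 * softirq_count() and therefore also covers BH disabled sections, while
 * in_serving_softirq() tests the SOFTIRQ_OFFSET bit and is only true while
 * a handler is actually running from __do_softirq():
 *
 *	local_bh_disable();
 *	WARN_ON(!in_softirq());		// count == SOFTIRQ_DISABLE_OFFSET
 *	WARN_ON(in_serving_softirq());	// not inside a handler
 *	local_bh_enable();
 */
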
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check from the idle task whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() & al work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * Invoked from run_ksoftirqd() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT. If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute the softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can already be deep. So run the softirq on its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME	msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART	10
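
/*
 * Worked example of the time budget above (illustrative only): the budget
 * is expressed in jiffies and msecs_to_jiffies() rounds up, so with
 * CONFIG_HZ=1000 the loop gets 2 jiffies (2 ms), while with CONFIG_HZ=100
 * msecs_to_jiffies(2) is a single jiffy and the effective budget can be
 * up to 10 ms - the "2 ms" above is a target, not a hard guarantee.
 */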

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack, we
 * need to keep the lockdep irq context tracking as tight as possible in
 * order to not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
	    __this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	ct_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

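/*
 * Illustrative sketch only (the precise entry path is architecture and
 * entry-code specific, and many architectures go through the generic entry
 * code instead): conceptually, low level interrupt handling brackets the
 * handler with the helpers above, and softirqs raised by the handler are
 * then run or deferred from irq_exit():
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	// may raise_softirq(NET_RX_SOFTIRQ) etc.
 *	irq_exit();			// runs or defers pending softirqs
 */
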
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
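
/*
 * A minimal usage sketch, with hypothetical names (MY_SOFTIRQ and
 * my_softirq_action are illustrative; real softirq numbers form a fixed,
 * compile time enum in <linux/interrupt.h>, so adding a vector means
 * editing that enum and is rarely done). Registration happens once at
 * boot and raising can happen later from (almost) any context, exactly
 * as softirq_init() does for the tasklet vectors below:
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// handler runs in softirq context on the raising CPU
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *	...
 *	raise_softirq(MY_SOFTIRQ);	// mark pending, maybe wake ksoftirqd
 */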

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
					}
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	workqueue_softirq_action(false);
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	workqueue_softirq_action(true);
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
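
/*
 * Minimal usage sketch with hypothetical names (my_tasklet, my_tasklet_fn
 * and my_irq_handler are illustrative, not part of this file): a driver
 * typically sets the tasklet up once, schedules it from its hard interrupt
 * handler and kills it on teardown; the callback then runs later in softirq
 * context on the CPU that scheduled it:
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		// deferred bottom half work
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn);
 *	// or at runtime:  tasklet_setup(&my_tasklet, my_tasklet_fn);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev)
 *	{
 *		tasklet_schedule(&my_tasklet);
 *		return IRQ_HANDLED;
 *	}
 *
 *	tasklet_kill(&my_tasklet);	// on teardown, before freeing it
 */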

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when the current task has
			 * preempted soft interrupt processing or prevents
			 * ksoftirqd from running. If the tasklet runs on a
			 * different CPU, then this has no effect other than
			 * doing the BH disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		__do_softirq();
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	workqueue_softirq_dead(cpu);

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}
