// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/sched/task_stack.h>
#include <asm/io.h>

#include <asm/softirq_stack.h>
#include <asm/smp.h>
#include <asm/ldcw.h>

#undef PARISC_IRQ_CR16_COUNTS

#define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq))
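
/*
 * Example, using the definitions from asm/irq.h: TIMER_IRQ == CPU_IRQ_BASE
 * and CPU_IRQ_MAX == CPU_IRQ_BASE + (BITS_PER_LONG - 1), so
 * EIEM_MASK(TIMER_IRQ) == 1UL << (BITS_PER_LONG - 1), i.e. the MSB.
 */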

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (i.e. bit 0 is the MSB)
*/
static volatile unsigned long cpu_eiem = 0;

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->irq_ack() and ->irq_eoi() of an interrupt to prevent
** re-interruption while that interrupt is still being processed.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

static void cpu_mask_irq(struct irq_data *d)
{
        unsigned long eirr_bit = EIEM_MASK(d->irq);

        cpu_eiem &= ~eirr_bit;
        /* Do nothing on the other CPUs. If they get this interrupt,
         * the "& cpu_eiem" in do_cpu_irq_mask() ensures they won't
         * handle it, and the set_eiem() at the bottom will ensure it
         * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
        unsigned long eirr_bit = EIEM_MASK(irq);

        cpu_eiem |= eirr_bit;

        /* This is just a simple NOP IPI, but it causes all the other
         * CPUs to do a set_eiem(cpu_eiem) at the end of their
         * interrupt handlers, so they pick up the new mask */
        smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
        __cpu_unmask_irq(d->irq);
}

void cpu_ack_irq(struct irq_data *d)
{
        unsigned long mask = EIEM_MASK(d->irq);
        int cpu = smp_processor_id();

        /* Clear the bit in our local ack mask so this CPU can no
         * longer process the interrupt */
        per_cpu(local_ack_eiem, cpu) &= ~mask;

        /* disable the interrupt */
        set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

        /* and now ack it: writing the bit to the EIRR (CR 23) clears
         * the pending interrupt */
        mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
        unsigned long mask = EIEM_MASK(d->irq);
        int cpu = smp_processor_id();

        /* set the bit in our local ack mask again; the interrupt is
         * no longer in process */
        per_cpu(local_ack_eiem, cpu) |= mask;

        /* enable the interrupt */
        set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
        int cpu_dest;

        /* timer and ipi have to always be received on all CPUs */
        if (irqd_is_per_cpu(d))
                return -EINVAL;

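        /* pick the first online CPU in the requested mask; if none is
         * online, fall back to the first online CPU overall */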
        cpu_dest = cpumask_first_and(dest, cpu_online_mask);
        if (cpu_dest >= nr_cpu_ids)
                cpu_dest = cpumask_first(cpu_online_mask);

        return cpu_dest;
}
#endif

static struct irq_chip cpu_interrupt_type = {
        .name = "CPU",
        .irq_mask = cpu_mask_irq,
        .irq_unmask = cpu_unmask_irq,
        .irq_ack = cpu_ack_irq,
        .irq_eoi = cpu_eoi_irq,
        /* XXX: Needs to be written. We managed without it so far, but
         * we really ought to write it.
         */
        .irq_retrigger = NULL,
};

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x) (&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        seq_printf(p, "%*s: ", prec, "STK");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
        seq_puts(p, " Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
        seq_printf(p, "%*s: ", prec, "IST");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
        seq_puts(p, " Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
        if (num_online_cpus() > 1) {
                seq_printf(p, "%*s: ", prec, "RES");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
                seq_puts(p, " Rescheduling interrupts\n");
                seq_printf(p, "%*s: ", prec, "CAL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
                seq_puts(p, " Function call interrupts\n");
        }
#endif
        seq_printf(p, "%*s: ", prec, "UAH");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
        seq_puts(p, " Unaligned access handler traps\n");
        seq_printf(p, "%*s: ", prec, "FPA");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
        seq_puts(p, " Floating point assist traps\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, " TLB shootdowns\n");
        return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, "    ");
                for_each_online_cpu(j)
                        seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
                seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                struct irq_desc *desc = irq_to_desc(i);
                struct irqaction *action;

                raw_spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);

                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, j));

                seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
                seq_printf(p, " %s", action->name);

                while ((action = action->next))
                        seq_printf(p, ", %s", action->name);
#else
                for (; action; action = action->next) {
                        unsigned int k, avg, min, max;

                        min = max = action->cr16_hist[0];

                        for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
                                int hist = action->cr16_hist[k];

                                if (hist) {
                                        avg += hist;
                                } else
                                        break;

                                if (hist > max) max = hist;
                                if (hist < min) min = hist;
                        }

                        avg /= k;
                        seq_printf(p, " %s[%d/%d/%d]", action->name,
                                   min, avg, max);
                }
#endif

                seq_putc(p, '\n');
 skip:
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        if (i == NR_IRQS)
                arch_show_interrupts(p, 3);

        return 0;
}


/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
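/*
** Sketch of a typical consumer (the iosapic driver is one such user):
**
**      irq  = txn_alloc_irq(8);
**      addr = txn_alloc_addr(irq);
**      data = txn_alloc_data(irq);
**
** The device is then programmed to write 'data' to 'addr' in order to
** raise the interrupt on the CPU that address belongs to.
*/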

int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
        if (irq_has_action(irq))
                return -EBUSY;
        if (irq_get_chip(irq) != &cpu_interrupt_type)
                return -EBUSY;

        /* for iosapic interrupts */
        if (type) {
                irq_set_chip_and_handler(irq, type, handle_percpu_irq);
                irq_set_chip_data(irq, data);
                __cpu_unmask_irq(irq);
        }
        return 0;
}

int txn_claim_irq(int irq)
{
        return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
 * V-class (EPIC): 6 bits
 * N/L/A-class (iosapic): 8 bits
 * PCI 2.2 MSI: 16 bits
 * Some PCI devices: 32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA 2.0 narrow mode): 5 bits (width of the EIR register)
 * o PA 2.0 wide mode: 6 bits (per processor)
 * o IA64: 8 bits (256 vectors total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor... and the N/L-class I/O subsystem supports more bits than
 * PA 2.0 has. The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
        int irq;

        /* never return the first CPU irq: that's the interval timer */
        for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
                if (cpu_claim_irq(irq, NULL, NULL) < 0)
                        continue;
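                /* with type == NULL, cpu_claim_irq() only checks that
                 * the line is free, so there is nothing to release
                 * before trying the next candidate */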
                if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
                        continue;
                return irq;
        }

        /* unlikely, but be prepared */
        return -1;
}


unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
        struct irq_data *d = irq_get_irq_data(irq);
        irq_data_update_affinity(d, cpumask_of(cpu));
#endif

        return per_cpu(cpu_data, cpu).txn_addr;
}


unsigned long txn_alloc_addr(unsigned int virt_irq)
{
        static int next_cpu = -1;
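        /* used to round-robin interrupt delivery across the online
         * CPUs that have a valid txn_addr */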

        next_cpu++; /* assign to "next" CPU we want this bugger on */

        /* validate entry */
        while ((next_cpu < nr_cpu_ids) &&
               (!per_cpu(cpu_data, next_cpu).txn_addr ||
                !cpu_online(next_cpu)))
                next_cpu++;

        if (next_cpu >= nr_cpu_ids)
                next_cpu = 0;   /* nothing else, assign monarch */

        return txn_affinity_addr(virt_irq, next_cpu);
}


unsigned int txn_alloc_data(unsigned int virt_irq)
{
        return virt_irq - CPU_IRQ_BASE;
}

static inline int eirr_to_irq(unsigned long eirr)
{
        int bit = fls_long(eirr);
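        /* EIRR bit 0 is the MSB, so the highest set bit is the
         * lowest-numbered pending interrupt; e.g. if only the MSB is
         * set, fls_long() == BITS_PER_LONG and we return TIMER_IRQ */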
        return (BITS_PER_LONG - bit) + TIMER_IRQ;
}

#ifdef CONFIG_IRQSTACKS
/*
 * IRQ STACK - used for irq handler
 */
#ifdef CONFIG_64BIT
#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */
#else
#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
#endif

union irq_stack_union {
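        /* the stack shares storage with the lock words below: the
         * bottom of each IRQ stack doubles as an ldcw semaphore that
         * marks the stack "in use" (see execute_on_irq_stack()) */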
        unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
        volatile unsigned int slock[4];
        volatile unsigned int lock[1];
};

static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
        .slock = { 1,1,1,1 },
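        /* 1 == stack free; __ldcw() atomically reads the word and
         * writes 0, and ldcw needs a 16-byte aligned word, which is
         * why __ldcw_align() gets four candidates to choose from */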
};
#endif


int sysctl_panic_on_stackoverflow = 1;

static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN (256*6)
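        /* i.e. warn once the stack pointer comes within STACK_MARGIN
         * bytes of the end of the stack (parisc stacks grow upward) */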

        unsigned long stack_start = (unsigned long) task_stack_page(current);
        unsigned long sp = regs->gr[30];
        unsigned long stack_usage;
        unsigned int *last_usage;
        int cpu = smp_processor_id();

        /* if sr7 != 0, we interrupted a userspace process which we do not want
         * to check for stack overflow. We will only check the kernel stack. */
        if (regs->sr[7])
                return;

        /* exit if already in panic */
        if (sysctl_panic_on_stackoverflow < 0)
                return;

        /* calculate kernel stack usage */
        stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
        if (likely(stack_usage <= THREAD_SIZE))
                goto check_kernel_stack; /* found kernel stack */

        /* check irq stack usage */
        stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
        stack_usage = sp - stack_start;

        last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
        if (unlikely(stack_usage > *last_usage))
                *last_usage = stack_usage;

        if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
                return;

        pr_emerg("stackcheck: %s will most likely overflow irq stack "
                 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
                current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
        goto panic_check;

check_kernel_stack:
#endif

        /* check kernel stack usage */
        last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

        if (unlikely(stack_usage > *last_usage))
                *last_usage = stack_usage;

        if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
                return;

        pr_emerg("stackcheck: %s will most likely overflow kernel stack "
                 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
                current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
        if (sysctl_panic_on_stackoverflow) {
                sysctl_panic_on_stackoverflow = -1; /* disable further checks */
                panic("low stack detected by irq handler - check messages\n");
        }
#endif
}

#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);

static void execute_on_irq_stack(void *func, unsigned long param1)
{
        union irq_stack_union *union_ptr;
        unsigned long irq_stack;
        volatile unsigned int *irq_stack_in_use;

        union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
        irq_stack = (unsigned long) &union_ptr->stack;
        irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
                        FRAME_ALIGN); /* align for stack frame usage */

        /* We may be called recursively. If we are already using the irq
         * stack, just continue to use it. The ldcw-based lock serializes
         * irq stack usage.
         */
        irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
        if (!__ldcw(irq_stack_in_use)) {
                void (*direct_call)(unsigned long p1) = func;

                /* We are using the IRQ stack already.
                 * Do a direct call on the current stack. */
                direct_call(param1);
                return;
        }

        /* This is where we switch to the IRQ stack. */
        call_on_stack(param1, func, irq_stack);

        /* free up irq stack usage. */
        *irq_stack_in_use = 1;
}

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
        execute_on_irq_stack(__do_softirq, 0);
}
#endif
#endif /* CONFIG_IRQSTACKS */

/* ONLY called from entry.S:intr_extint() */
asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        unsigned long eirr_val;
        int irq, cpu = smp_processor_id();
        struct irq_data *irq_data;
#ifdef CONFIG_SMP
        cpumask_t dest;
#endif

        old_regs = set_irq_regs(regs);
        local_irq_disable();
        irq_enter_rcu();

        eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
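        /* i.e. pending (EIRR) & globally enabled (cpu_eiem) & not
         * currently being processed on this CPU (local_ack_eiem) */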
        if (!eirr_val)
                goto set_out;
        irq = eirr_to_irq(eirr_val);

        irq_data = irq_get_irq_data(irq);

        /* Filter out spurious interrupts, mostly from serial port at bootup */
        if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
                goto set_out;

#ifdef CONFIG_SMP
        cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
        if (irqd_is_per_cpu(irq_data) &&
            !cpumask_test_cpu(smp_processor_id(), &dest)) {
                int cpu = cpumask_first(&dest);

                printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
                       irq, smp_processor_id(), cpu);
                gsc_writel(irq + CPU_IRQ_BASE,
                           per_cpu(cpu_data, cpu).hpa);
                goto set_out;
        }
#endif
        stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
        execute_on_irq_stack(&generic_handle_irq, irq);
#else
        generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
        irq_exit_rcu();
        set_irq_regs(old_regs);
        return;

 set_out:
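        /* nothing handled: just reload the EIEM from the current
         * global and per-CPU ack masks and leave */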
        set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
        goto out;
}

static void claim_cpu_irqs(void)
{
        unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
        int i;

        for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
                irq_set_chip_and_handler(i, &cpu_interrupt_type,
                                         handle_percpu_irq);
        }

        irq_set_handler(TIMER_IRQ, handle_percpu_irq);
        if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
                pr_err("Failed to register timer interrupt\n");
#ifdef CONFIG_SMP
        irq_set_handler(IPI_IRQ, handle_percpu_irq);
        if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
                pr_err("Failed to register IPI interrupt\n");
#endif
}

void init_IRQ(void)
{
        local_irq_disable();    /* PARANOID - should already be disabled */
        mtctl(~0UL, 23);        /* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
        if (!cpu_eiem) {
                claim_cpu_irqs();
                cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
        }
#else
        claim_cpu_irqs();
        cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
        set_eiem(cpu_eiem);     /* EIEM : enable all external intr */
}