1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * Copyright (C) 2013 Imagination Technologies |
4 | * Author: Paul Burton <paul.burton@mips.com> |
5 | */ |
6 | |
7 | #include <asm/addrspace.h> |
8 | #include <asm/asm.h> |
9 | #include <asm/asm-offsets.h> |
10 | #include <asm/asmmacro.h> |
11 | #include <asm/cacheops.h> |
12 | #include <asm/eva.h> |
13 | #include <asm/mipsregs.h> |
14 | #include <asm/mipsmtregs.h> |
15 | #include <asm/pm.h> |
16 | #include <asm/smp-cps.h> |
17 | |
/*
 * Register offsets within the MIPS Coherence Manager (CM) Global
 * Configuration Register (GCR) block & the Cluster Power Controller
 * (CPC) block. The *_CL_* offsets lie within the core-local region
 * of the respective block.
 */
#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

	/*
	 * NOTE(review): only mips_gcr_base is referenced by the code
	 * below; this .extern looks vestigial - confirm before removal.
	 */
	.extern mips_cm_base

	/* Branch delay slots are filled explicitly throughout this file */
	.set noreorder

#ifdef CONFIG_64BIT
	/* Enable 64-bit addressing in kernel mode (Status.KX) */
# define STATUS_BITDEPS ST0_KX
#else
# define STATUS_BITDEPS 0
#endif
34 | |
#ifdef CONFIG_MIPS_CPS_NS16550

/*
 * DUMP_EXCEP(name) - report an unexpected exception via the early
 * NS16550 UART before falling through to the vector body. Loads a0
 * with a pointer to the name string (emitted as label 8 by the TEXT
 * macro) & calls mips_cps_bev_dump to print it.
 */
#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

/* No early UART configured: exception dumping is a no-op */
#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */
48 | |
49 | /* |
50 | * Set dest to non-zero if the core supports the MT ASE, else zero. If |
51 | * MT is not supported then branch to nomt. |
52 | */ |
53 | .macro has_mt dest, nomt |
54 | mfc0 \dest, CP0_CONFIG, 1 |
55 | bgez \dest, \nomt |
56 | mfc0 \dest, CP0_CONFIG, 2 |
57 | bgez \dest, \nomt |
58 | mfc0 \dest, CP0_CONFIG, 3 |
59 | andi \dest, \dest, MIPS_CONF3_MT |
60 | beqz \dest, \nomt |
61 | nop |
62 | .endm |
63 | |
64 | /* |
65 | * Set dest to non-zero if the core supports MIPSr6 multithreading |
66 | * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then |
67 | * branch to nomt. |
68 | */ |
69 | .macro has_vp dest, nomt |
70 | mfc0 \dest, CP0_CONFIG, 1 |
71 | bgez \dest, \nomt |
72 | mfc0 \dest, CP0_CONFIG, 2 |
73 | bgez \dest, \nomt |
74 | mfc0 \dest, CP0_CONFIG, 3 |
75 | bgez \dest, \nomt |
76 | mfc0 \dest, CP0_CONFIG, 4 |
77 | bgez \dest, \nomt |
78 | mfc0 \dest, CP0_CONFIG, 5 |
79 | andi \dest, \dest, MIPS_CONF5_VP |
80 | beqz \dest, \nomt |
81 | nop |
82 | .endm |
83 | |
84 | |
	/*
	 * Entry point for secondary cores/VPEs. Must be 4KB aligned: the
	 * exception vectors below are placed at fixed .org offsets from
	 * this base & Status.BEV is set while executing here.
	 */
	.balign 0x1000

LEAF(mips_cps_core_entry)
	/*
	 * These first several instructions will be patched by cps_smp_setup to load the
	 * CCA to use into register s0 and GCR base address to register s1.
	 */
	.rept CPS_ENTRY_PATCH_INSNS
	nop
	.endr

	.global mips_cps_core_entry_patch_end
mips_cps_core_entry_patch_end:

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	nop

not_nmi:
	/* Setup Cause: use the special interrupt vector */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status: CP0/CP1 usable, boot-time exception vectors */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/* We don't know how to do coherence setup on earlier ISA */
#if MIPS_ISA_REV > 0
	/*
	 * Skip cache & coherence setup if we're already coherent. s7 is
	 * preserved across the calls below & rechecked after
	 * mips_cps_get_bootcfg to skip core-level init too.
	 */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb			/* clear the hazard before relying on coherence */
#endif /* MIPS_ISA_REV > 0 */

	/*
	 * Set Kseg0 CCA to that in s0. The ori/xori pair clears the
	 * low 3 (Config.K0) bits before or-ing in the patched-in CCA.
	 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers (core cfg in v0, VPE cfg in v1, VPE ID in t9) */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9			/* a1 = this VPE's ID */
	jal	mips_cps_boot_vpes
	move	a0, v0			/* a0 = core_boot_config (delay slot) */

	/* Off we go! Load this VPE's boot PC/gp/sp & jump to the PC */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
	END(mips_cps_core_entry)
182 | |
	/* TLB refill vector, at offset 0x200 from the BEV base above */
	.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill" )
	b	.		/* unexpected during boot: spin forever */
	nop
END(excep_tlbfill)
189 | |
	/* XTLB refill vector (64-bit addressing), offset 0x280 from the BEV base */
	.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill" )
	b	.		/* unexpected during boot: spin forever */
	nop
END(excep_xtlbfill)
196 | |
	/* Cache error vector, offset 0x300 from the BEV base */
	.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache" )
	b	.		/* unexpected during boot: spin forever */
	nop
END(excep_cache)
203 | |
	/* General exception vector, offset 0x380 from the BEV base */
	.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General" )
	b	.		/* unexpected during boot: spin forever */
	nop
END(excep_genex)
210 | |
	/* Interrupt vector (Cause.IV is set in mips_cps_core_entry), offset 0x400 */
	.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt" )
	b	.		/* unexpected during boot: spin forever */
	nop
END(excep_intex)
217 | |
	/* EJTAG debug exception vector, offset 0x480: forward to the real handler */
	.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	nop
END(excep_ejtag)
224 | |
/*
 * mips_cps_core_init() - one-time MT ASE setup for this core
 *
 * If the core implements the MT ASE: disable multi-TC & multi-VPE
 * execution, then for each VPE other than VPE0 bind one TC (1:1
 * TC:VPE), make that TC the VPE's exclusive TC, & leave the TC
 * halted, non-active & non-allocatable. Returns with multi-VPE
 * execution still disabled (no evpe here); mips_cps_boot_vpes
 * re-enables it later. A no-op when MT is absent or not configured.
 *
 * Clobbers (CONFIG_MIPS_MT_SMP): t0, t1, ta1, ta3
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1		/* clear instruction hazards from dmt/dvpe */
	nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core: ta3 = PVPE + 1 */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core, starting from VPE1 */
	li	ta1, 1

1:	/* Operate on the appropriate TC (VPEControl.TargTC = ta1) */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif /* CONFIG_MIPS_MT_SMP */
	jr	ra
	nop
	END(mips_cps_core_init)
297 | |
298 | /** |
299 | * mips_cps_get_bootcfg() - retrieve boot configuration pointers |
300 | * |
301 | * Returns: pointer to struct core_boot_config in v0, pointer to |
302 | * struct vpe_boot_config in v1, VPE ID in t9 |
303 | */ |
304 | LEAF(mips_cps_get_bootcfg) |
305 | /* Calculate a pointer to this cores struct core_boot_config */ |
306 | lw t0, GCR_CL_ID_OFS(s1) |
307 | li t1, COREBOOTCFG_SIZE |
308 | mul t0, t0, t1 |
309 | PTR_LA t1, mips_cps_core_bootcfg |
310 | PTR_L t1, 0(t1) |
311 | PTR_ADDU v0, t0, t1 |
312 | |
313 | /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ |
314 | li t9, 0 |
315 | #if defined(CONFIG_CPU_MIPSR6) |
316 | has_vp ta2, 1f |
317 | |
318 | /* |
319 | * Assume non-contiguous numbering. Perhaps some day we'll need |
320 | * to handle contiguous VP numbering, but no such systems yet |
321 | * exist. |
322 | */ |
323 | mfc0 t9, CP0_GLOBALNUMBER |
324 | andi t9, t9, MIPS_GLOBALNUMBER_VP |
325 | #elif defined(CONFIG_MIPS_MT_SMP) |
326 | has_mt ta2, 1f |
327 | |
328 | /* Find the number of VPEs present in the core */ |
329 | mfc0 t1, CP0_MVPCONF0 |
330 | srl t1, t1, MVPCONF0_PVPE_SHIFT |
331 | andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT |
332 | addiu t1, t1, 1 |
333 | |
334 | /* Calculate a mask for the VPE ID from EBase.CPUNum */ |
335 | clz t1, t1 |
336 | li t2, 31 |
337 | subu t1, t2, t1 |
338 | li t2, 1 |
339 | sll t1, t2, t1 |
340 | addiu t1, t1, -1 |
341 | |
342 | /* Retrieve the VPE ID from EBase.CPUNum */ |
343 | mfc0 t9, $15, 1 |
344 | and t9, t9, t1 |
345 | #endif |
346 | |
347 | 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ |
348 | li t1, VPEBOOTCFG_SIZE |
349 | mul v1, t9, t1 |
350 | PTR_L ta3, COREBOOTCFG_VPECONFIG(v0) |
351 | PTR_ADDU v1, v1, ta3 |
352 | |
353 | jr ra |
354 | nop |
355 | END(mips_cps_get_bootcfg) |
356 | |
/**
 * mips_cps_boot_vpes() - start the VPs/VPEs of this core that should run
 *
 * Arguments: a0 = pointer to this core's struct core_boot_config,
 *            a1 = the calling VPE/VP's ID.
 *
 * Starts every VP (MIPSr6) or VPE (MT ASE) whose bit is set in the
 * core's VPE mask, & stops (MIPSr6) or halts (MT) the caller if its
 * own bit is clear. Returns only if the caller should remain online.
 */
LEAF(mips_cps_boot_vpes)
	/* ta2 = mask of VPEs which should run, ta3 = VPE boot config array */
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2		/* mask off the non-address low bits */
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2		/* access the CPC via uncached kseg */

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1			/* clear hazards from dvpe */
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE: t8 = saved run mask, ta1 = VPE index */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC: clear then set VPEControl.TargTC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE (ID in a1) is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0			/* spin here until the halt takes effect */
	nop

2:

#endif /* CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	nop
	END(mips_cps_boot_vpes)
520 | |
#if MIPS_ISA_REV > 0
/*
 * mips_cps_cache_init() - initialise this core's L1 caches
 *
 * Initialises the L1 I & D caches by storing zeroed tags to every
 * line, with the geometry (line size, sets per way, associativity)
 * decoded from Config1. A cache whose line-size field is 0 (ie. not
 * present) is skipped.
 *
 * Clobbers: v0, t0, t1, t2, t3, a0, a1. No stack usage.
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size: t0 = 2 << IL bytes; IL == 0 => no I-cache */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	li	t1, 2			/* (delay slot) */
	sllv	t0, t1, t0

	/*
	 * Detect I-cache size: sets per way = 32 << (IS + 1) for IS != 7.
	 * NOTE(review): the IS == 7 case branches past the shift leaving
	 * t1 == 7 rather than 32 sets/way - confirm against the Config1.IS
	 * encoding in the MIPS32 PRA.
	 */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32			/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1		/* t2 = number of ways */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total I-cache bytes */

	/* Store zeroed tags to each I-cache line via kseg0 indices */
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1		/* a1 = end address (exclusive) */
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size: t0 = 2 << DL bytes; DL == 0 => no D-cache */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	li	t1, 2			/* (delay slot) */
	sllv	t0, t1, t0

	/*
	 * Detect D-cache size: sets per way = 32 << (DS + 1) for DS != 7.
	 * NOTE(review): as with the I-cache, the DS == 7 case leaves
	 * t1 == 7 - confirm intent.
	 */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32			/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1		/* t2 = number of ways */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total D-cache bytes */

	/*
	 * Store zeroed tags to each D-cache line. a1 is pre-decremented
	 * by one line because the increment of a0 sits in the branch
	 * delay slot & so runs once more after the final compare.
	 */
	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0		/* a1 = address of last line */
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0		/* (delay slot) */
dcache_done:

	jr	ra
	nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */
596 | |
597 | #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) |
598 | |
/*
 * Calculate a pointer to this CPUs struct mips_static_suspend_state:
 * dest = &cps_cpu_state + __per_cpu_offset[cpu]. Uses $1 (at) as
 * scratch, hence the noat region.
 */
.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)		/* $1 = this CPU's number (thread_info via gp - confirm) */
	sll	$1, $1, LONGLOG		/* scale to a long-sized array index */
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)		/* $1 = __per_cpu_offset[cpu] */
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1	/* dest = this CPU's cps_cpu_state */
	.set	pop
	.endm
612 | |
/*
 * mips_cps_pm_save() - save CPU state before entering a low power state
 *
 * Saves register & static state into this CPU's per-CPU
 * mips_static_suspend_state (see SUSPEND_SAVE_* in asm/pm.h for
 * exactly what is saved), then jumps to the continuation address
 * supplied by the caller in v0.
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1			/* t1 = this CPU's suspend state */
	SUSPEND_SAVE_STATIC
	jr	v0			/* continue at caller-provided address */
	nop
	END(mips_cps_pm_save)
621 | |
/*
 * mips_cps_pm_restore() - restore CPU state after a low power state
 *
 * Restores the state saved by mips_cps_pm_save from this CPU's
 * per-CPU mips_static_suspend_state; RESUME_RESTORE_REGS_RETURN
 * presumably performs the return to mips_cps_pm_save's caller
 * (defined in asm/pm.h - confirm).
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1			/* t1 = this CPU's suspend state */
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)
628 | |
629 | #endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */ |
630 | |