1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * This file contains the power_save function for 6xx & 7xxx CPUs |
4 | * rewritten in assembler |
5 | * |
 * Warning! This code assumes that if your machine has a 750fx
 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
 * If this is not the case, some additional changes will have to
 * be made to check a runtime variable (a bit like powersave-nap).
10 | */ |
11 | |
12 | #include <linux/threads.h> |
13 | #include <asm/reg.h> |
14 | #include <asm/page.h> |
15 | #include <asm/cputable.h> |
16 | #include <asm/thread_info.h> |
17 | #include <asm/ppc_asm.h> |
18 | #include <asm/asm-offsets.h> |
19 | #include <asm/feature-fixups.h> |
20 | |
21 | .text |
22 | |
/*
 * Init idle, called at early CPU setup time from head.S for each CPU.
 * Make sure no trace of NAP mode remains in HID0, and save default
 * values for some CPU-specific registers. Called with r24
 * containing the CPU number and r3 the relocation offset.
 */
_GLOBAL(init_idle_6xx)
BEGIN_FTR_SECTION
	/* CPU supports NAP: clear any stale NAP enable left in HID0
	 * (e.g. from firmware or a previous kernel) before we start.
	 */
	mfspr	r4,SPRN_HID0
	rlwinm	r4,r4,0,10,8		/* Clear NAP */
	mtspr	SPRN_HID0, r4
	b	1f
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	/* No NAP capability: nothing to clean up or save for this CPU */
	blr
1:
	/* r5 = CPU number (r24) * 4 + reloc offset (r3): byte offset of
	 * this CPU's slot in the word-per-CPU save arrays in .data below.
	 */
	slwi	r5,r24,2
	add	r5,r5,r3
BEGIN_FTR_SECTION
	/* Save the default MSSCR0 so the NAP wakeup path
	 * (power_save_ppc32_restore) can restore it after the
	 * L2-prefetch-disable errata workaround done at NAP entry.
	 */
	mfspr	r4,SPRN_MSSCR0
	addis	r6,r5, nap_save_msscr0@ha
	stw	r4,nap_save_msscr0@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	/* Save the default HID1 so wakeup can restore the PLL selection
	 * changed by the 750FX low-speed NAP entry path (see file header).
	 */
	mfspr	r4,SPRN_HID1
	addis	r6,r5,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	blr
51 | |
52 | /* |
53 | * Here is the power_save_6xx function. This could eventually be |
54 | * split into several functions & changing the function pointer |
55 | * depending on the various features. |
56 | */ |
_GLOBAL(ppc6xx_idle)
	/* Check if we can nap or doze, put HID0 mask in r3
	 */
	lis	r3, 0
BEGIN_FTR_SECTION
	lis	r3,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	/* We must dynamically check for the NAP feature as it
	 * can be cleared by CPU init after the fixups are done
	 */
	lis	r4,cur_cpu_spec@ha
	lwz	r4,cur_cpu_spec@l(r4)
	lwz	r4,CPU_SPEC_FEATURES(r4)
	andi.	r0,r4,CPU_FTR_CAN_NAP
	beq	1f
	/* Now check if user or arch enabled NAP mode */
	lis	r4,powersave_nap@ha
	lwz	r4,powersave_nap@l(r4)
	cmpwi	0,r4,0
	beq	1f
	/* NAP wins over DOZE when both are available and enabled */
	lis	r3,HID0_NAP@h
1:
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	/* r3 == 0 means no power-save mode is usable: just return */
	cmpwi	0,r3,0
	beqlr

	/* Some pre-nap cleanups needed on some CPUs */
	andis.	r0,r3,HID0_NAP@h
	beq	2f
BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * L2 prefetch engines are idle. As explained by errata
	 * text, we can't be sure they are, we just hope very hard
	 * that well be enough (sic !). At least I noticed Apple
	 * doesn't even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29		/* clear MSSCR0 bits 30-31 (L2 prefetch
					 * enables -- per the 745x errata) */
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	/* Touch cacheable kernel memory to (hopefully) drain any
	 * in-flight prefetches before napping.
	 */
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
2:
BEGIN_FTR_SECTION
	/* Go to low speed mode on some 750FX */
	lis	r4,powersave_lowspeed@ha
	lwz	r4,powersave_lowspeed@l(r4)
	cmpwi	0,r4,0
	beq	1f
	/* Set HID1 bit 0x00010000 to switch PLLs; assumed to select the
	 * low-speed PLL 1 (see the warning in the file header). The
	 * default HID1 saved by init_idle_6xx is restored on wakeup.
	 */
	mfspr	r4,SPRN_HID1
	oris	r4,r4,0x0001
	mtspr	SPRN_HID1,r4
1:
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)

	/* Go to NAP or DOZE now */
	/* Clear all power-save mode bits from HID0 first (r5 = mask of
	 * every mode this CPU might have set), then OR in the one mode
	 * selected above in r3.
	 */
	mfspr	r4,SPRN_HID0
	lis	r5,(HID0_NAP|HID0_SLEEP)@h
BEGIN_FTR_SECTION
	oris	r5,r5,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
	andc	r4,r4,r5
	or	r4,r4,r3
BEGIN_FTR_SECTION
	oris	r4,r4,HID0_DPM@h	/* that should be done once for all  */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
	mtspr	SPRN_HID0,r4
BEGIN_FTR_SECTION
	/* Stop all AltiVec data streams before entering power save */
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	/* Flag ourselves as napping in the thread-local flags (r2 =
	 * current) so the interrupt entry code knows to redirect the
	 * return through power_save_ppc32_restore.
	 */
	lwz	r8,TI_LOCAL_FLAGS(r2)	/* set napping bit */
	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
	stw	r8,TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
	/* Enter power save: MSR_EE so an interrupt can wake us, MSR_POW
	 * to actually stop the core. Loop in case we fall through the
	 * mtmsr without having taken the wakeup exception yet.
	 */
	mfmsr	r7
	ori	r7,r7,MSR_EE
	oris	r7,r7,MSR_POW@h
1:	sync
	mtmsr	r7
	isync
	b	1b
145 | |
146 | /* |
147 | * Return from NAP/DOZE mode, restore some CPU specific registers, |
148 | * R11 points to the exception frame. We have to preserve r10. |
149 | */ |
_GLOBAL(power_save_ppc32_restore)
	/* The exception interrupted the mtmsr/b loop in ppc6xx_idle;
	 * rewrite the saved NIP with the saved LR so the exception
	 * return lands at ppc6xx_idle's caller instead.
	 */
	lwz	r9,_LINK(r11)		/* interrupted in ppc6xx_idle: */
	stw	r9,_NIP(r11)		/* make it do a blr */

#ifdef CONFIG_SMP
	/* r11 = this CPU's byte offset into the per-CPU save arrays */
	lwz	r11,TASK_CPU(r2)	/* get cpu number * 4 */
	slwi	r11,r11,2
#else
	li	r11,0
#endif
	/* Todo make sure all these are in the same page
	 * and load r11 (@ha part + CPU offset) only once
	 */
BEGIN_FTR_SECTION
	/* Only restore MSSCR0 if we actually napped (HID0[NAP] still
	 * set); a DOZE wakeup never touched it.
	 */
	mfspr	r9,SPRN_HID0
	andis.	r9,r9,HID0_NAP@h
	beq	1f
	addis	r9, r11, nap_save_msscr0@ha
	lwz	r9,nap_save_msscr0@l(r9)
	mtspr	SPRN_MSSCR0, r9
	sync
	isync
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	/* Restore the default HID1 (PLL selection) saved at init time,
	 * undoing the 750FX low-speed switch done at NAP entry.
	 */
	addis	r9, r11, nap_save_hid1@ha
	lwz	r9,nap_save_hid1@l(r9)
	mtspr	SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	blr
_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
181 | |
	.data

/* Per-CPU saved MSSCR0 (one 32-bit word per CPU), written by
 * init_idle_6xx and restored on NAP wakeup on CPUs with the
 * L2-prefetch errata workaround.
 */
_GLOBAL(nap_save_msscr0)
	.space	4*NR_CPUS

/* Per-CPU saved HID1 (one word per CPU) for dual-PLL 750FX:
 * holds the default PLL selection restored on NAP wakeup.
 */
_GLOBAL(nap_save_hid1)
	.space	4*NR_CPUS

/* Non-zero requests switching a 750FX to the low-speed PLL while
 * napping; presumably set elsewhere in the kernel (not visible in
 * this file) -- TODO confirm the setter.
 */
_GLOBAL(powersave_lowspeed)
	.long	0
192 | |