1 | // SPDX-License-Identifier: GPL-2.0 |
2 | |
3 | #include <linux/sched.h> |
4 | #include <linux/sched/clock.h> |
5 | |
6 | #include <asm/cpu.h> |
7 | #include <asm/cpufeature.h> |
8 | #include <asm/e820/api.h> |
9 | #include <asm/mtrr.h> |
10 | #include <asm/msr.h> |
11 | |
12 | #include "cpu.h" |
13 | |
/* Bits in CPUID leaf 0xC0000001 EDX (Centaur extended feature flags). */
#define ACE_PRESENT	(1 << 6)
#define ACE_ENABLED	(1 << 7)
#define ACE_FCR		(1 << 28)	/* MSR_VIA_FCR */

/* Bits for the hardware RNG: presence/enable in CPUID EDX, enable in MSR. */
#define RNG_PRESENT	(1 << 2)
#define RNG_ENABLED	(1 << 3)
#define RNG_ENABLE	(1 << 6)	/* MSR_VIA_RNG */
21 | |
22 | static void init_c3(struct cpuinfo_x86 *c) |
23 | { |
24 | u32 lo, hi; |
25 | |
26 | /* Test for Centaur Extended Feature Flags presence */ |
27 | if (cpuid_eax(op: 0xC0000000) >= 0xC0000001) { |
28 | u32 tmp = cpuid_edx(op: 0xC0000001); |
29 | |
30 | /* enable ACE unit, if present and disabled */ |
31 | if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { |
32 | rdmsr(MSR_VIA_FCR, lo, hi); |
33 | lo |= ACE_FCR; /* enable ACE unit */ |
34 | wrmsr(MSR_VIA_FCR, lo, hi); |
35 | pr_info("CPU: Enabled ACE h/w crypto\n" ); |
36 | } |
37 | |
38 | /* enable RNG unit, if present and disabled */ |
39 | if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { |
40 | rdmsr(MSR_VIA_RNG, lo, hi); |
41 | lo |= RNG_ENABLE; /* enable RNG unit */ |
42 | wrmsr(MSR_VIA_RNG, lo, hi); |
43 | pr_info("CPU: Enabled h/w RNG\n" ); |
44 | } |
45 | |
46 | /* store Centaur Extended Feature Flags as |
47 | * word 5 of the CPU capability bit array |
48 | */ |
49 | c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(op: 0xC0000001); |
50 | } |
51 | #ifdef CONFIG_X86_32 |
52 | /* Cyrix III family needs CX8 & PGE explicitly enabled. */ |
53 | if (c->x86_model >= 6 && c->x86_model <= 13) { |
54 | rdmsr(MSR_VIA_FCR, lo, hi); |
55 | lo |= (1<<1 | 1<<7); |
56 | wrmsr(MSR_VIA_FCR, lo, hi); |
57 | set_cpu_cap(c, X86_FEATURE_CX8); |
58 | } |
59 | |
60 | /* Before Nehemiah, the C3's had 3dNOW! */ |
61 | if (c->x86_model >= 6 && c->x86_model < 9) |
62 | set_cpu_cap(c, X86_FEATURE_3DNOW); |
63 | #endif |
64 | if (c->x86 == 0x6 && c->x86_model >= 0xf) { |
65 | c->x86_cache_alignment = c->x86_clflush_size * 2; |
66 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
67 | } |
68 | |
69 | if (c->x86 >= 7) |
70 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
71 | } |
72 | |
/*
 * Bit masks for MSR_IDT_FCR1, the WinChip Feature Control Register.
 * Names starting with 'E' are enables (used in fcr_set below), names
 * starting with 'D' are disables (DPDC is cleared via fcr_clr).
 * Exact hardware semantics per IDT WinChip documentation — the code
 * here only sets/clears them wholesale in init_centaur().
 */
enum {
	ECX8		= 1<<1,		/* presumably CMPXCHG8B enable; CX8 cap is set alongside it */
	EIERRINT	= 1<<2,
	DPM		= 1<<3,
	DMCE		= 1<<4,
	DSTPCLK		= 1<<5,
	ELINEAR		= 1<<6,
	DSMC		= 1<<7,
	DTLOCK		= 1<<8,		/* NOTE: shares bit 8 with EDCTLB */
	EDCTLB		= 1<<8,
	EMMX		= 1<<9,
	DPDC		= 1<<11,
	EBRPRED		= 1<<12,
	DIC		= 1<<13,
	DDC		= 1<<14,
	DNA		= 1<<15,
	ERETSTK		= 1<<16,
	E2MMX		= 1<<19,
	EAMD3D		= 1<<20,
};
93 | |
94 | static void early_init_centaur(struct cpuinfo_x86 *c) |
95 | { |
96 | #ifdef CONFIG_X86_32 |
97 | /* Emulate MTRRs using Centaur's MCR. */ |
98 | if (c->x86 == 5) |
99 | set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); |
100 | #endif |
101 | if ((c->x86 == 6 && c->x86_model >= 0xf) || |
102 | (c->x86 >= 7)) |
103 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
104 | |
105 | #ifdef CONFIG_X86_64 |
106 | set_cpu_cap(c, X86_FEATURE_SYSENTER32); |
107 | #endif |
108 | if (c->x86_power & (1 << 8)) { |
109 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
110 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); |
111 | } |
112 | } |
113 | |
114 | static void init_centaur(struct cpuinfo_x86 *c) |
115 | { |
116 | #ifdef CONFIG_X86_32 |
117 | char *name; |
118 | u32 fcr_set = 0; |
119 | u32 fcr_clr = 0; |
120 | u32 lo, hi, newlo; |
121 | u32 aa, bb, cc, dd; |
122 | |
123 | /* |
124 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; |
125 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway |
126 | */ |
127 | clear_cpu_cap(c, 0*32+31); |
128 | #endif |
129 | early_init_centaur(c); |
130 | init_intel_cacheinfo(c); |
131 | |
132 | if (c->cpuid_level > 9) { |
133 | unsigned int eax = cpuid_eax(op: 10); |
134 | |
135 | /* |
136 | * Check for version and the number of counters |
137 | * Version(eax[7:0]) can't be 0; |
138 | * Counters(eax[15:8]) should be greater than 1; |
139 | */ |
140 | if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1)) |
141 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); |
142 | } |
143 | |
144 | #ifdef CONFIG_X86_32 |
145 | if (c->x86 == 5) { |
146 | switch (c->x86_model) { |
147 | case 4: |
148 | name = "C6" ; |
149 | fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; |
150 | fcr_clr = DPDC; |
151 | pr_notice("Disabling bugged TSC.\n" ); |
152 | clear_cpu_cap(c, X86_FEATURE_TSC); |
153 | break; |
154 | case 8: |
155 | switch (c->x86_stepping) { |
156 | default: |
157 | name = "2" ; |
158 | break; |
159 | case 7 ... 9: |
160 | name = "2A" ; |
161 | break; |
162 | case 10 ... 15: |
163 | name = "2B" ; |
164 | break; |
165 | } |
166 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| |
167 | E2MMX|EAMD3D; |
168 | fcr_clr = DPDC; |
169 | break; |
170 | case 9: |
171 | name = "3" ; |
172 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| |
173 | E2MMX|EAMD3D; |
174 | fcr_clr = DPDC; |
175 | break; |
176 | default: |
177 | name = "??" ; |
178 | } |
179 | |
180 | rdmsr(MSR_IDT_FCR1, lo, hi); |
181 | newlo = (lo|fcr_set) & (~fcr_clr); |
182 | |
183 | if (newlo != lo) { |
184 | pr_info("Centaur FCR was 0x%X now 0x%X\n" , |
185 | lo, newlo); |
186 | wrmsr(MSR_IDT_FCR1, newlo, hi); |
187 | } else { |
188 | pr_info("Centaur FCR is 0x%X\n" , lo); |
189 | } |
190 | /* Emulate MTRRs using Centaur's MCR. */ |
191 | set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); |
192 | /* Report CX8 */ |
193 | set_cpu_cap(c, X86_FEATURE_CX8); |
194 | /* Set 3DNow! on Winchip 2 and above. */ |
195 | if (c->x86_model >= 8) |
196 | set_cpu_cap(c, X86_FEATURE_3DNOW); |
197 | /* See if we can find out some more. */ |
198 | if (cpuid_eax(0x80000000) >= 0x80000005) { |
199 | /* Yes, we can. */ |
200 | cpuid(0x80000005, &aa, &bb, &cc, &dd); |
201 | /* Add L1 data and code cache sizes. */ |
202 | c->x86_cache_size = (cc>>24)+(dd>>24); |
203 | } |
204 | sprintf(c->x86_model_id, "WinChip %s" , name); |
205 | } |
206 | #endif |
207 | if (c->x86 == 6 || c->x86 >= 7) |
208 | init_c3(c); |
209 | #ifdef CONFIG_X86_64 |
210 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
211 | #endif |
212 | |
213 | init_ia32_feat_ctl(c); |
214 | } |
215 | |
216 | #ifdef CONFIG_X86_32 |
217 | static unsigned int |
218 | centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
219 | { |
220 | /* VIA C3 CPUs (670-68F) need further shifting. */ |
221 | if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) |
222 | size >>= 8; |
223 | |
224 | /* |
225 | * There's also an erratum in Nehemiah stepping 1, which |
226 | * returns '65KB' instead of '64KB' |
227 | * - Note, it seems this may only be in engineering samples. |
228 | */ |
229 | if ((c->x86 == 6) && (c->x86_model == 9) && |
230 | (c->x86_stepping == 1) && (size == 65)) |
231 | size -= 1; |
232 | return size; |
233 | } |
234 | #endif |
235 | |
/*
 * Vendor descriptor registered with the common x86 CPU setup code.
 * Matched against the CPUID vendor string "CentaurHauls".
 */
static const struct cpu_dev centaur_cpu_dev = {
	.c_vendor	= "Centaur",
	.c_ident	= { "CentaurHauls" },
	.c_early_init	= early_init_centaur,
	.c_init		= init_centaur,
#ifdef CONFIG_X86_32
	/* Legacy cache-size fixup, only needed on 32-bit parts. */
	.legacy_cache_size = centaur_size_cache,
#endif
	.c_x86_vendor	= X86_VENDOR_CENTAUR,
};

cpu_dev_register(centaur_cpu_dev);
248 | |