/* Get CPU type and Features for x86 processors.
   Copyright (C) 2012-2024 Free Software Foundation, Inc.
   Contributed by Sriraman Tallam (tmsriram@google.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
25
/* Processor model data filled in by cpu_indicator_init and consumed by
   __builtin_cpu_is / __builtin_cpu_supports.  */
struct __processor_model
{
  /* CPU vendor, a VENDOR_* value (e.g. VENDOR_INTEL, VENDOR_AMD).  */
  unsigned int __cpu_vendor;
  /* CPU type, e.g. INTEL_COREI7 or AMDFAM19H.  */
  unsigned int __cpu_type;
  /* CPU subtype, e.g. INTEL_COREI7_SKYLAKE or AMDFAM19H_ZNVER4.  */
  unsigned int __cpu_subtype;
  /* The first 32 features are stored as bitmasks in __cpu_features.
     The rest of features are stored as bitmasks in a separate array
     of unsigned int.  */
  unsigned int __cpu_features[1];
};
36
/* Additional CPUID-derived data that does not fit in
   struct __processor_model.  */
struct __processor_model2
{
  /* CPU family number (after extended-family adjustment).  */
  unsigned int __cpu_family;
  /* CPU model number (after extended-model adjustment).  */
  unsigned int __cpu_model;
  /* Maximum supported standard CPUID level (CPUID leaf 0, EAX).  */
  unsigned int __cpu_max_level;
  /* Maximum supported extended CPUID level (CPUID 0x80000000, EAX).  */
  unsigned int __cpu_ext_level;
};
44
/* Testing hooks: a test harness may define these before including this
   file to verify that the __builtin_cpu_is / __builtin_cpu_supports
   results match the detection below.  By default they expand to
   nothing.  */
#ifndef CHECK___builtin_cpu_is
# define CHECK___builtin_cpu_is(cpu)
#endif

#ifndef CHECK___builtin_cpu_supports
# define CHECK___builtin_cpu_supports(isa)
#endif
52
53/* Return non-zero if the processor has feature F. */
54
55static inline int
56has_cpu_feature (struct __processor_model *cpu_model,
57 unsigned int *cpu_features2,
58 enum processor_features feature)
59{
60 unsigned index, offset;
61 unsigned f = feature;
62
63 if (f < 32)
64 {
65 /* The first 32 features. */
66 return cpu_model->__cpu_features[0] & (1U << f);
67 }
68 else
69 {
70 /* The rest of features. cpu_features2[i] contains features from
71 (32 + i * 32) to (31 + 32 + i * 32), inclusively. */
72 f -= 32;
73 index = f / 32;
74 offset = f % 32;
75 return cpu_features2[index] & (1U << offset);
76 }
77}
78
79/* Save FEATURE to either CPU_MODEL or CPU_FEATURES2. */
80
81static inline void
82set_cpu_feature (struct __processor_model *cpu_model,
83 unsigned int *cpu_features2,
84 enum processor_features feature)
85{
86 unsigned index, offset;
87 unsigned f = feature;
88
89 if (f < 32)
90 {
91 /* The first 32 features. */
92 cpu_model->__cpu_features[0] |= (1U << f);
93 }
94 else
95 {
96 /* The rest of features. cpu_features2[i] contains features from
97 (32 + i * 32) to (31 + 32 + i * 32), inclusively. */
98 f -= 32;
99 index = f / 32;
100 offset = f % 32;
101 cpu_features2[index] |= (1U << offset);
102 }
103}
104
105/* Drop FEATURE from either CPU_MODEL or CPU_FEATURES2. */
106
107static inline void
108reset_cpu_feature (struct __processor_model *cpu_model,
109 unsigned int *cpu_features2,
110 enum processor_features feature)
111{
112 unsigned index, offset;
113 unsigned f = feature;
114
115 if (f < 32)
116 {
117 /* The first 32 features. */
118 cpu_model->__cpu_features[0] &= ~(1U << f);
119 }
120 else
121 {
122 /* The rest of features. cpu_features2[i] contains features from
123 (32 + i * 32) to (31 + 32 + i * 32), inclusively. */
124 f -= 32;
125 index = f / 32;
126 offset = f % 32;
127 cpu_features2[index] &= ~(1U << offset);
128 }
129}
130
131/* Get the specific type of AMD CPU and return AMD CPU name. Return
132 NULL for unknown AMD CPU. */
133
134static inline const char *
135get_amd_cpu (struct __processor_model *cpu_model,
136 struct __processor_model2 *cpu_model2,
137 unsigned int *cpu_features2)
138{
139 const char *cpu = NULL;
140 unsigned int family = cpu_model2->__cpu_family;
141 unsigned int model = cpu_model2->__cpu_model;
142
143 switch (family)
144 {
145 case 0x10:
146 /* AMD Family 10h. */
147 cpu = "amdfam10";
148 cpu_model->__cpu_type = AMDFAM10H;
149 switch (model)
150 {
151 case 0x2:
152 /* Barcelona. */
153 CHECK___builtin_cpu_is ("amdfam10h");
154 CHECK___builtin_cpu_is ("barcelona");
155 cpu_model->__cpu_subtype = AMDFAM10H_BARCELONA;
156 break;
157 case 0x4:
158 /* Shanghai. */
159 CHECK___builtin_cpu_is ("amdfam10h");
160 CHECK___builtin_cpu_is ("shanghai");
161 cpu_model->__cpu_subtype = AMDFAM10H_SHANGHAI;
162 break;
163 case 0x8:
164 /* Istanbul. */
165 CHECK___builtin_cpu_is ("amdfam10h");
166 CHECK___builtin_cpu_is ("istanbul");
167 cpu_model->__cpu_subtype = AMDFAM10H_ISTANBUL;
168 break;
169 default:
170 break;
171 }
172 break;
173 case 0x14:
174 /* AMD Family 14h "btver1". */
175 cpu = "btver1";
176 CHECK___builtin_cpu_is ("btver1");
177 cpu_model->__cpu_type = AMD_BTVER1;
178 break;
179 case 0x15:
180 /* AMD Family 15h "Bulldozer". */
181 cpu_model->__cpu_type = AMDFAM15H;
182 if (model == 0x2)
183 {
184 /* Bulldozer version 2 "Piledriver" */
185 cpu = "bdver2";
186 CHECK___builtin_cpu_is ("bdver2");
187 cpu_model->__cpu_subtype = AMDFAM15H_BDVER2;
188 }
189 else if (model <= 0xf)
190 {
191 /* Bulldozer version 1. */
192 cpu = "bdver1";
193 CHECK___builtin_cpu_is ("bdver1");
194 cpu_model->__cpu_subtype = AMDFAM15H_BDVER1;
195 }
196 else if (model <= 0x2f)
197 {
198 /* Bulldozer version 2 "Piledriver" */
199 cpu = "bdver2";
200 CHECK___builtin_cpu_is ("bdver2");
201 cpu_model->__cpu_subtype = AMDFAM15H_BDVER2;
202 }
203 else if (model <= 0x4f)
204 {
205 /* Bulldozer version 3 "Steamroller" */
206 cpu = "bdver3";
207 CHECK___builtin_cpu_is ("bdver3");
208 cpu_model->__cpu_subtype = AMDFAM15H_BDVER3;
209 }
210 else if (model <= 0x7f)
211 {
212 /* Bulldozer version 4 "Excavator" */
213 cpu = "bdver4";
214 CHECK___builtin_cpu_is ("bdver4");
215 cpu_model->__cpu_subtype = AMDFAM15H_BDVER4;
216 }
217 else if (has_cpu_feature (cpu_model, cpu_features2,
218 feature: FEATURE_AVX2))
219 {
220 cpu = "bdver4";
221 CHECK___builtin_cpu_is ("bdver4");
222 cpu_model->__cpu_subtype = AMDFAM15H_BDVER4;
223 }
224 else if (has_cpu_feature (cpu_model, cpu_features2,
225 feature: FEATURE_XSAVEOPT))
226 {
227 cpu = "bdver3";
228 CHECK___builtin_cpu_is ("bdver3");
229 cpu_model->__cpu_subtype = AMDFAM15H_BDVER3;
230 }
231 else if (has_cpu_feature (cpu_model, cpu_features2,
232 feature: FEATURE_BMI))
233 {
234 cpu = "bdver2";
235 CHECK___builtin_cpu_is ("bdver2");
236 cpu_model->__cpu_subtype = AMDFAM15H_BDVER2;
237 }
238 else if (has_cpu_feature (cpu_model, cpu_features2,
239 feature: FEATURE_XOP))
240 {
241 cpu = "bdver1";
242 CHECK___builtin_cpu_is ("bdver1");
243 cpu_model->__cpu_subtype = AMDFAM15H_BDVER1;
244 }
245 break;
246 case 0x16:
247 /* AMD Family 16h "btver2" */
248 cpu = "btver2";
249 CHECK___builtin_cpu_is ("btver2");
250 cpu_model->__cpu_type = AMD_BTVER2;
251 break;
252 case 0x17:
253 cpu_model->__cpu_type = AMDFAM17H;
254 if (model <= 0x1f)
255 {
256 /* AMD family 17h version 1. */
257 cpu = "znver1";
258 CHECK___builtin_cpu_is ("znver1");
259 cpu_model->__cpu_subtype = AMDFAM17H_ZNVER1;
260 }
261 else if (model >= 0x30)
262 {
263 cpu = "znver2";
264 CHECK___builtin_cpu_is ("znver2");
265 cpu_model->__cpu_subtype = AMDFAM17H_ZNVER2;
266 }
267 else if (has_cpu_feature (cpu_model, cpu_features2,
268 feature: FEATURE_CLWB))
269 {
270 cpu = "znver2";
271 CHECK___builtin_cpu_is ("znver2");
272 cpu_model->__cpu_subtype = AMDFAM17H_ZNVER2;
273 }
274 else if (has_cpu_feature (cpu_model, cpu_features2,
275 feature: FEATURE_CLZERO))
276 {
277 cpu = "znver1";
278 CHECK___builtin_cpu_is ("znver1");
279 cpu_model->__cpu_subtype = AMDFAM17H_ZNVER1;
280 }
281 break;
282 case 0x19:
283 cpu_model->__cpu_type = AMDFAM19H;
284 /* AMD family 19h. */
285 if (model <= 0x0f)
286 {
287 cpu = "znver3";
288 CHECK___builtin_cpu_is ("znver3");
289 cpu_model->__cpu_subtype = AMDFAM19H_ZNVER3;
290 }
291 else if ((model >= 0x10 && model <= 0x1f)
292 || (model >= 0x60 && model <= 0xaf))
293 {
294 cpu = "znver4";
295 CHECK___builtin_cpu_is ("znver4");
296 cpu_model->__cpu_subtype = AMDFAM19H_ZNVER4;
297 }
298 else if (has_cpu_feature (cpu_model, cpu_features2,
299 feature: FEATURE_AVX512F))
300 {
301 cpu = "znver4";
302 CHECK___builtin_cpu_is ("znver4");
303 cpu_model->__cpu_subtype = AMDFAM19H_ZNVER4;
304 }
305 else if (has_cpu_feature (cpu_model, cpu_features2,
306 feature: FEATURE_VAES))
307 {
308 cpu = "znver3";
309 CHECK___builtin_cpu_is ("znver3");
310 cpu_model->__cpu_subtype = AMDFAM19H_ZNVER3;
311 }
312 break;
313 case 0x1a:
314 cpu_model->__cpu_type = AMDFAM1AH;
315 if (model <= 0x77)
316 {
317 cpu = "znver5";
318 CHECK___builtin_cpu_is ("znver5");
319 cpu_model->__cpu_subtype = AMDFAM1AH_ZNVER5;
320 }
321 else if (has_cpu_feature (cpu_model, cpu_features2,
322 feature: FEATURE_AVX512VP2INTERSECT))
323 {
324 cpu = "znver5";
325 CHECK___builtin_cpu_is ("znver5");
326 cpu_model->__cpu_subtype = AMDFAM1AH_ZNVER5;
327 }
328 break;
329 default:
330 break;
331 }
332
333 return cpu;
334}
335
336/* Get the specific type of Intel CPU and return Intel CPU name. Return
337 NULL for unknown Intel CPU. */
338
339static inline const char *
340get_intel_cpu (struct __processor_model *cpu_model,
341 struct __processor_model2 *cpu_model2,
342 unsigned int *cpu_features2)
343{
344 const char *cpu = NULL;
345
346 /* Parse family and model only for model 6. */
347 if (cpu_model2->__cpu_family != 0x6)
348 return cpu;
349
350 switch (cpu_model2->__cpu_model)
351 {
352 case 0x1c:
353 case 0x26:
354 /* Bonnell. */
355 cpu = "bonnell";
356 CHECK___builtin_cpu_is ("atom");
357 cpu_model->__cpu_type = INTEL_BONNELL;
358 break;
359 case 0x37:
360 case 0x4a:
361 case 0x4d:
362 case 0x5d:
363 /* Silvermont. */
364 case 0x4c:
365 case 0x5a:
366 case 0x75:
367 /* Airmont. */
368 cpu = "silvermont";
369 CHECK___builtin_cpu_is ("silvermont");
370 cpu_model->__cpu_type = INTEL_SILVERMONT;
371 break;
372 case 0x5c:
373 case 0x5f:
374 /* Goldmont. */
375 cpu = "goldmont";
376 CHECK___builtin_cpu_is ("goldmont");
377 cpu_model->__cpu_type = INTEL_GOLDMONT;
378 break;
379 case 0x7a:
380 /* Goldmont Plus. */
381 cpu = "goldmont-plus";
382 CHECK___builtin_cpu_is ("goldmont-plus");
383 cpu_model->__cpu_type = INTEL_GOLDMONT_PLUS;
384 break;
385 case 0x86:
386 case 0x96:
387 case 0x9c:
388 /* Tremont. */
389 cpu = "tremont";
390 CHECK___builtin_cpu_is ("tremont");
391 cpu_model->__cpu_type = INTEL_TREMONT;
392 break;
393 case 0x57:
394 /* Knights Landing. */
395 cpu = "knl";
396 CHECK___builtin_cpu_is ("knl");
397 cpu_model->__cpu_type = INTEL_KNL;
398 break;
399 case 0x85:
400 /* Knights Mill. */
401 cpu = "knm";
402 CHECK___builtin_cpu_is ("knm");
403 cpu_model->__cpu_type = INTEL_KNM;
404 break;
405 case 0x1a:
406 case 0x1e:
407 case 0x1f:
408 case 0x2e:
409 /* Nehalem. */
410 cpu = "nehalem";
411 CHECK___builtin_cpu_is ("corei7");
412 CHECK___builtin_cpu_is ("nehalem");
413 cpu_model->__cpu_type = INTEL_COREI7;
414 cpu_model->__cpu_subtype = INTEL_COREI7_NEHALEM;
415 break;
416 case 0x25:
417 case 0x2c:
418 case 0x2f:
419 /* Westmere. */
420 cpu = "westmere";
421 CHECK___builtin_cpu_is ("corei7");
422 CHECK___builtin_cpu_is ("westmere");
423 cpu_model->__cpu_type = INTEL_COREI7;
424 cpu_model->__cpu_subtype = INTEL_COREI7_WESTMERE;
425 break;
426 case 0x2a:
427 case 0x2d:
428 /* Sandy Bridge. */
429 cpu = "sandybridge";
430 CHECK___builtin_cpu_is ("corei7");
431 CHECK___builtin_cpu_is ("sandybridge");
432 cpu_model->__cpu_type = INTEL_COREI7;
433 cpu_model->__cpu_subtype = INTEL_COREI7_SANDYBRIDGE;
434 break;
435 case 0x3a:
436 case 0x3e:
437 /* Ivy Bridge. */
438 cpu = "ivybridge";
439 CHECK___builtin_cpu_is ("corei7");
440 CHECK___builtin_cpu_is ("ivybridge");
441 cpu_model->__cpu_type = INTEL_COREI7;
442 cpu_model->__cpu_subtype = INTEL_COREI7_IVYBRIDGE;
443 break;
444 case 0x3c:
445 case 0x3f:
446 case 0x45:
447 case 0x46:
448 /* Haswell. */
449 cpu = "haswell";
450 CHECK___builtin_cpu_is ("corei7");
451 CHECK___builtin_cpu_is ("haswell");
452 cpu_model->__cpu_type = INTEL_COREI7;
453 cpu_model->__cpu_subtype = INTEL_COREI7_HASWELL;
454 break;
455 case 0x3d:
456 case 0x47:
457 case 0x4f:
458 case 0x56:
459 /* Broadwell. */
460 cpu = "broadwell";
461 CHECK___builtin_cpu_is ("corei7");
462 CHECK___builtin_cpu_is ("broadwell");
463 cpu_model->__cpu_type = INTEL_COREI7;
464 cpu_model->__cpu_subtype = INTEL_COREI7_BROADWELL;
465 break;
466 case 0x4e:
467 case 0x5e:
468 /* Skylake. */
469 case 0x8e:
470 case 0x9e:
471 /* Kaby Lake. */
472 case 0xa5:
473 case 0xa6:
474 /* Comet Lake. */
475 cpu = "skylake";
476 CHECK___builtin_cpu_is ("corei7");
477 CHECK___builtin_cpu_is ("skylake");
478 cpu_model->__cpu_type = INTEL_COREI7;
479 cpu_model->__cpu_subtype = INTEL_COREI7_SKYLAKE;
480 break;
481 case 0xa7:
482 /* Rocket Lake. */
483 cpu = "rocketlake";
484 CHECK___builtin_cpu_is ("corei7");
485 CHECK___builtin_cpu_is ("rocketlake");
486 cpu_model->__cpu_type = INTEL_COREI7;
487 cpu_model->__cpu_subtype = INTEL_COREI7_ROCKETLAKE;
488 break;
489 case 0x55:
490 CHECK___builtin_cpu_is ("corei7");
491 cpu_model->__cpu_type = INTEL_COREI7;
492 if (has_cpu_feature (cpu_model, cpu_features2,
493 feature: FEATURE_AVX512BF16))
494 {
495 /* Cooper Lake. */
496 cpu = "cooperlake";
497 CHECK___builtin_cpu_is ("cooperlake");
498 cpu_model->__cpu_subtype = INTEL_COREI7_COOPERLAKE;
499 }
500 else if (has_cpu_feature (cpu_model, cpu_features2,
501 feature: FEATURE_AVX512VNNI))
502 {
503 /* Cascade Lake. */
504 cpu = "cascadelake";
505 CHECK___builtin_cpu_is ("cascadelake");
506 cpu_model->__cpu_subtype = INTEL_COREI7_CASCADELAKE;
507 }
508 else
509 {
510 /* Skylake with AVX-512 support. */
511 cpu = "skylake-avx512";
512 CHECK___builtin_cpu_is ("skylake-avx512");
513 cpu_model->__cpu_subtype = INTEL_COREI7_SKYLAKE_AVX512;
514 }
515 break;
516 case 0x66:
517 /* Cannon Lake. */
518 cpu = "cannonlake";
519 CHECK___builtin_cpu_is ("corei7");
520 CHECK___builtin_cpu_is ("cannonlake");
521 cpu_model->__cpu_type = INTEL_COREI7;
522 cpu_model->__cpu_subtype = INTEL_COREI7_CANNONLAKE;
523 break;
524 case 0x6a:
525 case 0x6c:
526 /* Ice Lake server. */
527 cpu = "icelake-server";
528 CHECK___builtin_cpu_is ("corei7");
529 CHECK___builtin_cpu_is ("icelake-server");
530 cpu_model->__cpu_type = INTEL_COREI7;
531 cpu_model->__cpu_subtype = INTEL_COREI7_ICELAKE_SERVER;
532 break;
533 case 0x7e:
534 case 0x7d:
535 case 0x9d:
536 /* Ice Lake client. */
537 cpu = "icelake-client";
538 CHECK___builtin_cpu_is ("corei7");
539 CHECK___builtin_cpu_is ("icelake-client");
540 cpu_model->__cpu_type = INTEL_COREI7;
541 cpu_model->__cpu_subtype = INTEL_COREI7_ICELAKE_CLIENT;
542 break;
543 case 0x8c:
544 case 0x8d:
545 /* Tiger Lake. */
546 cpu = "tigerlake";
547 CHECK___builtin_cpu_is ("corei7");
548 CHECK___builtin_cpu_is ("tigerlake");
549 cpu_model->__cpu_type = INTEL_COREI7;
550 cpu_model->__cpu_subtype = INTEL_COREI7_TIGERLAKE;
551 break;
552
553 case 0xbe:
554 /* Alder Lake N, E-core only. */
555 case 0x97:
556 case 0x9a:
557 /* Alder Lake. */
558 case 0xb7:
559 case 0xba:
560 case 0xbf:
561 /* Raptor Lake. */
562 case 0xaa:
563 case 0xac:
564 /* Meteor Lake. */
565 cpu = "alderlake";
566 CHECK___builtin_cpu_is ("corei7");
567 CHECK___builtin_cpu_is ("alderlake");
568 cpu_model->__cpu_type = INTEL_COREI7;
569 cpu_model->__cpu_subtype = INTEL_COREI7_ALDERLAKE;
570 break;
571 case 0x8f:
572 /* Sapphire Rapids. */
573 case 0xcf:
574 /* Emerald Rapids. */
575 cpu = "sapphirerapids";
576 CHECK___builtin_cpu_is ("corei7");
577 CHECK___builtin_cpu_is ("sapphirerapids");
578 cpu_model->__cpu_type = INTEL_COREI7;
579 cpu_model->__cpu_subtype = INTEL_COREI7_SAPPHIRERAPIDS;
580 break;
581 case 0xaf:
582 /* Sierra Forest. */
583 cpu = "sierraforest";
584 CHECK___builtin_cpu_is ("sierraforest");
585 cpu_model->__cpu_type = INTEL_SIERRAFOREST;
586 break;
587 case 0xad:
588 /* Granite Rapids. */
589 cpu = "graniterapids";
590 CHECK___builtin_cpu_is ("corei7");
591 CHECK___builtin_cpu_is ("graniterapids");
592 cpu_model->__cpu_type = INTEL_COREI7;
593 cpu_model->__cpu_subtype = INTEL_COREI7_GRANITERAPIDS;
594 break;
595 case 0xae:
596 /* Granite Rapids D. */
597 cpu = "graniterapids-d";
598 CHECK___builtin_cpu_is ("corei7");
599 CHECK___builtin_cpu_is ("graniterapids-d");
600 cpu_model->__cpu_type = INTEL_COREI7;
601 cpu_model->__cpu_subtype = INTEL_COREI7_GRANITERAPIDS_D;
602 break;
603 case 0xb6:
604 /* Grand Ridge. */
605 cpu = "grandridge";
606 CHECK___builtin_cpu_is ("grandridge");
607 cpu_model->__cpu_type = INTEL_GRANDRIDGE;
608 break;
609 case 0xc5:
610 /* Arrow Lake. */
611 cpu = "arrowlake";
612 CHECK___builtin_cpu_is ("corei7");
613 CHECK___builtin_cpu_is ("arrowlake");
614 cpu_model->__cpu_type = INTEL_COREI7;
615 cpu_model->__cpu_subtype = INTEL_COREI7_ARROWLAKE;
616 break;
617 case 0xc6:
618 /* Arrow Lake S. */
619 case 0xbd:
620 /* Lunar Lake. */
621 cpu = "arrowlake-s";
622 CHECK___builtin_cpu_is ("corei7");
623 CHECK___builtin_cpu_is ("arrowlake-s");
624 cpu_model->__cpu_type = INTEL_COREI7;
625 cpu_model->__cpu_subtype = INTEL_COREI7_ARROWLAKE_S;
626 break;
627 case 0xdd:
628 /* Clearwater Forest. */
629 cpu = "clearwaterforest";
630 CHECK___builtin_cpu_is ("clearwaterforest");
631 cpu_model->__cpu_type = INTEL_CLEARWATERFOREST;
632 break;
633 case 0xcc:
634 /* Panther Lake. */
635 cpu = "pantherlake";
636 CHECK___builtin_cpu_is ("corei7");
637 CHECK___builtin_cpu_is ("pantherlake");
638 cpu_model->__cpu_type = INTEL_COREI7;
639 cpu_model->__cpu_subtype = INTEL_COREI7_PANTHERLAKE;
640 break;
641 case 0x17:
642 case 0x1d:
643 /* Penryn. */
644 case 0x0f:
645 /* Merom. */
646 cpu = "core2";
647 CHECK___builtin_cpu_is ("core2");
648 cpu_model->__cpu_type = INTEL_CORE2;
649 break;
650 default:
651 break;
652 }
653
654 return cpu;
655}
656
657/* Get the specific type of ZHAOXIN CPU and return ZHAOXIN CPU name.
658 Return NULL for unknown ZHAOXIN CPU. */
659
660static inline const char *
661get_zhaoxin_cpu (struct __processor_model *cpu_model,
662 struct __processor_model2 *cpu_model2,
663 unsigned int *cpu_features2)
664{
665 const char *cpu = NULL;
666 unsigned int family = cpu_model2->__cpu_family;
667 unsigned int model = cpu_model2->__cpu_model;
668
669 switch (family)
670 {
671 /* ZHAOXIN family 7h. */
672 case 0x07:
673 cpu_model->__cpu_type = ZHAOXIN_FAM7H;
674 if (model == 0x3b)
675 {
676 cpu = "lujiazui";
677 CHECK___builtin_cpu_is ("lujiazui");
678 reset_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_AVX);
679 reset_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_F16C);
680 cpu_model->__cpu_subtype = ZHAOXIN_FAM7H_LUJIAZUI;
681 }
682 else if (model >= 0x5b)
683 {
684 cpu = "yongfeng";
685 CHECK___builtin_cpu_is ("yongfeng");
686 cpu_model->__cpu_subtype = ZHAOXIN_FAM7H_YONGFENG;
687 }
688 break;
689 default:
690 break;
691 }
692
693 return cpu;
694}
695
696/* ECX and EDX are output of CPUID at level one. */
697static inline void
698get_available_features (struct __processor_model *cpu_model,
699 struct __processor_model2 *cpu_model2,
700 unsigned int *cpu_features2,
701 unsigned int ecx, unsigned int edx)
702{
703 unsigned int max_cpuid_level = cpu_model2->__cpu_max_level;
704 unsigned int eax, ebx;
705 unsigned int ext_level;
706
707 /* Get XCR_XFEATURE_ENABLED_MASK register with xgetbv. */
708#define XCR_XFEATURE_ENABLED_MASK 0x0
709#define XSTATE_FP 0x1
710#define XSTATE_SSE 0x2
711#define XSTATE_YMM 0x4
712#define XSTATE_OPMASK 0x20
713#define XSTATE_ZMM 0x40
714#define XSTATE_HI_ZMM 0x80
715#define XSTATE_TILECFG 0x20000
716#define XSTATE_TILEDATA 0x40000
717#define XSTATE_APX_F 0x80000
718
719#define XCR_AVX_ENABLED_MASK \
720 (XSTATE_SSE | XSTATE_YMM)
721#define XCR_AVX512F_ENABLED_MASK \
722 (XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM)
723#define XCR_AMX_ENABLED_MASK \
724 (XSTATE_TILECFG | XSTATE_TILEDATA)
725#define XCR_APX_F_ENABLED_MASK XSTATE_APX_F
726
727 /* Check if AVX, AVX512 and APX are usable. */
728 int avx_usable = 0;
729 int avx512_usable = 0;
730 int amx_usable = 0;
731 int apx_usable = 0;
732 /* Check if KL is usable. */
733 int has_kl = 0;
734 /* Record AVX10 version. */
735 int avx10_set = 0;
736 int version = 0;
737 if ((ecx & bit_OSXSAVE))
738 {
739 /* Check if XMM, YMM, OPMASK, upper 256 bits of ZMM0-ZMM15 and
740 ZMM16-ZMM31 states are supported by OSXSAVE. */
741 unsigned int xcrlow;
742 unsigned int xcrhigh;
743 __asm__ (".byte 0x0f, 0x01, 0xd0"
744 : "=a" (xcrlow), "=d" (xcrhigh)
745 : "c" (XCR_XFEATURE_ENABLED_MASK));
746 if ((xcrlow & XCR_AVX_ENABLED_MASK) == XCR_AVX_ENABLED_MASK)
747 {
748 avx_usable = 1;
749 avx512_usable = ((xcrlow & XCR_AVX512F_ENABLED_MASK)
750 == XCR_AVX512F_ENABLED_MASK);
751 }
752 amx_usable = ((xcrlow & XCR_AMX_ENABLED_MASK)
753 == XCR_AMX_ENABLED_MASK);
754 apx_usable = ((xcrlow & XCR_APX_F_ENABLED_MASK)
755 == XCR_APX_F_ENABLED_MASK);
756 }
757
758#define set_feature(f) \
759 set_cpu_feature (cpu_model, cpu_features2, f)
760
761 if (edx & bit_CMOV)
762 set_feature (FEATURE_CMOV);
763 if (edx & bit_MMX)
764 set_feature (FEATURE_MMX);
765 if (edx & bit_SSE)
766 set_feature (FEATURE_SSE);
767 if (edx & bit_SSE2)
768 set_feature (FEATURE_SSE2);
769 if (edx & bit_CMPXCHG8B)
770 set_feature (FEATURE_CMPXCHG8B);
771 if (edx & bit_FXSAVE)
772 set_feature (FEATURE_FXSAVE);
773
774 if (ecx & bit_POPCNT)
775 set_feature (FEATURE_POPCNT);
776 if (ecx & bit_AES)
777 set_feature (FEATURE_AES);
778 if (ecx & bit_PCLMUL)
779 set_feature (FEATURE_PCLMUL);
780 if (ecx & bit_SSE3)
781 set_feature (FEATURE_SSE3);
782 if (ecx & bit_SSSE3)
783 set_feature (FEATURE_SSSE3);
784 if (ecx & bit_SSE4_1)
785 set_feature (FEATURE_SSE4_1);
786 if (ecx & bit_SSE4_2)
787 set_feature (FEATURE_SSE4_2);
788 if (ecx & bit_OSXSAVE)
789 set_feature (FEATURE_OSXSAVE);
790 if (ecx & bit_CMPXCHG16B)
791 set_feature (FEATURE_CMPXCHG16B);
792 if (ecx & bit_MOVBE)
793 set_feature (FEATURE_MOVBE);
794 if (ecx & bit_AES)
795 set_feature (FEATURE_AES);
796 if (ecx & bit_RDRND)
797 set_feature (FEATURE_RDRND);
798 if (ecx & bit_XSAVE)
799 set_feature (FEATURE_XSAVE);
800 if (avx_usable)
801 {
802 if (ecx & bit_AVX)
803 set_feature (FEATURE_AVX);
804 if (ecx & bit_FMA)
805 set_feature (FEATURE_FMA);
806 if (ecx & bit_F16C)
807 set_feature (FEATURE_F16C);
808 }
809
810 /* Get Advanced Features at level 7 (eax = 7, ecx = 0/1). */
811 if (max_cpuid_level >= 7)
812 {
813 unsigned int max_subleaf_level;
814
815 __cpuid_count (7, 0, max_subleaf_level, ebx, ecx, edx);
816 if (ebx & bit_BMI)
817 set_feature (FEATURE_BMI);
818 if (ebx & bit_SGX)
819 set_feature (FEATURE_SGX);
820 if (ebx & bit_HLE)
821 set_feature (FEATURE_HLE);
822 if (ebx & bit_RTM)
823 set_feature (FEATURE_RTM);
824 if (avx_usable)
825 {
826 if (ebx & bit_AVX2)
827 set_feature (FEATURE_AVX2);
828 if (ecx & bit_VPCLMULQDQ)
829 set_feature (FEATURE_VPCLMULQDQ);
830 if (ecx & bit_VAES)
831 set_feature (FEATURE_VAES);
832 }
833 if (ebx & bit_BMI2)
834 set_feature (FEATURE_BMI2);
835 if (ebx & bit_FSGSBASE)
836 set_feature (FEATURE_FSGSBASE);
837 if (ebx & bit_RDSEED)
838 set_feature (FEATURE_RDSEED);
839 if (ebx & bit_ADX)
840 set_feature (FEATURE_ADX);
841 if (ebx & bit_SHA)
842 set_feature (FEATURE_SHA);
843 if (ebx & bit_CLFLUSHOPT)
844 set_feature (FEATURE_CLFLUSHOPT);
845 if (ebx & bit_CLWB)
846 set_feature (FEATURE_CLWB);
847 if (ecx & bit_PREFETCHWT1)
848 set_feature (FEATURE_PREFETCHWT1);
849 /* NB: bit_OSPKE indicates that OS supports PKU. */
850 if (ecx & bit_OSPKE)
851 set_feature (FEATURE_PKU);
852 if (ecx & bit_RDPID)
853 set_feature (FEATURE_RDPID);
854 if (ecx & bit_GFNI)
855 set_feature (FEATURE_GFNI);
856 if (ecx & bit_MOVDIRI)
857 set_feature (FEATURE_MOVDIRI);
858 if (ecx & bit_MOVDIR64B)
859 set_feature (FEATURE_MOVDIR64B);
860 if (ecx & bit_ENQCMD)
861 set_feature (FEATURE_ENQCMD);
862 if (ecx & bit_CLDEMOTE)
863 set_feature (FEATURE_CLDEMOTE);
864 if (ecx & bit_WAITPKG)
865 set_feature (FEATURE_WAITPKG);
866 if (ecx & bit_SHSTK)
867 set_feature (FEATURE_SHSTK);
868 if (ecx & bit_KL)
869 has_kl = 1;
870 if (edx & bit_SERIALIZE)
871 set_feature (FEATURE_SERIALIZE);
872 if (edx & bit_TSXLDTRK)
873 set_feature (FEATURE_TSXLDTRK);
874 if (edx & bit_PCONFIG)
875 set_feature (FEATURE_PCONFIG);
876 if (edx & bit_IBT)
877 set_feature (FEATURE_IBT);
878 if (edx & bit_UINTR)
879 set_feature (FEATURE_UINTR);
880 if (amx_usable)
881 {
882 if (edx & bit_AMX_TILE)
883 set_feature (FEATURE_AMX_TILE);
884 if (edx & bit_AMX_INT8)
885 set_feature (FEATURE_AMX_INT8);
886 if (edx & bit_AMX_BF16)
887 set_feature (FEATURE_AMX_BF16);
888 }
889 if (avx512_usable)
890 {
891 if (ebx & bit_AVX512F)
892 set_feature (FEATURE_AVX512F);
893 if (ebx & bit_AVX512VL)
894 set_feature (FEATURE_AVX512VL);
895 if (ebx & bit_AVX512BW)
896 set_feature (FEATURE_AVX512BW);
897 if (ebx & bit_AVX512DQ)
898 set_feature (FEATURE_AVX512DQ);
899 if (ebx & bit_AVX512CD)
900 set_feature (FEATURE_AVX512CD);
901 if (ebx & bit_AVX512PF)
902 set_feature (FEATURE_AVX512PF);
903 if (ebx & bit_AVX512ER)
904 set_feature (FEATURE_AVX512ER);
905 if (ebx & bit_AVX512IFMA)
906 set_feature (FEATURE_AVX512IFMA);
907 if (ecx & bit_AVX512VBMI)
908 set_feature (FEATURE_AVX512VBMI);
909 if (ecx & bit_AVX512VBMI2)
910 set_feature (FEATURE_AVX512VBMI2);
911 if (ecx & bit_AVX512VNNI)
912 set_feature (FEATURE_AVX512VNNI);
913 if (ecx & bit_AVX512BITALG)
914 set_feature (FEATURE_AVX512BITALG);
915 if (ecx & bit_AVX512VPOPCNTDQ)
916 set_feature (FEATURE_AVX512VPOPCNTDQ);
917 if (edx & bit_AVX5124VNNIW)
918 set_feature (FEATURE_AVX5124VNNIW);
919 if (edx & bit_AVX5124FMAPS)
920 set_feature (FEATURE_AVX5124FMAPS);
921 if (edx & bit_AVX512VP2INTERSECT)
922 set_feature (FEATURE_AVX512VP2INTERSECT);
923 if (edx & bit_AVX512FP16)
924 set_feature (FEATURE_AVX512FP16);
925 }
926
927 if (max_subleaf_level >= 1)
928 {
929 __cpuid_count (7, 1, eax, ebx, ecx, edx);
930 if (eax & bit_HRESET)
931 set_feature (FEATURE_HRESET);
932 if (eax & bit_CMPCCXADD)
933 set_feature(FEATURE_CMPCCXADD);
934 if (edx & bit_PREFETCHI)
935 set_feature (FEATURE_PREFETCHI);
936 if (eax & bit_RAOINT)
937 set_feature (FEATURE_RAOINT);
938 if (edx & bit_USER_MSR)
939 set_feature (FEATURE_USER_MSR);
940 if (avx_usable)
941 {
942 if (eax & bit_AVXVNNI)
943 set_feature (FEATURE_AVXVNNI);
944 if (eax & bit_AVXIFMA)
945 set_feature (FEATURE_AVXIFMA);
946 if (edx & bit_AVXVNNIINT8)
947 set_feature (FEATURE_AVXVNNIINT8);
948 if (edx & bit_AVXNECONVERT)
949 set_feature (FEATURE_AVXNECONVERT);
950 if (edx & bit_AVXVNNIINT16)
951 set_feature (FEATURE_AVXVNNIINT16);
952 if (eax & bit_SM3)
953 set_feature (FEATURE_SM3);
954 if (eax & bit_SHA512)
955 set_feature (FEATURE_SHA512);
956 if (eax & bit_SM4)
957 set_feature (FEATURE_SM4);
958 }
959 if (avx512_usable)
960 {
961 if (eax & bit_AVX512BF16)
962 set_feature (FEATURE_AVX512BF16);
963 /* AVX10 has the same XSTATE with AVX512. */
964 if (edx & bit_AVX10)
965 avx10_set = 1;
966 }
967 if (amx_usable)
968 {
969 if (eax & bit_AMX_FP16)
970 set_feature (FEATURE_AMX_FP16);
971 if (edx & bit_AMX_COMPLEX)
972 set_feature (FEATURE_AMX_COMPLEX);
973 }
974 if (apx_usable)
975 {
976 if (edx & bit_APX_F)
977 set_feature (FEATURE_APX_F);
978 }
979 }
980 }
981
982 /* Get Advanced Features at level 0xd (eax = 0xd, ecx = 1). */
983 if (max_cpuid_level >= 0xd)
984 {
985 __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
986 if (eax & bit_XSAVEOPT)
987 set_feature (FEATURE_XSAVEOPT);
988 if (eax & bit_XSAVEC)
989 set_feature (FEATURE_XSAVEC);
990 if (eax & bit_XSAVES)
991 set_feature (FEATURE_XSAVES);
992 }
993
994 /* Get Advanced Features at level 0x14 (eax = 0x14, ecx = 0). */
995 if (max_cpuid_level >= 0x14)
996 {
997 __cpuid_count (0x14, 0, eax, ebx, ecx, edx);
998 if (ebx & bit_PTWRITE)
999 set_feature (FEATURE_PTWRITE);
1000 }
1001
1002 /* Get Advanced Features at level 0x19 (eax = 0x19). */
1003 if (max_cpuid_level >= 0x19)
1004 {
1005 __cpuid (0x19, eax, ebx, ecx, edx);
1006 /* Check if OS support keylocker. */
1007 if (ebx & bit_AESKLE)
1008 {
1009 set_feature (FEATURE_AESKLE);
1010 if (ebx & bit_WIDEKL)
1011 set_feature (FEATURE_WIDEKL);
1012 if (has_kl)
1013 set_feature (FEATURE_KL);
1014 }
1015 }
1016
1017 /* Get Advanced Features at level 0x24 (eax = 0x24). */
1018 if (avx10_set && max_cpuid_level >= 0x24)
1019 {
1020 __cpuid (0x24, eax, ebx, ecx, edx);
1021 version = ebx & 0xff;
1022 if (ebx & bit_AVX10_256)
1023 switch (version)
1024 {
1025 case 1:
1026 set_feature (FEATURE_AVX10_1_256);
1027 break;
1028 default:
1029 set_feature (FEATURE_AVX10_1_256);
1030 break;
1031 }
1032 if (ebx & bit_AVX10_512)
1033 switch (version)
1034 {
1035 case 1:
1036 set_feature (FEATURE_AVX10_1_512);
1037 break;
1038 default:
1039 set_feature (FEATURE_AVX10_1_512);
1040 break;
1041 }
1042 }
1043
1044 /* Check cpuid level of extended features. */
1045 __cpuid (0x80000000, ext_level, ebx, ecx, edx);
1046
1047 cpu_model2->__cpu_ext_level = ext_level;
1048
1049 if (ext_level >= 0x80000001)
1050 {
1051 __cpuid (0x80000001, eax, ebx, ecx, edx);
1052
1053 if (ecx & bit_SSE4a)
1054 set_feature (FEATURE_SSE4_A);
1055 if (ecx & bit_LAHF_LM)
1056 set_feature (FEATURE_LAHF_LM);
1057 if (ecx & bit_ABM)
1058 set_feature (FEATURE_ABM);
1059 if (ecx & bit_LWP)
1060 set_feature (FEATURE_LWP);
1061 if (ecx & bit_TBM)
1062 set_feature (FEATURE_TBM);
1063 if (ecx & bit_LZCNT)
1064 set_feature (FEATURE_LZCNT);
1065 if (ecx & bit_PRFCHW)
1066 set_feature (FEATURE_PRFCHW);
1067 if (ecx & bit_MWAITX)
1068 set_feature (FEATURE_MWAITX);
1069
1070 if (edx & bit_LM)
1071 set_feature (FEATURE_LM);
1072 if (edx & bit_3DNOWP)
1073 set_feature (FEATURE_3DNOWP);
1074 if (edx & bit_3DNOW)
1075 set_feature (FEATURE_3DNOW);
1076
1077 if (avx_usable)
1078 {
1079 if (ecx & bit_FMA4)
1080 set_feature (FEATURE_FMA4);
1081 if (ecx & bit_XOP)
1082 set_feature (FEATURE_XOP);
1083 }
1084 }
1085
1086 if (ext_level >= 0x80000008)
1087 {
1088 __cpuid (0x80000008, eax, ebx, ecx, edx);
1089 if (ebx & bit_CLZERO)
1090 set_feature (FEATURE_CLZERO);
1091 if (ebx & bit_WBNOINVD)
1092 set_feature (FEATURE_WBNOINVD);
1093 }
1094
1095#undef set_feature
1096}
1097
1098static inline int
1099cpu_indicator_init (struct __processor_model *cpu_model,
1100 struct __processor_model2 *cpu_model2,
1101 unsigned int *cpu_features2)
1102{
1103 unsigned int eax, ebx, ecx, edx;
1104
1105 int max_level;
1106 unsigned int vendor;
1107 unsigned int model, family;
1108 unsigned int extended_model, extended_family;
1109
1110 /* This function needs to run just once. */
1111 if (cpu_model->__cpu_vendor)
1112 return 0;
1113
1114 /* Assume cpuid insn present. Run in level 0 to get vendor id. */
1115 if (!__get_cpuid (leaf: 0, eax: &eax, ebx: &ebx, ecx: &ecx, edx: &edx))
1116 {
1117 cpu_model->__cpu_vendor = VENDOR_OTHER;
1118 return -1;
1119 }
1120
1121 vendor = ebx;
1122 max_level = eax;
1123
1124 if (max_level < 1)
1125 {
1126 cpu_model->__cpu_vendor = VENDOR_OTHER;
1127 return -1;
1128 }
1129
1130 if (!__get_cpuid (leaf: 1, eax: &eax, ebx: &ebx, ecx: &ecx, edx: &edx))
1131 {
1132 cpu_model->__cpu_vendor = VENDOR_OTHER;
1133 return -1;
1134 }
1135
1136 cpu_model2->__cpu_max_level = max_level;
1137
1138 model = (eax >> 4) & 0x0f;
1139 family = (eax >> 8) & 0x0f;
1140 extended_model = (eax >> 12) & 0xf0;
1141 extended_family = (eax >> 20) & 0xff;
1142
1143 /* Find available features. */
1144 get_available_features (cpu_model, cpu_model2, cpu_features2,
1145 ecx, edx);
1146
1147 if (vendor == signature_INTEL_ebx)
1148 {
1149 /* Adjust model and family for Intel CPUS. */
1150 if (family == 0x0f)
1151 {
1152 family += extended_family;
1153 model += extended_model;
1154 }
1155 else if (family == 0x06)
1156 model += extended_model;
1157
1158 cpu_model2->__cpu_family = family;
1159 cpu_model2->__cpu_model = model;
1160
1161 /* Get CPU type. */
1162 get_intel_cpu (cpu_model, cpu_model2, cpu_features2);
1163 cpu_model->__cpu_vendor = VENDOR_INTEL;
1164 }
1165 else if (vendor == signature_AMD_ebx)
1166 {
1167 /* Adjust model and family for AMD CPUS. */
1168 if (family == 0x0f)
1169 {
1170 family += extended_family;
1171 model += extended_model;
1172 }
1173
1174 cpu_model2->__cpu_family = family;
1175 cpu_model2->__cpu_model = model;
1176
1177 /* Get CPU type. */
1178 get_amd_cpu (cpu_model, cpu_model2, cpu_features2);
1179 cpu_model->__cpu_vendor = VENDOR_AMD;
1180 }
1181 else if (vendor == signature_CENTAUR_ebx && family < 0x07)
1182 cpu_model->__cpu_vendor = VENDOR_CENTAUR;
1183 else if (vendor == signature_SHANGHAI_ebx
1184 || vendor == signature_CENTAUR_ebx)
1185 {
1186 /* Adjust model and family for ZHAOXIN CPUS. */
1187 if (family == 0x07)
1188 model += extended_model;
1189
1190 cpu_model2->__cpu_family = family;
1191 cpu_model2->__cpu_model = model;
1192
1193 /* Get CPU type. */
1194 get_zhaoxin_cpu (cpu_model, cpu_model2, cpu_features2);
1195 cpu_model->__cpu_vendor = VENDOR_ZHAOXIN;
1196 }
1197 else if (vendor == signature_CYRIX_ebx)
1198 cpu_model->__cpu_vendor = VENDOR_CYRIX;
1199 else if (vendor == signature_NSC_ebx)
1200 cpu_model->__cpu_vendor = VENDOR_NSC;
1201 else
1202 cpu_model->__cpu_vendor = VENDOR_OTHER;
1203
1204 if (has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_LM)
1205 && has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_SSE2))
1206 {
1207 CHECK___builtin_cpu_supports ("x86-64");
1208 set_cpu_feature (cpu_model, cpu_features2,
1209 feature: FEATURE_X86_64_BASELINE);
1210 if (has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_CMPXCHG16B)
1211 && has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_POPCNT)
1212 && has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_LAHF_LM)
1213 && has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_SSE4_2))
1214 {
1215 CHECK___builtin_cpu_supports ("x86-64-v2");
1216 set_cpu_feature (cpu_model, cpu_features2,
1217 feature: FEATURE_X86_64_V2);
1218 if (has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_AVX2)
1219 && has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_BMI)
1220 && has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_BMI2)
1221 && has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_F16C)
1222 && has_cpu_feature (cpu_model, cpu_features2, feature: FEATURE_FMA)
1223 && has_cpu_feature (cpu_model, cpu_features2,
1224 feature: FEATURE_LZCNT)
1225 && has_cpu_feature (cpu_model, cpu_features2,
1226 feature: FEATURE_MOVBE))
1227 {
1228 CHECK___builtin_cpu_supports ("x86-64-v3");
1229 set_cpu_feature (cpu_model, cpu_features2,
1230 feature: FEATURE_X86_64_V3);
1231 if (has_cpu_feature (cpu_model, cpu_features2,
1232 feature: FEATURE_AVX512BW)
1233 && has_cpu_feature (cpu_model, cpu_features2,
1234 feature: FEATURE_AVX512CD)
1235 && has_cpu_feature (cpu_model, cpu_features2,
1236 feature: FEATURE_AVX512DQ)
1237 && has_cpu_feature (cpu_model, cpu_features2,
1238 feature: FEATURE_AVX512VL))
1239 {
1240 CHECK___builtin_cpu_supports ("x86-64-v4");
1241 set_cpu_feature (cpu_model, cpu_features2,
1242 feature: FEATURE_X86_64_V4);
1243 }
1244 }
1245 }
1246 }
1247
1248 gcc_assert (cpu_model->__cpu_vendor < VENDOR_MAX);
1249 gcc_assert (cpu_model->__cpu_type < CPU_TYPE_MAX);
1250 gcc_assert (cpu_model->__cpu_subtype < CPU_SUBTYPE_MAX);
1251
1252 return 0;
1253}
1254

/* Source: gcc/common/config/i386/cpuinfo.h.  */