/*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   ia32 architecture variants
 * @author  Michael Beck, Matthias Braun
 */
28 #include "lc_opts_enum.h"
32 #include "bearch_ia32_t.h"
33 #include "ia32_architecture.h"
38 #if defined(_M_IX86) || defined(_M_X64)
43 #if defined(__i386__) || defined(__x86_64__)
48 ia32_code_gen_config_t ia32_cg_config;
/**
 * CPU architectures and features as a bit set: the low bits select exactly
 * one base architecture (arch_mask), the high bits are orthogonal
 * instruction-set features.
 */
enum cpu_arch_features {
	arch_generic32       = 0x00000001, /**< no specific architecture */

	arch_i386            = 0x00000002, /**< i386 architecture */
	arch_i486            = 0x00000004, /**< i486 architecture */
	arch_pentium         = 0x00000008, /**< Pentium architecture */
	arch_ppro            = 0x00000010, /**< PentiumPro architecture */
	arch_netburst        = 0x00000020, /**< Netburst architecture */
	arch_nocona          = 0x00000040, /**< Nocona architecture */
	arch_core2           = 0x00000080, /**< Core2 architecture */
	arch_atom            = 0x00000100, /**< Atom architecture */

	arch_k6              = 0x00000200, /**< k6 architecture */
	arch_geode           = 0x00000400, /**< Geode architecture */
	arch_athlon          = 0x00000800, /**< Athlon architecture */
	arch_k8              = 0x00001000, /**< K8/Opteron architecture */
	arch_k10             = 0x00002000, /**< K10/Barcelona architecture */

	arch_mask            = 0x00003FFF, /**< covers all base architecture bits */

	arch_athlon_plus     = arch_athlon | arch_k8 | arch_k10,
	arch_all_amd         = arch_k6 | arch_geode | arch_athlon_plus,

	arch_feature_mmx     = 0x00004000, /**< MMX instructions */
	arch_feature_cmov    = 0x00008000, /**< cmov instructions */
	arch_feature_p6_insn = 0x00010000, /**< PentiumPro instructions */
	arch_feature_sse1    = 0x00020000, /**< SSE1 instructions */
	arch_feature_sse2    = 0x00040000, /**< SSE2 instructions */
	arch_feature_sse3    = 0x00080000, /**< SSE3 instructions */
	arch_feature_ssse3   = 0x00100000, /**< SSSE3 instructions */
	arch_feature_3DNow   = 0x00200000, /**< 3DNow! instructions */
	arch_feature_3DNowE  = 0x00400000, /**< Enhanced 3DNow! instructions */
	arch_feature_64bit   = 0x00800000, /**< x86_64 support */
	arch_feature_sse4_1  = 0x01000000, /**< SSE4.1 instructions */
	arch_feature_sse4_2  = 0x02000000, /**< SSE4.2 instructions */
	arch_feature_sse4a   = 0x04000000, /**< SSE4a instructions */
	arch_feature_popcnt  = 0x08000000, /**< popcnt instruction */

	/* convenience combinations: each SSE level implies the previous one */
	arch_mmx_insn    = arch_feature_mmx,                         /**< MMX instructions */
	arch_sse1_insn   = arch_feature_sse1   | arch_mmx_insn,      /**< SSE1 instructions, include MMX */
	arch_sse2_insn   = arch_feature_sse2   | arch_sse1_insn,     /**< SSE2 instructions, include SSE1 */
	arch_sse3_insn   = arch_feature_sse3   | arch_sse2_insn,     /**< SSE3 instructions, include SSE2 */
	arch_ssse3_insn  = arch_feature_ssse3  | arch_sse3_insn,     /**< SSSE3 instructions, include SSE3 */
	arch_sse4_1_insn = arch_feature_sse4_1 | arch_ssse3_insn,    /**< SSE4.1 instructions, include SSSE3 */
	arch_sse4_2_insn = arch_feature_sse4_2 | arch_sse4_1_insn,   /**< SSE4.2 instructions, include SSE4.1 */
	arch_sse4a_insn  = arch_feature_sse4a  | arch_ssse3_insn,    /**< SSE4a instructions, include SSSE3 */

	arch_3DNow_insn  = arch_feature_3DNow  | arch_feature_mmx,   /**< 3DNow! instructions, including MMX */
	arch_3DNowE_insn = arch_feature_3DNowE | arch_3DNow_insn,    /**< Enhanced 3DNow! instructions */
	arch_64bit_insn  = arch_feature_64bit  | arch_sse2_insn,     /**< x86_64 support, includes SSE2 */
};
/** Test whether any of the feature bits f is set in the bit set x. */
#define FLAGS(x, f) (((x) & (f)) != 0)
110 typedef enum cpu_support {
111 cpu_generic = arch_generic32,
114 cpu_i386 = arch_i386,
115 cpu_i486 = arch_i486,
116 cpu_pentium = arch_pentium,
117 cpu_pentium_mmx = arch_pentium | arch_mmx_insn,
118 cpu_pentium_pro_generic = arch_ppro | arch_feature_p6_insn,
119 cpu_pentium_pro = arch_ppro | arch_feature_cmov | arch_feature_p6_insn,
120 cpu_pentium_2 = arch_ppro | arch_feature_cmov | arch_feature_p6_insn | arch_mmx_insn,
121 cpu_pentium_3 = arch_ppro | arch_feature_cmov | arch_feature_p6_insn | arch_sse1_insn,
122 cpu_pentium_m = arch_ppro | arch_feature_cmov | arch_feature_p6_insn | arch_sse2_insn,
123 cpu_netburst_generic = arch_netburst | arch_feature_p6_insn,
124 cpu_pentium_4 = arch_netburst | arch_feature_cmov | arch_feature_p6_insn | arch_sse2_insn,
125 cpu_prescott = arch_nocona | arch_feature_cmov | arch_feature_p6_insn | arch_sse3_insn,
126 cpu_nocona = arch_nocona | arch_feature_cmov | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
127 cpu_core2_generic = arch_core2 | arch_feature_p6_insn,
128 cpu_core2 = arch_core2 | arch_feature_cmov | arch_feature_p6_insn | arch_64bit_insn | arch_ssse3_insn,
129 cpu_penryn = arch_core2 | arch_feature_cmov | arch_feature_p6_insn | arch_64bit_insn | arch_sse4_1_insn,
130 cpu_atom_generic = arch_atom | arch_feature_p6_insn,
131 cpu_atom = arch_atom | arch_feature_cmov | arch_feature_p6_insn | arch_ssse3_insn,
134 cpu_k6_generic = arch_k6,
135 cpu_k6 = arch_k6 | arch_mmx_insn,
136 cpu_k6_PLUS = arch_k6 | arch_3DNow_insn,
137 cpu_geode_generic = arch_geode,
138 cpu_geode = arch_geode | arch_sse1_insn | arch_3DNowE_insn,
139 cpu_athlon_generic = arch_athlon | arch_feature_p6_insn,
140 cpu_athlon_old = arch_athlon | arch_3DNowE_insn | arch_feature_cmov | arch_feature_p6_insn,
141 cpu_athlon = arch_athlon | arch_sse1_insn | arch_3DNowE_insn | arch_feature_cmov | arch_feature_p6_insn,
142 cpu_athlon64 = arch_athlon | arch_sse2_insn | arch_3DNowE_insn | arch_feature_cmov | arch_feature_p6_insn | arch_64bit_insn,
143 cpu_k8_generic = arch_k8 | arch_feature_p6_insn,
144 cpu_k8 = arch_k8 | arch_3DNowE_insn | arch_feature_cmov | arch_feature_p6_insn | arch_64bit_insn,
145 cpu_k8_sse3 = arch_k8 | arch_3DNowE_insn | arch_feature_cmov | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
146 cpu_k10_generic = arch_k10 | arch_feature_p6_insn,
147 cpu_k10 = arch_k10 | arch_3DNowE_insn | arch_feature_cmov | arch_feature_p6_insn | arch_feature_popcnt | arch_64bit_insn | arch_sse4a_insn,
150 cpu_winchip_c6 = arch_i486 | arch_feature_mmx,
151 cpu_winchip2 = arch_i486 | arch_feature_mmx | arch_feature_3DNow,
152 cpu_c3 = arch_i486 | arch_feature_mmx | arch_feature_3DNow,
153 cpu_c3_2 = arch_ppro | arch_feature_cmov | arch_feature_p6_insn | arch_sse1_insn, /* really no 3DNow! */
158 static int opt_size = 0;
159 static int emit_machcode = 0;
160 static cpu_support arch = cpu_generic;
161 static cpu_support opt_arch = cpu_generic;
162 static int fpu_arch = 0;
163 static int opt_cc = 1;
164 static int opt_unsafe_floatconv = 0;
166 /* instruction set architectures. */
167 static const lc_opt_enum_int_items_t arch_items[] = {
168 { "i386", cpu_i386 },
169 { "i486", cpu_i486 },
170 { "i586", cpu_pentium },
171 { "pentium", cpu_pentium },
172 { "pentium-mmx", cpu_pentium_mmx },
173 { "i686", cpu_pentium_pro },
174 { "pentiumpro", cpu_pentium_pro },
175 { "pentium2", cpu_pentium_2 },
176 { "p2", cpu_pentium_2 },
177 { "pentium3", cpu_pentium_3 },
178 { "pentium3m", cpu_pentium_3 },
179 { "p3", cpu_pentium_3 },
180 { "pentium-m", cpu_pentium_m },
181 { "pm", cpu_pentium_m },
182 { "pentium4", cpu_pentium_4 },
183 { "pentium4m", cpu_pentium_4 },
184 { "p4", cpu_pentium_4 },
185 { "prescott", cpu_prescott },
186 { "nocona", cpu_nocona },
187 { "merom", cpu_core2 },
188 { "core2", cpu_core2 },
189 { "penryn", cpu_penryn },
190 { "atom", cpu_atom },
193 { "k6-2", cpu_k6_PLUS },
194 { "k6-3", cpu_k6_PLUS },
195 { "geode", cpu_geode },
196 { "athlon", cpu_athlon_old },
197 { "athlon-tbird", cpu_athlon },
198 { "athlon-4", cpu_athlon },
199 { "athlon-xp", cpu_athlon },
200 { "athlon-mp", cpu_athlon },
201 { "athlon64", cpu_athlon64 },
203 { "opteron", cpu_k8 },
204 { "athlon-fx", cpu_k8 },
205 { "k8-sse3", cpu_k8_sse3 },
206 { "opteron-sse3", cpu_k8_sse3 },
208 { "barcelona", cpu_k10 },
209 { "amdfam10", cpu_k10 },
211 { "winchip-c6", cpu_winchip_c6, },
212 { "winchip2", cpu_winchip2 },
214 { "c3-2", cpu_c3_2 },
216 { "generic", cpu_generic },
217 { "generic32", cpu_generic },
220 { "native", cpu_autodetect },
226 static lc_opt_enum_int_var_t arch_var = {
227 (int*) &arch, arch_items
230 static lc_opt_enum_int_var_t opt_arch_var = {
231 (int*) &opt_arch, arch_items
234 static const lc_opt_enum_int_items_t fp_unit_items[] = {
235 { "x87" , IA32_FPU_ARCH_X87 },
236 { "sse2", IA32_FPU_ARCH_SSE2 },
237 { "softfloat", IA32_FPU_ARCH_SOFTFLOAT },
238 { NULL, IA32_FPU_ARCH_NONE }
241 static lc_opt_enum_int_var_t fp_unit_var = {
242 &fpu_arch, fp_unit_items
245 static const lc_opt_table_entry_t ia32_architecture_options[] = {
246 LC_OPT_ENT_BOOL ("size", "optimize for size", &opt_size),
247 LC_OPT_ENT_ENUM_INT("arch", "select the instruction architecture", &arch_var),
248 LC_OPT_ENT_ENUM_INT("opt", "optimize for instruction architecture", &opt_arch_var),
249 LC_OPT_ENT_ENUM_INT("fpunit", "select the floating point unit", &fp_unit_var),
250 LC_OPT_ENT_NEGBOOL ("nooptcc", "do not optimize calling convention", &opt_cc),
251 LC_OPT_ENT_BOOL ("unsafe_floatconv", "do unsafe floating point controlword optimisations", &opt_unsafe_floatconv),
252 LC_OPT_ENT_BOOL ("machcode", "output machine code instead of assembler", &emit_machcode),
/** Per-architecture instruction cost and alignment parameters. */
typedef struct insn_const {
	int add_cost;       /**< cost of an add instruction */
	int lea_cost;       /**< cost of a lea instruction */
	int const_shf_cost; /**< cost of a constant shift instruction */
	int cost_mul_start; /**< starting cost of a multiply instruction */
	int cost_mul_bit;   /**< cost of multiply for every set bit */
	unsigned function_alignment;       /**< logarithm for alignment of function labels */
	unsigned label_alignment;          /**< logarithm for alignment of loops labels */
	unsigned label_alignment_max_skip; /**< maximum skip for alignment of loops labels */
} insn_const;
267 /* costs for optimizing for size */
268 static const insn_const size_cost = {
269 2, /* cost of an add instruction */
270 3, /* cost of a lea instruction */
271 3, /* cost of a constant shift instruction */
272 4, /* starting cost of a multiply instruction */
273 0, /* cost of multiply for every set bit */
274 0, /* logarithm for alignment of function labels */
275 0, /* logarithm for alignment of loops labels */
276 0, /* maximum skip for alignment of loops labels */
279 /* costs for the i386 */
280 static const insn_const i386_cost = {
281 1, /* cost of an add instruction */
282 1, /* cost of a lea instruction */
283 3, /* cost of a constant shift instruction */
284 9, /* starting cost of a multiply instruction */
285 1, /* cost of multiply for every set bit */
286 2, /* logarithm for alignment of function labels */
287 2, /* logarithm for alignment of loops labels */
288 3, /* maximum skip for alignment of loops labels */
291 /* costs for the i486 */
292 static const insn_const i486_cost = {
293 1, /* cost of an add instruction */
294 1, /* cost of a lea instruction */
295 2, /* cost of a constant shift instruction */
296 12, /* starting cost of a multiply instruction */
297 1, /* cost of multiply for every set bit */
298 4, /* logarithm for alignment of function labels */
299 4, /* logarithm for alignment of loops labels */
300 15, /* maximum skip for alignment of loops labels */
303 /* costs for the Pentium */
304 static const insn_const pentium_cost = {
305 1, /* cost of an add instruction */
306 1, /* cost of a lea instruction */
307 1, /* cost of a constant shift instruction */
308 11, /* starting cost of a multiply instruction */
309 0, /* cost of multiply for every set bit */
310 4, /* logarithm for alignment of function labels */
311 4, /* logarithm for alignment of loops labels */
312 7, /* maximum skip for alignment of loops labels */
315 /* costs for the Pentium Pro */
316 static const insn_const pentiumpro_cost = {
317 1, /* cost of an add instruction */
318 1, /* cost of a lea instruction */
319 1, /* cost of a constant shift instruction */
320 4, /* starting cost of a multiply instruction */
321 0, /* cost of multiply for every set bit */
322 4, /* logarithm for alignment of function labels */
323 4, /* logarithm for alignment of loops labels */
324 10, /* maximum skip for alignment of loops labels */
327 /* costs for the K6 */
328 static const insn_const k6_cost = {
329 1, /* cost of an add instruction */
330 2, /* cost of a lea instruction */
331 1, /* cost of a constant shift instruction */
332 3, /* starting cost of a multiply instruction */
333 0, /* cost of multiply for every set bit */
334 5, /* logarithm for alignment of function labels */
335 5, /* logarithm for alignment of loops labels */
336 7, /* maximum skip for alignment of loops labels */
339 /* costs for the Geode */
340 static const insn_const geode_cost = {
341 1, /* cost of an add instruction */
342 1, /* cost of a lea instruction */
343 1, /* cost of a constant shift instruction */
344 7, /* starting cost of a multiply instruction */
345 0, /* cost of multiply for every set bit */
346 0, /* logarithm for alignment of function labels */
347 0, /* logarithm for alignment of loops labels */
348 0, /* maximum skip for alignment of loops labels */
351 /* costs for the Athlon */
352 static const insn_const athlon_cost = {
353 1, /* cost of an add instruction */
354 2, /* cost of a lea instruction */
355 1, /* cost of a constant shift instruction */
356 5, /* starting cost of a multiply instruction */
357 0, /* cost of multiply for every set bit */
358 4, /* logarithm for alignment of function labels */
359 4, /* logarithm for alignment of loops labels */
360 7, /* maximum skip for alignment of loops labels */
363 /* costs for the Opteron/K8 */
364 static const insn_const k8_cost = {
365 1, /* cost of an add instruction */
366 2, /* cost of a lea instruction */
367 1, /* cost of a constant shift instruction */
368 3, /* starting cost of a multiply instruction */
369 0, /* cost of multiply for every set bit */
371 4, /* logarithm for alignment of function labels */
372 4, /* logarithm for alignment of loops labels */
373 7, /* maximum skip for alignment of loops labels */
381 /* costs for the K10 */
382 static const insn_const k10_cost = {
383 1, /* cost of an add instruction */
384 2, /* cost of a lea instruction */
385 1, /* cost of a constant shift instruction */
386 3, /* starting cost of a multiply instruction */
387 0, /* cost of multiply for every set bit */
388 5, /* logarithm for alignment of function labels */
389 5, /* logarithm for alignment of loops labels */
390 7, /* maximum skip for alignment of loops labels */
393 /* costs for the Pentium 4 */
394 static const insn_const netburst_cost = {
395 1, /* cost of an add instruction */
396 3, /* cost of a lea instruction */
397 4, /* cost of a constant shift instruction */
398 15, /* starting cost of a multiply instruction */
399 0, /* cost of multiply for every set bit */
400 4, /* logarithm for alignment of function labels */
401 4, /* logarithm for alignment of loops labels */
402 7, /* maximum skip for alignment of loops labels */
405 /* costs for the Nocona and Core */
406 static const insn_const nocona_cost = {
407 1, /* cost of an add instruction */
408 1, /* cost of a lea instruction */
409 1, /* cost of a constant shift instruction */
410 10, /* starting cost of a multiply instruction */
411 0, /* cost of multiply for every set bit */
412 4, /* logarithm for alignment of function labels */
413 4, /* logarithm for alignment of loops labels */
414 7, /* maximum skip for alignment of loops labels */
417 /* costs for the Core2 */
418 static const insn_const core2_cost = {
419 1, /* cost of an add instruction */
420 1, /* cost of a lea instruction */
421 1, /* cost of a constant shift instruction */
422 3, /* starting cost of a multiply instruction */
423 0, /* cost of multiply for every set bit */
424 4, /* logarithm for alignment of function labels */
425 4, /* logarithm for alignment of loops labels */
426 10, /* maximum skip for alignment of loops labels */
429 /* costs for the generic32 */
430 static const insn_const generic32_cost = {
431 1, /* cost of an add instruction */
432 2, /* cost of a lea instruction */
433 1, /* cost of a constant shift instruction */
434 4, /* starting cost of a multiply instruction */
435 0, /* cost of multiply for every set bit */
436 4, /* logarithm for alignment of function labels */
437 4, /* logarithm for alignment of loops labels */
438 7, /* maximum skip for alignment of loops labels */
441 static const insn_const *arch_costs = &generic32_cost;
443 static void set_arch_costs(void)
446 arch_costs = &size_cost;
449 switch (opt_arch & arch_mask) {
450 case arch_i386: arch_costs = &i386_cost; break;
451 case arch_i486: arch_costs = &i486_cost; break;
452 case arch_pentium: arch_costs = &pentium_cost; break;
453 case arch_ppro: arch_costs = &pentiumpro_cost; break;
454 case arch_netburst: arch_costs = &netburst_cost; break;
455 case arch_nocona: arch_costs = &nocona_cost; break;
456 case arch_core2: arch_costs = &core2_cost; break;
457 case arch_k6: arch_costs = &k6_cost; break;
458 case arch_geode: arch_costs = &geode_cost; break;
459 case arch_athlon: arch_costs = &athlon_cost; break;
460 case arch_k8: arch_costs = &k8_cost; break;
461 case arch_k10: arch_costs = &k10_cost; break;
463 case arch_generic32: arch_costs = &generic32_cost; break;
467 /* Evaluate the costs of an instruction. */
468 int ia32_evaluate_insn(insn_kind kind, const ir_mode *mode, ir_tarval *tv)
474 cost = arch_costs->cost_mul_start;
475 if (arch_costs->cost_mul_bit > 0) {
476 char *bitstr = get_tarval_bitpattern(tv);
479 for (i = 0; bitstr[i] != '\0'; ++i) {
480 if (bitstr[i] == '1') {
481 cost += arch_costs->cost_mul_bit;
486 if (get_mode_size_bits(mode) <= 32)
488 /* 64bit mul supported, approx 4times of a 32bit mul*/
491 /* lea is only supported for 32 bit */
492 if (get_mode_size_bits(mode) <= 32)
493 return arch_costs->lea_cost;
494 /* in 64bit mode, the Lea cost are at wort 2 shifts and one add */
495 return 2 * arch_costs->add_cost + 2 * (2 * arch_costs->const_shf_cost);
498 if (get_mode_size_bits(mode) <= 32)
499 return arch_costs->add_cost;
500 /* 64bit add/sub supported, double the cost */
501 return 2 * arch_costs->add_cost;
503 if (get_mode_size_bits(mode) <= 32)
504 return arch_costs->const_shf_cost;
505 /* 64bit shift supported, double the cost */
506 return 2 * arch_costs->const_shf_cost;
508 return arch_costs->add_cost;
514 /* auto detection code only works if we're on an x86 cpu obviously */
516 typedef struct x86_cpu_info_t {
517 unsigned char cpu_stepping;
518 unsigned char cpu_model;
519 unsigned char cpu_family;
520 unsigned char cpu_type;
521 unsigned char cpu_ext_model;
522 unsigned char cpu_ext_family;
523 unsigned edx_features;
524 unsigned ecx_features;
525 unsigned add_features;
529 CPUID_FEAT_ECX_SSE3 = 1 << 0,
530 CPUID_FEAT_ECX_PCLMUL = 1 << 1,
531 CPUID_FEAT_ECX_DTES64 = 1 << 2,
532 CPUID_FEAT_ECX_MONITOR = 1 << 3,
533 CPUID_FEAT_ECX_DS_CPL = 1 << 4,
534 CPUID_FEAT_ECX_VMX = 1 << 5,
535 CPUID_FEAT_ECX_SMX = 1 << 6,
536 CPUID_FEAT_ECX_EST = 1 << 7,
537 CPUID_FEAT_ECX_TM2 = 1 << 8,
538 CPUID_FEAT_ECX_SSSE3 = 1 << 9,
539 CPUID_FEAT_ECX_CID = 1 << 10,
540 CPUID_FEAT_ECX_FMA = 1 << 12,
541 CPUID_FEAT_ECX_CX16 = 1 << 13,
542 CPUID_FEAT_ECX_ETPRD = 1 << 14,
543 CPUID_FEAT_ECX_PDCM = 1 << 15,
544 CPUID_FEAT_ECX_DCA = 1 << 18,
545 CPUID_FEAT_ECX_SSE4_1 = 1 << 19,
546 CPUID_FEAT_ECX_SSE4_2 = 1 << 20,
547 CPUID_FEAT_ECX_x2APIC = 1 << 21,
548 CPUID_FEAT_ECX_MOVBE = 1 << 22,
549 CPUID_FEAT_ECX_POPCNT = 1 << 23,
550 CPUID_FEAT_ECX_AES = 1 << 25,
551 CPUID_FEAT_ECX_XSAVE = 1 << 26,
552 CPUID_FEAT_ECX_OSXSAVE = 1 << 27,
553 CPUID_FEAT_ECX_AVX = 1 << 28,
555 CPUID_FEAT_EDX_FPU = 1 << 0,
556 CPUID_FEAT_EDX_VME = 1 << 1,
557 CPUID_FEAT_EDX_DE = 1 << 2,
558 CPUID_FEAT_EDX_PSE = 1 << 3,
559 CPUID_FEAT_EDX_TSC = 1 << 4,
560 CPUID_FEAT_EDX_MSR = 1 << 5,
561 CPUID_FEAT_EDX_PAE = 1 << 6,
562 CPUID_FEAT_EDX_MCE = 1 << 7,
563 CPUID_FEAT_EDX_CX8 = 1 << 8,
564 CPUID_FEAT_EDX_APIC = 1 << 9,
565 CPUID_FEAT_EDX_SEP = 1 << 11,
566 CPUID_FEAT_EDX_MTRR = 1 << 12,
567 CPUID_FEAT_EDX_PGE = 1 << 13,
568 CPUID_FEAT_EDX_MCA = 1 << 14,
569 CPUID_FEAT_EDX_CMOV = 1 << 15,
570 CPUID_FEAT_EDX_PAT = 1 << 16,
571 CPUID_FEAT_EDX_PSE36 = 1 << 17,
572 CPUID_FEAT_EDX_PSN = 1 << 18,
573 CPUID_FEAT_EDX_CLF = 1 << 19,
574 CPUID_FEAT_EDX_DTES = 1 << 21,
575 CPUID_FEAT_EDX_ACPI = 1 << 22,
576 CPUID_FEAT_EDX_MMX = 1 << 23,
577 CPUID_FEAT_EDX_FXSR = 1 << 24,
578 CPUID_FEAT_EDX_SSE = 1 << 25,
579 CPUID_FEAT_EDX_SSE2 = 1 << 26,
580 CPUID_FEAT_EDX_SS = 1 << 27,
581 CPUID_FEAT_EDX_HTT = 1 << 28,
582 CPUID_FEAT_EDX_TM1 = 1 << 29,
583 CPUID_FEAT_EDX_IA64 = 1 << 30,
584 CPUID_FEAT_EDX_PBE = 1 << 31
587 static cpu_support auto_detect_Intel(x86_cpu_info_t const *info)
589 cpu_support auto_arch = cpu_generic;
591 unsigned family = (info->cpu_ext_family << 4) | info->cpu_family;
592 unsigned model = (info->cpu_ext_model << 4) | info->cpu_model;
596 auto_arch = cpu_i486;
599 auto_arch = cpu_pentium;
603 case 0x01: /* PentiumPro */
604 case 0x03: /* Pentium II Model 3 */
605 case 0x05: /* Pentium II Model 5 */
606 case 0x06: /* Celeron Model 6 */
607 case 0x07: /* Pentium III Model 7 */
608 case 0x08: /* Pentium III Model 8 */
609 case 0x09: /* Pentium M Model 9 */
610 case 0x0A: /* Pentium III Model 0A */
611 case 0x0B: /* Pentium III Model 0B */
612 case 0x0D: /* Pentium M Model 0D */
613 case 0x0E: /* Core Model 0E */
614 auto_arch = cpu_pentium_pro_generic;
616 case 0x0F: /* Core2 Model 0F */
617 case 0x15: /* Intel EP80579 */
618 case 0x16: /* Celeron Model 16 */
619 case 0x17: /* Core2 Model 17 */
620 auto_arch = cpu_core2_generic;
629 case 0x00: /* Pentium 4 Model 00 */
630 case 0x01: /* Pentium 4 Model 01 */
631 case 0x02: /* Pentium 4 Model 02 */
632 case 0x03: /* Pentium 4 Model 03 */
633 case 0x04: /* Pentium 4 Model 04 */
634 case 0x06: /* Pentium 4 Model 06 */
635 auto_arch = cpu_netburst_generic;
637 case 0x1A: /* Core i7 */
638 auto_arch = cpu_core2_generic;
640 case 0x1C: /* Atom */
641 auto_arch = cpu_atom_generic;
643 case 0x1D: /* Xeon MP */
644 auto_arch = cpu_core2_generic;
659 static cpu_support auto_detect_AMD(x86_cpu_info_t const *info) {
660 cpu_support auto_arch = cpu_generic;
662 unsigned family, model;
664 if (info->cpu_family == 0x0F) {
665 family = (info->cpu_ext_family << 4) | info->cpu_family;
666 model = (info->cpu_ext_model << 4) | info->cpu_model;
668 family = info->cpu_family;
669 model = info->cpu_model;
674 auto_arch = cpu_i486;
678 case 0x00: /* K5 Model 0 */
679 case 0x01: /* K5 Model 1 */
680 case 0x02: /* K5 Model 2 */
681 case 0x03: /* K5 Model 3 */
682 auto_arch = cpu_pentium;
684 case 0x06: /* K6 Model 6 */
685 case 0x07: /* K6 Model 7 */
686 case 0x08: /* K6-2 Model 8 */
687 case 0x09: /* K6-III Model 9 */
688 case 0x0D: /* K6-2+ or K6-III+ */
689 auto_arch = cpu_k6_generic;
691 case 0x0A: /* Geode LX */
692 auto_arch = cpu_geode_generic;
696 auto_arch = cpu_k6_generic;
702 case 0x01: /* Athlon Model 1 */
703 case 0x02: /* Athlon Model 2 */
704 case 0x03: /* Duron Model 3 */
705 case 0x04: /* Athlon Model 4 */
706 case 0x06: /* Athlon MP/Mobile Athlon Model 6 */
707 case 0x07: /* Mobile Duron Model 7 */
708 case 0x08: /* Athlon (TH/AP core) including Geode NX */
709 case 0x0A: /* Athlon (BT core) */
710 default: /* unknown K7 */
711 auto_arch = cpu_athlon_generic;
716 auto_arch = cpu_k8_generic;
719 case 0x2F: /* AMD Family 11h */
720 auto_arch = cpu_k10_generic;
740 static void x86_cpuid(cpuid_registers *regs, unsigned level)
742 #if defined(__GNUC__)
743 # if defined(__PIC__) && !defined(__amd64) // GCC cannot handle EBX in PIC
749 : "=a" (regs->r.eax), "=r" (regs->r.ebx), "=c" (regs->r.ecx), "=d" (regs->r.edx)
754 : "=a" (regs->r.eax), "=b" (regs->r.ebx), "=c" (regs->r.ecx), "=d" (regs->r.edx)
758 #elif defined(_MSC_VER)
759 __cpuid(regs->bulk, level);
761 # error CPUID is missing
/**
 * Return non-zero iff the cpuid instruction is available.
 * (Name keeps the original's "toogle" spelling — it is part of the
 * file-internal interface used by autodetect_arch().)
 */
static int x86_toogle_cpuid(void)
{
	unsigned eflags_before = 0, eflags_after = 0;

#if defined(__GNUC__)
#ifndef __amd64
	/* If bit 21 of the EFLAGS register can be changed, the cpuid instruction is available */
	__asm__ (
		"pushf\n\t"
		"popl %0\n\t"
		"movl %0, %1\n\t"
		"xorl $0x00200000, %1\n\t"
		"pushl %1\n\t"
		"popf\n\t"
		"pushf\n\t"
		"popl %1"
		: "=r" (eflags_before), "=r" (eflags_after) :: "cc"
	);
#else
	/* cpuid always available on 64bit */
	eflags_after = 0x00200000;
#endif
#elif defined(_MSC_VER)
#if defined(_M_IX86)
	__asm {
		pushfd
		pop eax
		mov eflags_before, eax
		xor eax, 0x00200000
		push eax
		popfd
		pushfd
		pop eax
		mov eflags_after, eax
	}
#else
	/* cpuid always available on 64bit */
	eflags_after = 0x00200000;
#endif
#endif
	return (eflags_before ^ eflags_after) & 0x00200000;
}
807 static void autodetect_arch(void)
809 cpu_support auto_arch = cpu_generic;
811 /* We use the cpuid instruction to detect the CPU features */
812 if (x86_toogle_cpuid()) {
813 cpuid_registers regs;
815 x86_cpu_info_t cpu_info;
819 memcpy(&vendorid[0], ®s.r.ebx, 4);
820 memcpy(&vendorid[4], ®s.r.edx, 4);
821 memcpy(&vendorid[8], ®s.r.ecx, 4);
824 /* get processor info and feature bits */
827 cpu_info.cpu_stepping = (regs.r.eax >> 0) & 0x0F;
828 cpu_info.cpu_model = (regs.r.eax >> 4) & 0x0F;
829 cpu_info.cpu_family = (regs.r.eax >> 8) & 0x0F;
830 cpu_info.cpu_type = (regs.r.eax >> 12) & 0x03;
831 cpu_info.cpu_ext_model = (regs.r.eax >> 16) & 0x0F;
832 cpu_info.cpu_ext_family = (regs.r.eax >> 20) & 0xFF;
833 cpu_info.edx_features = regs.r.edx;
834 cpu_info.ecx_features = regs.r.ecx;
835 cpu_info.add_features = regs.r.ebx;
837 if (0 == strcmp(vendorid, "GenuineIntel")) {
838 auto_arch = auto_detect_Intel(&cpu_info);
839 } else if (0 == strcmp(vendorid, "AuthenticAMD")) {
840 auto_arch = auto_detect_AMD(&cpu_info);
841 } else if (0 == strcmp(vendorid, "Geode by NSC")) {
842 auto_arch = cpu_geode_generic;
845 if (cpu_info.edx_features & CPUID_FEAT_EDX_CMOV)
846 auto_arch |= arch_feature_cmov;
847 if (cpu_info.edx_features & CPUID_FEAT_EDX_MMX)
848 auto_arch |= arch_feature_mmx;
849 if (cpu_info.edx_features & CPUID_FEAT_EDX_SSE)
850 auto_arch |= arch_feature_sse1;
851 if (cpu_info.edx_features & CPUID_FEAT_EDX_SSE2)
852 auto_arch |= arch_feature_sse2;
854 if (cpu_info.ecx_features & CPUID_FEAT_ECX_SSE3)
855 auto_arch |= arch_feature_sse3;
856 if (cpu_info.ecx_features & CPUID_FEAT_ECX_SSSE3)
857 auto_arch |= arch_feature_ssse3;
858 if (cpu_info.ecx_features & CPUID_FEAT_ECX_SSE4_1)
859 auto_arch |= arch_feature_sse4_1;
860 if (cpu_info.ecx_features & CPUID_FEAT_ECX_SSE4_2)
861 auto_arch |= arch_feature_sse4_2;
862 if (cpu_info.ecx_features & CPUID_FEAT_ECX_POPCNT)
863 auto_arch |= arch_feature_popcnt;
867 opt_arch = auto_arch;
869 #endif /* NATIVE_X86 */
871 void ia32_setup_cg_config(void)
873 ia32_code_gen_config_t *const c = &ia32_cg_config;
874 memset(c, 0, sizeof(*c));
879 if (arch == cpu_autodetect)
883 c->optimize_size = opt_size != 0;
884 /* on newer intel cpus mov, pop is often faster than leave although it has a
886 c->use_leave = FLAGS(opt_arch, arch_i386 | arch_all_amd | arch_core2) || opt_size;
887 /* P4s don't like inc/decs because they only partially write the flags
888 * register which produces false dependencies */
889 c->use_incdec = !FLAGS(opt_arch, arch_netburst | arch_nocona | arch_core2 | arch_geode) || opt_size;
890 c->use_softfloat = FLAGS(fpu_arch, IA32_FPU_ARCH_SOFTFLOAT);
891 c->use_sse2 = FLAGS(fpu_arch, IA32_FPU_ARCH_SSE2) && FLAGS(arch, arch_feature_sse2);
892 c->use_ffreep = FLAGS(opt_arch, arch_athlon_plus);
893 c->use_ftst = !FLAGS(arch, arch_feature_p6_insn);
894 /* valgrind can't cope with femms yet and the usefulness of the optimization
895 * is questionable anyway */
897 c->use_femms = FLAGS(opt_arch, arch_athlon_plus) &&
898 FLAGS(arch, arch_feature_mmx | arch_all_amd);
902 c->use_fucomi = FLAGS(arch, arch_feature_p6_insn);
903 c->use_cmov = FLAGS(arch, arch_feature_cmov);
904 c->use_modeD_moves = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_geode);
905 c->use_add_esp_4 = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_geode) && !opt_size;
906 c->use_add_esp_8 = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_geode | arch_i386 | arch_i486) && !opt_size;
907 c->use_sub_esp_4 = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro) && !opt_size;
908 c->use_sub_esp_8 = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_i386 | arch_i486) && !opt_size;
909 c->use_imul_mem_imm32 = !FLAGS(opt_arch, arch_k8 | arch_k10) || opt_size;
910 c->use_pxor = FLAGS(opt_arch, arch_netburst);
911 c->use_mov_0 = FLAGS(opt_arch, arch_k6) && !opt_size;
912 c->use_short_sex_eax = !FLAGS(opt_arch, arch_k6) && !opt_size;
913 c->use_pad_return = FLAGS(opt_arch, arch_athlon_plus) && !opt_size;
914 c->use_bt = FLAGS(opt_arch, arch_core2 | arch_athlon_plus) || opt_size;
915 c->use_fisttp = FLAGS(opt_arch & arch, arch_feature_sse3);
916 c->use_sse_prefetch = FLAGS(arch, (arch_feature_3DNowE | arch_feature_sse1));
917 c->use_3dnow_prefetch = FLAGS(arch, arch_feature_3DNow);
918 c->use_popcnt = FLAGS(arch, arch_feature_popcnt);
919 c->use_bswap = (arch & arch_mask) >= arch_i486;
920 c->optimize_cc = opt_cc;
921 c->use_unsafe_floatconv = opt_unsafe_floatconv;
922 c->emit_machcode = emit_machcode;
924 c->function_alignment = arch_costs->function_alignment;
925 c->label_alignment = arch_costs->label_alignment;
926 c->label_alignment_max_skip = arch_costs->label_alignment_max_skip;
928 c->label_alignment_factor =
929 FLAGS(opt_arch, arch_i386 | arch_i486) || opt_size ? 0 :
930 opt_arch & arch_all_amd ? 3 :
934 void ia32_init_architecture(void)
936 lc_opt_entry_t *be_grp, *ia32_grp;
938 memset(&ia32_cg_config, 0, sizeof(ia32_cg_config));
940 be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
941 ia32_grp = lc_opt_get_grp(be_grp, "ia32");
943 lc_opt_add_table(ia32_grp, ia32_architecture_options);