+ switch (family) {
+ case 0x04:
+ auto_arch = arch_i486;
+ break;
+ case 0x05:
+ switch (model) {
+ case 0x00: /* K5 Model 0 */
+ case 0x01: /* K5 Model 1 */
+ case 0x02: /* K5 Model 2 */
+ case 0x03: /* K5 Model 3 */
+ auto_arch = arch_pentium;
+ break;
+ case 0x06: /* K6 Model 6 */
+ case 0x07: /* K6 Model 7 */
+ case 0x08: /* K6-2 Model 8 */
+ case 0x09: /* K6-III Model 9 */
+ case 0x0D: /* K6-2+ or K6-III+ */
+ auto_arch = arch_k6;
+ break;
+ case 0x0A: /* Geode LX */
+ auto_arch = arch_geode;
+ break;
+ default:
+ /* unknown K6 */
+ auto_arch = arch_k6;
+ break;
+ }
+ break;
+ case 0x06:
+ switch (model) {
+ case 0x01: /* Athlon Model 1 */
+ case 0x02: /* Athlon Model 2 */
+ case 0x03: /* Duron Model 3 */
+ case 0x04: /* Athlon Model 4 */
+ case 0x06: /* Athlon MP/Mobile Athlon Model 6 */
+ case 0x07: /* Mobile Duron Model 7 */
+ case 0x08: /* Athlon (TH/AP core) including Geode NX */
+ case 0x0A: /* Athlon (BT core) */
+ auto_arch = arch_athlon | arch_feature_p6_insn;
+ break;
+ default:
+ /* unknown K7 */
+ auto_arch = arch_athlon | arch_feature_p6_insn;
+ break;
+ }
+ break;
+ case 0x0F:
+ auto_arch = arch_k8 | arch_feature_p6_insn;
+ break;
+ case 0x1F:
+ case 0x2F: /* AMD Family 11h */
+ auto_arch = arch_k10 | arch_feature_p6_insn;
+ break;
+ default:
+ /* unknown */
+ break;
+ }
+
+ return auto_arch;
+}
+
/* One cpuid result: the four output registers of a single cpuid invocation.
 * The union exists so the MSVC intrinsic __cpuid(), which fills an int[4],
 * can write the very same storage that the named r.eax..r.edx members read
 * (see x86_cpuid() below). Keep the struct member order EAX,EBX,ECX,EDX --
 * it must match the intrinsic's output order. */
typedef union {
	struct {
		unsigned eax;
		unsigned ebx;
		unsigned ecx;
		unsigned edx;
	} r;
	int bulk[4]; /* aliased view for __cpuid(regs->bulk, level) */
} cpuid_registers;
+
/**
 * Execute the cpuid instruction for the given leaf and store the resulting
 * EAX..EDX values in *regs.
 *
 * @param regs   receives the register values reported by cpuid
 * @param level  cpuid function/leaf number, passed in EAX
 */
static void x86_cpuid(cpuid_registers *regs, unsigned level)
{
#if defined(__GNUC__)
# if defined(__PIC__) && !defined(__amd64) // GCC cannot handle EBX in PIC
	/* In 32bit PIC code EBX is the GOT pointer and may not appear as an asm
	 * operand, so preserve it around cpuid and copy its value out by hand.
	 * NOTE(review): operand %1 is a plain "=r"; this relies on GCC never
	 * allocating EBX for it, which holds because EBX is fixed under
	 * __PIC__ -- confirm if this constraint set is ever reused elsewhere. */
	__asm (
		"pushl %%ebx\n\t"
		"cpuid\n\t"
		"movl %%ebx, %1\n\t"
		"popl %%ebx"
	: "=a" (regs->r.eax), "=r" (regs->r.ebx), "=c" (regs->r.ecx), "=d" (regs->r.edx)
	: "a" (level)
	);
# else
	/* Non-PIC or 64bit: EBX can be named directly as an output. */
	__asm ("cpuid\n\t"
	: "=a" (regs->r.eax), "=b" (regs->r.ebx), "=c" (regs->r.ecx), "=d" (regs->r.edx)
	: "a" (level)
	);
# endif
#elif defined(_MSC_VER)
	/* MSVC intrinsic writes EAX..EDX into an int[4]; bulk aliases r (see
	 * cpuid_registers). */
	__cpuid(regs->bulk, level);
#else
# error CPUID is missing
#endif
}
+
/**
 * Check whether the cpuid instruction is available.
 *
 * On 32bit x86 this is detected by toggling bit 21 (the ID flag) of EFLAGS:
 * if the bit sticks, cpuid exists. On 64bit targets cpuid is always present.
 *
 * NOTE(review): the name keeps the historical "toogle" (sic) spelling;
 * renaming would require updating every caller, so it is left as-is here.
 *
 * @return non-zero iff cpuid can be executed
 */
static int x86_toogle_cpuid(void)
{
	unsigned eflags_before = 0, eflags_after = 0;

#if defined(__GNUC__)
#ifdef __i386__
	/* If bit 21 of the EFLAGS register can be changed, the cpuid instruction is available */
	__asm__(
		"pushf\n\t"
		"popl %0\n\t"
		"movl %0, %1\n\t"
		"xorl $0x00200000, %1\n\t"
		"pushl %1\n\t"
		"popf\n\t"
		"pushf\n\t"
		"popl %1"
	: "=r" (eflags_before), "=r" (eflags_after) :: "cc"
	);
#else
	/* cpuid always available on 64bit */
	return true;
#endif
#elif defined(_MSC_VER)
#if defined(_M_IX86)
	/* Same EFLAGS bit-21 toggle as above, in MSVC 32bit inline assembly. */
	__asm {
		pushfd
		pop eax
		mov eflags_before, eax
		xor eax, 0x00200000
		push eax
		popfd
		pushfd
		pop eax
		mov eflags_after, eax
	}
#else
	return true;
#endif
#endif
	/* Non-zero iff the ID bit actually changed, i.e. cpuid is supported. */
	return (eflags_before ^ eflags_after) & 0x00200000;
}
+
+static void autodetect_arch(void)
+{
+ cpu_support auto_arch = cpu_generic;
+
+ /* We use the cpuid instruction to detect the CPU features */
+ if (x86_toogle_cpuid()) {
+ cpuid_registers regs;
+ char vendorid[13];
+ x86_cpu_info_t cpu_info;
+
+ /* get vendor ID */
+ x86_cpuid(®s, 0);
+ memcpy(&vendorid[0], ®s.r.ebx, 4);
+ memcpy(&vendorid[4], ®s.r.edx, 4);
+ memcpy(&vendorid[8], ®s.r.ecx, 4);
+ vendorid[12] = '\0';
+
+ /* get processor info and feature bits */
+ x86_cpuid(®s, 1);
+
+ cpu_info.cpu_stepping = (regs.r.eax >> 0) & 0x0F;
+ cpu_info.cpu_model = (regs.r.eax >> 4) & 0x0F;
+ cpu_info.cpu_family = (regs.r.eax >> 8) & 0x0F;
+ cpu_info.cpu_type = (regs.r.eax >> 12) & 0x03;
+ cpu_info.cpu_ext_model = (regs.r.eax >> 16) & 0x0F;
+ cpu_info.cpu_ext_family = (regs.r.eax >> 20) & 0xFF;
+ cpu_info.edx_features = regs.r.edx;
+ cpu_info.ecx_features = regs.r.ecx;
+ cpu_info.add_features = regs.r.ebx;
+
+ if (0 == strcmp(vendorid, "GenuineIntel")) {
+ auto_arch = auto_detect_Intel(&cpu_info);
+ } else if (0 == strcmp(vendorid, "AuthenticAMD")) {
+ auto_arch = auto_detect_AMD(&cpu_info);
+ } else if (0 == strcmp(vendorid, "Geode by NSC")) {
+ auto_arch = arch_geode;
+ }
+
+ if (cpu_info.edx_features & CPUID_FEAT_EDX_CMOV)
+ auto_arch |= arch_feature_cmov;
+ if (cpu_info.edx_features & CPUID_FEAT_EDX_MMX)
+ auto_arch |= arch_feature_mmx;
+ if (cpu_info.edx_features & CPUID_FEAT_EDX_SSE)
+ auto_arch |= arch_feature_sse1;
+ if (cpu_info.edx_features & CPUID_FEAT_EDX_SSE2)
+ auto_arch |= arch_feature_sse2;
+
+ if (cpu_info.ecx_features & CPUID_FEAT_ECX_SSE3)
+ auto_arch |= arch_feature_sse3;
+ if (cpu_info.ecx_features & CPUID_FEAT_ECX_SSSE3)
+ auto_arch |= arch_feature_ssse3;
+ if (cpu_info.ecx_features & CPUID_FEAT_ECX_SSE4_1)
+ auto_arch |= arch_feature_sse4_1;
+ if (cpu_info.ecx_features & CPUID_FEAT_ECX_SSE4_2)
+ auto_arch |= arch_feature_sse4_2;
+ if (cpu_info.ecx_features & CPUID_FEAT_ECX_POPCNT)
+ auto_arch |= arch_feature_popcnt;
+ }
+
+ arch = auto_arch;
+ opt_arch = auto_arch;
+}
+#endif /* NATIVE_X86 */
+
+void ia32_setup_cg_config(void)
+{
+ ia32_code_gen_config_t *const c = &ia32_cg_config;
+ memset(c, 0, sizeof(*c));
+