be_lower_for_target is now a simple function in the public API
[libfirm] / ir / be / ia32 / ia32_architecture.c
index 6245d38..bb6a82b 100644
 #include "bearch_ia32_t.h"
 #include "ia32_architecture.h"
 
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
 ia32_code_gen_config_t  ia32_cg_config;
 
 /**
@@ -70,12 +74,18 @@ enum cpu_arch_features {
        arch_feature_3DNow    = 0x00100000, /**< 3DNow! instructions */
        arch_feature_3DNowE   = 0x00200000, /**< Enhanced 3DNow! instructions */
        arch_feature_64bit    = 0x00400000, /**< x86_64 support */
+       arch_feature_sse4_1   = 0x00800000, /**< SSE4.1 instructions */
+       arch_feature_sse4_2   = 0x01000000, /**< SSE4.2 instructions */
+       arch_feature_sse4a    = 0x02000000, /**< SSE4a instructions */
 
        arch_mmx_insn     = arch_feature_mmx,                         /**< MMX instructions */
-       arch_sse1_insn    = arch_feature_sse1  | arch_mmx_insn,       /**< SSE1 instructions, include MMX */
-       arch_sse2_insn    = arch_feature_sse2  | arch_sse1_insn,      /**< SSE2 instructions, include SSE1 */
-       arch_sse3_insn    = arch_feature_sse3  | arch_sse2_insn,      /**< SSE3 instructions, include SSE2 */
-       arch_ssse3_insn   = arch_feature_ssse3 | arch_sse3_insn,      /**< SSSE3 instructions, include SSE3 */
+       arch_sse1_insn    = arch_feature_sse1   | arch_mmx_insn,      /**< SSE1 instructions, include MMX */
+       arch_sse2_insn    = arch_feature_sse2   | arch_sse1_insn,     /**< SSE2 instructions, include SSE1 */
+       arch_sse3_insn    = arch_feature_sse3   | arch_sse2_insn,     /**< SSE3 instructions, include SSE2 */
+       arch_ssse3_insn   = arch_feature_ssse3  | arch_sse3_insn,     /**< SSSE3 instructions, include SSE3 */
+       arch_sse4_1_insn  = arch_feature_sse4_1 | arch_ssse3_insn,    /**< SSE4.1 instructions, include SSSE3 */
+       arch_sse4_2_insn  = arch_feature_sse4_2 | arch_sse4_1_insn,   /**< SSE4.2 instructions, include SSE4.1 */
+       arch_sse4a_insn   = arch_feature_sse4a  | arch_ssse3_insn,    /**< SSE4a instructions, include SSSE3 */
 
        arch_3DNow_insn   = arch_feature_3DNow | arch_feature_mmx,    /**< 3DNow! instructions, including MMX */
        arch_3DNowE_insn  = arch_feature_3DNowE | arch_3DNow_insn,    /**< Enhanced 3DNow! instructions */
@@ -87,10 +97,10 @@ enum cpu_arch_features {
 /**
  * CPU's.
  */
-enum cpu_support {
+typedef enum cpu_support {
        cpu_generic     = arch_generic32,
 
-       /* intel CPU's */
+       /* intel CPUs */
        cpu_i386        = arch_i386,
        cpu_i486        = arch_i486,
        cpu_pentium     = arch_pentium,
@@ -103,25 +113,30 @@ enum cpu_support {
        cpu_prescott    = arch_nocona | arch_feature_p6_insn | arch_sse3_insn,
        cpu_nocona      = arch_nocona | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
        cpu_core2       = arch_core2 | arch_feature_p6_insn | arch_64bit_insn | arch_ssse3_insn,
+       cpu_penryn      = arch_core2 | arch_feature_p6_insn | arch_64bit_insn | arch_sse4_1_insn,
 
-       /* AMD CPU's */
+       /* AMD CPUs */
        cpu_k6          = arch_k6 | arch_mmx_insn,
        cpu_k6_PLUS     = arch_k6 | arch_3DNow_insn,
        cpu_geode       = arch_geode  | arch_sse1_insn | arch_3DNowE_insn,
+       cpu_athlon_old  = arch_athlon | arch_3DNowE_insn | arch_feature_p6_insn,
        cpu_athlon      = arch_athlon | arch_sse1_insn | arch_3DNowE_insn | arch_feature_p6_insn,
        cpu_athlon64    = arch_athlon | arch_sse2_insn | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn,
        cpu_k8          = arch_k8  | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn,
        cpu_k8_sse3     = arch_k8  | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
-       cpu_k10         = arch_k10 | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
+       cpu_k10         = arch_k10 | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn | arch_sse4a_insn,
 
-       /* other CPU's */
+       /* other CPUs */
        cpu_winchip_c6  = arch_i486 | arch_feature_mmx,
        cpu_winchip2    = arch_i486 | arch_feature_mmx | arch_feature_3DNow,
        cpu_c3          = arch_i486 | arch_feature_mmx | arch_feature_3DNow,
-       cpu_c3_2        = arch_ppro | arch_sse1_insn,  /* really no 3DNow! */
-};
+       cpu_c3_2        = arch_ppro | arch_feature_p6_insn | arch_sse1_insn, /* really no 3DNow! */
+
+       cpu_autodetect  = 0,
+} cpu_support;
 
 static int         opt_size             = 0;
+static int         emit_machcode        = 0;
 static cpu_support arch                 = cpu_generic;
 static cpu_support opt_arch             = cpu_generic;
 static int         use_sse2             = 0;
@@ -151,12 +166,13 @@ static const lc_opt_enum_int_items_t arch_items[] = {
        { "nocona",       cpu_nocona },
        { "merom",        cpu_core2 },
        { "core2",        cpu_core2 },
+       { "penryn",       cpu_penryn },
 
        { "k6",           cpu_k6 },
        { "k6-2",         cpu_k6_PLUS },
        { "k6-3",         cpu_k6_PLUS },
        { "geode",        cpu_geode },
-       { "athlon",       cpu_athlon },
+       { "athlon",       cpu_athlon_old },
        { "athlon-tbird", cpu_athlon },
        { "athlon-4",     cpu_athlon },
        { "athlon-xp",    cpu_athlon },
@@ -178,6 +194,8 @@ static const lc_opt_enum_int_items_t arch_items[] = {
 
        { "generic",      cpu_generic },
        { "generic32",    cpu_generic },
+
+       { "native",       cpu_autodetect },
        { NULL,           0 }
 };
 
@@ -211,6 +229,8 @@ static const lc_opt_table_entry_t ia32_architecture_options[] = {
                          &opt_cc, 1),
        LC_OPT_ENT_BIT("unsafe_floatconv", "do unsafe floating point controlword "
                       "optimisations", &opt_unsafe_floatconv, 1),
+       LC_OPT_ENT_BOOL("machcode", "output machine code instead of assembler",
+                       &emit_machcode),
        LC_OPT_LAST
 };
 
@@ -321,7 +341,7 @@ static const insn_const athlon_cost = {
        7,   /* maximum skip for alignment of loops labels */
 };
 
-/* costs for the Opteron/K8/K10 */
+/* costs for the Opteron/K8 */
 static const insn_const k8_cost = {
        1,   /* cost of an add instruction */
        2,   /* cost of a lea instruction */
@@ -408,55 +428,31 @@ static void set_arch_costs(void)
                return;
        }
        switch (opt_arch & arch_mask) {
-       case arch_i386:
-               arch_costs = &i386_cost;
-               break;
-       case arch_i486:
-               arch_costs = &i486_cost;
-               break;
-       case arch_pentium:
-               arch_costs = &pentium_cost;
-               break;
-       case arch_ppro:
-               arch_costs = &pentiumpro_cost;
-               break;
-       case arch_netburst:
-               arch_costs = &netburst_cost;
-               break;
-       case arch_nocona:
-               arch_costs = &nocona_cost;
-               break;
-       case arch_core2:
-               arch_costs = &core2_cost;
-               break;
-       case arch_k6:
-               arch_costs = &k6_cost;
-               break;
-       case arch_geode:
-               arch_costs = &geode_cost;
-               break;
-       case arch_athlon:
-               arch_costs = &athlon_cost;
-               break;
-       case arch_k8:
-               arch_costs = &k8_cost;
-               break;
-       case arch_k10:
-               arch_costs = &k10_cost;
-               break;
-       case arch_generic32:
+       case arch_i386:      arch_costs = &i386_cost;       break;
+       case arch_i486:      arch_costs = &i486_cost;       break;
+       case arch_pentium:   arch_costs = &pentium_cost;    break;
+       case arch_ppro:      arch_costs = &pentiumpro_cost; break;
+       case arch_netburst:  arch_costs = &netburst_cost;   break;
+       case arch_nocona:    arch_costs = &nocona_cost;     break;
+       case arch_core2:     arch_costs = &core2_cost;      break;
+       case arch_k6:        arch_costs = &k6_cost;         break;
+       case arch_geode:     arch_costs = &geode_cost;      break;
+       case arch_athlon:    arch_costs = &athlon_cost;     break;
+       case arch_k8:        arch_costs = &k8_cost;         break;
+       case arch_k10:       arch_costs = &k10_cost;        break;
        default:
-               arch_costs = &generic32_cost;
+       case arch_generic32: arch_costs = &generic32_cost;  break;
        }
 }
 
 /* Evaluate the costs of an instruction. */
-int ia32_evaluate_insn(insn_kind kind, tarval *tv) {
+int ia32_evaluate_insn(insn_kind kind, const ir_mode *mode, ir_tarval *tv)
+{
        int cost;
 
        switch (kind) {
        case MUL:
-               cost =  arch_costs->cost_mul_start;
+               cost = arch_costs->cost_mul_start;
                if (arch_costs->cost_mul_bit > 0) {
                        char *bitstr = get_tarval_bitpattern(tv);
                        int i;
@@ -468,14 +464,27 @@ int ia32_evaluate_insn(insn_kind kind, tarval *tv) {
                        }
                        free(bitstr);
                }
-               return cost;
+               if (get_mode_size_bits(mode) <= 32)
+                       return cost;
+               /* 64bit mul supported, costs approx. 4 times as much as a 32bit mul */
+               return 4 * cost;
        case LEA:
-               return arch_costs->lea_cost;
+               /* lea is only supported for 32 bit */
+               if (get_mode_size_bits(mode) <= 32)
+                       return arch_costs->lea_cost;
+               /* in 64bit mode, the lea costs are at worst 2 shifts and one add */
+               return 2 * arch_costs->add_cost + 2 * (2 * arch_costs->const_shf_cost);
        case ADD:
        case SUB:
-               return arch_costs->add_cost;
+               if (get_mode_size_bits(mode) <= 32)
+                       return arch_costs->add_cost;
+               /* 64bit add/sub supported, double the cost */
+               return 2 * arch_costs->add_cost;
        case SHIFT:
-               return arch_costs->const_shf_cost;
+               if (get_mode_size_bits(mode) <= 32)
+                       return arch_costs->const_shf_cost;
+               /* 64bit shift supported, double the cost */
+               return 2 * arch_costs->const_shf_cost;
        case ZERO:
                return arch_costs->add_cost;
        default:
@@ -483,66 +492,313 @@ int ia32_evaluate_insn(insn_kind kind, tarval *tv) {
        }
 }
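(Background on the "approx. 4 times" estimate for MUL above: lowering a 64-bit
multiply to 32-bit operations uses the identity
(a*2^32 + b) * (c*2^32 + d) mod 2^64 = ((a*d + b*c) mod 2^32) * 2^32 + b*d,
i.e. three 32-bit multiplies plus additions, so roughly four 32-bit multiplies
is a reasonable cost estimate.)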
 
+typedef struct cpu_info_t {
+       unsigned char cpu_stepping;
+       unsigned char cpu_model;
+       unsigned char cpu_family;
+       unsigned char cpu_type;
+       unsigned char cpu_ext_model;
+       unsigned char cpu_ext_family;
+       unsigned      edx_features;
+       unsigned      ecx_features;
+       unsigned      add_features;
+} cpu_info_t;
+
+static cpu_support auto_detect_Intel(cpu_info_t const *info)
+{
+       cpu_support auto_arch = cpu_generic;
+
+       unsigned family = (info->cpu_ext_family << 4) | info->cpu_family;
+       unsigned model  = (info->cpu_ext_model  << 4) | info->cpu_model;
+
+       switch (family) {
+       case 4:
+               auto_arch = arch_i486;
+               break;
+       case 5:
+               auto_arch = arch_pentium;
+               break;
+       case 6:
+               switch (model) {
+               case 0x01: /* PentiumPro */
+               case 0x03: /* Pentium II Model 3 */
+               case 0x05: /* Pentium II Model 5 */
+               case 0x06: /* Celeron Model 6 */
+               case 0x07: /* Pentium III Model 7 */
+               case 0x08: /* Pentium III Model 8 */
+               case 0x09: /* Pentium M Model 9 */
+               case 0x0A: /* Pentium III Model 0A */
+               case 0x0B: /* Pentium III Model 0B */
+               case 0x0D: /* Pentium M Model 0D */
+                       auto_arch = arch_ppro;
+                       break;
+               case 0x0E: /* Core Model 0E */
+                       auto_arch = arch_ppro;
+                       break;
+               case 0x0F: /* Core2 Model 0F */
+               case 0x15: /* Intel EP80579 */
+               case 0x16: /* Celeron Model 16 */
+               case 0x17: /* Core2 Model 17 */
+                       auto_arch = arch_core2;
+                       break;
+               case 0x1A: /* Core i7 */
+                       auto_arch = arch_core2;
+                       break;
+               case 0x1C: /* Atom */
+                       auto_arch = arch_atom;
+                       break;
+               case 0x1D: /* Xeon MP (Dunnington) */
+                       auto_arch = arch_core2;
+                       break;
+               default:
+                       /* unknown */
+                       break;
+               }
+               break;
+       case 15:
+               switch (model) {
+               case 0x00: /* Pentium 4 Model 00 */
+               case 0x01: /* Pentium 4 Model 01 */
+               case 0x02: /* Pentium 4 Model 02 */
+               case 0x03: /* Pentium 4 Model 03 */
+               case 0x04: /* Pentium 4 Model 04 */
+               case 0x06: /* Pentium 4 Model 06 */
+                       auto_arch = arch_netburst;
+                       break;
+               default:
+                       /* unknown */
+                       break;
+               }
+               break;
+       default:
+               /* unknown */
+               break;
+       }
+
+       if (info->edx_features & (1<<23)) auto_arch |= arch_feature_mmx;
+       if (info->edx_features & (1<<25)) auto_arch |= arch_feature_sse1;
+       if (info->edx_features & (1<<26)) auto_arch |= arch_feature_sse2;
+
+       if (info->ecx_features & (1<< 0)) auto_arch |= arch_feature_sse3;
+       if (info->ecx_features & (1<< 9)) auto_arch |= arch_feature_ssse3;
+       if (info->ecx_features & (1<<19)) auto_arch |= arch_feature_sse4_1;
+       if (info->ecx_features & (1<<20)) auto_arch |= arch_feature_sse4_2;
+
+       return auto_arch;
+}
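For reference, the Intel manuals decode the family/model fields of cpuid leaf 1
slightly differently from the plain shift-or used above. A minimal sketch of that
documented convention, for illustration only (display_family/display_model are
not identifiers from this patch):

	/* Intel's documented decoding of cpuid leaf 1, EAX */
	unsigned display_family = info->cpu_family;
	unsigned display_model  = info->cpu_model;
	if (info->cpu_family == 0x0F)
		display_family += info->cpu_ext_family;
	if (info->cpu_family == 0x0F || info->cpu_family == 0x06)
		display_model |= info->cpu_ext_model << 4;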
+
+static cpu_support auto_detect_AMD(cpu_info_t const *info)
+{
+       cpu_support auto_arch = cpu_generic;
+
+       unsigned family, model;
+
+       if (info->cpu_family == 0x0F) {
+               family = (info->cpu_ext_family << 4) | info->cpu_family;
+               model  = (info->cpu_ext_model  << 4) | info->cpu_model;
+       } else {
+               family = info->cpu_family;
+               model  = info->cpu_model;
+       }
+
+       switch (family) {
+       case 0x04:
+               auto_arch = arch_i486;
+               break;
+       case 0x05:
+       case 0x06: // actually, 6 means K7 family
+               auto_arch = arch_k6;
+               break;
+       case 0x0F:
+               auto_arch = arch_k8;
+               break;
+       case 0x1F:
+       case 0x2F:
+               auto_arch = arch_k10;
+               break;
+       default:
+               /* unknown */
+               break;
+       }
+
+       if (info->edx_features & (1<<23)) auto_arch |= arch_feature_mmx;
+       if (info->edx_features & (1<<25)) auto_arch |= arch_feature_sse1;
+       if (info->edx_features & (1<<26)) auto_arch |= arch_feature_sse2;
+
+       if (info->ecx_features & (1<< 0)) auto_arch |= arch_feature_sse3;
+       if (info->ecx_features & (1<< 9)) auto_arch |= arch_feature_ssse3;
+       if (info->ecx_features & (1<<19)) auto_arch |= arch_feature_sse4_1;
+       if (info->ecx_features & (1<<20)) auto_arch |= arch_feature_sse4_2;
+
+       return auto_arch;
+}
+
+typedef union {
+       struct {
+        unsigned eax;
+        unsigned ebx;
+        unsigned ecx;
+        unsigned edx;
+       } r;
+       int bulk[4];
+} cpuid_registers;
+
+static void x86_cpuid(cpuid_registers *regs, unsigned level)
+{
+#if defined(__GNUC__)
+       /* on 32bit, PIC code requires ebx to be preserved around cpuid, so save
+        * and restore it by hand (the pushl/popl mnemonics restrict this path to
+        * 32bit hosts) */
+       __asm ("pushl %%ebx\n\t"
+              "cpuid\n\t"
+              "movl  %%ebx, %1\n\t"
+              "popl  %%ebx\n\t"
+       : "=a" (regs->r.eax), "=r" (regs->r.ebx), "=c" (regs->r.ecx), "=d" (regs->r.edx)
+       : "a" (level)
+       );
+#elif defined(_MSC_VER)
+       __cpuid(regs->bulk, level);
+#endif
+}
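Newer versions of GCC and clang also ship a <cpuid.h> header whose __get_cpuid()
helper preserves ebx (relevant for PIC code) and checks that the requested leaf
exists. A minimal sketch of an equivalent wrapper built on it, for illustration
only (query_cpuid is not an identifier from this patch):

	#include <cpuid.h>

	/* fill regs from the given cpuid leaf; returns 0 if the leaf is unsupported */
	static int query_cpuid(cpuid_registers *regs, unsigned level)
	{
		unsigned eax, ebx, ecx, edx;
		if (!__get_cpuid(level, &eax, &ebx, &ecx, &edx))
			return 0;
		regs->r.eax = eax;
		regs->r.ebx = ebx;
		regs->r.ecx = ecx;
		regs->r.edx = edx;
		return 1;
	}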
+
+static int x86_toggle_cpuid(void)
+{
+       unsigned eflags_before = 0, eflags_after = 0;
+
+#if defined(__GNUC__)
+#ifdef __i386__
+       /* If bit 21 of the EFLAGS register can be changed, the cpuid instruction is available */
+       __asm__(
+               "pushf\n\t"
+               "popl %0\n\t"
+               "movl %0, %1\n\t"
+               "xorl $0x00200000, %1\n\t"
+               "pushl %1\n\t"
+               "popf\n\t"
+               "pushf\n\t"
+               "popl %1"
+               : "=r" (eflags_before), "=r" (eflags_after) :: "cc"
+               );
+#else
+       /* cpuid is always available on 64bit */
+       return 1;
+#endif
+#elif defined(_MSC_VER)
+#if defined(_M_IX86)
+       __asm {
+               pushfd
+               pop eax
+               mov eflags_before, eax
+               xor eax, 0x00200000
+               push eax
+               popfd
+               pushfd
+               pop eax
+               mov eflags_after, eax
+       }
+#else
+       return 1;
+#endif
+#endif
+       return (eflags_before ^ eflags_after) & 0x00200000;
+}
+
+static void autodetect_arch(void)
+{
+       cpu_support auto_arch = cpu_generic;
+
+       /* We use the cpuid instruction to detect the CPU features */
+       if (x86_toggle_cpuid()) {
+               cpuid_registers   regs;
+               unsigned          highest_level;
+               char              vendorid[13];
+               struct cpu_info_t cpu_info;
+
+               /* get vendor ID */
+               x86_cpuid(&regs, 0);
+               highest_level = regs.r.eax;
+               memcpy(&vendorid[0], &regs.r.ebx, 4);
+               memcpy(&vendorid[4], &regs.r.edx, 4);
+               memcpy(&vendorid[8], &regs.r.ecx, 4);
+               vendorid[12] = '\0';
+
+               /* get processor info and feature bits */
+               x86_cpuid(&regs, 1);
+
+               cpu_info.cpu_stepping   = (regs.r.eax >>  0) & 0x0F;
+               cpu_info.cpu_model      = (regs.r.eax >>  4) & 0x0F;
+               cpu_info.cpu_family     = (regs.r.eax >>  8) & 0x0F;
+               cpu_info.cpu_type       = (regs.r.eax >> 12) & 0x03;
+               cpu_info.cpu_ext_model  = (regs.r.eax >> 16) & 0x0F;
+               cpu_info.cpu_ext_family = (regs.r.eax >> 20) & 0xFF;
+               cpu_info.edx_features   = regs.r.edx;
+               cpu_info.ecx_features   = regs.r.ecx;
+               cpu_info.add_features   = regs.r.ebx;
+
+               if        (0 == strcmp(vendorid, "GenuineIntel")) {
+                       auto_arch = auto_detect_Intel(&cpu_info);
+               } else if (0 == strcmp(vendorid, "AuthenticAMD")) {
+                       auto_arch = auto_detect_AMD(&cpu_info);
+               }
+       }
+
+       arch     = auto_arch;
+       opt_arch = auto_arch;
+}
+
 void ia32_setup_cg_config(void)
 {
-       memset(&ia32_cg_config, 0, sizeof(ia32_cg_config));
+       ia32_code_gen_config_t *const c = &ia32_cg_config;
+       memset(c, 0, sizeof(*c));
 
        set_arch_costs();
 
-       ia32_cg_config.optimize_size        = opt_size != 0;
-       /* on newer intel cpus mov, pop is often faster then leave although it has a
+       if (arch == cpu_autodetect) autodetect_arch();
+
+       c->optimize_size        = opt_size != 0;
+       /* on newer intel cpus mov, pop is often faster than leave although it has a
         * longer opcode */
-       ia32_cg_config.use_leave            = FLAGS(opt_arch, arch_i386 | arch_all_amd | arch_core2) || opt_size;
+       c->use_leave            = FLAGS(opt_arch, arch_i386 | arch_all_amd | arch_core2) || opt_size;
        /* P4s don't like inc/decs because they only partially write the flags
-          register which produces false dependencies */
-       ia32_cg_config.use_incdec           = !FLAGS(opt_arch, arch_netburst | arch_nocona | arch_core2 | arch_geode) || opt_size;
-       ia32_cg_config.use_sse2             = use_sse2 && FLAGS(arch, arch_feature_sse2);
-       ia32_cg_config.use_ffreep           = FLAGS(opt_arch, arch_athlon_plus);
-       ia32_cg_config.use_ftst             = !FLAGS(arch, arch_feature_p6_insn);
-       /* valgrind can't cope with femms yet and the usefulness of the optimization is questionable anyway */
+        * register which produces false dependencies */
+       c->use_incdec           = !FLAGS(opt_arch, arch_netburst | arch_nocona | arch_core2 | arch_geode) || opt_size;
+       c->use_sse2             = use_sse2 && FLAGS(arch, arch_feature_sse2);
+       c->use_ffreep           = FLAGS(opt_arch, arch_athlon_plus);
+       c->use_ftst             = !FLAGS(arch, arch_feature_p6_insn);
+       /* valgrind can't cope with femms yet and the usefulness of the optimization
+        * is questionable anyway */
 #if 0
-       ia32_cg_config.use_femms            = FLAGS(opt_arch, arch_athlon_plus) &&
-                                             FLAGS(arch, arch_feature_mmx | arch_all_amd);
+       c->use_femms            = FLAGS(opt_arch, arch_athlon_plus) &&
+               FLAGS(arch, arch_feature_mmx | arch_all_amd);
 #else
-       ia32_cg_config.use_femms            = 0;
+       c->use_femms            = 0;
 #endif
-       ia32_cg_config.use_fucomi           = FLAGS(arch, arch_feature_p6_insn);
-       ia32_cg_config.use_cmov             = FLAGS(arch, arch_feature_p6_insn);
-       ia32_cg_config.use_modeD_moves      = FLAGS(opt_arch, arch_athlon_plus | arch_geode | arch_ppro |
-                                                   arch_netburst | arch_nocona | arch_core2 | arch_generic32);
-       ia32_cg_config.use_add_esp_4        = FLAGS(opt_arch, arch_geode | arch_athlon_plus |
-                                                   arch_netburst | arch_nocona | arch_core2 | arch_generic32) &&
-                                             !opt_size;
-       ia32_cg_config.use_add_esp_8        = FLAGS(opt_arch, arch_geode | arch_athlon_plus |
-                                                   arch_i386 | arch_i486 | arch_ppro | arch_netburst |
-                                                   arch_nocona | arch_core2 | arch_generic32) &&
-                                             !opt_size;
-       ia32_cg_config.use_sub_esp_4        = FLAGS(opt_arch, arch_athlon_plus | arch_ppro |
-                                                   arch_netburst | arch_nocona | arch_core2 | arch_generic32) &&
-                                             !opt_size;
-       ia32_cg_config.use_sub_esp_8        = FLAGS(opt_arch, arch_athlon_plus | arch_i386 | arch_i486 |
-                                                   arch_ppro | arch_netburst | arch_nocona | arch_core2 | arch_generic32) &&
-                                             !opt_size;
-       ia32_cg_config.use_imul_mem_imm32   = !FLAGS(opt_arch, arch_k8 | arch_k10) || opt_size;
-       ia32_cg_config.use_pxor             = FLAGS(opt_arch, arch_netburst);
-       ia32_cg_config.use_mov_0            = FLAGS(opt_arch, arch_k6) && !opt_size;
-       ia32_cg_config.use_pad_return       = FLAGS(opt_arch, arch_athlon_plus | arch_core2 | arch_generic32) && !opt_size;
-       ia32_cg_config.use_bt               = FLAGS(opt_arch, arch_core2 | arch_athlon_plus) || opt_size;
-       ia32_cg_config.use_fisttp           = FLAGS(opt_arch & arch, arch_feature_sse3);
-       ia32_cg_config.optimize_cc          = opt_cc;
-       ia32_cg_config.use_unsafe_floatconv = opt_unsafe_floatconv;
-
-       ia32_cg_config.function_alignment       = arch_costs->function_alignment;
-       ia32_cg_config.label_alignment          = arch_costs->label_alignment;
-       ia32_cg_config.label_alignment_max_skip = arch_costs->label_alignment_max_skip;
-
-       if (opt_arch & (arch_i386 | arch_i486) || opt_size) {
-               ia32_cg_config.label_alignment_factor = 0;
-       } else if (opt_arch & arch_all_amd) {
-               ia32_cg_config.label_alignment_factor = 3;
-       } else {
-               ia32_cg_config.label_alignment_factor = 2;
-       }
+       c->use_fucomi           = FLAGS(arch, arch_feature_p6_insn);
+       c->use_cmov             = FLAGS(arch, arch_feature_p6_insn);
+       c->use_modeD_moves      = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_geode);
+       c->use_add_esp_4        = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 |             arch_geode)                         && !opt_size;
+       c->use_add_esp_8        = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_geode | arch_i386 | arch_i486) && !opt_size;
+       c->use_sub_esp_4        = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro)                                      && !opt_size;
+       c->use_sub_esp_8        = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro |              arch_i386 | arch_i486) && !opt_size;
+       c->use_imul_mem_imm32   = !FLAGS(opt_arch, arch_k8 | arch_k10) || opt_size;
+       c->use_pxor             = FLAGS(opt_arch, arch_netburst);
+       c->use_mov_0            = FLAGS(opt_arch, arch_k6) && !opt_size;
+       c->use_short_sex_eax    = !FLAGS(opt_arch, arch_k6) && !opt_size;
+       c->use_pad_return       = FLAGS(opt_arch, arch_athlon_plus | arch_core2 | arch_generic32) && !opt_size;
+       c->use_bt               = FLAGS(opt_arch, arch_core2 | arch_athlon_plus) || opt_size;
+       c->use_fisttp           = FLAGS(opt_arch & arch, arch_feature_sse3);
+       c->use_sse_prefetch     = FLAGS(arch, (arch_feature_3DNowE | arch_feature_sse1));
+       c->use_3dnow_prefetch   = FLAGS(arch, arch_feature_3DNow);
+       c->use_popcnt           = FLAGS(arch, (arch_feature_sse4_2 | arch_feature_sse4a));
+       c->use_i486             = (arch & arch_mask) >= arch_i486;
+       c->optimize_cc          = opt_cc;
+       c->use_unsafe_floatconv = opt_unsafe_floatconv;
+       c->emit_machcode        = emit_machcode;
+
+       c->function_alignment       = arch_costs->function_alignment;
+       c->label_alignment          = arch_costs->label_alignment;
+       c->label_alignment_max_skip = arch_costs->label_alignment_max_skip;
+
+       c->label_alignment_factor =
+               FLAGS(opt_arch, arch_i386 | arch_i486) || opt_size ? 0 :
+               opt_arch & arch_all_amd                            ? 3 :
+               2;
 }
 
 void ia32_init_architecture(void)