fix cltd
[libfirm] / ir / be / ia32 / ia32_architecture.c
index 2f4c5c5..6e3e141 100644 (file)
@@ -23,9 +23,7 @@
  * @author      Michael Beck, Matthias Braun
  * @version     $Id: bearch_ia32_t.h 16363 2007-10-25 23:27:07Z beck $
  */
-#ifdef HAVE_CONFIG_H
 #include "config.h"
-#endif
 
 #include "lc_opts.h"
 #include "lc_opts_enum.h"
@@ -63,15 +61,31 @@ enum cpu_arch_features {
        arch_athlon_plus      = arch_athlon | arch_k8 | arch_k10,
        arch_all_amd          = arch_k6 | arch_geode | arch_athlon_plus,
 
-       arch_feature_mmx      = 0x00004000,                      /**< MMX instructions */
-       arch_feature_p6_insn  = 0x00008000,                      /**< PentiumPro instructions */
-       arch_feature_sse1     = 0x00010000 | arch_feature_mmx,   /**< SSE1 instructions, include MMX */
-       arch_feature_sse2     = 0x00020000 | arch_feature_sse1,  /**< SSE2 instructions, include SSE1 */
-       arch_feature_sse3     = 0x00040000 | arch_feature_sse2,  /**< SSE3 instructions, include SSE2 */
-       arch_feature_ssse3    = 0x00080000 | arch_feature_sse3,  /**< SSSE3 instructions, include SSE3 */
-       arch_feature_3DNow    = 0x00100000,                      /**< 3DNow! instructions */
-       arch_feature_3DNowE   = 0x00200000 | arch_feature_3DNow, /**< Enhanced 3DNow! instructions */
-       arch_feature_64bit    = 0x00400000 | arch_feature_sse2,  /**< x86_64 support, includes SSE2 */
+       arch_feature_mmx      = 0x00004000, /**< MMX instructions */
+       arch_feature_p6_insn  = 0x00008000, /**< PentiumPro instructions */
+       arch_feature_sse1     = 0x00010000, /**< SSE1 instructions */
+       arch_feature_sse2     = 0x00020000, /**< SSE2 instructions */
+       arch_feature_sse3     = 0x00040000, /**< SSE3 instructions */
+       arch_feature_ssse3    = 0x00080000, /**< SSSE3 instructions */
+       arch_feature_3DNow    = 0x00100000, /**< 3DNow! instructions */
+       arch_feature_3DNowE   = 0x00200000, /**< Enhanced 3DNow! instructions */
+       arch_feature_64bit    = 0x00400000, /**< x86_64 support */
+       arch_feature_sse4_1   = 0x00800000, /**< SSE4.1 instructions */
+       arch_feature_sse4_2   = 0x01000000, /**< SSE4.2 instructions */
+       arch_feature_sse4a    = 0x02000000, /**< SSE4a instructions */
+
+       arch_mmx_insn     = arch_feature_mmx,                         /**< MMX instructions */
+       arch_sse1_insn    = arch_feature_sse1   | arch_mmx_insn,      /**< SSE1 instructions, include MMX */
+       arch_sse2_insn    = arch_feature_sse2   | arch_sse1_insn,     /**< SSE2 instructions, include SSE1 */
+       arch_sse3_insn    = arch_feature_sse3   | arch_sse2_insn,     /**< SSE3 instructions, include SSE2 */
+       arch_ssse3_insn   = arch_feature_ssse3  | arch_sse3_insn,     /**< SSSE3 instructions, include SSE3 */
+       arch_sse4_1_insn  = arch_feature_sse4_1 | arch_ssse3_insn,    /**< SSE4.1 instructions, include SSSE3 */
+       arch_sse4_2_insn  = arch_feature_sse4_2 | arch_sse4_1_insn,   /**< SSE4.2 instructions, include SSE4.1 */
+       arch_sse4a_insn   = arch_feature_sse4a  | arch_ssse3_insn,    /**< SSE4a instructions, include SSSE3 */
+
+       arch_3DNow_insn   = arch_feature_3DNow | arch_feature_mmx,    /**< 3DNow! instructions, including MMX */
+       arch_3DNowE_insn  = arch_feature_3DNowE | arch_3DNow_insn,    /**< Enhanced 3DNow! instructions */
+       arch_64bit_insn   = arch_feature_64bit  | arch_sse2_insn,     /**< x86_64 support, includes SSE2 */
 };
 
 #define FLAGS(x, f) (((x) & (f)) != 0)
@@ -82,40 +96,43 @@ enum cpu_arch_features {
 enum cpu_support {
        cpu_generic     = arch_generic32,
 
-       /* intel CPU's */
+       /* Intel CPUs */
        cpu_i386        = arch_i386,
        cpu_i486        = arch_i486,
        cpu_pentium     = arch_pentium,
-       cpu_pentium_mmx = arch_pentium | arch_feature_mmx,
+       cpu_pentium_mmx = arch_pentium | arch_mmx_insn,
        cpu_pentium_pro = arch_ppro | arch_feature_p6_insn,
-       cpu_pentium_2   = arch_ppro | arch_feature_p6_insn | arch_feature_mmx,
-       cpu_pentium_3   = arch_ppro | arch_feature_p6_insn | arch_feature_sse1,
-       cpu_pentium_m   = arch_ppro | arch_feature_p6_insn | arch_feature_sse2,
-       cpu_pentium_4   = arch_netburst | arch_feature_p6_insn | arch_feature_sse2,
-       cpu_prescott    = arch_nocona | arch_feature_p6_insn | arch_feature_sse3,
-       cpu_nocona      = arch_nocona | arch_feature_p6_insn | arch_feature_64bit | arch_feature_sse3,
-       cpu_core2       = arch_core2 | arch_feature_p6_insn | arch_feature_64bit | arch_feature_ssse3,
-
-       /* AMD CPU's */
-       cpu_k6          = arch_k6 | arch_feature_mmx,
-       cpu_k6_PLUS     = arch_k6 | arch_feature_mmx | arch_feature_3DNow,
-       cpu_geode       = arch_geode | arch_feature_sse1 | arch_feature_3DNowE,
-       cpu_athlon      = arch_athlon | arch_feature_sse1 | arch_feature_3DNowE | arch_feature_p6_insn,
-       cpu_athlon64    = arch_athlon | arch_feature_sse2 | arch_feature_3DNowE | arch_feature_p6_insn | arch_feature_64bit,
-       cpu_k8          = arch_k8 | arch_feature_sse2 | arch_feature_3DNowE | arch_feature_p6_insn | arch_feature_64bit,
-       cpu_k8_sse3     = arch_k8 | arch_feature_sse3 | arch_feature_3DNowE | arch_feature_p6_insn | arch_feature_64bit,
-       cpu_k10         = arch_k10 | arch_feature_sse3 | arch_feature_3DNowE | arch_feature_p6_insn | arch_feature_64bit,
-
-       /* other CPU's */
+       cpu_pentium_2   = arch_ppro | arch_feature_p6_insn | arch_mmx_insn,
+       cpu_pentium_3   = arch_ppro | arch_feature_p6_insn | arch_sse1_insn,
+       cpu_pentium_m   = arch_ppro | arch_feature_p6_insn | arch_sse2_insn,
+       cpu_pentium_4   = arch_netburst | arch_feature_p6_insn | arch_sse2_insn,
+       cpu_prescott    = arch_nocona | arch_feature_p6_insn | arch_sse3_insn,
+       cpu_nocona      = arch_nocona | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
+       cpu_core2       = arch_core2 | arch_feature_p6_insn | arch_64bit_insn | arch_ssse3_insn,
+       cpu_penryn      = arch_core2 | arch_feature_p6_insn | arch_64bit_insn | arch_sse4_1_insn,
+
+       /* AMD CPUs */
+       cpu_k6          = arch_k6 | arch_mmx_insn,
+       cpu_k6_PLUS     = arch_k6 | arch_3DNow_insn,
+       cpu_geode       = arch_geode  | arch_sse1_insn | arch_3DNowE_insn,
+       cpu_athlon_old  = arch_athlon | arch_3DNowE_insn | arch_feature_p6_insn,
+       cpu_athlon      = arch_athlon | arch_sse1_insn | arch_3DNowE_insn | arch_feature_p6_insn,
+       cpu_athlon64    = arch_athlon | arch_sse2_insn | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn,
+       cpu_k8          = arch_k8  | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn,
+       cpu_k8_sse3     = arch_k8  | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
+       cpu_k10         = arch_k10 | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn | arch_sse4a_insn,
+
+       /* other CPUs */
        cpu_winchip_c6  = arch_i486 | arch_feature_mmx,
        cpu_winchip2    = arch_i486 | arch_feature_mmx | arch_feature_3DNow,
        cpu_c3          = arch_i486 | arch_feature_mmx | arch_feature_3DNow,
-       cpu_c3_2        = arch_ppro | arch_feature_sse1,  /* really no 3DNow! */
+       cpu_c3_2        = arch_ppro | arch_feature_p6_insn | arch_sse1_insn, /* really no 3DNow! */
 };
 
 static int         opt_size             = 0;
+static int         emit_machcode        = 0;
 static cpu_support arch                 = cpu_generic;
-static cpu_support opt_arch             = cpu_core2;
+static cpu_support opt_arch             = cpu_generic;
 static int         use_sse2             = 0;
 static int         opt_cc               = 1;
 static int         opt_unsafe_floatconv = 0;
@@ -143,12 +160,13 @@ static const lc_opt_enum_int_items_t arch_items[] = {
        { "nocona",       cpu_nocona },
        { "merom",        cpu_core2 },
        { "core2",        cpu_core2 },
+       { "penryn",       cpu_penryn },
 
        { "k6",           cpu_k6 },
        { "k6-2",         cpu_k6_PLUS },
        { "k6-3",         cpu_k6_PLUS },
        { "geode",        cpu_geode },
-       { "athlon",       cpu_athlon },
+       { "athlon",       cpu_athlon_old },
        { "athlon-tbird", cpu_athlon },
        { "athlon-4",     cpu_athlon },
        { "athlon-xp",    cpu_athlon },
@@ -203,6 +221,8 @@ static const lc_opt_table_entry_t ia32_architecture_options[] = {
                          &opt_cc, 1),
        LC_OPT_ENT_BIT("unsafe_floatconv", "do unsafe floating point controlword "
                       "optimisations", &opt_unsafe_floatconv, 1),
+       LC_OPT_ENT_BOOL("machcode", "output machine code instead of assembler",
+                       &emit_machcode),
        LC_OPT_LAST
 };
 
@@ -222,7 +242,7 @@ static const insn_const size_cost = {
        2,   /* cost of an add instruction */
        3,   /* cost of a lea instruction */
        3,   /* cost of a constant shift instruction */
-       3,   /* starting cost of a multiply instruction */
+       4,   /* starting cost of a multiply instruction */
        0,   /* cost of multiply for every set bit */
        0,   /* logarithm for alignment of function labels */
        0,   /* logarithm for alignment of loops labels */
@@ -313,16 +333,22 @@ static const insn_const athlon_cost = {
        7,   /* maximum skip for alignment of loops labels */
 };
 
-/* costs for the Opteron/K8/K10 */
+/* costs for the Opteron/K8 */
 static const insn_const k8_cost = {
        1,   /* cost of an add instruction */
        2,   /* cost of a lea instruction */
        1,   /* cost of a constant shift instruction */
        3,   /* starting cost of a multiply instruction */
        0,   /* cost of multiply for every set bit */
+#if 0 /* TEST */
        4,   /* logarithm for alignment of function labels */
        4,   /* logarithm for alignment of loops labels */
        7,   /* maximum skip for alignment of loops labels */
+#else
+       0,   /* logarithm for alignment of function labels */
+       0,   /* logarithm for alignment of loops labels */
+       0    /* maximum skip for alignment of loops labels -- NOTE(review): looks like leftover TEST code; confirm before merge */
+#endif
 };
 
 /* costs for the K10 */
@@ -394,51 +420,24 @@ static void set_arch_costs(void)
                return;
        }
        switch (opt_arch & arch_mask) {
-       case arch_i386:
-               arch_costs = &i386_cost;
-               break;
-       case arch_i486:
-               arch_costs = &i486_cost;
-               break;
-       case arch_pentium:
-               arch_costs = &pentium_cost;
-               break;
-       case arch_ppro:
-               arch_costs = &pentiumpro_cost;
-               break;
-       case arch_netburst:
-               arch_costs = &netburst_cost;
-               break;
-       case arch_nocona:
-               arch_costs = &nocona_cost;
-               break;
-       case arch_core2:
-               arch_costs = &core2_cost;
-               break;
-       case arch_k6:
-               arch_costs = &k6_cost;
-               break;
-       case arch_geode:
-               arch_costs = &geode_cost;
-               break;
-       case arch_athlon:
-               arch_costs = &athlon_cost;
-               break;
-       case arch_k8:
-               arch_costs = &k8_cost;
-               break;
-       case arch_k10:
-               arch_costs = &k10_cost;
-               break;
-       case arch_generic32:
+       case arch_i386:      arch_costs = &i386_cost;       break;
+       case arch_i486:      arch_costs = &i486_cost;       break;
+       case arch_pentium:   arch_costs = &pentium_cost;    break;
+       case arch_ppro:      arch_costs = &pentiumpro_cost; break;
+       case arch_netburst:  arch_costs = &netburst_cost;   break;
+       case arch_nocona:    arch_costs = &nocona_cost;     break;
+       case arch_core2:     arch_costs = &core2_cost;      break;
+       case arch_k6:        arch_costs = &k6_cost;         break;
+       case arch_geode:     arch_costs = &geode_cost;      break;
+       case arch_athlon:    arch_costs = &athlon_cost;     break;
+       case arch_k8:        arch_costs = &k8_cost;         break;
+       case arch_k10:       arch_costs = &k10_cost;        break;
        default:
-               arch_costs = &generic32_cost;
+       case arch_generic32: arch_costs = &generic32_cost;  break;
        }
 }
 
-/**
- * Evaluate a given simple instruction.
- */
+/** Evaluate the costs of an instruction. */
 int ia32_evaluate_insn(insn_kind kind, tarval *tv) {
        int cost;
 
@@ -473,58 +472,59 @@ int ia32_evaluate_insn(insn_kind kind, tarval *tv) {
 
 void ia32_setup_cg_config(void)
 {
-       memset(&ia32_cg_config, 0, sizeof(ia32_cg_config));
+       ia32_code_gen_config_t *const c = &ia32_cg_config;
+       memset(c, 0, sizeof(*c));
 
        set_arch_costs();
 
-       ia32_cg_config.optimize_size        = opt_size != 0;
-       /* on newer intel cpus mov, pop is often faster then leave although it has a
+       c->optimize_size        = opt_size != 0;
+       /* on newer intel cpus mov, pop is often faster than leave although it has a
         * longer opcode */
-       ia32_cg_config.use_leave            = FLAGS(opt_arch, arch_i386 | arch_all_amd | arch_core2);
+       c->use_leave            = FLAGS(opt_arch, arch_i386 | arch_all_amd | arch_core2) || opt_size;
        /* P4s don't like inc/decs because they only partially write the flags
-          register which produces false dependencies */
-       ia32_cg_config.use_incdec           = !FLAGS(opt_arch, arch_netburst | arch_nocona | arch_geode) || opt_size;
-       ia32_cg_config.use_sse2             = use_sse2;
-       ia32_cg_config.use_ffreep           = FLAGS(opt_arch, arch_athlon_plus);
-       ia32_cg_config.use_ftst             = !FLAGS(arch, arch_feature_p6_insn);
-       ia32_cg_config.use_femms            = FLAGS(opt_arch, arch_athlon_plus) &&
-                                             FLAGS(arch, arch_feature_mmx | arch_all_amd);
-       ia32_cg_config.use_fucomi           = FLAGS(arch, arch_feature_p6_insn);
-       ia32_cg_config.use_cmov             = FLAGS(arch, arch_feature_p6_insn);
-       ia32_cg_config.use_modeD_moves      = FLAGS(opt_arch, arch_athlon_plus | arch_geode | arch_ppro |
-                                                   arch_netburst | arch_nocona | arch_core2 | arch_generic32);
-       ia32_cg_config.use_add_esp_4        = FLAGS(opt_arch, arch_geode | arch_athlon_plus |
-                                                   arch_netburst | arch_nocona | arch_core2 | arch_generic32) &&
-                                             !opt_size;
-       ia32_cg_config.use_add_esp_8        = FLAGS(opt_arch, arch_geode | arch_athlon_plus |
-                                                   arch_i386 | arch_i486 | arch_ppro | arch_netburst |
-                                                   arch_nocona | arch_core2 | arch_generic32) &&
-                                             !opt_size;
-       ia32_cg_config.use_sub_esp_4        = FLAGS(opt_arch, arch_athlon_plus | arch_ppro |
-                                                   arch_netburst | arch_nocona | arch_core2 | arch_generic32) &&
-                                             !opt_size;
-       ia32_cg_config.use_sub_esp_8        = FLAGS(opt_arch, arch_athlon_plus | arch_i386 | arch_i486 |
-                                                   arch_ppro | arch_netburst | arch_nocona | arch_core2 | arch_generic32) &&
-                                             !opt_size;
-       ia32_cg_config.use_imul_mem_imm32   = !FLAGS(opt_arch, arch_k8 | arch_k10) || opt_size;
-       ia32_cg_config.use_pxor             = FLAGS(opt_arch, arch_netburst);
-       ia32_cg_config.use_mov_0            = FLAGS(opt_arch, arch_k6) && !opt_size;
-       ia32_cg_config.use_pad_return       = FLAGS(opt_arch, arch_athlon_plus | arch_core2 | arch_generic32) && !opt_size;
-       ia32_cg_config.use_bt               = FLAGS(opt_arch, arch_core2 | arch_athlon_plus) || opt_size;
-       ia32_cg_config.optimize_cc          = opt_cc;
-       ia32_cg_config.use_unsafe_floatconv = opt_unsafe_floatconv;
-
-       ia32_cg_config.function_alignment       = arch_costs->function_alignment;
-       ia32_cg_config.label_alignment          = arch_costs->label_alignment;
-       ia32_cg_config.label_alignment_max_skip = arch_costs->label_alignment_max_skip;
-
-       if (opt_arch & (arch_i386 | arch_i486)) {
-               ia32_cg_config.label_alignment_factor = 0;
-       } else if (opt_arch & arch_all_amd) {
-               ia32_cg_config.label_alignment_factor = 3;
-       } else {
-               ia32_cg_config.label_alignment_factor = 2;
-       }
+        * register which produces false dependencies */
+       c->use_incdec           = !FLAGS(opt_arch, arch_netburst | arch_nocona | arch_core2 | arch_geode) || opt_size;
+       c->use_sse2             = use_sse2 && FLAGS(arch, arch_feature_sse2);
+       c->use_ffreep           = FLAGS(opt_arch, arch_athlon_plus);
+       c->use_ftst             = !FLAGS(arch, arch_feature_p6_insn);
+       /* valgrind can't cope with femms yet and the usefulness of the optimization
+        * is questionable anyway */
+#if 0
+       c->use_femms            = FLAGS(opt_arch, arch_athlon_plus) &&
+               FLAGS(arch, arch_feature_mmx | arch_all_amd);
+#else
+       c->use_femms            = 0;
+#endif
+       c->use_fucomi           = FLAGS(arch, arch_feature_p6_insn);
+       c->use_cmov             = FLAGS(arch, arch_feature_p6_insn);
+       c->use_modeD_moves      = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_geode);
+       c->use_add_esp_4        = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 |             arch_geode)                         && !opt_size;
+       c->use_add_esp_8        = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_geode | arch_i386 | arch_i486) && !opt_size;
+       c->use_sub_esp_4        = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro)                                      && !opt_size;
+       c->use_sub_esp_8        = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro |              arch_i386 | arch_i486) && !opt_size;
+       c->use_imul_mem_imm32   = !FLAGS(opt_arch, arch_k8 | arch_k10) || opt_size;
+       c->use_pxor             = FLAGS(opt_arch, arch_netburst);
+       c->use_mov_0            = FLAGS(opt_arch, arch_k6) && !opt_size;
+       c->use_short_sex_eax    = !FLAGS(opt_arch, arch_k6) && !opt_size;
+       c->use_pad_return       = FLAGS(opt_arch, arch_athlon_plus | arch_core2 | arch_generic32) && !opt_size;
+       c->use_bt               = FLAGS(opt_arch, arch_core2 | arch_athlon_plus) || opt_size;
+       c->use_fisttp           = FLAGS(opt_arch & arch, arch_feature_sse3);
+       c->use_sse_prefetch     = FLAGS(arch, (arch_feature_3DNowE | arch_feature_sse1));
+       c->use_3dnow_prefetch   = FLAGS(arch, arch_feature_3DNow);
+       c->use_popcnt           = FLAGS(arch, (arch_feature_sse4_2 | arch_feature_sse4a));
+       c->use_i486             = (arch & arch_mask) >= arch_i486;
+       c->optimize_cc          = opt_cc;
+       c->use_unsafe_floatconv = opt_unsafe_floatconv;
+       c->emit_machcode        = emit_machcode;
+
+       c->function_alignment       = arch_costs->function_alignment;
+       c->label_alignment          = arch_costs->label_alignment;
+       c->label_alignment_max_skip = arch_costs->label_alignment_max_skip;
+
+       c->label_alignment_factor =
+               FLAGS(opt_arch, arch_i386 | arch_i486) || opt_size ? 0 :
+               opt_arch & arch_all_amd                            ? 3 :
+               2;
 }
 
 void ia32_init_architecture(void)