/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* @author Michael Beck, Matthias Braun
* @version $Id: bearch_ia32_t.h 16363 2007-10-25 23:27:07Z beck $
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
-#include <libcore/lc_opts.h>
-#include <libcore/lc_opts_enum.h>
+#include "lc_opts.h"
+#include "lc_opts_enum.h"
#include "irtools.h"
ia32_code_gen_config_t ia32_cg_config;
/**
- * CPU features.
+ * CPU architectures and features.
*/
enum cpu_arch_features {
- arch_feature_intel = 0x80000000, /**< Intel CPU */
- arch_feature_amd = 0x40000000, /**< AMD CPU */
- arch_feature_p6 = 0x20000000, /**< P6 instructions */
- arch_feature_mmx = 0x10000000, /**< MMX instructions */
- arch_feature_sse1 = 0x08000000 | arch_feature_mmx, /**< SSE1 instructions, include MMX */
- arch_feature_sse2 = 0x04000000 | arch_feature_sse1, /**< SSE2 instructions, include SSE1 */
- arch_feature_sse3 = 0x02000000 | arch_feature_sse2, /**< SSE3 instructions, include SSE2 */
- arch_feature_ssse3 = 0x01000000 | arch_feature_sse3, /**< SSSE3 instructions, include SSE3 */
- arch_feature_3DNow = 0x00800000, /**< 3DNow! instructions */
- arch_feature_3DNowE = 0x00400000 | arch_feature_3DNow, /**< Enhanced 3DNow! instructions */
- arch_feature_netburst = 0x00200000 | arch_feature_intel, /**< Netburst architecture */
- arch_feature_64bit = 0x00100000 | arch_feature_sse2, /**< x86_64 support, include SSE2 */
+ arch_generic32 = 0x00000001, /**< no specific architecture */
+
+ arch_i386 = 0x00000002, /**< i386 architecture */
+ arch_i486 = 0x00000004, /**< i486 architecture */
+ arch_pentium = 0x00000008, /**< Pentium architecture */
+ arch_ppro = 0x00000010, /**< PentiumPro architecture */
+ arch_netburst = 0x00000020, /**< Netburst architecture */
+ arch_nocona = 0x00000040, /**< Nocona architecture */
+ arch_core2 = 0x00000080, /**< Core2 architecture */
+ arch_atom = 0x00000100, /**< Atom architecture */
+
+ arch_k6 = 0x00000200, /**< k6 architecture */
+ arch_geode = 0x00000400, /**< Geode architecture */
+ arch_athlon = 0x00000800, /**< Athlon architecture */
+ arch_k8 = 0x00001000, /**< K8/Opteron architecture */
+ arch_k10 = 0x00002000, /**< K10/Barcelona architecture */
+
+ arch_mask = 0x00003FFF,
+
+ arch_athlon_plus = arch_athlon | arch_k8 | arch_k10,
+ arch_all_amd = arch_k6 | arch_geode | arch_athlon_plus,
+
+ arch_feature_mmx = 0x00004000, /**< MMX instructions */
+ arch_feature_p6_insn = 0x00008000, /**< PentiumPro instructions */
+ arch_feature_sse1 = 0x00010000, /**< SSE1 instructions */
+ arch_feature_sse2 = 0x00020000, /**< SSE2 instructions */
+ arch_feature_sse3 = 0x00040000, /**< SSE3 instructions */
+ arch_feature_ssse3 = 0x00080000, /**< SSSE3 instructions */
+ arch_feature_3DNow = 0x00100000, /**< 3DNow! instructions */
+ arch_feature_3DNowE = 0x00200000, /**< Enhanced 3DNow! instructions */
+ arch_feature_64bit = 0x00400000, /**< x86_64 support */
+ arch_feature_sse4_1 = 0x00800000, /**< SSE4.1 instructions */
+ arch_feature_sse4_2 = 0x01000000, /**< SSE4.2 instructions */
+ arch_feature_sse4a = 0x02000000, /**< SSE4a instructions */
+
+ arch_mmx_insn = arch_feature_mmx, /**< MMX instructions */
+ arch_sse1_insn = arch_feature_sse1 | arch_mmx_insn, /**< SSE1 instructions, include MMX */
+ arch_sse2_insn = arch_feature_sse2 | arch_sse1_insn, /**< SSE2 instructions, include SSE1 */
+ arch_sse3_insn = arch_feature_sse3 | arch_sse2_insn, /**< SSE3 instructions, include SSE2 */
+ arch_ssse3_insn = arch_feature_ssse3 | arch_sse3_insn, /**< SSSE3 instructions, include SSE3 */
+ arch_sse4_1_insn = arch_feature_sse4_1 | arch_ssse3_insn, /**< SSE4.1 instructions, include SSSE3 */
+ arch_sse4_2_insn = arch_feature_sse4_2 | arch_sse4_1_insn, /**< SSE4.2 instructions, include SSE4.1 */
+ arch_sse4a_insn = arch_feature_sse4a | arch_ssse3_insn, /**< SSE4a instructions, include SSSE3 */
+
+ arch_3DNow_insn = arch_feature_3DNow | arch_feature_mmx, /**< 3DNow! instructions, including MMX */
+ arch_3DNowE_insn = arch_feature_3DNowE | arch_3DNow_insn, /**< Enhanced 3DNow! instructions */
+ arch_64bit_insn = arch_feature_64bit | arch_sse2_insn, /**< x86_64 support, includes SSE2 */
};
+#define FLAGS(x, f) (((x) & (f)) != 0)
+
/**
- * Architectures.
+ * CPUs.
*/
enum cpu_support {
- /* intel CPU's */
- arch_generic = 0,
-
- arch_i386 = 1,
- arch_i486 = 2,
- arch_pentium = 3 | arch_feature_intel,
- arch_pentium_mmx = 4 | arch_feature_intel | arch_feature_mmx,
- arch_pentium_pro = 5 | arch_feature_intel | arch_feature_p6,
- arch_pentium_2 = 6 | arch_feature_intel | arch_feature_p6 | arch_feature_mmx,
- arch_pentium_3 = 7 | arch_feature_intel | arch_feature_p6 | arch_feature_sse1,
- arch_pentium_4 = 8 | arch_feature_netburst | arch_feature_p6 | arch_feature_sse2,
- arch_pentium_m = 9 | arch_feature_intel | arch_feature_p6 | arch_feature_sse2,
- arch_core = 10 | arch_feature_intel | arch_feature_p6 | arch_feature_sse3,
- arch_prescott = 11 | arch_feature_netburst | arch_feature_p6 | arch_feature_sse3,
- arch_core2 = 12 | arch_feature_intel | arch_feature_p6 | arch_feature_64bit | arch_feature_ssse3,
-
- /* AMD CPU's */
- arch_k6 = 13 | arch_feature_amd | arch_feature_mmx,
- arch_k6_2 = 14 | arch_feature_amd | arch_feature_mmx | arch_feature_3DNow,
- arch_k6_3 = 15 | arch_feature_amd | arch_feature_mmx | arch_feature_3DNow,
- arch_athlon = 16 | arch_feature_amd | arch_feature_mmx | arch_feature_3DNowE | arch_feature_p6,
- arch_athlon_xp = 17 | arch_feature_amd | arch_feature_sse1 | arch_feature_3DNowE | arch_feature_p6,
- arch_opteron = 18 | arch_feature_amd | arch_feature_64bit | arch_feature_3DNowE | arch_feature_p6,
-
- /* other */
- arch_winchip_c6 = 19 | arch_feature_mmx,
- arch_winchip2 = 20 | arch_feature_mmx | arch_feature_3DNow,
- arch_c3 = 21 | arch_feature_mmx | arch_feature_3DNow,
- arch_c3_2 = 22 | arch_feature_sse1, /* really no 3DNow! */
+ cpu_generic = arch_generic32,
+
+ /* intel CPUs */
+ cpu_i386 = arch_i386,
+ cpu_i486 = arch_i486,
+ cpu_pentium = arch_pentium,
+ cpu_pentium_mmx = arch_pentium | arch_mmx_insn,
+ cpu_pentium_pro = arch_ppro | arch_feature_p6_insn,
+ cpu_pentium_2 = arch_ppro | arch_feature_p6_insn | arch_mmx_insn,
+ cpu_pentium_3 = arch_ppro | arch_feature_p6_insn | arch_sse1_insn,
+ cpu_pentium_m = arch_ppro | arch_feature_p6_insn | arch_sse2_insn,
+ cpu_pentium_4 = arch_netburst | arch_feature_p6_insn | arch_sse2_insn,
+ cpu_prescott = arch_nocona | arch_feature_p6_insn | arch_sse3_insn,
+ cpu_nocona = arch_nocona | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
+ cpu_core2 = arch_core2 | arch_feature_p6_insn | arch_64bit_insn | arch_ssse3_insn,
+ cpu_penryn = arch_core2 | arch_feature_p6_insn | arch_64bit_insn | arch_sse4_1_insn,
+
+ /* AMD CPUs */
+ cpu_k6 = arch_k6 | arch_mmx_insn,
+ cpu_k6_PLUS = arch_k6 | arch_3DNow_insn,
+ cpu_geode = arch_geode | arch_sse1_insn | arch_3DNowE_insn,
+ cpu_athlon_old = arch_athlon | arch_3DNowE_insn | arch_feature_p6_insn,
+ cpu_athlon = arch_athlon | arch_sse1_insn | arch_3DNowE_insn | arch_feature_p6_insn,
+ cpu_athlon64 = arch_athlon | arch_sse2_insn | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn,
+ cpu_k8 = arch_k8 | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn,
+ cpu_k8_sse3 = arch_k8 | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn | arch_sse3_insn,
+ cpu_k10 = arch_k10 | arch_3DNowE_insn | arch_feature_p6_insn | arch_64bit_insn | arch_sse4a_insn,
+
+ /* other CPUs */
+ cpu_winchip_c6 = arch_i486 | arch_feature_mmx,
+ cpu_winchip2 = arch_i486 | arch_feature_mmx | arch_feature_3DNow,
+ cpu_c3 = arch_i486 | arch_feature_mmx | arch_feature_3DNow,
+ cpu_c3_2 = arch_ppro | arch_feature_p6_insn | arch_sse1_insn, /* really no 3DNow! */
};
-/** checks for l <= x <= h */
-#define _IN_RANGE(x, l, h) ((unsigned)((x) - (l)) <= (unsigned)((h) - (l)))
-
-/** returns true if it's Intel architecture */
-#define ARCH_INTEL(x) (((x) & arch_feature_intel) != 0)
-
-/** returns true if it's AMD architecture */
-#define ARCH_AMD(x) (((x) & arch_feature_amd) != 0)
-
-/** return true if it's a Athlon/Opteron */
-#define ARCH_ATHLON(x) _IN_RANGE((x), arch_athlon, arch_opteron)
-
-/** return true if the CPU has MMX support */
-#define ARCH_MMX(x) (((x) & arch_feature_mmx) != 0)
-
-/** return true if the CPU has 3DNow! support */
-#define ARCH_3DNow(x) (((x) & arch_feature_3DNow) != 0)
-
-/** return true if the CPU has P6 features (CMOV) */
-#define IS_P6_ARCH(x) (((x) & arch_feature_p6) != 0)
-
-static cpu_support arch = arch_generic;
-static cpu_support opt_arch = arch_pentium_4;
+static int opt_size = 0;
+static int emit_machcode = 0;
+static cpu_support arch = cpu_generic;
+static cpu_support opt_arch = cpu_generic;
static int use_sse2 = 0;
static int opt_cc = 1;
static int opt_unsafe_floatconv = 0;
/* instruction set architectures. */
static const lc_opt_enum_int_items_t arch_items[] = {
- { "i386", arch_i386, },
- { "i486", arch_i486, },
- { "pentium", arch_pentium, },
- { "i586", arch_pentium, },
- { "pentiumpro", arch_pentium_pro, },
- { "i686", arch_pentium_pro, },
- { "pentiummmx", arch_pentium_mmx, },
- { "pentium2", arch_pentium_2, },
- { "p2", arch_pentium_2, },
- { "pentium3", arch_pentium_3, },
- { "p3", arch_pentium_3, },
- { "pentium4", arch_pentium_4, },
- { "p4", arch_pentium_4, },
- { "prescott", arch_pentium_4, },
- { "pentiumm", arch_pentium_m, },
- { "pm", arch_pentium_m, },
- { "core", arch_core, },
- { "yonah", arch_core, },
- { "merom", arch_core2, },
- { "core2", arch_core2, },
- { "k6", arch_k6, },
- { "k6-2", arch_k6_2, },
- { "k6-3", arch_k6_2, },
- { "athlon", arch_athlon, },
- { "athlon-xp", arch_athlon_xp, },
- { "athlon-mp", arch_athlon_xp, },
- { "athlon-4", arch_athlon_xp, },
- { "athlon64", arch_opteron, },
- { "k8", arch_opteron, },
- { "opteron", arch_opteron, },
- { "generic", arch_generic, },
- { NULL, 0 }
+ { "i386", cpu_i386 },
+ { "i486", cpu_i486 },
+ { "i586", cpu_pentium },
+ { "pentium", cpu_pentium },
+ { "pentium-mmx", cpu_pentium_mmx },
+ { "i686", cpu_pentium_pro },
+ { "pentiumpro", cpu_pentium_pro },
+ { "pentium2", cpu_pentium_2 },
+ { "p2", cpu_pentium_2 },
+ { "pentium3", cpu_pentium_3 },
+ { "pentium3m", cpu_pentium_3 },
+ { "p3", cpu_pentium_3 },
+ { "pentium-m", cpu_pentium_m },
+ { "pm", cpu_pentium_m },
+ { "pentium4", cpu_pentium_4 },
+ { "pentium4m", cpu_pentium_4 },
+ { "p4", cpu_pentium_4 },
+ { "prescott", cpu_prescott },
+ { "nocona", cpu_nocona },
+ { "merom", cpu_core2 },
+ { "core2", cpu_core2 },
+ { "penryn", cpu_penryn },
+
+ { "k6", cpu_k6 },
+ { "k6-2", cpu_k6_PLUS },
+ { "k6-3", cpu_k6_PLUS },
+ { "geode", cpu_geode },
+ { "athlon", cpu_athlon_old },
+ { "athlon-tbird", cpu_athlon },
+ { "athlon-4", cpu_athlon },
+ { "athlon-xp", cpu_athlon },
+ { "athlon-mp", cpu_athlon },
+ { "athlon64", cpu_athlon64 },
+ { "k8", cpu_k8 },
+ { "opteron", cpu_k8 },
+ { "athlon-fx", cpu_k8 },
+ { "k8-sse3", cpu_k8_sse3 },
+ { "opteron-sse3", cpu_k8_sse3 },
+ { "k10", cpu_k10 },
+ { "barcelona", cpu_k10 },
+ { "amdfam10", cpu_k10 },
+
+ { "winchip-c6", cpu_winchip_c6, },
+ { "winchip2", cpu_winchip2 },
+ { "c3", cpu_c3 },
+ { "c3-2", cpu_c3_2 },
+
+ { "generic", cpu_generic },
+ { "generic32", cpu_generic },
+ { NULL, 0 }
};
static lc_opt_enum_int_var_t arch_var = {
};
static const lc_opt_table_entry_t ia32_architecture_options[] = {
+ LC_OPT_ENT_BOOL("size", "optimize for size", &opt_size),
LC_OPT_ENT_ENUM_INT("arch", "select the instruction architecture",
&arch_var),
LC_OPT_ENT_ENUM_INT("opt", "optimize for instruction architecture",
&fp_unit_var),
LC_OPT_ENT_NEGBIT("nooptcc", "do not optimize calling convention",
&opt_cc, 1),
- LC_OPT_ENT_BIT("unsafe_floatconv", "do unsage floating point controlword "
+ LC_OPT_ENT_BIT("unsafe_floatconv", "do unsafe floating point controlword "
"optimisations", &opt_unsafe_floatconv, 1),
+ LC_OPT_ENT_BOOL("machcode", "output machine code instead of assembler",
+ &emit_machcode),
LC_OPT_LAST
};
typedef struct insn_const {
- int add_cost; /**< cost of an add instruction */
- int lea_cost; /**< cost of a lea instruction */
- int const_shf_cost; /**< cost of a constant shift instruction */
- int cost_mul_start; /**< starting cost of a multiply instruction */
- int cost_mul_bit; /**< cost of multiply for every set bit */
+ int add_cost; /**< cost of an add instruction */
+ int lea_cost; /**< cost of a lea instruction */
+ int const_shf_cost; /**< cost of a constant shift instruction */
+ int cost_mul_start; /**< starting cost of a multiply instruction */
+ int cost_mul_bit; /**< cost of multiply for every set bit */
+ unsigned function_alignment; /**< logarithm for alignment of function labels */
+ unsigned label_alignment; /**< logarithm for alignment of loop labels */
+ unsigned label_alignment_max_skip; /**< maximum skip for alignment of loop labels */
} insn_const;
+/* costs for optimizing for size */
+static const insn_const size_cost = {
+ 2, /* cost of an add instruction */
+ 3, /* cost of a lea instruction */
+ 3, /* cost of a constant shift instruction */
+ 4, /* starting cost of a multiply instruction */
+ 0, /* cost of multiply for every set bit */
+ 0, /* logarithm for alignment of function labels */
+ 0, /* logarithm for alignment of loops labels */
+ 0, /* maximum skip for alignment of loops labels */
+};
+
/* costs for the i386 */
static const insn_const i386_cost = {
1, /* cost of an add instruction */
1, /* cost of a lea instruction */
- 2, /* cost of a constant shift instruction */
- 6, /* starting cost of a multiply instruction */
- 1 /* cost of multiply for every set bit */
+ 3, /* cost of a constant shift instruction */
+ 9, /* starting cost of a multiply instruction */
+ 1, /* cost of multiply for every set bit */
+ 2, /* logarithm for alignment of function labels */
+ 2, /* logarithm for alignment of loops labels */
+ 3, /* maximum skip for alignment of loops labels */
};
/* costs for the i486 */
1, /* cost of a lea instruction */
2, /* cost of a constant shift instruction */
12, /* starting cost of a multiply instruction */
- 1 /* cost of multiply for every set bit */
+ 1, /* cost of multiply for every set bit */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 15, /* maximum skip for alignment of loops labels */
};
/* costs for the Pentium */
1, /* cost of a lea instruction */
1, /* cost of a constant shift instruction */
11, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
+ 0, /* cost of multiply for every set bit */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 7, /* maximum skip for alignment of loops labels */
};
/* costs for the Pentium Pro */
1, /* cost of a lea instruction */
1, /* cost of a constant shift instruction */
4, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
+ 0, /* cost of multiply for every set bit */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 10, /* maximum skip for alignment of loops labels */
};
/* costs for the K6 */
2, /* cost of a lea instruction */
1, /* cost of a constant shift instruction */
3, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
+ 0, /* cost of multiply for every set bit */
+ 5, /* logarithm for alignment of function labels */
+ 5, /* logarithm for alignment of loops labels */
+ 7, /* maximum skip for alignment of loops labels */
+};
+
+/* costs for the Geode */
+static const insn_const geode_cost = {
+ 1, /* cost of an add instruction */
+ 1, /* cost of a lea instruction */
+ 1, /* cost of a constant shift instruction */
+ 7, /* starting cost of a multiply instruction */
+ 0, /* cost of multiply for every set bit */
+ 0, /* logarithm for alignment of function labels */
+ 0, /* logarithm for alignment of loops labels */
+ 0, /* maximum skip for alignment of loops labels */
};
/* costs for the Athlon */
2, /* cost of a lea instruction */
1, /* cost of a constant shift instruction */
5, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
+ 0, /* cost of multiply for every set bit */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 7, /* maximum skip for alignment of loops labels */
+};
+
+/* costs for the Opteron/K8 */
+static const insn_const k8_cost = {
+ 1, /* cost of an add instruction */
+ 2, /* cost of a lea instruction */
+ 1, /* cost of a constant shift instruction */
+ 3, /* starting cost of a multiply instruction */
+ 0, /* cost of multiply for every set bit */
+#if 0 /* TEST */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 7, /* maximum skip for alignment of loops labels */
+#else
+ 0,
+ 0,
+ 0
+#endif
+};
+
+/* costs for the K10 */
+static const insn_const k10_cost = {
+ 1, /* cost of an add instruction */
+ 2, /* cost of a lea instruction */
+ 1, /* cost of a constant shift instruction */
+ 3, /* starting cost of a multiply instruction */
+ 0, /* cost of multiply for every set bit */
+ 5, /* logarithm for alignment of function labels */
+ 5, /* logarithm for alignment of loops labels */
+ 7, /* maximum skip for alignment of loops labels */
};
/* costs for the Pentium 4 */
-static const insn_const pentium4_cost = {
+static const insn_const netburst_cost = {
1, /* cost of an add instruction */
3, /* cost of a lea instruction */
4, /* cost of a constant shift instruction */
15, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
+ 0, /* cost of multiply for every set bit */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 7, /* maximum skip for alignment of loops labels */
};
-/* costs for the Core */
-static const insn_const core_cost = {
+/* costs for the Nocona and Core */
+static const insn_const nocona_cost = {
1, /* cost of an add instruction */
1, /* cost of a lea instruction */
1, /* cost of a constant shift instruction */
10, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
+ 0, /* cost of multiply for every set bit */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 7, /* maximum skip for alignment of loops labels */
};
-/* costs for the generic */
-static const insn_const generic_cost = {
+/* costs for the Core2 */
+static const insn_const core2_cost = {
+ 1, /* cost of an add instruction */
+ 1, /* cost of a lea instruction */
+ 1, /* cost of a constant shift instruction */
+ 3, /* starting cost of a multiply instruction */
+ 0, /* cost of multiply for every set bit */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 10, /* maximum skip for alignment of loops labels */
+};
+
+/* costs for the generic32 */
+static const insn_const generic32_cost = {
1, /* cost of an add instruction */
2, /* cost of a lea instruction */
1, /* cost of a constant shift instruction */
4, /* starting cost of a multiply instruction */
- 0 /* cost of multiply for every set bit */
+ 0, /* cost of multiply for every set bit */
+ 4, /* logarithm for alignment of function labels */
+ 4, /* logarithm for alignment of loops labels */
+ 7, /* maximum skip for alignment of loops labels */
};
-static const insn_const *arch_costs = &generic_cost;
+static const insn_const *arch_costs = &generic32_cost;
static void set_arch_costs(void)
{
- switch (opt_arch) {
- case arch_i386:
- arch_costs = &i386_cost;
- break;
- case arch_i486:
- arch_costs = &i486_cost;
- break;
- case arch_pentium:
- case arch_pentium_mmx:
- arch_costs = &pentium_cost;
- break;
- case arch_pentium_pro:
- case arch_pentium_2:
- case arch_pentium_3:
- arch_costs = &pentiumpro_cost;
- break;
- case arch_pentium_4:
- arch_costs = &pentium4_cost;
- break;
- case arch_pentium_m:
- arch_costs = &pentiumpro_cost;
- break;
- case arch_core:
- arch_costs = &core_cost;
- break;
- case arch_prescott:
- arch_costs = &pentium4_cost;
- break;
- case arch_core2:
- arch_costs = &core_cost;
- break;
- case arch_k6:
- case arch_k6_2:
- arch_costs = &k6_cost;
- break;
- case arch_athlon:
- case arch_athlon_xp:
- case arch_opteron:
- arch_costs = &athlon_cost;
- break;
- case arch_generic:
+ if (opt_size) {
+ arch_costs = &size_cost;
+ return;
+ }
+ switch (opt_arch & arch_mask) {
+ case arch_i386: arch_costs = &i386_cost; break;
+ case arch_i486: arch_costs = &i486_cost; break;
+ case arch_pentium: arch_costs = &pentium_cost; break;
+ case arch_ppro: arch_costs = &pentiumpro_cost; break;
+ case arch_netburst: arch_costs = &netburst_cost; break;
+ case arch_nocona: arch_costs = &nocona_cost; break;
+ case arch_core2: arch_costs = &core2_cost; break;
+ case arch_k6: arch_costs = &k6_cost; break;
+ case arch_geode: arch_costs = &geode_cost; break;
+ case arch_athlon: arch_costs = &athlon_cost; break;
+ case arch_k8: arch_costs = &k8_cost; break;
+ case arch_k10: arch_costs = &k10_cost; break;
default:
- arch_costs = &generic_cost;
+ case arch_generic32: arch_costs = &generic32_cost; break;
}
}
-/**
- * Evaluate a given simple instruction.
- */
+/* Evaluate the costs of an instruction. */
int ia32_evaluate_insn(insn_kind kind, tarval *tv) {
int cost;
}
}
-
-
void ia32_setup_cg_config(void)
{
- memset(&ia32_cg_config, 0, sizeof(ia32_cg_config));
+ ia32_code_gen_config_t *const c = &ia32_cg_config;
+ memset(c, 0, sizeof(*c));
+
+ set_arch_costs();
- /* on newer intel cpus mov, pop is often faster then leave although it has a
+ c->optimize_size = opt_size != 0;
+ /* on newer intel cpus mov, pop is often faster than leave although it has a
* longer opcode */
- ia32_cg_config.use_leave = !ARCH_INTEL(opt_arch)
- || !IS_P6_ARCH(opt_arch);
+ c->use_leave = FLAGS(opt_arch, arch_i386 | arch_all_amd | arch_core2) || opt_size;
/* P4s don't like inc/decs because they only partially write the flags
- register which produces false dependencies */
- ia32_cg_config.use_incdec = (opt_arch != arch_pentium_4);
- ia32_cg_config.use_sse2 = use_sse2;
- ia32_cg_config.use_ffreep = ARCH_ATHLON(opt_arch);
- ia32_cg_config.use_ftst = !IS_P6_ARCH(arch);
- ia32_cg_config.use_femms = ARCH_ATHLON(opt_arch)
- && ARCH_MMX(arch) && ARCH_AMD(arch);
- ia32_cg_config.use_fucomi = IS_P6_ARCH(arch);
- ia32_cg_config.use_cmov = IS_P6_ARCH(arch);
- ia32_cg_config.optimize_cc = opt_cc;
- ia32_cg_config.use_unsafe_floatconv = opt_unsafe_floatconv;
-
- if(opt_arch == arch_i386) {
- ia32_cg_config.function_alignment = 2;
- } else if(opt_arch == arch_i486) {
- ia32_cg_config.function_alignment = 4;
- } else if(opt_arch == arch_k6) {
- ia32_cg_config.function_alignment = 5;
- ia32_cg_config.label_alignment = 5;
- } else {
- ia32_cg_config.function_alignment = 4;
- ia32_cg_config.label_alignment = 4;
- }
-
- if(opt_arch == arch_i386 || opt_arch == arch_i486) {
- ia32_cg_config.label_alignment_factor = -1;
- } else if(ARCH_AMD(opt_arch)) {
- ia32_cg_config.label_alignment_factor = 3;
- } else {
- ia32_cg_config.label_alignment_factor = 2;
- }
-
- set_arch_costs();
+ * register which produces false dependencies */
+ c->use_incdec = !FLAGS(opt_arch, arch_netburst | arch_nocona | arch_core2 | arch_geode) || opt_size;
+ c->use_sse2 = use_sse2 && FLAGS(arch, arch_feature_sse2);
+ c->use_ffreep = FLAGS(opt_arch, arch_athlon_plus);
+ c->use_ftst = !FLAGS(arch, arch_feature_p6_insn);
+ /* valgrind can't cope with femms yet and the usefulness of the optimization
+ * is questionable anyway */
+#if 0
+ c->use_femms = FLAGS(opt_arch, arch_athlon_plus) &&
+ FLAGS(arch, arch_feature_mmx | arch_all_amd);
+#else
+ c->use_femms = 0;
+#endif
+ c->use_fucomi = FLAGS(arch, arch_feature_p6_insn);
+ c->use_cmov = FLAGS(arch, arch_feature_p6_insn);
+ c->use_modeD_moves = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_geode);
+ c->use_add_esp_4 = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_geode) && !opt_size;
+ c->use_add_esp_8 = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_geode | arch_i386 | arch_i486) && !opt_size;
+ c->use_sub_esp_4 = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro) && !opt_size;
+ c->use_sub_esp_8 = FLAGS(opt_arch, arch_generic32 | arch_athlon_plus | arch_netburst | arch_nocona | arch_core2 | arch_ppro | arch_i386 | arch_i486) && !opt_size;
+ c->use_imul_mem_imm32 = !FLAGS(opt_arch, arch_k8 | arch_k10) || opt_size;
+ c->use_pxor = FLAGS(opt_arch, arch_netburst);
+ c->use_mov_0 = FLAGS(opt_arch, arch_k6) && !opt_size;
+ c->use_short_sex_eax = !FLAGS(opt_arch, arch_k6) && !opt_size;
+ c->use_pad_return = FLAGS(opt_arch, arch_athlon_plus | arch_core2 | arch_generic32) && !opt_size;
+ c->use_bt = FLAGS(opt_arch, arch_core2 | arch_athlon_plus) || opt_size;
+ c->use_fisttp = FLAGS(opt_arch & arch, arch_feature_sse3);
+ c->use_sse_prefetch = FLAGS(arch, (arch_feature_3DNowE | arch_feature_sse1));
+ c->use_3dnow_prefetch = FLAGS(arch, arch_feature_3DNow);
+ c->use_popcnt = FLAGS(arch, (arch_feature_sse4_2 | arch_feature_sse4a));
+ c->use_i486 = (arch & arch_mask) >= arch_i486;
+ c->optimize_cc = opt_cc;
+ c->use_unsafe_floatconv = opt_unsafe_floatconv;
+ c->emit_machcode = emit_machcode;
+
+ c->function_alignment = arch_costs->function_alignment;
+ c->label_alignment = arch_costs->label_alignment;
+ c->label_alignment_max_skip = arch_costs->label_alignment_max_skip;
+
+ c->label_alignment_factor =
+ FLAGS(opt_arch, arch_i386 | arch_i486) || opt_size ? 0 :
+ opt_arch & arch_all_amd ? 3 :
+ 2;
}
void ia32_init_architecture(void)