* @author Christian Wuerdig
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include "lc_opts.h"
#include "lc_opts_enum.h"
#include "../begnuas.h"
#include "../bestate.h"
#include "../beflags.h"
+#include "../betranshlp.h"
#include "bearch_ia32_t.h"
block = get_irg_start_block(cg->irg);
res = func(NULL, cg->irg, block);
- arch_set_irn_register(cg->arch_env, res, reg);
+ arch_set_irn_register(res, reg);
*place = res;
add_irn_dep(get_irg_end(cg->irg), res);
/**
* Returns the admissible noreg register node for input register pos of node irn.
*/
-ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos) {
- const arch_register_req_t *req;
+static ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos)
+{
+ const arch_register_req_t *req = arch_get_register_req(irn, pos);
- req = arch_get_register_req(cg->arch_env, irn, pos);
assert(req != NULL && "Missing register requirements");
if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
return ia32_new_NoReg_gp(cg);
static const arch_register_t *ia32_get_irn_reg(const ir_node *irn)
{
int pos = 0;
- const arch_register_t *reg = NULL;
if (is_Proj(irn)) {
-
if (get_irn_mode(irn) == mode_X) {
return NULL;
}
}
if (is_ia32_irn(irn)) {
- const arch_register_t **slots;
- slots = get_ia32_slots(irn);
+ const arch_register_t **slots = get_ia32_slots(irn);
assert(pos < get_ia32_n_res(irn));
- reg = slots[pos];
+ return slots[pos];
} else {
- reg = ia32_get_firm_reg(irn, cur_reg_set);
+ return ia32_get_firm_reg(irn, cur_reg_set);
}
-
- return reg;
}
static arch_irn_class_t ia32_classify(const ir_node *irn) {
- arch_irn_class_t classification = arch_irn_class_normal;
+ arch_irn_class_t classification = 0;
irn = skip_Proj_const(irn);
classification |= arch_irn_class_branch;
if (! is_ia32_irn(irn))
- return classification & ~arch_irn_class_normal;
-
- if (is_ia32_Ld(irn))
- classification |= arch_irn_class_load;
-
- if (is_ia32_St(irn))
- classification |= arch_irn_class_store;
+ return classification;
if (is_ia32_is_reload(irn))
classification |= arch_irn_class_reload;
static int ia32_get_sp_bias(const ir_node *node)
{
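+	/* a call whose callee pops its own arguments shrinks the stack by that amount, hence the negative bias */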
+ if (is_ia32_Call(node))
+ return -(int)get_ia32_call_attr_const(node)->pop;
+
if (is_ia32_Push(node))
return 4;
*mem = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
/* the push must have SP out register */
- arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
+ arch_set_irn_register(curr_sp, arch_env->sp);
set_ia32_flags(push, arch_irn_flags_ignore);
/* this modifies the stack bias, because we pushed 32bit */
/* move esp to ebp */
curr_bp = be_new_Copy(arch_env->bp->reg_class, irg, bl, curr_sp);
be_set_constr_single_reg(curr_bp, BE_OUT_POS(0), arch_env->bp);
- arch_set_irn_register(arch_env, curr_bp, arch_env->bp);
+ arch_set_irn_register(curr_bp, arch_env->bp);
be_node_set_flags(curr_bp, BE_OUT_POS(0), arch_irn_flags_ignore);
/* beware: the copy must be done before any other sp use */
curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, irg, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
be_set_constr_single_reg(curr_sp, BE_OUT_POS(0), arch_env->sp);
- arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
+ arch_set_irn_register(curr_sp, arch_env->sp);
be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
/* copy ebp to esp */
curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], irg, bl, curr_bp);
- arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
+ arch_set_irn_register(curr_sp, arch_env->sp);
be_node_set_flags(curr_sp, BE_OUT_POS(0), arch_irn_flags_ignore);
/* pop ebp */
*mem = new_r_Proj(irg, bl, pop, mode_M, pn_ia32_Pop_M);
}
- arch_set_irn_register(arch_env, curr_sp, arch_env->sp);
- arch_set_irn_register(arch_env, curr_bp, arch_env->bp);
+ arch_set_irn_register(curr_sp, arch_env->sp);
+ arch_set_irn_register(curr_bp, arch_env->bp);
}
be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
*/
static void *ia32_abi_init(const be_abi_call_t *call, const arch_env_t *aenv, ir_graph *irg)
{
- ia32_abi_env_t *env = xmalloc(sizeof(env[0]));
- be_abi_call_flags_t fl = be_abi_call_get_flags(call);
+ ia32_abi_env_t *env = XMALLOC(ia32_abi_env_t);
+ be_abi_call_flags_t fl = be_abi_call_get_flags(call);
env->flags = fl.bits;
env->irg = irg;
env->aenv = aenv;
*/
static int ia32_is_spillmode_compatible(const ir_mode *mode, const ir_mode *spillmode)
{
- if(mode_is_float(mode)) {
- return mode == spillmode;
- } else {
- return 1;
- }
+ return !mode_is_float(mode) || mode == spillmode;
}
/**
* Check if irn can load its operand at position i from memory (source addressmode).
- * @param self Pointer to irn ops itself
* @param irn The irn to be checked
* @param i The operands position
* @return Non-Zero if operand can be loaded
*/
-static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i) {
- ir_node *op = get_irn_n(irn, i);
- const ir_mode *mode = get_irn_mode(op);
+static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
+{
+ ir_node *op = get_irn_n(irn, i);
+ const ir_mode *mode = get_irn_mode(op);
const ir_mode *spillmode = get_spill_mode(op);
- if (
- (i != n_ia32_binary_left && i != n_ia32_binary_right) || /* a "real" operand position must be requested */
- ! is_ia32_irn(irn) || /* must be an ia32 irn */
- get_ia32_am_arity(irn) != ia32_am_binary || /* must be a binary operation TODO is this necessary? */
- get_ia32_op_type(irn) != ia32_Normal || /* must not already be a addressmode irn */
- ! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source addressmode */
- ! ia32_is_spillmode_compatible(mode, spillmode) ||
- is_ia32_use_frame(irn)) /* must not already use frame */
+ if (!is_ia32_irn(irn) || /* must be an ia32 irn */
+	    get_ia32_op_type(irn) != ia32_Normal           ||  /* must not already be an addressmode irn */
+ !ia32_is_spillmode_compatible(mode, spillmode) ||
+ is_ia32_use_frame(irn)) /* must not already use frame */
return 0;
- if (i == n_ia32_binary_left) {
- const arch_register_req_t *req;
- if(!is_ia32_commutative(irn))
- return 0;
- /* we can't swap left/right for limited registers
- * (As this (currently) breaks constraint handling copies)
- */
- req = get_ia32_in_req(irn, n_ia32_binary_left);
- if (req->type & arch_register_req_type_limited) {
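+	/* the requested operand position must be one this node's address-mode kind can fold */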
+ switch (get_ia32_am_support(irn)) {
+ case ia32_am_none:
return 0;
- }
+
+ case ia32_am_unary:
+ if (i != n_ia32_unary_op)
+ return 0;
+ break;
+
+ case ia32_am_binary:
+ switch (i) {
+ case n_ia32_binary_left: {
+ const arch_register_req_t *req;
+ if (!is_ia32_commutative(irn))
+ return 0;
+
+					/* we can't swap left/right for limited registers
+					 * (as this currently breaks constraint handling copies)
+					 */
+ req = get_ia32_in_req(irn, n_ia32_binary_left);
+ if (req->type & arch_register_req_type_limited)
+ return 0;
+ break;
+ }
+
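+				/* the right operand can be folded directly */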
+ case n_ia32_binary_right:
+ break;
+
+ default:
+ return 0;
+ }
+ break;
+
+ default:
+ panic("Unknown AM type");
}
+ /* HACK: must not already use "real" memory.
+ * This can happen for Call and Div */
+ if (!is_NoMem(get_irn_n(irn, n_ia32_mem)))
+ return 0;
+
return 1;
}
ir_mode *load_mode;
ir_mode *dest_op_mode;
- ia32_code_gen_t *cg = ia32_current_cg;
-
assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
- if (i == n_ia32_binary_left) {
- ia32_swap_left_right(irn);
- }
-
set_ia32_op_type(irn, ia32_AddrModeS);
load_mode = get_irn_mode(get_irn_n(irn, i));
set_ia32_use_frame(irn);
set_ia32_need_stackent(irn);
- set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
- set_irn_n(irn, n_ia32_binary_right, ia32_get_admissible_noreg(cg, irn, n_ia32_binary_right));
- set_irn_n(irn, n_ia32_mem, spill);
- set_ia32_is_reload(irn);
-
- /* immediates are only allowed on the right side */
- if (i == n_ia32_binary_left && is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_left))) {
+ if (i == n_ia32_binary_left &&
+ get_ia32_am_support(irn) == ia32_am_binary &&
+ /* immediates are only allowed on the right side */
+ !is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right))) {
ia32_swap_left_right(irn);
+ i = n_ia32_binary_right;
}
+
+ assert(is_NoMem(get_irn_n(irn, n_ia32_mem)));
+
+ set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
+ set_irn_n(irn, n_ia32_mem, spill);
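+	/* operand i is now read via the address mode, so its register input becomes a NoReg */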
+ set_irn_n(irn, i, ia32_get_admissible_noreg(ia32_current_cg, irn, i));
+ set_ia32_is_reload(irn);
}
static const be_abi_callbacks_t ia32_abi_callbacks = {
(void) self;
}
-static void turn_back_am(ir_node *node)
+ir_node *turn_back_am(ir_node *node)
{
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
set_ia32_is_reload(load);
set_irn_n(node, n_ia32_mem, new_NoMem());
- switch (get_ia32_am_arity(node)) {
+ switch (get_ia32_am_support(node)) {
case ia32_am_unary:
set_irn_n(node, n_ia32_unary_op, load_res);
break;
case ia32_am_binary:
if (is_ia32_Immediate(get_irn_n(node, n_ia32_binary_right))) {
- assert(is_ia32_Cmp(node) || is_ia32_Cmp8Bit(node) ||
- is_ia32_Test(node) || is_ia32_Test8Bit(node));
set_irn_n(node, n_ia32_binary_left, load_res);
} else {
set_irn_n(node, n_ia32_binary_right, load_res);
}
break;
- case ia32_am_ternary:
- set_irn_n(node, n_ia32_binary_right, load_res);
- break;
-
default:
- panic("Unknown arity");
+ panic("Unknown AM type");
}
noreg = ia32_new_NoReg_gp(ia32_current_cg);
set_irn_n(node, n_ia32_base, noreg);
set_ia32_op_type(node, ia32_Normal);
if (sched_is_scheduled(node))
sched_add_before(node, load);
+
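+	/* hand the explicit Load result back to the caller */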
+ return load_res;
}
static ir_node *flags_remat(ir_node *node, ir_node *after)
}
/* copy the register from the old node to the new Load */
- reg = arch_get_irn_register(cg->arch_env, node);
- arch_set_irn_register(cg->arch_env, new_op, reg);
+ reg = arch_get_irn_register(node);
+ arch_set_irn_register(new_op, reg);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
set_ia32_use_frame(push);
set_ia32_op_type(push, ia32_AddrModeS);
set_ia32_ls_mode(push, mode_Is);
+ set_ia32_is_spill(push);
sched_add_before(schedpoint, push);
return push;
set_ia32_use_frame(pop);
set_ia32_op_type(pop, ia32_AddrModeD);
set_ia32_ls_mode(pop, mode_Is);
+ set_ia32_is_reload(pop);
sched_add_before(schedpoint, pop);
return pop;
}
-static ir_node* create_spproj(ia32_code_gen_t *cg, ir_node *node, ir_node *pred, int pos) {
+static ir_node* create_spproj(ir_node *node, ir_node *pred, int pos)
+{
ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *sp;
sp = new_rd_Proj(dbg, irg, block, pred, spmode, pos);
- arch_set_irn_register(cg->arch_env, sp, spreg);
+ arch_set_irn_register(sp, spreg);
return sp;
}
assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
push = create_push(cg, node, node, sp, mem, inent);
- sp = create_spproj(cg, node, push, pn_ia32_Push_stack);
+ sp = create_spproj(node, push, pn_ia32_Push_stack);
if(entsize == 8) {
/* add another push after the first one */
push = create_push(cg, node, node, sp, mem, inent);
add_ia32_am_offs_int(push, 4);
- sp = create_spproj(cg, node, push, pn_ia32_Push_stack);
+ sp = create_spproj(node, push, pn_ia32_Push_stack);
}
set_irn_n(node, i, new_Bad());
assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
pop = create_pop(cg, node, node, sp, outent);
- sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack);
+ sp = create_spproj(node, pop, pn_ia32_Pop_stack);
if(entsize == 8) {
add_ia32_am_offs_int(pop, 4);
/* add another pop after the first one */
pop = create_pop(cg, node, node, sp, outent);
- sp = create_spproj(cg, node, pop, pn_ia32_Pop_stack);
+ sp = create_spproj(node, pop, pn_ia32_Pop_stack);
}
pops[i] = pop;
*/
static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
{
- be_fec_env_t *env = data;
+ be_fec_env_t *env = data;
+ const ir_mode *mode;
+ int align;
if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
- const ir_mode *mode = get_spill_mode_mode(get_irn_mode(node));
- int align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
- } else if(is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL
- && is_ia32_use_frame(node)) {
- if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
- const ir_mode *mode = get_ia32_ls_mode(node);
- const ia32_attr_t *attr = get_ia32_attr_const(node);
- int align;
-
- if (is_ia32_is_reload(node)) {
- mode = get_spill_mode_mode(mode);
+ mode = get_spill_mode_mode(get_irn_mode(node));
+ align = get_mode_size_bytes(mode);
+ } else if (is_ia32_irn(node) &&
+ get_ia32_frame_ent(node) == NULL &&
+ is_ia32_use_frame(node)) {
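+		/* nodes explicitly flagged with need_stackent are treated like a Load below */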
+ if (is_ia32_need_stackent(node))
+ goto need_stackent;
+
+ switch (get_ia32_irn_opcode(node)) {
+need_stackent:
+ case iro_ia32_Load: {
+ const ia32_attr_t *attr = get_ia32_attr_const(node);
+
+ if (attr->data.need_32bit_stackent) {
+ mode = mode_Is;
+ } else if (attr->data.need_64bit_stackent) {
+ mode = mode_Ls;
+ } else {
+ mode = get_ia32_ls_mode(node);
+ if (is_ia32_is_reload(node))
+ mode = get_spill_mode_mode(mode);
+ }
+ align = get_mode_size_bytes(mode);
+ break;
}
- if(attr->data.need_64bit_stackent) {
- mode = mode_Ls;
+ case iro_ia32_vfild:
+ case iro_ia32_vfld:
+ case iro_ia32_xLoad: {
+ mode = get_ia32_ls_mode(node);
+ align = 4;
+ break;
}
- if(attr->data.need_32bit_stackent) {
- mode = mode_Is;
+
+ case iro_ia32_FldCW: {
+ /* although 2 byte would be enough 4 byte performs best */
+ mode = mode_Iu;
+ align = 4;
+ break;
}
- align = get_mode_size_bytes(mode);
- be_node_needs_frame_entity(env, node, mode, align);
- } else if (is_ia32_vfild(node) || is_ia32_xLoad(node)
- || is_ia32_vfld(node)) {
- const ir_mode *mode = get_ia32_ls_mode(node);
- int align = 4;
- be_node_needs_frame_entity(env, node, mode, align);
- } else if(is_ia32_FldCW(node)) {
- /* although 2 byte would be enough 4 byte performs best */
- const ir_mode *mode = mode_Iu;
- int align = 4;
- be_node_needs_frame_entity(env, node, mode, align);
- } else {
+
+ default:
#ifndef NDEBUG
- assert(is_ia32_St(node) ||
- is_ia32_xStoreSimple(node) ||
- is_ia32_vfst(node) ||
- is_ia32_vfist(node) ||
- is_ia32_vfisttp(node) ||
- is_ia32_FnstCW(node));
+				panic("unexpected frame user while collecting frame entity nodes");
+
+ case iro_ia32_FnstCW:
+ case iro_ia32_Store8Bit:
+ case iro_ia32_Store:
+ case iro_ia32_fst:
+ case iro_ia32_fstp:
+ case iro_ia32_vfist:
+ case iro_ia32_vfisttp:
+ case iro_ia32_vfst:
+ case iro_ia32_xStore:
+ case iro_ia32_xStoreSimple:
#endif
+ return;
}
+ } else {
+ return;
}
+ be_node_needs_frame_entity(env, node, mode, align);
}
/**
/* we might have to rewrite x87 virtual registers */
if (cg->do_x87_sim) {
- x87_simulate_graph(cg->arch_env, cg->birg);
+ x87_simulate_graph(cg->birg);
}
/* do peephole optimisations */
get_eip = new_rd_ia32_GetEIP(NULL, cg->irg, block);
cg->get_eip = get_eip;
- add_irn_dep(get_eip, get_irg_frame(cg->irg));
-
+ be_dep_on_frame(get_eip);
return get_eip;
}
*/
static void *ia32_cg_init(be_irg_t *birg) {
ia32_isa_t *isa = (ia32_isa_t *)birg->main_env->arch_env;
- ia32_code_gen_t *cg = xcalloc(1, sizeof(*cg));
+ ia32_code_gen_t *cg = XMALLOCZ(ia32_code_gen_t);
cg->impl = &ia32_code_gen_if;
cg->irg = birg->irg;
cg->reg_set = new_set(ia32_cmp_irn_reg_assoc, 1024);
cg->isa = isa;
- cg->arch_env = birg->main_env->arch_env;
cg->birg = birg;
cg->blk_sched = NULL;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
set_tarval_output_modes();
- isa = xmalloc(sizeof(*isa));
+ isa = XMALLOC(ia32_isa_t);
memcpy(isa, &ia32_isa_template, sizeof(*isa));
if(mode_fpcw == NULL) {
ia32_build_8bit_reg_map_high(isa->regs_8bit_high);
#ifndef NDEBUG
- isa->name_obst = xmalloc(sizeof(*isa->name_obst));
+ isa->name_obst = XMALLOC(struct obstack);
obstack_init(isa->name_obst);
#endif /* NDEBUG */
call_flags.bits.store_args_sequential = 0;
/* call_flags.bits.try_omit_fp not changed: can handle both settings */
call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
- call_flags.bits.call_has_imm = 1; /* No call immediates, we handle this by ourselves */
+ call_flags.bits.call_has_imm = 0; /* No call immediates, we handle this by ourselves */
/* set parameter passing style */
be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
if (is_ia32_irn(irn)) {
ret = get_ia32_exec_units(irn);
- }
- else if (is_be_node(irn)) {
- if (be_is_Call(irn) || be_is_Return(irn)) {
+ } else if (is_be_node(irn)) {
+ if (be_is_Return(irn)) {
ret = _units_callret;
- }
- else if (be_is_Barrier(irn)) {
+ } else if (be_is_Barrier(irn)) {
ret = _units_dummy;
- }
- else {
- ret = _units_other;
+ } else {
+ ret = _units_other;
}
}
else {