#include "ircons.h"
#include "irgmod.h"
#include "irgopt.h"
-
-#include "bitset.h"
+#include "irbitset.h"
+#include "pdeq.h"
#include "debug.h"
#include "../beabi.h" /* the general register allocator interface */
USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]);
}
+/**
+ * Returns the first Proj with the given mode attached to a mode_T node.
+ * The Projs are found by scanning the node's out-edges, so the edge
+ * information must be valid when this is called.
+ *
+ * @param node  the mode_T node whose Projs are searched
+ * @param mode  the mode the wanted Proj must have
+ * @return the first matching Proj or NULL if none exists
+ */
+static ir_node *get_proj_for_mode(ir_node *node, ir_mode *mode) {
+	const ir_edge_t *edge;
+
+	assert(get_irn_mode(node) == mode_T && "Need mode_T node.");
+
+	foreach_out_edge(node, edge) {
+		ir_node *proj = get_edge_src_irn(edge);
+		if (get_irn_mode(proj) == mode)
+			return proj;
+	}
+
+	return NULL;
+}
+
/**************************************************
* _ _ _ __
* | | | (_)/ _|
*/
static ir_type *ia32_abi_get_between_type(void *self)
{
+#define IDENT(s) new_id_from_chars(s, sizeof(s)-1)
static ir_type *omit_fp_between_type = NULL;
static ir_type *between_type = NULL;
entity *ret_addr_ent;
entity *omit_fp_ret_addr_ent;
- ir_type *old_bp_type = new_type_primitive(new_id_from_str("bp"), mode_P);
- ir_type *ret_addr_type = new_type_primitive(new_id_from_str("return_addr"), mode_P);
+ ir_type *old_bp_type = new_type_primitive(IDENT("bp"), mode_P);
+ ir_type *ret_addr_type = new_type_primitive(IDENT("return_addr"), mode_P);
- between_type = new_type_class(new_id_from_str("ia32_between_type"));
- old_bp_ent = new_entity(between_type, new_id_from_str("old_bp"), old_bp_type);
- ret_addr_ent = new_entity(between_type, new_id_from_str("ret_addr"), ret_addr_type);
+ between_type = new_type_struct(IDENT("ia32_between_type"));
+ old_bp_ent = new_entity(between_type, IDENT("old_bp"), old_bp_type);
+ ret_addr_ent = new_entity(between_type, IDENT("ret_addr"), ret_addr_type);
set_entity_offset_bytes(old_bp_ent, 0);
set_entity_offset_bytes(ret_addr_ent, get_type_size_bytes(old_bp_type));
set_type_size_bytes(between_type, get_type_size_bytes(old_bp_type) + get_type_size_bytes(ret_addr_type));
+ set_type_state(between_type, layout_fixed);
- omit_fp_between_type = new_type_class(new_id_from_str("ia32_between_type_omit_fp"));
- omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, new_id_from_str("ret_addr"), ret_addr_type);
+ omit_fp_between_type = new_type_struct(IDENT("ia32_between_type_omit_fp"));
+ omit_fp_ret_addr_ent = new_entity(omit_fp_between_type, IDENT("ret_addr"), ret_addr_type);
set_entity_offset_bytes(omit_fp_ret_addr_ent, 0);
set_type_size_bytes(omit_fp_between_type, get_type_size_bytes(ret_addr_type));
+ set_type_state(omit_fp_between_type, layout_fixed);
}
return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
+#undef IDENT
}
/**
dom = be_compute_dominance_frontiers(cg->irg);
irg_walk_blkwise_graph(cg->irg, NULL, ia32_transform_node, cg);
be_free_dominance_frontiers(dom);
- be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
+
+ if (cg->dump)
+ be_dump(cg->irg, "-transformed", dump_ir_block_graph_sched);
/* 3rd: optimize address mode */
FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.am");
ia32_optimize_addressmode(cg);
- be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
+
+ if (cg->dump)
+ be_dump(cg->irg, "-am", dump_ir_block_graph_sched);
+
DEBUG_ONLY(cg->mod = old_mod;)
}
/**
 * Block-walker: calls ia32_finish_node() on every node in the block's
 * schedule.  The successor is fetched before the call, presumably
 * because finishing a node may remove it from the schedule — confirm
 * against ia32_finish_node().
 */
static void ia32_finish_irg_walker(ir_node *block, void *env) {
	ir_node *irn, *next;
-	for (irn = sched_first(block); !sched_is_end(irn); irn = next) {
+	for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
		next = sched_next(irn);
		ia32_finish_node(irn, env);
	}
}
+/**
+ * Block-walker: puts the visited block into the wait queue passed as
+ * environment.  Used to collect all blocks of an irg up front.
+ *
+ * @param block  the currently visited block
+ * @param env    the wait queue (waitq *) to push onto
+ */
+static void ia32_push_on_queue_walker(ir_node *block, void *env) {
+	waitq *wq = env;
+	waitq_put(wq, block);
+}
+
+
/**
 * Add Copy nodes for not fulfilled should_be_equal constraints.
 *
 * All blocks are first collected into a wait queue and only processed
 * afterwards, because ia32_finish_irg_walker() starts graph walks of
 * its own, which must not nest inside a running irg_block_walk_graph().
 */
static void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
-	irg_block_walk_graph(irg, NULL, ia32_finish_irg_walker, cg);
+	waitq *wq = new_waitq();
+
+	/* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
+	irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
+
+	while (! waitq_empty(wq)) {
+		ir_node *block = waitq_get(wq);
+		ia32_finish_irg_walker(block, cg);
+	}
+	del_waitq(wq);
}
/**
 * Called before scheduling.  Nothing to do for ia32 here.
 */
static void ia32_before_sched(void *self) {
}
+/**
+ * Recursively disconnects and unschedules nodes that are (or become)
+ * unused.  The node's predecessors are rewired to Bad and every
+ * predecessor that thereby loses its last user is processed
+ * recursively; finally the node is removed from the schedule.
+ * Predecessors that are memory Projs are skipped so that Stores are
+ * not removed by accident.
+ *
+ * @param irn              the candidate node to remove
+ * @param already_visited  per-irg bitset marking nodes handled so far
+ */
+static void remove_unused_nodes(ir_node *irn, bitset_t *already_visited) {
+	int i;
+	ir_mode *mode;
+	ir_node *mem_proj;
+
+	/* blocks are never removed */
+	if (is_Block(irn))
+		return;
+
+	mode = get_irn_mode(irn);
+
+	/* check if we already saw this node or the node has more than one user */
+	if (bitset_contains_irn(already_visited, irn) || get_irn_n_edges(irn) > 1)
+		return;
+
+	/* mark irn visited */
+	bitset_add_irn(already_visited, irn);
+
+	/* non-Tuple nodes with one user: ok, return */
+	if (get_irn_n_edges(irn) >= 1 && mode != mode_T)
+		return;
+
+	/* tuple node has one user which is not the mem proj -> ok, keep it */
+	if (mode == mode_T && get_irn_n_edges(irn) == 1) {
+		mem_proj = get_proj_for_mode(irn, mode_M);
+		if (! mem_proj)
+			return;
+	}
+
+	/* cut irn loose from all of its predecessors */
+	for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
+		ir_node *pred = get_irn_n(irn, i);
+
+		/* do not follow memory edges or we will accidentally remove stores */
+		if (is_Proj(pred) && get_irn_mode(pred) == mode_M)
+			continue;
+
+		set_irn_n(irn, i, new_Bad());
+
+		/*
+			The current node is about to be removed: if the predecessor
+			has only this node as user, it needs to be removed as well.
+		*/
+		if (get_irn_n_edges(pred) <= 1)
+			remove_unused_nodes(pred, already_visited);
+	}
+
+	/* the node may not be scheduled at all (e.g. a Proj) */
+	if (sched_is_scheduled(irn))
+		sched_remove(irn);
+}
+
+/**
+ * Graph-walker: starts remove_unused_nodes() at every ia32 Load that
+ * has not been visited yet.
+ *
+ * @param irn  the currently visited node
+ * @param env  the already-visited bitset (bitset_t *)
+ */
+static void remove_unused_loads_walker(ir_node *irn, void *env) {
+	bitset_t *already_visited = env;
+	if (is_ia32_Ld(irn) && ! bitset_contains_irn(already_visited, irn))
+		remove_unused_nodes(irn, env);
+}
+
/**
 * Called before the register allocator.
 * Calculate a block schedule here. We need it for the x87
 * simulator and the emitter.
 * Additionally removes Loads (and the nodes feeding only them) that
 * are kept alive solely by their memory edge, so they do not reach
 * register allocation.
 */
static void ia32_before_ra(void *self) {
-	ia32_code_gen_t *cg = self;
+	ia32_code_gen_t *cg = self;
+	bitset_t *already_visited = bitset_irg_malloc(cg->irg);
	cg->blk_sched = sched_create_block_schedule(cg->irg);
+
+	/*
+		Handle special case:
+		There are sometimes unused loads, only pinned by memory.
+		We need to remove those Loads and all other nodes which won't be used
+		after removing the Load from schedule.
+	*/
+	irg_walk_graph(cg->irg, remove_unused_loads_walker, NULL, already_visited);
+	bitset_free(already_visited);
}
if (sched_point) {
sched_add_after(sched_point, new_op);
- sched_add_after(new_op, proj);
-
sched_remove(irn);
}
ir_graph *irg = cg->irg;
ia32_finish_irg(irg, cg);
- be_dump(irg, "-finished", dump_ir_block_graph_sched);
+ if (cg->dump)
+ be_dump(irg, "-finished", dump_ir_block_graph_sched);
ia32_gen_routine(cg->isa->out, irg, cg);
cur_reg_set = NULL;
cg->gp_to_fp = NULL;
cg->fp_kind = isa->fp_kind;
cg->used_fp = fp_none;
+ cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
FIRM_DBG_REGISTER(cg->mod, "firm.be.ia32.cg");
/* patch register names of x87 registers */
if (USE_x87(isa)) {
- ia32_st_regs[0].name = "st";
- ia32_st_regs[1].name = "st(1)";
- ia32_st_regs[2].name = "st(2)";
- ia32_st_regs[3].name = "st(3)";
- ia32_st_regs[4].name = "st(4)";
- ia32_st_regs[5].name = "st(5)";
- ia32_st_regs[6].name = "st(6)";
- ia32_st_regs[7].name = "st(7)";
+ ia32_st_regs[0].name = "st";
+ ia32_st_regs[1].name = "st(1)";
+ ia32_st_regs[2].name = "st(2)";
+ ia32_st_regs[3].name = "st(3)";
+ ia32_st_regs[4].name = "st(4)";
+ ia32_st_regs[5].name = "st(5)";
+ ia32_st_regs[6].name = "st(6)";
+ ia32_st_regs[7].name = "st(7)";
}
#ifndef NDEBUG
}
/**
 * Decides whether a node must appear in the schedule.
 * Returns 1 for ia32 nodes, -1 otherwise.
 * NOTE(review): the change from a plain boolean to 1/-1 presumably
 * means "-1 = no opinion, let the default rule decide" rather than a
 * hard "no" — confirm against the scheduler's callback contract.
 */
int ia32_to_appear_in_schedule(void *block_env, const ir_node *irn) {
-	return is_ia32_irn(irn);
+	return is_ia32_irn(irn) ? 1 : -1;
}
/**