}
}
} else {
- assert(arch_irn_class_is(arch_env, node, spill));
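+ /* any node that requested a frame entity may show up here now, not just spills */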
arch_set_frame_entity(arch_env, node, slot->entity);
}
}
#include "../beblocksched.h"
#include "../bemachine.h"
#include "../beilpsched.h"
+#include "../bespillslots.h"
#include "bearch_ia32_t.h"
}
}
+/**
+ * Collects nodes that need frame entities assigned.
+ */
+static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
+{
+ be_fec_env_t *env = data;
+
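+ /* a be_Reload without an entity needs a slot sized for its value mode */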
+ if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
+ const ir_mode *mode = get_irn_mode(node);
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else if (is_ia32_irn(node) && get_ia32_frame_ent(node) == NULL) {
+ if (is_ia32_Load(node)) {
+ const ir_mode *mode = get_ia32_ls_mode(node);
+ int align = get_mode_size_bytes(mode);
+ be_node_needs_frame_entity(env, node, mode, align);
+ } else if (is_ia32_vfild(node)) {
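+ /* x87 loads are requested with only 4 byte alignment */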
+ const ir_mode *mode = get_ia32_ls_mode(node);
+ int align = 4;
+ be_node_needs_frame_entity(env, node, mode, align);
+ }
+ }
+}
+
/**
* We transform Spill and Reload here. This needs to be done before
* stack biasing, otherwise we would miss the corrected offset for these nodes.
*/
static void ia32_after_ra(void *self) {
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->birg);
+
+ /* create and coalesce frame entities */
+ irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
+ be_assign_entities(fec_env);
+ be_free_frame_entity_coalescer(fec_env);
irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
cg->isa = isa;
cg->birg = birg;
cg->blk_sched = NULL;
- cg->fp_to_gp = NULL;
- cg->gp_to_fp = NULL;
cg->fp_kind = isa->fp_kind;
cg->used_fp = fp_none;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
ir_node **blk_sched; /**< an array containing the scheduled blocks */
ia32_optimize_t opt; /**< contains optimization information */
- ir_entity *fp_to_gp; /**< allocated entity for fp to gp conversion */
- ir_entity *gp_to_fp; /**< allocated entity for gp to fp conversion */
nodeset *kill_conv; /**< Remember all convs to be killed */
int arch; /**< instruction architecture */
int opt_arch; /**< optimize for architecture */
static struct obstack *obst = NULL;
ir_mode *mode = get_ia32_ls_mode(n);
+ /* sanity check: every frame user must have its entity assigned by now */
+ assert(!is_ia32_use_frame(n) || get_ia32_frame_ent(n) != NULL);
+
if (! is_ia32_Lea(n))
assert(mode && "AM node must have ls_mode attribute set.");
void set_ia32_frame_ent(ir_node *node, ir_entity *ent) {
ia32_attr_t *attr = get_ia32_attr(node);
attr->frame_ent = ent;
- set_ia32_use_frame(node);
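+ /* keep the use_frame flag in sync with the entity */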
+ if (ent != NULL)
+ set_ia32_use_frame(node);
+ else
+ clear_ia32_use_frame(node);
}
/* copy the frame entity (could be set in case of an Add which was a FrameAddr) */
- if (lea_ent)
+ if (lea_ent != NULL) {
set_ia32_frame_ent(res, lea_ent);
- else
- set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
-
- if (get_ia32_frame_ent(res))
set_ia32_use_frame(res);
+ } else {
+ set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
+ if (is_ia32_use_frame(irn))
+ set_ia32_use_frame(res);
+ }
/* set scale */
set_ia32_am_scale(res, scale);
ir_entity *lea_ent = get_ia32_frame_ent(lea);
/* If the irn and the LEA both have a different frame entity set: do not merge */
- if (irn_ent && lea_ent && (irn_ent != lea_ent))
+ if (irn_ent != NULL && lea_ent != NULL && (irn_ent != lea_ent))
return;
- else if (! irn_ent && lea_ent) {
+ else if (irn_ent == NULL && lea_ent != NULL) {
set_ia32_frame_ent(irn, lea_ent);
set_ia32_use_frame(irn);
}
set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
set_ia32_op_type(irn, ia32_AddrModeD);
set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
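+ /* the load may use the frame before an entity is assigned, so copy the flag explicitly */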
+ if (is_ia32_use_frame(load))
+ set_ia32_use_frame(irn);
set_ia32_ls_mode(irn, get_ia32_ls_mode(load));
set_ia32_am_sc(irn, get_ia32_am_sc(load));
*/
static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_mode *tgt_mode) {
ia32_code_gen_t *cg = env->cg;
- ir_entity *ent = cg->fp_to_gp;
ir_graph *irg = env->irg;
ir_node *irn = env->irn;
ir_node *block = env->block;
ir_node *op = get_Conv_op(env->irn);
ir_node *fist, *mem, *load;
-#if 0
- if (! ent) {
- int size = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_vfp].mode);
- ent = cg->fp_to_gp =
- frame_alloc_area(get_irg_frame_type(env->irg), size, 16, 0);
- if(ent == NULL) {
- panic("Couldn't allocate space on stack for fp conversion");
- }
- }
-#endif
-
/* do a fist */
fist = new_rd_ia32_vfist(env->dbg, irg, block, get_irg_frame(irg), noreg, op, get_irg_no_mem(irg));
- set_ia32_frame_ent(fist, ent);
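+ /* the frame entity is assigned later by the frame entity coalescer */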
set_ia32_use_frame(fist);
set_ia32_am_support(fist, ia32_am_Dest);
set_ia32_op_type(fist, ia32_AddrModeD);
/* do a Load */
load = new_rd_ia32_Load(env->dbg, irg, block, get_irg_frame(irg), noreg, mem);
- set_ia32_frame_ent(load, ent);
set_ia32_use_frame(load);
set_ia32_am_support(load, ia32_am_Source);
set_ia32_op_type(load, ia32_AddrModeS);
*/
static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_mode *src_mode) {
ia32_code_gen_t *cg = env->cg;
- ir_entity *ent = cg->gp_to_fp;
ir_node *irn = env->irn;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *fild, *store;
int src_bits;
- if (ent == NULL) {
- int size = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
- ent = cg->gp_to_fp =
- frame_alloc_area(get_irg_frame_type(env->irg), size, size, 0);
- if(ent == NULL) {
- panic("Couldn't allocate space on stack for fp conversion");
- }
- }
-
/* first convert to 32 bit */
src_bits = get_mode_size_bits(src_mode);
if (src_bits == 8) {
/* do a store */
store = new_rd_ia32_Store(env->dbg, irg, block, get_irg_frame(irg), noreg, op, nomem);
- set_ia32_frame_ent(store, ent);
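+ /* entity assignment is deferred to the frame entity coalescer */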
set_ia32_use_frame(store);
set_ia32_am_support(store, ia32_am_Dest);
set_ia32_op_type(store, ia32_AddrModeD);
/* do a fild */
fild = new_rd_ia32_vfild(env->dbg, irg, block, get_irg_frame(irg), noreg, store);
- set_ia32_frame_ent(fild, ent);
set_ia32_use_frame(fild);
set_ia32_am_support(fild, ia32_am_Source);
set_ia32_op_type(fild, ia32_AddrModeS);