#include "irgwalk.h"
#include "irprintf_t.h"
#include "irgopt.h"
+#include "irbitset.h"
+#include "height.h"
+#include "pdeq.h"
+#include "irtools.h"
#include "be.h"
#include "beabi.h"
#include "belive_t.h"
#include "besched_t.h"
-#define MAX(x, y) ((x) > (y) ? (x) : (y))
-#define MIN(x, y) ((x) < (y) ? (x) : (y))
-
typedef struct _be_abi_call_arg_t {
unsigned is_res : 1; /**< 1: the call argument is a return value. 0: it's a call parameter. */
unsigned in_reg : 1; /**< 1: this argument is transmitted in registers. */
/* Forward, since be need it in be_abi_introduce(). */
static const arch_irn_ops_if_t abi_irn_ops;
static const arch_irn_handler_t abi_irn_handler;
+static heights_t *ir_heights;
/* Flag: if set, try to omit the frame pointer if called by the backend */
int be_omit_fp = 1;
*
* @return the new ABI call object
*/
-static be_abi_call_t *be_abi_call_new()
+static be_abi_call_t *be_abi_call_new(void)
{
be_abi_call_t *call = xmalloc(sizeof(call[0]));
call->flags.val = 0;
*/
be_node_set_reg_class(low_call, be_pos_Call_ptr, sp->reg_class);
+ DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
+
/* Set the register classes and constraints of the Call parameters. */
for(i = 0; i < n_low_args; ++i) {
int index = low_args[i];
const ir_edge_t *edge;
ir_node *new_alloc;
+ ir_node *addr;
+ ir_node *copy;
foreach_out_edge(alloc, edge) {
ir_node *irn = get_edge_src_irn(edge);
/* Beware: currently Alloc nodes without a result might happen,
only escape analysis kills them and this phase runs only for object
oriented source. We kill the Alloc here. */
- if (alloc_res == NULL) {
+ if (alloc_res == NULL && alloc_mem) {
exchange(alloc_mem, get_Alloc_mem(alloc));
return curr_sp;
}
env->call->flags.bits.try_omit_fp = 0;
new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
- exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
+ exchange(alloc, new_alloc);
if(alloc_mem != NULL)
- exchange(alloc_mem, new_r_NoMem(irg));
+ set_Proj_proj(alloc_mem, pn_be_AddSP_M);
+
+ /* fix projnum of alloca res */
+ set_Proj_proj(alloc_res, pn_be_AddSP_res);
+
+ addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
+
+	/* copy the address away, since it could be used after further stack pointer modifications. */
+	/* Let it point to curr_sp just for the moment; I'll reroute it in a second. */
+ copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+
+ /* Let all users of the Alloc() result now point to the copy. */
+ edges_reroute(alloc_res, copy, irg);
+
+ /* Rewire the copy appropriately. */
+ set_irn_n(copy, be_pos_Copy_op, addr);
- curr_sp = new_alloc;
+ curr_sp = alloc_res;
}
return curr_sp;
}
+/* the following function has been replaced by use of the heights module */
+#if 0
/**
* Walker for dependent_on().
* This function searches a node tgt recursively from a given node
* but is restricted to the given block.
* @return 1 if tgt was reachable from curr, 0 if not.
*/
-static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
+static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
{
int n, i;
- if(get_irn_visited(curr) >= visited_nr)
+ if (get_nodes_block(curr) != bl)
return 0;
- set_irn_visited(curr, visited_nr);
- if(get_nodes_block(curr) != bl)
- return 0;
-
- if(curr == tgt)
+ if (curr == tgt)
return 1;
/* Phi functions stop the recursion inside a basic block */
- if(!is_Phi(curr)) {
+ if (! is_Phi(curr)) {
for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
- if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
+ if (check_dependence(get_irn_n(curr, i), tgt, bl))
return 1;
}
}
return 0;
}
+#endif /* if 0 */
/**
* Check if a node is somehow data dependent on another one.
static int dependent_on(ir_node *n1, ir_node *n2)
{
ir_node *bl = get_nodes_block(n1);
- ir_graph *irg = get_irn_irg(bl);
- long vis_nr = get_irg_visited(irg) + 1;
assert(bl == get_nodes_block(n2));
- set_irg_visited(irg, vis_nr);
- return check_dependence(n1, n2, bl, vis_nr);
+
+ return heights_reachable_in_block(ir_heights, n1, n2);
+ //return check_dependence(n1, n2, bl);
}
static int cmp_call_dependecy(const void *c1, const void *c2)
1 if second is "smaller" that first
-1 if first is "smaller" that second
*/
- return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
+ if (dependent_on(n1, n2))
+ return -1;
+
+ if (dependent_on(n2, n1))
+ return 1;
+
+ return 0;
}
/**
*/
static void link_calls_in_block_walker(ir_node *irn, void *data)
{
- if(is_Call(irn)) {
+ if(is_Call(irn) || (get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)) {
be_abi_irg_t *env = data;
ir_node *bl = get_nodes_block(irn);
void *save = get_irn_link(bl);
- env->call->flags.bits.irg_is_leaf = 0;
+ if (is_Call(irn))
+ env->call->flags.bits.irg_is_leaf = 0;
set_irn_link(irn, save);
set_irn_link(bl, irn);
for(i = n - 1; i >= 0; --i) {
ir_node *irn = nodes[i];
+ DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
switch(get_irn_opcode(irn)) {
case iro_Call:
curr_sp = adjust_call(env, irn, curr_sp);
env->call->flags.bits.irg_is_leaf = 1;
irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
+
+ ir_heights = heights_new(env->birg->irg);
irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
+ heights_free(ir_heights);
}
static void collect_return_walker(ir_node *irn, void *data)
/* clear SP entry, since it has already been grown. */
pmap_insert(reg_map, (void *) isa->sp, NULL);
for(i = 0; i < n_res; ++i) {
- ir_node *res = get_Return_res(irn, i);
be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
in[n] = be_abi_reg_map_get(reg_map, arg->reg);
restore_optimization_state(&state);
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
- env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
-
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
/* Lower all call nodes in the IRG. */
process_calls(env);
+ /*
+ Beware: init backend abi call object after processing calls,
+ otherwise some information might be not yet available.
+ */
+ env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+
/* Process the IRG */
modify_irg(env);
arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
env->call->cb->done(env->cb);
- be_liveness(irg);
return env;
}
static void collect_stack_nodes_walker(ir_node *irn, void *data)
{
struct fix_stack_walker_info *info = data;
+ ir_mode *mode;
+
+ if (is_Block(irn))
+ return;
- if(arch_irn_is(info->aenv, irn, modify_sp))
+ mode = get_irn_mode(irn);
+
+ if (arch_irn_is(info->aenv, irn, modify_sp) && mode != mode_T && mode != mode_M)
pset_insert_ptr(info->nodes, irn);
}
-void be_abi_fix_stack_nodes(be_abi_irg_t *env)
+void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
{
dom_front_info_t *df;
pset *stack_nodes = pset_new_ptr(16);
df = be_compute_dominance_frontiers(env->birg->irg);
irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
pset_insert_ptr(stack_nodes, env->init_sp);
- be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
+ be_ssa_constr_set_phis(df, lv, stack_nodes, env->stack_phis);
del_pset(stack_nodes);
- /* Liveness could have changed due to Phi nodes. */
- be_liveness(env->birg->irg);
-
/* free these dominance frontiers */
be_free_dominance_frontiers(df);
}
return NULL;
}
+static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent)
+{
+}
+
static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
{
}
abi_classify,
abi_get_flags,
abi_get_frame_entity,
- abi_set_stack_bias
+ abi_set_frame_entity,
+ abi_set_stack_bias,
+ NULL, /* get_inverse */
+ NULL, /* get_op_estimated_cost */
+ NULL, /* possible_memory_operand */
+ NULL, /* perform_memory_operand */
};
static const arch_irn_handler_t abi_irn_handler = {