#ifndef _arm_MAP_REGS_H_
+
#define _arm_MAP_REGS_H_
+
+
#include "irnode.h"
+
#include "set.h"
+
+
#include "../bearch.h"
+
#include "arm_nodes_attr.h"
+
+
const arch_register_t *arm_get_RegParam_reg(int n);
+
+
int arm_cmp_irn_reg_assoc(const void *a, const void *b, size_t len);
+
void arm_set_firm_reg(ir_node *irn, const arch_register_t *reg, set *reg_set);
+
const arch_register_t *arm_get_firm_reg(const ir_node *irn, set *reg_set);
+
+
long arm_translate_proj_pos(const ir_node *proj);
+
+
#endif /* _arm_MAP_REGS_H_ */
#include "firm_types.h"
#include "obst.h"
#include "debug.h"
+#include "bitset.h"
#include "be.h"
#include "bearch.h"
struct _arch_code_generator_t *cg;
};
+/**
+* Put the registers to be ignored in this IRG into a bitset.
+* @param birg The backend IRG data structure.
+* @param cls The register class.
+* @param bs The bitset (may be NULL).
+* @return The number of registers to be ignored.
+*/
+int be_put_ignore_regs(const struct _be_irg_t *birg, const struct _arch_register_class_t *cls, bitset_t *bs);
+
+
+
#endif /* _BE_T_H */
* @param curr_sp The stack pointer node to use.
* @return The stack pointer after the call.
*/
-static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
+static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
{
ir_graph *irg = env->birg->irg;
const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
* moving the stack pointer along the stack's direction.
*/
if(stack_dir < 0 && !do_seq && !no_alloc) {
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size);
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
+ if(alloca_copy) {
+ add_irn_dep(curr_sp, alloca_copy);
+ alloca_copy = NULL;
+ }
}
assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
*/
if (do_seq) {
curr_ofs = 0;
- addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
- param_size + arg->space_before);
+ addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
+ if(alloca_copy) {
+ add_irn_dep(curr_sp, alloca_copy);
+ alloca_copy = NULL;
+ }
+ add_irn_dep(curr_sp, curr_mem);
}
else {
curr_ofs += arg->space_before;
mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
/* Clean up the stack frame if we allocated it */
- if(!no_alloc)
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, -stack_size);
+ if(!no_alloc) {
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
+ add_irn_dep(curr_sp, mem_proj);
+ if(alloca_copy) {
+ add_irn_dep(curr_sp, alloca_copy);
+ alloca_copy = NULL;
+ }
+ }
}
be_abi_call_free(call);
* Adjust an alloca.
* The alloca is transformed into a back end alloca node and connected to the stack nodes.
*/
-static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
{
if (get_Alloc_where(alloc) == stack_alloc) {
ir_node *bl = get_nodes_block(alloc);
addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
- /* copy the address away, since it could be used after further stack pointer modifictions. */
+ /* copy the address away, since it could be used after further stack pointer modifications. */
/* Let it point curr_sp just for the moment, I'll reroute it in a second. */
- copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+ *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
/* Let all users of the Alloc() result now point to the copy. */
edges_reroute(alloc_res, copy, irg);
if(n > 0) {
ir_node *keep;
ir_node **nodes;
+ ir_node *copy = NULL;
int i;
nodes = obstack_finish(&env->obst);
DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
switch(get_irn_opcode(irn)) {
case iro_Call:
- curr_sp = adjust_call(env, irn, curr_sp);
+ curr_sp = adjust_call(env, irn, curr_sp, copy);
break;
case iro_Alloc:
- curr_sp = adjust_alloc(env, irn, curr_sp);
+ curr_sp = adjust_alloc(env, irn, curr_sp, &copy);
break;
default:
break;
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
+ env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
barrier = create_barrier(env, bl, &mem, env->regs, 0);
void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv);
void be_abi_free(be_abi_irg_t *abi);
+/**
+ * Put the registers which are forbidden specifically for this IRG in a bitset.
+ */
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs);
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg);
*/
extern const arch_irn_handler_t *arch_env_pop_irn_handler(arch_env_t *env);
-
#endif /* _FIRM_BEARCH_H */
bitset_clear_all(bs);
arch_put_non_ignore_regs(aenv, env->cls, bs);
+ bitset_andnot(bs, env->ignore_colors);
bitset_foreach(bs, col)
bipartite_add(bp, n_alloc, col);
/* verify schedule and register pressure */
if (options.vrfy_option == BE_CH_VRFY_WARN) {
be_verify_schedule(irg);
- be_verify_register_pressure(chordal_env.birg->main_env->arch_env, chordal_env.cls, irg);
+ be_verify_register_pressure(chordal_env.birg, chordal_env.cls, irg);
}
else if (options.vrfy_option == BE_CH_VRFY_ASSERT) {
assert(be_verify_schedule(irg) && "Schedule verification failed");
- assert(be_verify_register_pressure(chordal_env.birg->main_env->arch_env, chordal_env.cls, irg)
+ assert(be_verify_register_pressure(chordal_env.birg, chordal_env.cls, irg)
&& "Register pressure verification failed");
}
BE_TIMER_POP(ra_timer.t_verify);
if(!nodeset_find(env->already_scheduled, irn)) {
int i, n;
int res = 0;
- for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
- ir_node *operand = get_irn_n(irn, i);
+ for(i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
+ ir_node *operand = get_irn_in_or_dep(irn, i);
if(get_irn_visited(operand) < visited_nr) {
int tmp;
if (env->block != get_nodes_block(irn))
return 0;
- for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
- ir_node *op = get_irn_n(irn, i);
+ for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
+ ir_node *op = get_irn_in_or_dep(irn, i);
/* if irn is an End we have keep-alives and op might be a block, skip that */
if (is_Block(op)) {
return 0;
}
- nodeset_insert(env->cands, irn);
+ nodeset_insert(env->cands, irn);
/* calculate the etime of this node */
etime = env->curr_time;
const ir_edge_t *edge;
foreach_out_edge(irn, edge) {
- ir_node *user = edge->src;
+ ir_node *user = get_edge_src_irn(edge);
+ if(!is_Phi(user))
+ make_ready(env, irn, user);
+ }
+
+ foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
+ ir_node *user = get_edge_src_irn(edge);
if(!is_Phi(user))
make_ready(env, irn, user);
}
if (is_Proj(irn))
return;
- for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
- ir_node *in = get_irn_n(irn, i);
+ for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
+ ir_node *in = get_irn_in_or_dep(irn, i);
/* if in is a proj: update predecessor */
while (is_Proj(in))
num_out = 1;
/* num in regs: number of ins with mode datab and not ignore */
- for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
- ir_node *in = get_irn_n(irn, i);
+ for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; i--) {
+ ir_node *in = get_irn_in_or_dep(irn, i);
if (mode_is_datab(get_irn_mode(in)) && ! arch_irn_is(be->sched_env->arch_env, in, ignore))
num_in++;
}
}
/* Phi nodes always leave the block */
- for (i = get_irn_arity(root) - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(root, i);
+ for (i = get_irn_ins_or_deps(root) - 1; i >= 0; --i) {
+ ir_node *pred = get_irn_in_or_dep(root, i);
DBG((xxxdbg, LEVEL_3, " node %+F\n", pred));
/* Blocks may happen as predecessors of End nodes */
be.selector = selector;
be.sched_env = env;
FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
- FIRM_DBG_REGISTER(xxxdbg, "firm.be.sched");
+ FIRM_DBG_REGISTER(xxxdbg, "firm.be.schedxxx");
// firm_dbg_set_mask(be.dbg, SET_LEVEL_3);
d = ld > d ? ld : d;
}
}
+
+ foreach_out_edge_kind(curr, edge, EDGE_KIND_DEP) {
+ ir_node *n = get_edge_src_irn(edge);
+
+ if (get_nodes_block(n) == block) {
+ sched_timestep_t ld;
+
+ ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);
+ d = ld > d ? ld : d;
+ }
+ }
}
}
set_irn_delay(&be, curr, d);
int ready = 1;
/* Check, if the operands of a node are not local to this block */
- for (j = 0, m = get_irn_arity(irn); j < m; ++j) {
- ir_node *operand = get_irn_n(irn, j);
+ for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
+ ir_node *operand = get_irn_in_or_dep(irn, j);
if (get_nodes_block(operand) == block) {
ready = 0;
compute_doms(irg);
/* Ensure, that the ir_edges are computed. */
- edges_activate(irg);
+ edges_assure(irg);
/* check, if the dominance property is fulfilled. */
be_check_dominance(irg);
birg.irg = irg;
birg.main_env = &env;
+ edges_deactivate_kind(irg, EDGE_KIND_DEP);
+ edges_activate_kind(irg, EDGE_KIND_DEP);
+
DBG((env.dbg, LEVEL_2, "====> IRG: %F\n", irg));
dump(DUMP_INITIAL, irg, "-begin", dump_ir_block_graph);
/* never build code for pseudo irgs */
set_visit_pseudo_irgs(0);
- be_node_init();
+ be_node_init();
be_main_loop(file_handle);
#ifdef WITH_LIBCORE
*line = 0;
return NULL;
}
+
+/**
+ * Put the registers to be ignored in this IRG into a bitset.
+ * @param birg The backend IRG data structure.
+ * @param cls  The register class.
+ * @param bs   The bitset to fill (may be NULL, then a scratch bitset is used).
+ * @return The number of registers to be ignored.
+ */
+int be_put_ignore_regs(const be_irg_t *birg, const arch_register_class_t *cls, bitset_t *bs)
+{
+	/* No bitset supplied by the caller: count into a temporary one. */
+	if(bs == NULL)
+		bs = bitset_alloca(cls->n_regs);
+	else
+		bitset_clear_all(bs);
+
+	assert(bitset_size(bs) == cls->n_regs);
+	/* Collect the non-ignore registers, then invert to obtain the ignored ones. */
+	arch_put_non_ignore_regs(birg->main_env->arch_env, cls, bs);
+	bitset_flip_all(bs);
+	/* Additionally mark registers forbidden specifically for this IRG by the ABI. */
+	be_abi_put_ignore_regs(birg->abi, cls, bs);
+	return bitset_popcnt(bs);
+
+}
return a->num_ret_vals;
}
-ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, int offset)
+ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset)
{
be_stack_attr_t *a;
ir_node *irn;
- ir_node *in[2];
+ ir_node *in[1];
in[0] = old_sp;
- in[1] = mem;
- irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, 2, in);
+ irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in);
a = init_node_attr(irn, 1);
a->offset = offset;
* @return A new stack pointer increment/decrement node.
* @note This node sets a register constraint to the @p sp register on its output.
*/
-ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, int offset);
+ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset);
/** Returns the previous node that computes the stack pointer. */
ir_node *be_get_IncSP_pred(ir_node *incsp);
/** Sets the previous node that computes the stack pointer. */
void be_set_IncSP_pred(ir_node *incsp, ir_node *pred);
-/** Returns the memory input of the IncSP. */
-ir_node *be_get_IncSP_mem(ir_node *irn);
-
/**
* Sets a new offset to a IncSP node.
* A positive offset means expanding the stack, a negative offset shrinking
const arch_env_t *aenv;
ir_graph *irg;
ir_node *bl;
- nodeset *inserted;
int visited;
struct list_head lineage_head;
struct obstack obst;
}
#endif
-#define valid_node(env, dep) (to_appear(env, dep) && !nodeset_find(env->inserted, dep) && !be_is_Keep(dep))
+#define valid_node(env, dep) (to_appear(env, dep) && !be_is_Keep(dep))
static void grow_all_descendands(mris_env_t *env, ir_node *irn, unsigned long visited)
{
set_irn_visited(desc, visited);
}
}
+
+ foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
+ ir_node *desc = get_edge_src_irn(edge);
+ if(valid_node(env, desc) && get_irn_visited(desc) < visited) {
+ obstack_ptr_grow(&env->obst, desc);
+ set_irn_visited(desc, visited);
+ }
+ }
}
static ir_node **all_descendants(mris_env_t *env, ir_node *irn)
*/
if(n_desc > 1 && !be_is_Keep(lowest_desc)) {
const arch_register_class_t *cls;
- ir_node *copy_keep, *op;
+ ir_node *op;
int i, n;
- for(i = 0, n = get_irn_arity(lowest_desc); i < n; ++i) {
+ for(i = 0, n = get_irn_ins_or_deps(lowest_desc); i < n; ++i) {
ir_node *cmp;
- op = get_irn_n(lowest_desc, i);
+ op = get_irn_in_or_dep(lowest_desc, i);
cmp = highest_is_tuple ? skip_Projs(op) : op;
if(cmp == highest_node)
cls = arch_get_irn_reg_class(env->aenv, op, BE_OUT_POS(0));
replace_tuple_by_repr_proj(env, &in[1]);
- copy_keep = be_new_CopyKeep(cls, env->irg, env->bl, op, n_desc, &in[1], get_irn_mode(op));
- set_irn_n(lowest_desc, i, copy_keep);
- nodeset_insert(env->inserted, copy_keep);
+ add_irn_dep(lowest_desc, in[1]);
}
obstack_free(&env->obst, in);
/* insert a CopyKeep to make lineage v dependent on u. */
{
- const arch_register_class_t *cls;
- ir_node *op = NULL;
-
- if(get_irn_arity(start) == 0)
+ if(get_irn_ins_or_deps(start) == 0)
return 0;
- op = get_irn_n(start, 0);
-
- cls = arch_get_irn_reg_class(env->aenv, op, BE_OUT_POS(0));
if(get_irn_mode(last) == mode_T) {
const ir_edge_t *edge;
foreach_out_edge(last, edge) {
break;
}
}
- copy = be_new_CopyKeep_single(cls, env->irg, env->bl, op, last, get_irn_mode(op));
- set_irn_n(start, 0, copy);
- copy_mi = get_mris_irn(env, copy);
- nodeset_insert(env->inserted, copy);
+
+ add_irn_dep(start, last);
}
/* irn now points to the last node in lineage u; mi has the info for the node _before_ the terminator of the lineage. */
env->aenv = birg->main_env->arch_env;
env->irg = birg->irg;
env->visited = 0;
- env->inserted = new_nodeset(128);
env->heights = heights_new(birg->irg);
INIT_LIST_HEAD(&env->lineage_head);
FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.mris");
return env;
}
-static void cleanup_inserted(mris_env_t *env)
-{
- ir_node *irn;
-
- foreach_nodeset(env->inserted, irn) {
- int i, n;
- ir_node *tgt;
-
- assert(be_is_CopyKeep(irn));
- tgt = get_irn_n(irn, be_pos_CopyKeep_op);
-
- /* reroute the edges, remove from schedule and make it invisible. */
- edges_reroute(irn, tgt, env->irg);
- if (sched_is_scheduled(irn))
- sched_remove(irn);
- for(i = -1, n = get_irn_arity(irn); i < n; ++i)
- set_irn_n(irn, i, new_r_Bad(env->irg));
- }
-}
-
void be_sched_mris_free(mris_env_t *env)
{
- cleanup_inserted(env);
phase_free(&env->ph);
- del_nodeset(env->inserted);
heights_free(env->heights);
free(env);
}
}
void be_spill_phi(spill_env_t *env, ir_node *node) {
+ spill_info_t* spill;
int i, arity;
assert(is_Phi(node));
pset_insert_ptr(env->mem_phis, node);
// create spillinfos for the phi arguments
- spill_info_t* spill = get_spillinfo(env, node);
+ spill = get_spillinfo(env, node);
for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
ir_node *arg = get_irn_n(node, i);
get_spillinfo(env, arg);
env.cenv = chordal_env;
env.arch = chordal_env->birg->main_env->arch_env;
env.cls = chordal_env->cls;
- env.n_regs = arch_count_non_ignore_regs(env.arch, env.cls);
+ env.n_regs = env.cls->n_regs - be_put_ignore_regs(chordal_env->birg, chordal_env->cls, NULL);
env.ws = new_workset(&env, &env.ob);
env.uses = be_begin_uses(chordal_env->irg, chordal_env->lv, chordal_env->birg->main_env->arch_env, env.cls);
if(spill_env == NULL) {
return outer_spills_needed;
}
-static int count_available_registers(be_abi_irg_t *abi, const arch_register_class_t *cls)
-{
- bitset_t* bs = bitset_alloca(cls->n_regs);
- be_abi_put_ignore_regs(abi, cls, bs);
- return bitset_popcnt(bs);
-}
-
void be_spill_morgan(be_chordal_env_t *chordal_env) {
morgan_env_t env;
obstack_init(&env.obst);
- env.registers_available = count_available_registers(chordal_env->birg->abi, chordal_env->cls);
+ env.registers_available = env.cls->n_regs - be_put_ignore_regs(chordal_env->birg, env.cls, NULL);
env.loop_attr_set = new_set(loop_attr_cmp, 5);
env.block_attr_set = new_set(block_attr_cmp, 20);
/**
* Start a walk over the irg and check the register pressure.
*/
-int be_verify_register_pressure(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_graph *irg) {
+int be_verify_register_pressure(const be_irg_t *birg, const arch_register_class_t *cls, ir_graph *irg) {
be_verify_register_pressure_env_t env;
env.lv = be_liveness(irg);
env.irg = irg;
- env.arch_env = arch_env;
+ env.arch_env = birg->main_env->arch_env;
env.cls = cls;
- env.registers_available = arch_count_non_ignore_regs(arch_env, cls);
+ env.registers_available = env.cls->n_regs - be_put_ignore_regs(birg, env.cls, NULL);
env.problem_found = 0;
irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
* Verifies, that the register pressure for a given register class doesn't exceed the limit
* of available registers.
*
- * @param arch_env An architecture environment
- * @param cls The register class to check
- * @param irg The irg to check
- * @return 1 if the pressure is valid, 0 otherwise
+ * @param birg The backend IRG.
+ * @param cls The register class to check.
+ * @param irg The irg to check.
+ * @return 1 if the pressure is valid, 0 otherwise.
*/
-int be_verify_register_pressure(const arch_env_t *arch_env, const arch_register_class_t* cls, ir_graph *irg);
+int be_verify_register_pressure(const be_irg_t *birg, const arch_register_class_t* cls, ir_graph *irg);
/**
* Does some sanity checks on the schedule.
if(is_Proj(irn)) {
ir_node *pred = get_Proj_pred(irn);
- if(is_ia32_Push(pred) && get_Proj_proj(irn) == 0) {
+ if(is_ia32_Push(pred) && get_Proj_proj(irn) == pn_ia32_Push_stack) {
return arch_irn_flags_modify_sp;
}
- if(is_ia32_Pop(pred) && get_Proj_proj(irn) == 1) {
+ if(is_ia32_Pop(pred) && get_Proj_proj(irn) == pn_ia32_Pop_stack) {
+ return arch_irn_flags_modify_sp;
+ }
+ if(is_ia32_AddSP(pred) && get_Proj_proj(irn) == pn_ia32_AddSP_stack) {
return arch_irn_flags_modify_sp;
}
}
if (env->flags.try_omit_fp) {
/* simply remove the stack frame here */
- curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE_SHRINK);
+ curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK);
+ add_irn_dep(curr_sp, *mem);
}
else {
const ia32_isa_t *isa = (ia32_isa_t *)env->isa;
ir_node *leave;
/* leave */
- leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, *mem);
+ leave = new_rd_ia32_Leave(NULL, env->irg, bl, curr_sp, curr_bp);
set_ia32_flags(leave, arch_irn_flags_ignore);
curr_bp = new_r_Proj(current_ir_graph, bl, leave, mode_bp, pn_ia32_Leave_frame);
curr_sp = new_r_Proj(current_ir_graph, bl, leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
/* l_res = SHL a_l, cnt */
h_res = new_rd_ia32_l_Shl(dbg, irg, block, a_l, cnt, h_res_mode);
+ add_irn_dep(h_res, l_res);
resolve_call(call, l_res, h_res, irg, block);
/* h_res = SHR a_h, cnt */
h_res = new_rd_ia32_l_Shr(dbg, irg, block, a_h, cnt, h_res_mode);
+ add_irn_dep(h_res, l_res);
resolve_call(call, l_res, h_res, irg, block);
/* h_res = SAR a_h, cnt */
h_res = new_rd_ia32_l_Shrs(dbg, irg, block, a_h, cnt, h_res_mode);
+ add_irn_dep(h_res, l_res);
resolve_call(call, l_res, h_res, irg, block);
/* too bad: we need 0 in a register here */
cnst = new_Const_long(h_res_mode, 0);
h_res = new_rd_ia32_l_SubC(dbg, irg, block, cnst, a_h, h_res_mode);
+ add_irn_dep(h_res, l_res);
resolve_call(call, l_res, h_res, irg, block);
sub_h = new_rd_ia32_l_Eor(dbg, irg, block, a_h, sign, h_res_mode);
l_res = new_rd_ia32_l_Sub(dbg, irg, block, sub_l, sign, l_res_mode);
h_res = new_rd_ia32_l_SubC(dbg, irg, block, sub_h, sign, l_res_mode);
+ add_irn_dep(h_res, l_res);
resolve_call(call, l_res, h_res, irg, block);
if the IncSP points to NoMem -> just use the memory input from store
if IncSP points to somewhere else -> sync memory of IncSP and Store
*/
- mem = be_get_IncSP_mem(sp);
- if (mem == get_irg_no_mem(irg))
- mem = get_irn_n(irn, 3);
- else {
- ir_node *in[2];
-
- in[0] = mem;
- in[1] = get_irn_n(irn, 3);
- mem = new_r_Sync(irg, bl, 2, in);
- }
+ mem = get_irn_n(irn, 3);
push = new_rd_ia32_Push(NULL, irg, bl, be_get_IncSP_pred(sp), val, mem);
proj_res = new_r_Proj(irg, bl, push, get_irn_mode(sp), pn_ia32_Push_stack);
proj_M = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);
+ add_irn_deps(push, sp);
/* copy a possible constant from the store */
set_ia32_id_cnst(push, get_ia32_id_cnst(irn));
},
"AddSP" => {
- "irn_flags" => "S|I",
+ "irn_flags" => "I",
"comment" => "allocate space on stack",
"reg_req" => { "in" => [ "esp", "gp" ], "out" => [ "esp", "none" ] },
"outs" => [ "stack", "M" ],
+#ifdef _WIN32
+#include <malloc.h>
+#else
#include <alloca.h>
+#endif
+
#include <stdio.h>
struct x {
return a - b;
}
+#if 0
ll_t div_ll(ll_t a, ll_t b) {
return a / b;
}
ll_t divmod_ll(ll_t a, ll_t b) {
return (a / b) + (a % b);
}
+#endif
ll_t neg_ll(ll_t a) {
return -a;
return llabs(a);
}
+#if 0
double conv_ll_d(ll_t a) {
return (double)a;
}
ll_t conv_d_ll(double a) {
return (ll_t)a;
}
+#endif
int main(void) {
ll_t a = 0xff;
printf("%lld * %lld = %lld\n", a, b, mul_ll(a, b));
printf("%lld + %lld = %lld\n", a, b, add_ll(a, b));
printf("%lld - %lld = %lld\n", a, b, sub_ll(a, b));
+#if 0
printf("%lld / %lld = %lld\n", a, b, div_ll(a, b));
printf("%lld % %lld = %lld\n", a, b, mod_ll(a, b));
printf("%lld / + % %lld = %lld\n", a, b, divmod_ll(a, b));
+#endif
printf("%lld << %lld = %lld\n", a, 2, shl_ll(a, 2));
printf("%lld >> %lld = %lld\n", a, 2, shr_ll(a, 2));
printf("abs(%lld) = %lld\n", c, abs_ll(c));
printf("neg(%lld) = %lld\n", b, neg_ll(b));
+#if 0
printf("conv(%lld) = %lf\n", c, conv_ll_d(c));
printf("conv(%lf) = %lld\n", d, conv_d_ll(d));
+#endif
return 0;
}