* @param node The node to be turned into a tuple.
* @param arity The number of values formed into a Tuple.
+ * @param in The array of the Tuple's new operands; must contain arity entries.
*/
-FIRM_API void turn_into_tuple(ir_node *node, int arity);
+FIRM_API void turn_into_tuple(ir_node *node, int arity, ir_node *const in[]);
/** Walks over the passed IR graph and collects all Phi nodes as a
* list in their corresponding block (using get_Block_phis() API).
* This function is necessary to adjust in arrays of blocks, calls and phis.
* "in" must contain all predecessors except the block that are required for
* the nodes opcode. */
-FIRM_API void set_irn_in(ir_node *node, int arity, ir_node *in[]);
+FIRM_API void set_irn_in(ir_node *node, int arity, ir_node *const in[]);
/**
* Add an artificial dependency to the node.
#include "lower_dw.h"
#include "array.h"
#include "error.h"
+#include "util.h"
#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
jmp = new_r_Jmp(block);
set_opt_cse(old_cse);
- turn_into_tuple(call, pn_Call_max+1);
- set_Tuple_pred(call, pn_Call_M, nomem);
- set_Tuple_pred(call, pn_Call_X_regular, jmp);
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
- set_Tuple_pred(call, pn_Call_T_result, res);
+ ir_node *const in[] = {
+ [pn_Call_M] = nomem,
+ [pn_Call_T_result] = res,
+ [pn_Call_X_regular] = jmp,
+ [pn_Call_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(call, ARRAY_SIZE(in), in);
}
}
#include "irtools.h"
#include "error.h"
-void turn_into_tuple(ir_node *node, int arity)
+void turn_into_tuple(ir_node *const node, int const arity, ir_node *const *const in)
{
- ir_graph *irg = get_irn_irg(node);
- ir_node **in = ALLOCAN(ir_node*, arity);
- ir_node *bad = new_r_Bad(irg, mode_ANY);
- int i;
-
- /* construct a new in array, with every input being bad */
- for (i = 0; i < arity; ++i) {
- in[i] = bad;
- }
set_irn_in(node, arity, in);
set_irn_op(node, op_Tuple);
}
return node->in;
}
-void set_irn_in(ir_node *node, int arity, ir_node **in)
+void set_irn_in(ir_node *const node, int const arity, ir_node *const *const in)
{
int i;
ir_node *** pOld_in;
/* skip a potential Pin */
mem = skip_Pin(mem);
- turn_into_tuple(n, pn_Div_max+1);
- set_Tuple_pred(n, pn_Div_M, mem);
- set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg, mode_X));
- set_Tuple_pred(n, pn_Div_res, value);
+ ir_node *const in[] = {
+ [pn_Div_M] = mem,
+ [pn_Div_res] = value,
+ [pn_Div_X_regular] = new_r_Jmp(blk),
+ [pn_Div_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(n, ARRAY_SIZE(in), in);
}
return n;
}
/* skip a potential Pin */
mem = skip_Pin(mem);
- turn_into_tuple(n, pn_Mod_max+1);
- set_Tuple_pred(n, pn_Mod_M, mem);
- set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg, mode_X));
- set_Tuple_pred(n, pn_Mod_res, value);
+ ir_node *const in[] = {
+ [pn_Mod_M] = mem,
+ [pn_Mod_res] = value,
+ [pn_Mod_X_regular] = new_r_Jmp(blk),
+ [pn_Mod_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(n, ARRAY_SIZE(in), in);
}
return n;
}
ir_node *a = get_Cond_selector(n);
ir_graph *irg = get_irn_irg(n);
ir_tarval *ta;
- ir_node *jmp;
/* we need block info which is not available in floating irgs */
if (get_irg_pinned(irg) == op_pin_state_floats)
if (ta != tarval_bad) {
/* It's branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
- ir_node *blk = get_nodes_block(n);
- jmp = new_r_Jmp(blk);
- turn_into_tuple(n, pn_Cond_max+1);
- if (ta == tarval_b_true) {
- set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg, mode_X));
- set_Tuple_pred(n, pn_Cond_true, jmp);
- } else {
- set_Tuple_pred(n, pn_Cond_false, jmp);
- set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
- }
+ ir_node *const blk = get_nodes_block(n);
+ ir_node *const jmp = new_r_Jmp(blk);
+ ir_node *const bad = new_r_Bad(irg, mode_X);
+ bool const cond = ta == tarval_b_true;
+ ir_node *const in[] = {
+ [pn_Cond_false] = cond ? bad : jmp,
+ [pn_Cond_true] = cond ? jmp : bad,
+ };
+ turn_into_tuple(n, ARRAY_SIZE(in), in);
clear_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);
}
return n;
#include "irgwalk.h"
#include "iroptimize.h"
#include "error.h"
+#include "util.h"
static pmap *entities;
static bool dont_lower[ir_bk_last+1];
call_ress = new_r_Proj(call, mode_T, pn_Call_T_result);
call_res = new_r_Proj(call_ress, res_mode, 0);
- turn_into_tuple(node, 2);
- set_irn_n(node, pn_Builtin_M, call_mem);
- set_irn_n(node, pn_Builtin_max+1, call_res);
+ ir_node *const in[] = {
+ [pn_Builtin_M] = call_mem,
+ [pn_Builtin_max + 1] = call_res,
+ };
+ turn_into_tuple(node, ARRAY_SIZE(in), in);
}
static void lower_builtin(ir_node *node, void *env)
case ir_bk_prefetch: {
/* just remove it */
ir_node *mem = get_Builtin_mem(node);
- turn_into_tuple(node, 1);
- set_irn_n(node, pn_Builtin_M, mem);
+ ir_node *const in[] = { mem };
+ turn_into_tuple(node, ARRAY_SIZE(in), in);
break;
}
+
case ir_bk_ffs:
case ir_bk_clz:
case ir_bk_ctz:
#include "array_t.h"
#include "pmap.h"
#include "error.h"
+#include "util.h"
static pmap *pointer_types;
static pmap *lowered_mtps;
/* get rid of the CopyB */
if (ir_throws_exception(p)) {
- turn_into_tuple(p, pn_CopyB_max+1);
- set_Tuple_pred(p, pn_CopyB_M, mem);
- set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(block));
- set_Tuple_pred(p, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
+ ir_node *const in[] = {
+ [pn_CopyB_M] = mem,
+ [pn_CopyB_X_regular] = new_r_Jmp(block),
+ [pn_CopyB_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(p, ARRAY_SIZE(in), in);
} else {
- turn_into_tuple(p, pn_CopyB_M+1);
- set_Tuple_pred(p, pn_CopyB_M, mem);
+ ir_node *const in[] = { mem };
+ turn_into_tuple(p, ARRAY_SIZE(in), in);
}
++n_args;
}
#include "irgmod.h"
#include "error.h"
#include "be.h"
+#include "util.h"
typedef struct entry entry_t;
struct entry {
mode_bytes /= 2;
}
- turn_into_tuple(irn, pn_CopyB_max+1);
- set_Tuple_pred(irn, pn_CopyB_M, mem);
- set_Tuple_pred(irn, pn_CopyB_X_regular, new_r_Bad(irg, mode_X));
- set_Tuple_pred(irn, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
+ ir_node *const bad = new_r_Bad(irg, mode_X);
+ ir_node *const in[] = {
+ [pn_CopyB_M] = mem,
+ [pn_CopyB_X_regular] = bad,
+ [pn_CopyB_X_except] = bad,
+ };
+ turn_into_tuple(irn, ARRAY_SIZE(in), in);
}
static ir_type *get_memcpy_methodtype(void)
call = new_rd_Call(dbgi, block, mem, symconst, 3, in, call_tp);
call_mem = new_r_Proj(call, mode_M, pn_Call_M);
- turn_into_tuple(irn, 1);
- set_irn_n(irn, pn_CopyB_M, call_mem);
+ ir_node *const tuple_in[] = { call_mem };
+ turn_into_tuple(irn, ARRAY_SIZE(tuple_in), tuple_in);
}
static void lower_copyb_node(ir_node *irn)
panic("unexpected builtin");
}
- turn_into_tuple(builtin, 2);
- set_irn_n(builtin, pn_Builtin_M, mem);
- set_irn_n(builtin, pn_Builtin_max+1, res);
+ ir_node *const in[] = {
+ [pn_Builtin_M] = mem,
+ [pn_Builtin_max + 1] = res,
+ };
+ turn_into_tuple(builtin, ARRAY_SIZE(in), in);
}
}
ir_node *rest = new_r_Tuple(block, 1, &irn);
if (ir_throws_exception(call)) {
- turn_into_tuple(call, pn_Call_max+1);
if (reg_jmp == NULL) {
reg_jmp = new_r_Jmp(block);
}
if (exc_jmp == NULL) {
exc_jmp = new_r_Bad(irg, mode_X);
}
- set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
- set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
+ ir_node *const in[] = {
+ [pn_Call_M] = mem,
+ [pn_Call_T_result] = rest,
+ [pn_Call_X_regular] = reg_jmp,
+ [pn_Call_X_except] = exc_jmp,
+ };
+ turn_into_tuple(call, ARRAY_SIZE(in), in);
} else {
assert(reg_jmp == NULL);
assert(exc_jmp == NULL);
- turn_into_tuple(call, pn_Call_T_result+1);
assert(pn_Call_M <= pn_Call_T_result);
assert(pn_Call_X_regular > pn_Call_T_result);
assert(pn_Call_X_except > pn_Call_T_result);
+ ir_node *const in[] = {
+ [pn_Call_M] = mem,
+ [pn_Call_T_result] = rest,
+ };
+ turn_into_tuple(call, ARRAY_SIZE(in), in);
}
- set_Tuple_pred(call, pn_Call_M, mem);
- set_Tuple_pred(call, pn_Call_T_result, rest);
}
int i_mapper_abs(ir_node *call, void *ctx)
if (n_proj > 0) {
n_proj += n_res - 1;
- /* we are ready */
- turn_into_tuple(node, n_proj);
+ ir_node **const in = ALLOCAN(ir_node*, n_proj);
+ ir_node *const bad = new_r_Bad(irg, mode_ANY);
+ for (i = 0; i != n_proj; ++i) {
+ in[i] = bad;
+ }
if (rt->mem_proj_nr >= 0)
- set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(call, mode_M, pn_Call_M));
+ in[rt->mem_proj_nr] = new_r_Proj(call, mode_M, pn_Call_M);
if (throws_exception) {
- set_Tuple_pred(node, op->pn_x_regular, new_r_Proj(call, mode_X, pn_Call_X_regular));
- set_Tuple_pred(node, op->pn_x_except, new_r_Proj(call, mode_X, pn_Call_X_except));
+ in[op->pn_x_regular] = new_r_Proj(call, mode_X, pn_Call_X_regular);
+ in[op->pn_x_except] = new_r_Proj(call, mode_X, pn_Call_X_except);
}
if (rt->res_proj_nr >= 0) {
for (i = 0; i < n_res; ++i) {
ir_mode *mode = get_type_mode(get_method_res_type(mtp, i));
ir_node *proj = new_r_Proj(res_proj, mode, i);
- set_Tuple_pred(node, rt->res_proj_nr + i, proj);
+ in[rt->res_proj_nr + i] = proj;
}
}
+
+ turn_into_tuple(node, n_proj, in);
return 1;
} else {
/* only one return value supported */
#include "irprintf.h"
#include "debug.h"
#include "error.h"
+#include "util.h"
/**
* walker environment
mem = get_Alloc_mem(alloc);
blk = get_nodes_block(alloc);
- turn_into_tuple(alloc, pn_Alloc_max+1);
- set_Tuple_pred(alloc, pn_Alloc_M, mem);
- set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
+ ir_node *const in[] = {
+ [pn_Alloc_M] = mem,
+ [pn_Alloc_X_regular] = new_r_Jmp(blk),
+ [pn_Alloc_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(alloc, ARRAY_SIZE(in), in);
++env->nr_deads;
}
sel = new_rd_simpleSel(dbg, get_nodes_block(alloc), get_irg_no_mem(irg), get_irg_frame(irg), ent);
mem = get_Alloc_mem(alloc);
- turn_into_tuple(alloc, pn_Alloc_max+1);
- set_Tuple_pred(alloc, pn_Alloc_M, mem);
- set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
- set_Tuple_pred(alloc, pn_Alloc_res, sel);
+ ir_node *const in[] = {
+ [pn_Alloc_M] = mem,
+ [pn_Alloc_res] = sel,
+ [pn_Alloc_X_regular] = new_r_Jmp(blk),
+ [pn_Alloc_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(alloc, ARRAY_SIZE(in), in);
++env->nr_removed;
}
mem = get_Call_mem(call);
blk = get_nodes_block(call);
- turn_into_tuple(call, pn_Call_max+1);
- set_Tuple_pred(call, pn_Call_M, mem);
- set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
+ ir_node *const in[] = {
+ [pn_Call_M] = mem,
+ [pn_Call_T_result] = new_r_Bad(irg, mode_T),
+ [pn_Call_X_regular] = new_r_Jmp(blk),
+ [pn_Call_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(call, ARRAY_SIZE(in), in);
++env->nr_deads;
}
}
/* Inlines a method at the given call site. */
-int inline_method(ir_node *call, ir_graph *called_graph)
+int inline_method(ir_node *const call, ir_graph *called_graph)
{
/* we cannot inline some types of calls */
if (! can_inline(call, called_graph))
in[pn_Start_P_frame_base] = get_irg_frame(irg);
in[pn_Start_T_args] = new_r_Tuple(post_bl, n_params, args_in);
ir_node *pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
- ir_node *post_call = call;
/* --
The new block gets the ins of the old block, pre_call and all its
/* build a Tuple for all results of the method.
* add Phi node if there was more than one Return. */
- turn_into_tuple(post_call, pn_Call_max+1);
/* First the Memory-Phi */
int n_mem_phi = 0;
for (int i = 0; i < arity; i++) {
cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 1);
}
}
- ir_node *phi = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
- set_Tuple_pred(call, pn_Call_M, phi);
+ ir_node *const call_mem = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
/* Conserve Phi-list for further inlinings -- but might be optimized */
- if (get_nodes_block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
+ if (get_nodes_block(call_mem) == post_bl) {
+ set_irn_link(call_mem, get_irn_link(post_bl));
+ set_irn_link(post_bl, call_mem);
}
/* Now the real results */
+ ir_node *call_res;
if (n_res > 0) {
for (int j = 0; j < n_res; j++) {
ir_type *res_type = get_method_res_type(ctp, j);
n_ret++;
}
}
- if (n_ret > 0) {
- phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
- } else {
- phi = new_r_Bad(irg, res_mode);
- }
+ ir_node *const phi = n_ret > 0
+ ? new_r_Phi(post_bl, n_ret, cf_pred, res_mode)
+ : new_r_Bad(irg, res_mode);
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_block(phi) == post_bl) {
set_Block_phis(post_bl, phi);
}
}
- ir_node *result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
- set_Tuple_pred(call, pn_Call_T_result, result_tuple);
+ call_res = new_r_Tuple(post_bl, n_res, res_pred);
} else {
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
+ call_res = new_r_Bad(irg, mode_T);
}
/* handle the regular call */
- set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
+ ir_node *const call_x_reg = new_r_Jmp(post_bl);
/* Finally the exception control flow.
We have two possible situations:
Second: There is no exception edge. Just add all inlined exception
branches to the End node.
*/
+ ir_node *call_x_exc;
if (exc_handling == exc_handler) {
int n_exc = 0;
for (int i = 0; i < arity; i++) {
if (n_exc > 0) {
if (n_exc == 1) {
/* simple fix */
- set_Tuple_pred(call, pn_Call_X_except, cf_pred[0]);
+ call_x_exc = cf_pred[0];
} else {
ir_node *block = new_r_Block(irg, n_exc, cf_pred);
- set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
+ call_x_exc = new_r_Jmp(block);
}
} else {
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+ call_x_exc = new_r_Bad(irg, mode_X);
}
} else {
/* assert(exc_handling == 1 || no exceptions. ) */
for (int i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+ call_x_exc = new_r_Bad(irg, mode_X);
free(end_preds);
}
free(res_pred);
free(cf_pred);
+ ir_node *const call_in[] = {
+ [pn_Call_M] = call_mem,
+ [pn_Call_T_result] = call_res,
+ [pn_Call_X_regular] = call_x_reg,
+ [pn_Call_X_except] = call_x_exc,
+ };
+ turn_into_tuple(call, ARRAY_SIZE(call_in), call_in);
+
/* -- Turn CSE back on. -- */
set_optimize(rem_opt);
current_ir_graph = rem;
val = new_rd_Conv(get_irn_dbg_info(node), block, val, mode);
mem = get_Load_mem(node);
- turn_into_tuple(node, pn_Load_max+1);
- set_Tuple_pred(node, pn_Load_M, mem);
- set_Tuple_pred(node, pn_Load_res, val);
- set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(block));
- set_Tuple_pred(node, pn_Load_X_except, new_r_Bad(irg, mode_X));
+ ir_node *const in[] = {
+ [pn_Load_M] = mem,
+ [pn_Load_res] = val,
+ [pn_Load_X_regular] = new_r_Jmp(block),
+ [pn_Load_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(node, ARRAY_SIZE(in), in);
} else if (is_Store(node)) {
DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
set_value(vnum, val);
mem = get_Store_mem(node);
- turn_into_tuple(node, pn_Store_max+1);
- set_Tuple_pred(node, pn_Store_M, mem);
- set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(block));
- set_Tuple_pred(node, pn_Store_X_except, new_r_Bad(irg, mode_X));
+ ir_node *const in[] = {
+ [pn_Store_M] = mem,
+ [pn_Store_X_regular] = new_r_Jmp(block),
+ [pn_Store_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(node, ARRAY_SIZE(in), in);
}
}
#include "irhooks.h"
#include "ircons_t.h"
#include "irpass.h"
+#include "util.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* create a new tuple for the return values */
tuple = new_r_Tuple(block, env->n_ress, in);
- turn_into_tuple(call, pn_Call_max+1);
- set_Tuple_pred(call, pn_Call_M, mem);
- set_Tuple_pred(call, pn_Call_X_regular, jmp);
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
- set_Tuple_pred(call, pn_Call_T_result, tuple);
+ ir_node *const in[] = {
+ [pn_Call_M] = mem,
+ [pn_Call_T_result] = tuple,
+ [pn_Call_X_regular] = jmp,
+ [pn_Call_X_except] = new_r_Bad(irg, mode_X),
+ };
+ turn_into_tuple(call, ARRAY_SIZE(in), in);
for (i = 0; i < env->n_ress; ++i) {
ir_node *res = get_Return_res(p, i);