-/* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
-* All rights reserved.
-*
-* Authors: Martin Trapp, Christian Schaefer
-*
-* ircons.c: basic and more detailed irnode constructors
-* store, block and parameter administration.
-* Adapted to extended FIRM nodes (exceptions...) and commented
-* by Goetz Lindenmaier
-*/
-
-/* $Id$ */
+/*
+ * Project: libFIRM
+ * File name: ir/ir/ircons.c
+ * Purpose: Various irnode constructors. Automatic construction
+ * of SSA representation.
+ * Author: Martin Trapp, Christian Schaefer
+ * Modified by: Goetz Lindenmaier, Boris Boesler
+ * Created:
+ * CVS-ID: $Id$
+ * Copyright: (c) 1998-2003 Universität Karlsruhe
+ * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
+ */
#ifdef HAVE_CONFIG_H
# include <config.h>
# include "irgraph_t.h"
# include "irnode_t.h"
# include "irmode_t.h"
-# include "ircons.h"
+# include "ircons_t.h"
# include "firm_common_t.h"
# include "irvrfy.h"
-# include "irop.h"
+# include "irop_t.h"
# include "iropt_t.h"
# include "irgmod.h"
# include "array.h"
/* memset belongs to string.h */
# include "string.h"
# include "irbackedge_t.h"
+# include "irflag_t.h"
#if USE_EXPLICIT_PHI_IN_STACK
/* A stack needed for the automatic Phi node construction in constructor
typedef struct Phi_in_stack Phi_in_stack;
#endif
+/* Node verification: compiled out in release builds (NDEBUG). */
+#ifdef NDEBUG
+# define IRN_VRFY_IRG(res, irg)
+#else
+# define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
+#endif
+
+/*
+ * language dependent initialization variable
+ */
+static default_initialize_local_variable_func_t *default_initialize_local_variable = NULL;
+
/*** ******************************************** */
/** privat interfaces, for professional use only */
set_Block_matured(res, 1);
set_Block_block_visited(res, 0);
- res->attr.block.exc = exc_normal;
- res->attr.block.handler_entry = 0;
+ /* res->attr.block.exc = exc_normal; */
+ /* res->attr.block.handler_entry = 0; */
+ res->attr.block.irg = irg;
res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
res->attr.block.in_cg = NULL;
res->attr.block.cg_backedge = NULL;
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
+ res = new_ir_node(db, irg, block, op_Start, mode_T, 0, NULL);
+ /* res->attr.start.irg = irg; */
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_End, mode_X, -1, NULL);
+ res = new_ir_node(db, irg, block, op_End, mode_X, -1, NULL);
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
new_rd_Phi (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in, ir_mode *mode)
{
ir_node *res;
+ int i;
+ bool has_unknown = false;
- assert( get_Block_matured(block) );
- assert( get_irn_arity(block) == arity );
+ /* Don't assert that block matured: the use of this constructor is strongly
+ restricted ... */
+ if ( get_Block_matured(block) )
+ assert( get_irn_arity(block) == arity );
- res = new_ir_node (db, irg, block, op_Phi, mode, arity, in);
+ res = new_ir_node(db, irg, block, op_Phi, mode, arity, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, arity);
- res = optimize_node (res);
- irn_vrfy (res);
+ for (i = arity-1; i >= 0; i--)
+ if (get_irn_op(in[i]) == op_Unknown) {
+ has_unknown = true;
+ break;
+ }
+
+ if (!has_unknown) res = optimize_node (res);
+ IRN_VRFY_IRG(res, irg);
/* Memory Phis in endless loops must be kept alive.
As we can't distinguish these easily we keep all of them alive. */
}
INLINE ir_node *
-new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
+new_rd_Const_type (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con, type *tp)
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_Const, mode, 0, NULL);
- res->attr.con = con;
- res = optimize_node (res);
- irn_vrfy (res);
-#if 0
- res = local_optimize_newby (res);
-# endif
+ res = new_ir_node (db, irg, irg->start_block, op_Const, mode, 0, NULL);
+ res->attr.con.tv = con;
+ set_Const_type(res, tp); /* Call method because of complex assertion. */
+ res = optimize_node (res);
+ assert(get_Const_type(res) == tp);
+ IRN_VRFY_IRG(res, irg);
return res;
}
+INLINE ir_node *
+new_rd_Const (dbg_info* db, ir_graph *irg, ir_node *block, ir_mode *mode, tarval *con)
+{
+ type *tp = unknown_type;
+ /* removing this somehow causes errors in jack. */
+ return new_rd_Const_type (db, irg, block, mode, con, tp);
+}
+
INLINE ir_node *
new_rd_Id (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *val, ir_mode *mode)
{
- ir_node *in[1] = {val};
ir_node *res;
- res = new_ir_node (db, irg, block, op_Id, mode, 1, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ res = new_ir_node(db, irg, block, op_Id, mode, 1, &val);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
- long proj)
+ long proj)
{
- ir_node *in[1] = {arg};
ir_node *res;
- res = new_ir_node (db, irg, block, op_Proj, mode, 1, in);
+
+ res = new_ir_node (db, irg, block, op_Proj, mode, 1, &arg);
res->attr.proj = proj;
assert(res);
assert(get_Proj_pred(res));
- assert(get_nodes_Block(get_Proj_pred(res)));
+ assert(get_nodes_block(get_Proj_pred(res)));
- res = optimize_node (res);
+ res = optimize_node(res);
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
- long max_proj)
+ long max_proj)
{
ir_node *res;
- assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
+ assert(arg->op == op_Cond);
arg->attr.c.kind = fragmentary;
arg->attr.c.default_proj = max_proj;
res = new_rd_Proj (db, irg, block, arg, mode_X, max_proj);
INLINE ir_node *
new_rd_Conv (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, ir_mode *mode)
{
- ir_node *in[1] = {op};
ir_node *res;
- res = new_ir_node (db, irg, block, op_Conv, mode, 1, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
+}
+
+INLINE ir_node *
+new_rd_Cast (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *op, type *to_tp)
+{
+ ir_node *res;
+ res = new_ir_node(db, irg, block, op_Cast, get_irn_mode(op), 1, &op);
+ res->attr.cast.totype = to_tp;
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
+ return res;
}
INLINE ir_node *
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_Tuple, mode_T, arity, in);
+ res = new_ir_node(db, irg, block, op_Tuple, mode_T, arity, in);
res = optimize_node (res);
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
- ir_node *in[2] = {op1, op2};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Add, mode, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = op1;
+ in[1] = op2;
+ res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
- ir_node *in[2] = {op1, op2};
+ ir_node *in[2];
ir_node *res;
+
+ in[0] = op1;
+ in[1] = op2;
res = new_ir_node (db, irg, block, op_Sub, mode, 2, in);
res = optimize_node (res);
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode)
+ ir_node *op, ir_mode *mode)
{
- ir_node *in[1] = {op};
ir_node *res;
- res = new_ir_node (db, irg, block, op_Minus, mode, 1, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ res = new_ir_node(db, irg, block, op_Minus, mode, 1, &op);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
- ir_node *in[2] = {op1, op2};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Mul, mode, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = op1;
+ in[1] = op2;
+ res = new_ir_node(db, irg, block, op_Mul, mode, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2)
+ ir_node *memop, ir_node *op1, ir_node *op2)
{
- ir_node *in[3] = {memop, op1, op2};
+ ir_node *in[3];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Quot, mode_T, 3, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = memop;
+ in[1] = op1;
+ in[2] = op2;
+ res = new_ir_node(db, irg, block, op_Quot, mode_T, 3, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2)
+ ir_node *memop, ir_node *op1, ir_node *op2)
{
- ir_node *in[3] = {memop, op1, op2};
+ ir_node *in[3];
ir_node *res;
- res = new_ir_node (db, irg, block, op_DivMod, mode_T, 3, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = memop;
+ in[1] = op1;
+ in[2] = op2;
+ res = new_ir_node(db, irg, block, op_DivMod, mode_T, 3, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2)
+ ir_node *memop, ir_node *op1, ir_node *op2)
{
- ir_node *in[3] = {memop, op1, op2};
+ ir_node *in[3];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Div, mode_T, 3, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = memop;
+ in[1] = op1;
+ in[2] = op2;
+ res = new_ir_node(db, irg, block, op_Div, mode_T, 3, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2)
+ ir_node *memop, ir_node *op1, ir_node *op2)
{
- ir_node *in[3] = {memop, op1, op2};
+ ir_node *in[3];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Mod, mode_T, 3, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = memop;
+ in[1] = op1;
+ in[2] = op2;
+ res = new_ir_node(db, irg, block, op_Mod, mode_T, 3, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
- ir_node *in[2] = {op1, op2};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_And, mode, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = op1;
+ in[1] = op2;
+ res = new_ir_node(db, irg, block, op_And, mode, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
- ir_node *in[2] = {op1, op2};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Or, mode, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = op1;
+ in[1] = op2;
+ res = new_ir_node(db, irg, block, op_Or, mode, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
- ir_node *in[2] = {op1, op2};
+ ir_node *in[2];
ir_node *res;
+
+ in[0] = op1;
+ in[1] = op2;
res = new_ir_node (db, irg, block, op_Eor, mode, 2, in);
res = optimize_node (res);
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode)
+ ir_node *op, ir_mode *mode)
{
- ir_node *in[1] = {op};
ir_node *res;
- res = new_ir_node (db, irg, block, op_Not, mode, 1, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ res = new_ir_node(db, irg, block, op_Not, mode, 1, &op);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode)
+ ir_node *op, ir_node *k, ir_mode *mode)
{
- ir_node *in[2] = {op, k};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Shl, mode, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = op;
+ in[1] = k;
+ res = new_ir_node(db, irg, block, op_Shl, mode, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode)
+ ir_node *op, ir_node *k, ir_mode *mode)
{
- ir_node *in[2] = {op, k};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Shr, mode, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = op;
+ in[1] = k;
+ res = new_ir_node(db, irg, block, op_Shr, mode, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode)
+ ir_node *op, ir_node *k, ir_mode *mode)
{
- ir_node *in[2] = {op, k};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Shrs, mode, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = op;
+ in[1] = k;
+ res = new_ir_node(db, irg, block, op_Shrs, mode, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode)
+ ir_node *op, ir_node *k, ir_mode *mode)
{
- ir_node *in[2] = {op, k};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Rot, mode, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = op;
+ in[1] = k;
+ res = new_ir_node(db, irg, block, op_Rot, mode, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode)
+ ir_node *op, ir_mode *mode)
{
- ir_node *in[1] = {op};
ir_node *res;
- res = new_ir_node (db, irg, block, op_Abs, mode, 1, in);
+
+ res = new_ir_node(db, irg, block, op_Abs, mode, 1, &op);
res = optimize_node (res);
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2)
+ ir_node *op1, ir_node *op2)
{
- ir_node *in[2] = {op1, op2};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Cmp, mode_T, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+ in[0] = op1;
+ in[1] = op2;
+
+ res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
new_rd_Jmp (dbg_info* db, ir_graph *irg, ir_node *block)
{
ir_node *res;
+
res = new_ir_node (db, irg, block, op_Jmp, mode_X, 0, NULL);
res = optimize_node (res);
- irn_vrfy (res);
+ IRN_VRFY_IRG (res, irg);
return res;
}
INLINE ir_node *
new_rd_Cond (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *c)
{
- ir_node *in[1] = {c};
ir_node *res;
- res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, in);
- res->attr.c.kind = dense;
+
+ res = new_ir_node (db, irg, block, op_Cond, mode_T, 1, &c);
+ res->attr.c.kind = dense;
res->attr.c.default_proj = 0;
res = optimize_node (res);
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
ir_node *
new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *callee, int arity, ir_node **in, type *tp)
+ ir_node *callee, int arity, ir_node **in, type *tp)
{
ir_node **r_in;
ir_node *res;
int r_arity;
r_arity = arity+2;
- NEW_ARR_A (ir_node *, r_in, r_arity);
+ NEW_ARR_A(ir_node *, r_in, r_arity);
r_in[0] = store;
r_in[1] = callee;
- memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
+ memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
- res = new_ir_node (db, irg, block, op_Call, mode_T, r_arity, r_in);
+ res = new_ir_node(db, irg, block, op_Call, mode_T, r_arity, r_in);
assert(is_method_type(tp));
set_Call_type(res, tp);
- res->attr.call.callee_arr = NULL;
- res = optimize_node (res);
- irn_vrfy (res);
+ res->attr.call.exc.pin_state = op_pin_state_pinned;
+ res->attr.call.callee_arr = NULL;
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
r_arity = arity+1;
NEW_ARR_A (ir_node *, r_in, r_arity);
r_in[0] = store;
- memcpy (&r_in[1], in, sizeof (ir_node *) * arity);
- res = new_ir_node (db, irg, block, op_Return, mode_X, r_arity, r_in);
- res = optimize_node (res);
- irn_vrfy (res);
+ memcpy(&r_in[1], in, sizeof(ir_node *) * arity);
+ res = new_ir_node(db, irg, block, op_Return, mode_X, r_arity, r_in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Raise (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
{
- ir_node *in[2] = {store, obj};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Raise, mode_T, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ in[0] = store;
+ in[1] = obj;
+ res = new_ir_node(db, irg, block, op_Raise, mode_T, 2, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr)
+ ir_node *store, ir_node *adr, ir_mode *mode)
{
- ir_node *in[2] = {store, adr};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Load, mode_T, 2, in);
- res = optimize_node (res);
- irn_vrfy (res);
+ in[0] = store;
+ in[1] = adr;
+ res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
+ res->attr.load.exc.pin_state = op_pin_state_pinned;
+ res->attr.load.load_mode = mode;
+ res->attr.load.volatility = volatility_non_volatile;
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr, ir_node *val)
+ ir_node *store, ir_node *adr, ir_node *val)
{
- ir_node *in[3] = {store, adr, val};
+ ir_node *in[3];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Store, mode_T, 3, in);
-
- res = optimize_node (res);
- irn_vrfy (res);
+ in[0] = store;
+ in[1] = adr;
+ in[2] = val;
+ res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
+ res->attr.store.exc.pin_state = op_pin_state_pinned;
+ res->attr.store.volatility = volatility_non_volatile;
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *size, type *alloc_type, where_alloc where)
+ ir_node *size, type *alloc_type, where_alloc where)
{
- ir_node *in[2] = {store, size};
+ ir_node *in[2];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Alloc, mode_T, 2, in);
-
- res->attr.a.where = where;
- res->attr.a.type = alloc_type;
- res = optimize_node (res);
- irn_vrfy (res);
+ in[0] = store;
+ in[1] = size;
+ res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
+ res->attr.a.exc.pin_state = op_pin_state_pinned;
+ res->attr.a.where = where;
+ res->attr.a.type = alloc_type;
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *ptr, ir_node *size, type *free_type)
+ ir_node *ptr, ir_node *size, type *free_type)
{
- ir_node *in[3] = {store, ptr, size};
+ ir_node *in[3];
ir_node *res;
- res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
+ in[0] = store;
+ in[1] = ptr;
+ in[2] = size;
+ res = new_ir_node (db, irg, block, op_Free, mode_T, 3, in);
res->attr.f = free_type;
-
- res = optimize_node (res);
- irn_vrfy (res);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
ir_node *res;
int r_arity;
+ assert(ent != NULL && is_entity(ent) && "entity expected in Sel construction");
+
r_arity = arity + 2;
- NEW_ARR_A (ir_node *, r_in, r_arity); /* uses alloca */
+ NEW_ARR_A(ir_node *, r_in, r_arity); /* uses alloca */
r_in[0] = store;
r_in[1] = objptr;
- memcpy (&r_in[2], in, sizeof (ir_node *) * arity);
- res = new_ir_node (db, irg, block, op_Sel, mode_P, r_arity, r_in);
-
+ memcpy(&r_in[2], in, sizeof(ir_node *) * arity);
+ res = new_ir_node(db, irg, block, op_Sel, mode_P_mach, r_arity, r_in);
res->attr.s.ent = ent;
-
- res = optimize_node (res);
- irn_vrfy (res);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
ir_node *
new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *objptr, type *ent)
+ ir_node *objptr, type *ent)
{
ir_node **r_in;
ir_node *res;
int r_arity;
r_arity = 2;
- NEW_ARR_A (ir_node *, r_in, r_arity);
- r_in [0] = store;
- r_in [1] = objptr;
-
- res = new_ir_node (db, irg, block, op_Sel, mode_T, r_arity, r_in);
+ NEW_ARR_A(ir_node *, r_in, r_arity);
+ r_in[0] = store;
+ r_in[1] = objptr;
+ res = new_ir_node(db, irg, block, op_Sel, mode_T, r_arity, r_in);
res->attr.io.ent = ent;
- /* res = optimize (res);
- * irn_vrfy (res); */
- return (res);
+ /* res = optimize(res); */
+ IRN_VRFY_IRG(res, irg);
+ return res;
}
INLINE ir_node *
-new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, type_or_id_p value,
- symconst_kind symkind)
+new_rd_SymConst_type (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
+ symconst_kind symkind, type *tp)
{
ir_node *res;
ir_mode *mode;
- if (symkind == linkage_ptr_info)
- mode = mode_P;
+
+ if ((symkind == symconst_addr_name) || (symkind == symconst_addr_ent))
+ mode = mode_P_mach;
else
mode = mode_Iu;
- res = new_ir_node (db, irg, block, op_SymConst, mode, 0, NULL);
+ res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
res->attr.i.num = symkind;
- if (symkind == linkage_ptr_info) {
- res->attr.i.tori.ptrinfo = (ident *)value;
- } else {
- assert ( ( (symkind == type_tag)
- || (symkind == size))
- && (is_type(value)));
- res->attr.i.tori.typ = (type *)value;
- }
- res = optimize_node (res);
- irn_vrfy (res);
+ res->attr.i.sym = value;
+ res->attr.i.tp = tp;
+
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
+INLINE ir_node *
+new_rd_SymConst (dbg_info* db, ir_graph *irg, ir_node *block, symconst_symbol value,
+ symconst_kind symkind)
+{
+ ir_node *res = new_rd_SymConst_type(db, irg, block, value, symkind, unknown_type);
+ return res;
+}
+
+ir_node *new_rd_SymConst_addr_ent (dbg_info *db, ir_graph *irg, entity *symbol, type *tp) {
+ symconst_symbol sym = {(type *)symbol};
+ return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_ent, tp);
+}
+
+ir_node *new_rd_SymConst_addr_name (dbg_info *db, ir_graph *irg, ident *symbol, type *tp) {
+ symconst_symbol sym = {(type *)symbol};
+ return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_addr_name, tp);
+}
+
+ir_node *new_rd_SymConst_type_tag (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
+ symconst_symbol sym = {symbol};
+ return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_type_tag, tp);
+}
+
+ir_node *new_rd_SymConst_size (dbg_info *db, ir_graph *irg, type *symbol, type *tp) {
+ symconst_symbol sym = {symbol};
+ return new_rd_SymConst_type(db, irg, irg->start_block, sym, symconst_size, tp);
+}
+
INLINE ir_node *
new_rd_Sync (dbg_info* db, ir_graph *irg, ir_node *block, int arity, ir_node **in)
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_Sync, mode_M, arity, in);
-
- res = optimize_node (res);
- irn_vrfy (res);
+ res = new_ir_node(db, irg, block, op_Sync, mode_M, arity, in);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
}
INLINE ir_node *
-new_rd_Unknown (ir_graph *irg)
+new_rd_Confirm (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
+{
+ ir_node *in[2], *res;
+
+ in[0] = val;
+ in[1] = bound;
+ res = new_ir_node (db, irg, block, op_Confirm, get_irn_mode(val), 2, in);
+ res->attr.confirm_cmp = cmp;
+ res = optimize_node (res);
+ IRN_VRFY_IRG(res, irg);
+ return res;
+}
+
+INLINE ir_node *
+new_rd_Unknown (ir_graph *irg, ir_mode *m)
{
- return irg->unknown;
+ return new_ir_node(NULL, irg, irg->start_block, op_Unknown, m, 0, NULL);
}
INLINE ir_node *
new_rd_CallBegin (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *call)
{
- ir_node *in[1] = { get_Call_ptr(call) };
+ ir_node *in[1];
ir_node *res;
- res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
- res->attr.callbegin.irg = irg;
+
+ in[0] = get_Call_ptr(call);
+ res = new_ir_node(db, irg, block, op_CallBegin, mode_T, 1, in);
+ /* res->attr.callbegin.irg = irg; */
res->attr.callbegin.call = call;
- res = optimize_node (res);
- irn_vrfy (res);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
- res->attr.end.irg = irg;
-
- irn_vrfy (res);
+ res = new_ir_node(db, irg, block, op_EndReg, mode_T, -1, NULL);
+ irg->end_reg = res;
+ IRN_VRFY_IRG(res, irg);
return res;
}
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
- res->attr.end.irg = irg;
-
- irn_vrfy (res);
+ res = new_ir_node(db, irg, block, op_EndExcept, mode_T, -1, NULL);
+ irg->end_except = res;
+ IRN_VRFY_IRG (res, irg);
return res;
}
new_rd_Break (dbg_info *db, ir_graph *irg, ir_node *block)
{
ir_node *res;
- res = new_ir_node (db, irg, block, op_Break, mode_X, 0, NULL);
- res = optimize_node (res);
- irn_vrfy (res);
+
+ res = new_ir_node(db, irg, block, op_Break, mode_X, 0, NULL);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
INLINE ir_node *
new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
- long proj)
+ long proj)
{
- ir_node *in[1] = {arg};
ir_node *res;
- res = new_ir_node (db, irg, block, op_Filter, mode, 1, in);
+
+ res = new_ir_node(db, irg, block, op_Filter, mode, 1, &arg);
res->attr.filter.proj = proj;
res->attr.filter.in_cg = NULL;
res->attr.filter.backedge = NULL;
assert(res);
assert(get_Proj_pred(res));
- assert(get_nodes_Block(get_Proj_pred(res)));
-
- res = optimize_node (res);
+ assert(get_nodes_block(get_Proj_pred(res)));
- irn_vrfy (res);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
return res;
}
+ir_node *
+new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
+ ir_node *callee, int arity, ir_node **in, type *tp)
+{
+ ir_node **r_in;
+ ir_node *res;
+ int r_arity;
+
+ r_arity = arity+1;
+ NEW_ARR_A(ir_node *, r_in, r_arity);
+ r_in[0] = callee;
+ memcpy(&r_in[1], in, sizeof (ir_node *) * arity);
+
+ res = new_ir_node(db, irg, block, op_FuncCall, mode_T, r_arity, r_in);
+
+ assert(is_method_type(tp));
+ set_FuncCall_type(res, tp);
+ res->attr.call.callee_arr = NULL;
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, irg);
+ return res;
+}
+
+
INLINE ir_node *new_r_Block (ir_graph *irg, int arity, ir_node **in) {
return new_rd_Block(NULL, irg, arity, in);
}
return new_rd_Cond(NULL, irg, block, c);
}
INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
- ir_node *store, int arity, ir_node **in) {
+ ir_node *store, int arity, ir_node **in) {
return new_rd_Return(NULL, irg, block, store, arity, in);
}
INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *obj) {
+ ir_node *store, ir_node *obj) {
return new_rd_Raise(NULL, irg, block, store, obj);
}
INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
- ir_mode *mode, tarval *con) {
+ ir_mode *mode, tarval *con) {
return new_rd_Const(NULL, irg, block, mode, con);
}
INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
- type_or_id_p value, symconst_kind symkind) {
+ symconst_symbol value, symconst_kind symkind) {
return new_rd_SymConst(NULL, irg, block, value, symkind);
}
INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *objptr, int n_index, ir_node **index,
- entity *ent) {
+ ir_node *objptr, int n_index, ir_node **index,
+ entity *ent) {
return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
}
INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
- type *ent) {
+ type *ent) {
return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
}
INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *callee, int arity, ir_node **in,
- type *tp) {
+ ir_node *callee, int arity, ir_node **in,
+ type *tp) {
return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
}
INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Add(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Sub(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode) {
+ ir_node *op, ir_mode *mode) {
return new_rd_Minus(NULL, irg, block, op, mode);
}
INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Mul(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2) {
+ ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Quot(NULL, irg, block, memop, op1, op2);
}
INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2) {
+ ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
}
INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2) {
+ ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Div(NULL, irg, block, memop, op1, op2);
}
INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2) {
+ ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Mod(NULL, irg, block, memop, op1, op2);
}
INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode) {
+ ir_node *op, ir_mode *mode) {
return new_rd_Abs(NULL, irg, block, op, mode);
}
INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_And(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Or(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Eor(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode) {
+ ir_node *op, ir_mode *mode) {
return new_rd_Not(NULL, irg, block, op, mode);
}
INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2) {
+ ir_node *op1, ir_node *op2) {
return new_rd_Cmp(NULL, irg, block, op1, op2);
}
INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode) {
+ ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shl(NULL, irg, block, op, k, mode);
}
INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode) {
+ ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shr(NULL, irg, block, op, k, mode);
}
INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode) {
+ ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shrs(NULL, irg, block, op, k, mode);
}
INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode) {
+ ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Rot(NULL, irg, block, op, k, mode);
}
INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode) {
+ ir_node *op, ir_mode *mode) {
return new_rd_Conv(NULL, irg, block, op, mode);
}
+INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
+ return new_rd_Cast(NULL, irg, block, op, to_tp);
+}
INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
- ir_node **in, ir_mode *mode) {
+ ir_node **in, ir_mode *mode) {
return new_rd_Phi(NULL, irg, block, arity, in, mode);
}
INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr) {
- return new_rd_Load(NULL, irg, block, store, adr);
+ ir_node *store, ir_node *adr, ir_mode *mode) {
+ return new_rd_Load(NULL, irg, block, store, adr, mode);
}
INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr, ir_node *val) {
+ ir_node *store, ir_node *adr, ir_node *val) {
return new_rd_Store(NULL, irg, block, store, adr, val);
}
INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *size, type *alloc_type, where_alloc where) {
+ ir_node *size, type *alloc_type, where_alloc where) {
return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
}
INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *ptr, ir_node *size, type *free_type) {
+ ir_node *ptr, ir_node *size, type *free_type) {
return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
}
INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
return new_rd_Sync(NULL, irg, block, arity, in);
}
INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
- ir_mode *mode, long proj) {
+ ir_mode *mode, long proj) {
return new_rd_Proj(NULL, irg, block, arg, mode, proj);
}
INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
- long max_proj) {
+ long max_proj) {
return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
}
INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
- int arity, ir_node **in) {
+ int arity, ir_node **in) {
return new_rd_Tuple(NULL, irg, block, arity, in );
}
INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
- ir_node *val, ir_mode *mode) {
+ ir_node *val, ir_mode *mode) {
return new_rd_Id(NULL, irg, block, val, mode);
}
INLINE ir_node *new_r_Bad (ir_graph *irg) {
return new_rd_Bad(irg);
}
-INLINE ir_node *new_r_Unknown (ir_graph *irg) {
- return new_rd_Unknown(irg);
+INLINE ir_node *new_r_Confirm (ir_graph *irg, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp) {
+ return new_rd_Confirm (NULL, irg, block, val, bound, cmp);
+}
+INLINE ir_node *new_r_Unknown (ir_graph *irg, ir_mode *m) {
+ return new_rd_Unknown(irg, m);
}
INLINE ir_node *new_r_CallBegin (ir_graph *irg, ir_node *block, ir_node *callee) {
return new_rd_CallBegin(NULL, irg, block, callee);
return new_rd_Break(NULL, irg, block);
}
INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
- ir_mode *mode, long proj) {
+ ir_mode *mode, long proj) {
return new_rd_Filter(NULL, irg, block, arg, mode, proj);
}
+INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
+ ir_node *callee, int arity, ir_node **in,
+ type *tp) {
+ return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
+}
/** ********************/
ir_node *res;
res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
- op_Start, mode_T, 0, NULL);
+ op_Start, mode_T, 0, NULL);
+ /* res->attr.start.irg = current_ir_graph; */
- res = optimize_node (res);
- irn_vrfy (res);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, current_ir_graph);
return res;
}
new_d_End (dbg_info* db)
{
ir_node *res;
- res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
- op_End, mode_X, -1, NULL);
- res = optimize_node (res);
- irn_vrfy (res);
+ res = new_ir_node(db, current_ir_graph, current_ir_graph->current_block,
+ op_End, mode_X, -1, NULL);
+ res = optimize_node(res);
+ IRN_VRFY_IRG(res, current_ir_graph);
return res;
}
new_d_Block (dbg_info* db, int arity, ir_node **in)
{
ir_node *res;
+ int i;
+ bool has_unknown = false;
- res = new_rd_Block (db, current_ir_graph, arity, in);
+ res = new_rd_Block(db, current_ir_graph, arity, in);
/* Create and initialize array for Phi-node construction. */
- res->attr.block.graph_arr = NEW_ARR_D (ir_node *, current_ir_graph->obst,
- current_ir_graph->n_loc);
- memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
+ if (get_irg_phase_state(current_ir_graph) == phase_building) {
+ res->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
+ current_ir_graph->n_loc);
+ memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
+ }
- res = optimize_node (res);
+ for (i = arity-1; i >= 0; i--)
+ if (get_irn_op(in[i]) == op_Unknown) {
+ has_unknown = true;
+ break;
+ }
+
+ if (!has_unknown) res = optimize_node(res);
current_ir_graph->current_block = res;
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, current_ir_graph);
return res;
}
Call Graph: ( A ---> B == A "calls" B)
- get_value mature_block
+ get_value mature_immBlock
| |
| |
| |
\|/ / |/_ \
get_r_value_internal |
| |
- | |
- \|/ \|/
- new_rd_Phi0 new_rd_Phi_in
+ | |
+ \|/ \|/
+ new_rd_Phi0 new_rd_Phi_in
* *************************************************************************** */
-/* Creates a Phi node with 0 predecessors */
+/** Creates a Phi node with 0 predecessors */
static INLINE ir_node *
new_rd_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
{
ir_node *res;
- res = new_ir_node (NULL, irg, block, op_Phi, mode, 0, NULL);
- irn_vrfy (res);
+
+ res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
+ IRN_VRFY_IRG(res, irg);
return res;
}
*/
#if USE_EXPLICIT_PHI_IN_STACK
INLINE Phi_in_stack *
-new_Phi_in_stack() {
+new_Phi_in_stack(void) {
Phi_in_stack *res;
res = (Phi_in_stack *) malloc ( sizeof (Phi_in_stack));
- res->stack = NEW_ARR_F (ir_node *, 1);
+ res->stack = NEW_ARR_F (ir_node *, 0);
res->pos = 0;
return res;
}
static INLINE void
free_to_Phi_in_stack(ir_node *phi) {
- assert(get_irn_opcode(phi) == iro_Phi);
-
if (ARR_LEN(current_ir_graph->Phi_in_stack->stack) ==
current_ir_graph->Phi_in_stack->pos)
ARR_APP1 (ir_node *, current_ir_graph->Phi_in_stack->stack, phi);
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
- int arity, ir_node **in) {
+ int arity, ir_node **in) {
ir_node *res;
ir_node **stack = current_ir_graph->Phi_in_stack->stack;
int pos = current_ir_graph->Phi_in_stack->pos;
eliminates itself.
*/
static INLINE ir_node *
-new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
- ir_node **in, int ins)
+new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode, ir_node **in, int ins)
{
int i;
ir_node *res, *known;
- /* allocate a new node on the obstack.
- This can return a node to which some of the pointers in the in-array
- already point.
- Attention: the constructor copies the in array, i.e., the later changes
- to the array in this routine do not affect the constructed node! If
- the in array contains NULLs, there will be missing predecessors in the
- returned node.
- Is this a possible internal state of the Phi node generation? */
+ /* Allocate a new node on the obstack. This can return a node to
+ which some of the pointers in the in-array already point.
+ Attention: the constructor copies the in array, i.e., the later
+ changes to the array in this routine do not affect the
+ constructed node! If the in array contains NULLs, there will be
+ missing predecessors in the returned node. Is this a possible
+ internal state of the Phi node generation? */
#if USE_EXPLICIT_PHI_IN_STACK
res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
#else
res = known = new_ir_node (NULL, irg, block, op_Phi, mode, ins, in);
res->attr.phi_backedge = new_backedge_arr(irg->obst, ins);
#endif
+
/* The in-array can contain NULLs. These were returned by
get_r_value_internal if it reached the same block/definition on a
- second path.
- The NULLs are replaced by the node itself to simplify the test in the
- next loop. */
- for (i=0; i < ins; ++i)
- if (in[i] == NULL) in[i] = res;
+ second path. The NULLs are replaced by the node itself to
+ simplify the test in the next loop. */
+ for (i = 0; i < ins; ++i) {
+ if (in[i] == NULL)
+ in[i] = res;
+ }
/* This loop checks whether the Phi has more than one predecessor.
- If so, it is a real Phi node and we break the loop. Else the
- Phi node merges the same definition on several paths and therefore
- is not needed. */
- for (i=0; i < ins; ++i)
+ If so, it is a real Phi node and we break the loop. Else the Phi
+ node merges the same definition on several paths and therefore is
+ not needed. */
+ for (i = 0; i < ins; ++i)
{
- if (in[i]==res || in[i]==known) continue;
+ if (in[i] == res || in[i] == known) continue;
- if (known==res)
+ if (known == res)
known = in[i];
else
break;
res = known;
} else {
res = optimize_node (res);
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, irg);
}
/* return the pointer to the Phi node. This node might be deallocated! */
The call order
get_value (makes Phi0, put's it into graph_arr)
set_value (overwrites Phi0 in graph_arr)
- mature_block (upgrades Phi0, puts it again into graph_arr, overwriting
+ mature_immBlock (upgrades Phi0, puts it again into graph_arr, overwriting
the proper value.)
fails. */
if (!block->attr.block.graph_arr[pos]) {
block->attr.block.graph_arr[pos] = res;
} else {
/* printf(" value already computed by %s\n",
- id_to_str(block->attr.block.graph_arr[pos]->op->name)); */
+ get_id_str(block->attr.block.graph_arr[pos]->op->name)); */
}
return res;
* The recursion that visited this node and set the flag did not
return yet. We are computing a value in a loop and need to
break the recursion without knowing the result yet.
- @@@ strange case. Straight forward we would create a Phi before
- starting the computation of it's predecessors. In this case we will
- find a Phi here in any case. The problem is that this implementation
- only creates a Phi after computing the predecessors, so that it is
- hard to compute self references of this Phi. @@@
+ @@@ strange case. Straight forward we would create a Phi before
+ starting the computation of it's predecessors. In this case we will
+ find a Phi here in any case. The problem is that this implementation
+ only creates a Phi after computing the predecessors, so that it is
+ hard to compute self references of this Phi. @@@
There is no simple check for the second subcase. Therefore we check
for a second visit and treat all such cases as the second subcase.
Anyways, the basic situation is the same: we reached a block
implementation that relies on the fact that an obstack is a stack and
will return a node with the same address on different allocations.
Look also at phi_merge and new_rd_phi_in to understand this.
- @@@ Unfortunately this does not work, see testprogram
- three_cfpred_example.
+ @@@ Unfortunately this does not work, see testprogram
+ three_cfpred_example.
*/
res = block->attr.block.graph_arr[pos];
/* case 2 -- If the value is actually computed, return it. */
- if (res) { return res;};
+ if (res) return res;
if (block->attr.block.matured) { /* case 3 */
/* The block is not mature, we don't know how many in's are needed. A Phi
with zero predecessors is created. Such a Phi node is called Phi0
node. (There is also an obsolete Phi0 opcode.) The Phi0 is then added
- to the list of Phi0 nodes in this block to be matured by mature_block
+ to the list of Phi0 nodes in this block to be matured by mature_immBlock
later.
The Phi0 has to remember the pos of it's internal value. If the real
Phi is computed, pos is used to update the array with the local
printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
assert (mode->code >= irm_F && mode->code <= irm_P);
res = new_rd_Const (NULL, current_ir_graph, block, mode,
- tarval_mode_null[mode->code]);
+ tarval_mode_null[mode->code]);
}
/* The local valid value is available now. */
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
- ir_node **in, int ins)
+ ir_node **in, int ins, ir_node *phi0)
{
int i;
ir_node *res, *known;
known = res;
for (i=0; i < ins; ++i)
{
- assert(in[i]);
+ assert(in[i]);
+
+ in[i] = skip_Id(in[i]); /* increases the number of freed Phis. */
+
+ /* Optimize self referencing Phis: We can't detect them yet properly, as
+ they still refer to the Phi0 they will replace. So replace right now. */
+ if (phi0 && in[i] == phi0) in[i] = res;
if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
}
/* i==ins: there is at most one predecessor, we don't need a phi node. */
- if (i==ins) {
+ if (i == ins) {
if (res != known) {
obstack_free (current_ir_graph->obst, res);
- res = known;
+ if (is_Phi(known)) {
+ /* If pred is a phi node we want to optimize it: If loops are matured in a bad
+ order, an enclosing Phi node may become superfluous. */
+ res = optimize_in_place_2(known);
+ if (res != known) { exchange(known, res); }
+ } else {
+ res = known;
+ }
} else {
/* A undefined value, e.g., in unreachable code. */
res = new_Bad();
}
} else {
- res = optimize_node (res);
- irn_vrfy (res);
+ res = optimize_node (res); /* This is necessary to add the node to the hash table for cse. */
+ IRN_VRFY_IRG(res, irg);
/* Memory Phis in endless loops must be kept alive.
- As we can't distinguish these easily we keep all of the alive. */
+ As we can't distinguish these easily we keep all of them alive. */
if ((res->op == op_Phi) && (mode == mode_M))
add_End_keepalive(irg->end, res);
}
static ir_node *
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
-static INLINE ir_node **
-new_frag_arr (ir_node *n) {
+/* Construct a new frag_array for node n.
+ Copy the content from the current graph_arr of the corresponding block:
+ this is the current state.
+ Set ProjM(n) as current memory state.
+ Further the last entry in frag_arr of current block points to n. This
+ constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
+ */
+static INLINE ir_node ** new_frag_arr (ir_node *n)
+{
ir_node **arr;
int opt;
+
arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
- sizeof(ir_node *)*current_ir_graph->n_loc);
+ sizeof(ir_node *)*current_ir_graph->n_loc);
+
/* turn off optimization before allocating Proj nodes, as res isn't
finished yet. */
- opt = get_optimize(); set_optimize(0);
+ opt = get_opt_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
if (get_irn_op(n) == op_Call)
- arr[0] = new_Proj(n, mode_M, 3);
- else
- arr[0] = new_Proj(n, mode_M, 0);
+ arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
+ else {
+ assert((pn_Quot_M == pn_DivMod_M) &&
+ (pn_Quot_M == pn_Div_M) &&
+ (pn_Quot_M == pn_Mod_M) &&
+ (pn_Quot_M == pn_Load_M) &&
+ (pn_Quot_M == pn_Store_M) &&
+ (pn_Quot_M == pn_Alloc_M) );
+ arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
+ }
set_optimize(opt);
+
current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
return arr;
}
+/**
+ * returns the frag_arr from a node
+ */
static INLINE ir_node **
get_frag_arr (ir_node *n) {
- if (get_irn_op(n) == op_Call) {
- return n->attr.call.frag_arr;
- } else if (get_irn_op(n) == op_Alloc) {
- return n->attr.a.frag_arr;
- } else {
- return n->attr.frag_arr;
+ switch (get_irn_opcode(n)) {
+ case iro_Call:
+ return n->attr.call.exc.frag_arr;
+ case iro_Alloc:
+ return n->attr.a.exc.frag_arr;
+ case iro_Load:
+ return n->attr.load.exc.frag_arr;
+ case iro_Store:
+ return n->attr.store.exc.frag_arr;
+ default:
+ return n->attr.except.frag_arr;
}
}
static void
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
+#if 0
if (!frag_arr[pos]) frag_arr[pos] = val;
- if (frag_arr[current_ir_graph->n_loc - 1])
- set_frag_value (get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]), pos, val);
+ if (frag_arr[current_ir_graph->n_loc - 1]) {
+ ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
+ assert(arr != frag_arr && "Endless recursion detected");
+ set_frag_value(arr, pos, val);
+ }
+#else
+ int i;
+
+ for (i = 0; i < 1000; ++i) {
+ if (!frag_arr[pos]) {
+ frag_arr[pos] = val;
+ }
+ if (frag_arr[current_ir_graph->n_loc - 1]) {
+ ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
+ frag_arr = arr;
+ }
+ else
+ return;
+ }
+ assert(0 && "potential endless recursion");
+#endif
}
static ir_node *
if (!res) {
if (block->attr.block.graph_arr[pos]) {
/* There was a set_value after the cfOp and no get_value before that
- set_value. We must build a Phi node now. */
+ set_value. We must build a Phi node now. */
if (block->attr.block.matured) {
- int ins = get_irn_arity(block);
- ir_node **nin;
- NEW_ARR_A (ir_node *, nin, ins);
- res = phi_merge(block, pos, mode, nin, ins);
+ int ins = get_irn_arity(block);
+ ir_node **nin;
+ NEW_ARR_A (ir_node *, nin, ins);
+ res = phi_merge(block, pos, mode, nin, ins);
} else {
- res = new_rd_Phi0 (current_ir_graph, block, mode);
- res->attr.phi0_pos = pos;
- res->link = block->link;
- block->link = res;
+ res = new_rd_Phi0 (current_ir_graph, block, mode);
+ res->attr.phi0_pos = pos;
+ res->link = block->link;
+ block->link = res;
}
assert(res);
/* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
- but this should be better: (remove comment if this works) */
+ but this should be better: (remove comment if this works) */
/* It's a Phi, we can write this into all graph_arrs with NULL */
set_frag_value(block->attr.block.graph_arr, pos, res);
} else {
static ir_node *
phi_merge (ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins)
{
- ir_node *prevBlock, *prevCfOp, *res, *phi0;
+ ir_node *prevBlock, *prevCfOp, *res, *phi0, *phi0_all;
int i;
/* If this block has no value at pos create a Phi0 and remember it
Else we may not set graph_arr as there a later value is remembered. */
phi0 = NULL;
if (!block->attr.block.graph_arr[pos]) {
- /* This is commented out as collapsing to Bads is no good idea.
- Either we need an assert here, or we need to call a routine
- that deals with this case as appropriate for the given language.
- Right now a self referencing Id is created which will crash irg_vrfy().
-
- Even if all variables are defined before use, it can happen that
- we get to the start block, if a cond has been replaced by a tuple
- (bad, jmp). As the start has a self referencing control flow edge,
- we get a self referencing Id, which is hard to optimize away. We avoid
- this by defining the value as a Bad node.
- Returning a const with tarval_bad is a preliminary solution. In some
- situations we might want a Warning or an Error. */
-
if (block == get_irg_start_block(current_ir_graph)) {
- block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
+ /* Collapsing to Bad tarvals is not a good idea.
+ So we call a user-supplied routine here that deals with this case as
+ appropriate for the given language. Unfortunately the only help we can give
+ here is the position.
+
+ Even if all variables are defined before use, it can happen that
+ we get to the start block, if a cond has been replaced by a tuple
+ (bad, jmp). In this case we call the function needlessly, eventually
+ generating a non-existent error.
+ However, this SHOULD NOT HAPPEN, as bad control flow nodes are intercepted
+ before recursing.
+ */
+ if (default_initialize_local_variable)
+ block->attr.block.graph_arr[pos] = default_initialize_local_variable(mode, pos - 1);
+ else
+ block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
/* We don't need to care about exception ops in the start block.
There are none by definition. */
return block->attr.block.graph_arr[pos];
phi0 = new_rd_Phi0(current_ir_graph, block, mode);
block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
- /* Set graph_arr for fragile ops. Also here we should break recursion.
- We could choose a cyclic path through an cfop. But the recursion would
- break at some point. */
- set_frag_value(block->attr.block.graph_arr, pos, phi0);
+ if (get_opt_precise_exc_context()) {
+ /* Set graph_arr for fragile ops. Also here we should break recursion.
+ We could choose a cyclic path through an cfop. But the recursion would
+ break at some point. */
+ set_frag_value(block->attr.block.graph_arr, pos, phi0);
+ }
#endif
}
}
assert (prevCfOp);
if (is_Bad(prevCfOp)) {
/* In case a Cond has been optimized we would get right to the start block
- with an invalid definition. */
+ with an invalid definition. */
nin[i-1] = new_Bad();
continue;
}
assert (prevBlock);
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
- if (is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
+ if (get_opt_precise_exc_context() &&
+ is_fragile_op(prevCfOp) && (get_irn_op (prevCfOp) != op_Bad)) {
assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
} else
#endif
- nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
+ nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
} else {
nin[i-1] = new_Bad();
}
}
+ /* We want to pass the Phi0 node to the constructor: this finds additional
+ optimization possibilities.
+ The Phi0 node either is allocated in this function, or it comes from
+ a former call to get_r_value_internal. In this case we may not yet
+ exchange phi0, as this is done in mature_immBlock. */
+ if (!phi0) {
+ phi0_all = block->attr.block.graph_arr[pos];
+ if (!((get_irn_op(phi0_all) == op_Phi) &&
+ (get_irn_arity(phi0_all) == 0) &&
+ (get_nodes_block(phi0_all) == block)))
+ phi0_all = NULL;
+ } else {
+ phi0_all = phi0;
+ }
+
/* After collecting all predecessors into the array nin a new Phi node
with these predecessors is created. This constructor contains an
optimization: If all predecessors of the Phi node are identical it
returns the only operand instead of a new Phi node. */
- res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins);
+ res = new_rd_Phi_in (current_ir_graph, block, mode, nin, ins, phi0_all);
/* In case we allocated a Phi0 node at the beginning of this procedure,
we need to exchange this Phi0 with the real Phi. */
* The recursion that visited this node and set the flag did not
return yet. We are computing a value in a loop and need to
break the recursion. This case only happens if we visited
- the same block with phi_merge before, which inserted a Phi0.
- So we return the Phi0.
+ the same block with phi_merge before, which inserted a Phi0.
+ So we return the Phi0.
*/
/* case 4 -- already visited. */
/* The block is not mature, we don't know how many in's are needed. A Phi
with zero predecessors is created. Such a Phi node is called Phi0
node. The Phi0 is then added to the list of Phi0 nodes in this block
- to be matured by mature_block later.
+ to be matured by mature_immBlock later.
The Phi0 has to remember the pos of it's internal value. If the real
Phi is computed, pos is used to update the array with the local
values. */
printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
assert (mode->code >= irm_F && mode->code <= irm_P);
res = new_rd_Const (NULL, current_ir_graph, block, mode,
- tarval_mode_null[mode->code]);
+ get_mode_null(mode));
}
/* The local valid value is available now. */
/** Finalize a Block node, when all control flows are known. */
/** Acceptable parameters are only Block nodes. */
void
-mature_block (ir_node *block)
+mature_immBlock (ir_node *block)
{
int ins;
nodes refer to the unoptimized node.
We can call _2, as global cse has no effect on blocks. */
block = optimize_in_place_2(block);
- irn_vrfy(block);
+ IRN_VRFY_IRG(block, current_ir_graph);
}
}
ir_node *
new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
{
- return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
- arity, in, mode);
+ return new_rd_Phi(db, current_ir_graph, current_ir_graph->current_block,
+ arity, in, mode);
}
ir_node *
new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
{
- return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
- mode, con);
+ return new_rd_Const(db, current_ir_graph, current_ir_graph->start_block,
+ mode, con);
}
+ir_node *
+new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
+{
+ return new_rd_Const_type(db, current_ir_graph, current_ir_graph->start_block,
+ mode, con, tp);
+}
+
+
ir_node *
new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
{
- return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
- val, mode);
+ return new_rd_Id(db, current_ir_graph, current_ir_graph->current_block,
+ val, mode);
}
ir_node *
new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
{
- return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
- arg, mode, proj);
+ return new_rd_Proj(db, current_ir_graph, current_ir_graph->current_block,
+ arg, mode, proj);
}
ir_node *
new_d_defaultProj (dbg_info* db, ir_node *arg, long max_proj)
{
ir_node *res;
- assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
+ assert(arg->op == op_Cond);
arg->attr.c.kind = fragmentary;
arg->attr.c.default_proj = max_proj;
res = new_Proj (arg, mode_X, max_proj);
ir_node *
new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
{
- return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
- op, mode);
+ return new_rd_Conv(db, current_ir_graph, current_ir_graph->current_block,
+ op, mode);
+}
+
+ir_node *
+new_d_Cast (dbg_info* db, ir_node *op, type *to_tp)
+{
+ return new_rd_Cast(db, current_ir_graph, current_ir_graph->current_block, op, to_tp);
}
ir_node *
new_d_Tuple (dbg_info* db, int arity, ir_node **in)
{
- return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
- arity, in);
+ return new_rd_Tuple(db, current_ir_graph, current_ir_graph->current_block,
+ arity, in);
}
ir_node *
new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
- return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ return new_rd_Add(db, current_ir_graph, current_ir_graph->current_block,
+ op1, op2, mode);
}
ir_node *
new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
- return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ return new_rd_Sub(db, current_ir_graph, current_ir_graph->current_block,
+ op1, op2, mode);
}
ir_node *
-new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
+new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
{
- return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
- op, mode);
+ return new_rd_Minus(db, current_ir_graph, current_ir_graph->current_block,
+ op, mode);
}
ir_node *
new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
- return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ return new_rd_Mul(db, current_ir_graph, current_ir_graph->current_block,
+ op1, op2, mode);
}
+/**
+ * allocate the frag array
+ */
+static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
+ if (get_opt_precise_exc_context()) {
+ if ((current_ir_graph->phase_state == phase_building) &&
+ (get_irn_op(res) == op) && /* Could be optimized away. */
+ !*frag_store) /* Could be a cse where the arr is already set. */ {
+ *frag_store = new_frag_arr(res);
+ }
+ }
+}
+
+
ir_node *
new_d_Quot (dbg_info* db, ir_node *memop, ir_node *op1, ir_node *op2)
{
ir_node *res;
res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
- memop, op1, op2);
+ memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
- if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Quot)) /* Could be optimized away. */
- res->attr.frag_arr = new_frag_arr(res);
+ allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
return res;
{
ir_node *res;
res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
- memop, op1, op2);
+ memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
- if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
- res->attr.frag_arr = new_frag_arr(res);
+ allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
return res;
{
ir_node *res;
res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
- memop, op1, op2);
+ memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
- if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Div)) /* Could be optimized away. */
- res->attr.frag_arr = new_frag_arr(res);
+ allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
return res;
{
ir_node *res;
res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
- memop, op1, op2);
+ memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
- if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Mod)) /* Could be optimized away. */
- res->attr.frag_arr = new_frag_arr(res);
+ allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
return res;
new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
ir_node *
new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
ir_node *
new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
ir_node *
new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
{
return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
- op, mode);
+ op, mode);
}
ir_node *
new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
- op, k, mode);
+ op, k, mode);
}
ir_node *
new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
- op, k, mode);
+ op, k, mode);
}
ir_node *
new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
- op, k, mode);
+ op, k, mode);
}
ir_node *
new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
- op, k, mode);
+ op, k, mode);
}
ir_node *
new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
{
return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
- op, mode);
+ op, mode);
}
ir_node *
new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
{
return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2);
+ op1, op2);
}
ir_node *
ir_node *
new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
- type *tp)
+ type *tp)
{
ir_node *res;
res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
- store, callee, arity, in, tp);
+ store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
- if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Call)) /* Could be optimized away. */
- res->attr.call.frag_arr = new_frag_arr(res);
+ allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
#endif
return res;
new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
{
return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
- store, arity, in);
+ store, arity, in);
}
ir_node *
new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
{
return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
- store, obj);
+ store, obj);
}
ir_node *
-new_d_Load (dbg_info* db, ir_node *store, ir_node *addr)
+new_d_Load (dbg_info* db, ir_node *store, ir_node *addr, ir_mode *mode)
{
ir_node *res;
res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
- store, addr);
+ store, addr, mode);
#if PRECISE_EXC_CONTEXT
- if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Load)) /* Could be optimized away. */
- res->attr.frag_arr = new_frag_arr(res);
+ allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
#endif
return res;
{
ir_node *res;
res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
- store, addr, val);
+ store, addr, val);
#if PRECISE_EXC_CONTEXT
- if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Store)) /* Could be optimized away. */
- res->attr.frag_arr = new_frag_arr(res);
+ allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
#endif
return res;
{
ir_node *res;
res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
- store, size, alloc_type, where);
+ store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
- if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
- res->attr.a.frag_arr = new_frag_arr(res);
+ allocate_frag_arr(res, op_Alloc, &res->attr.a.exc.frag_arr); /* Could be optimized away. */
#endif
return res;
new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
{
return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
- store, ptr, size, free_type);
+ store, ptr, size, free_type);
}
ir_node *
as the operand could as well be a pointer to a dynamic object. */
{
return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
- store, objptr, 0, NULL, ent);
+ store, objptr, 0, NULL, ent);
}
ir_node *
new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
{
return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
- store, objptr, n_index, index, sel);
+ store, objptr, n_index, index, sel);
}
ir_node *
new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
{
return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
- store, objptr, ent));
+ store, objptr, ent));
+}
+
+ir_node *
+new_d_SymConst_type (dbg_info* db, symconst_symbol value, symconst_kind kind, type *tp)
+{
+ return new_rd_SymConst_type (db, current_ir_graph, current_ir_graph->start_block,
+ value, kind, tp);
}
ir_node *
-new_d_SymConst (dbg_info* db, type_or_id_p value, symconst_kind kind)
+new_d_SymConst (dbg_info* db, symconst_symbol value, symconst_kind kind)
{
- return new_rd_SymConst (db, current_ir_graph, current_ir_graph->current_block,
+ return new_rd_SymConst (db, current_ir_graph, current_ir_graph->start_block,
value, kind);
}
new_d_Sync (dbg_info* db, int arity, ir_node** in)
{
return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
- arity, in);
+ arity, in);
}
ir_node *
-new_d_Bad (void)
+(new_d_Bad)(void)
{
- return current_ir_graph->bad;
+ return __new_d_Bad();
}
ir_node *
-new_d_Unknown (void)
+new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
{
- return current_ir_graph->unknown;
+ return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
+ val, bound, cmp);
+}
+
+ir_node *
+new_d_Unknown (ir_mode *m)
+{
+ return new_rd_Unknown(current_ir_graph, m);
}
ir_node *
new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
{
return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
- arg, mode, proj);
+ arg, mode, proj);
+}
+
+ir_node *
+new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
+ type *tp)
+{
+ ir_node *res;
+ res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
+ callee, arity, in, tp);
+
+ return res;
}
/* ********************************************************************* */
/* (Uses also constructors of ?? interface, except new_Block. */
/* ********************************************************************* */
-/** Block construction **/
+/* * Block construction **/
/* immature Block without predecessors */
ir_node *new_d_immBlock (dbg_info* db) {
ir_node *res;
res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
current_ir_graph->current_block = res;
res->attr.block.matured = 0;
- res->attr.block.exc = exc_normal;
- res->attr.block.handler_entry = 0;
+ /* res->attr.block.exc = exc_normal; */
+ /* res->attr.block.handler_entry = 0; */
+ res->attr.block.irg = current_ir_graph;
res->attr.block.backedge = NULL;
res->attr.block.in_cg = NULL;
res->attr.block.cg_backedge = NULL;
memset(res->attr.block.graph_arr, 0, sizeof(ir_node *)*current_ir_graph->n_loc);
/* Immature block may not be optimized! */
- irn_vrfy (res);
+ IRN_VRFY_IRG(res, current_ir_graph);
return res;
}
INLINE ir_node *
-new_immBlock () {
+new_immBlock (void) {
return new_d_immBlock(NULL);
}
/* add an adge to a jmp/control flow node */
void
-add_in_edge (ir_node *block, ir_node *jmp)
+add_immBlock_pred (ir_node *block, ir_node *jmp)
{
if (block->attr.block.matured) {
assert(0 && "Error: Block already matured!\n");
}
else {
- assert (jmp != NULL);
- ARR_APP1 (ir_node *, block->in, jmp);
+ assert(jmp != NULL);
+ ARR_APP1(ir_node *, block->in, jmp);
}
}
/* changing the current block */
void
-switch_block (ir_node *target)
+set_cur_block (ir_node *target)
{
current_ir_graph->current_block = target;
}
/** Useful access routines **/
/* Returns the current block of the current graph. To set the current
- block use switch_block(). */
+ block use set_cur_block. */
ir_node *get_cur_block() {
return get_irg_current_block(current_ir_graph);
}
/* call once for each run of the library */
void
-init_cons (void)
+init_cons (default_initialize_local_variable_func_t *func)
{
+ default_initialize_local_variable = func;
}
/* call for each graph */
ir_node *new_Const (ir_mode *mode, tarval *con) {
return new_d_Const(NULL, mode, con);
}
-ir_node *new_SymConst (type_or_id_p value, symconst_kind kind) {
+ir_node *new_SymConst (symconst_symbol value, symconst_kind kind) {
return new_d_SymConst(NULL, value, kind);
}
ir_node *new_simpleSel(ir_node *store, ir_node *objptr, entity *ent) {
return new_d_Sel(NULL, store, objptr, arity, in, ent);
}
ir_node *new_InstOf (ir_node *store, ir_node *objptr, type *ent) {
- return (new_d_InstOf (NULL, store, objptr, ent));
+ return new_d_InstOf (NULL, store, objptr, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
- type *tp) {
+ type *tp) {
return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
ir_node *new_Conv (ir_node *op, ir_mode *mode) {
return new_d_Conv(NULL, op, mode);
}
+ir_node *new_Cast (ir_node *op, type *to_tp) {
+ return new_d_Cast(NULL, op, to_tp);
+}
ir_node *new_Phi (int arity, ir_node **in, ir_mode *mode) {
return new_d_Phi(NULL, arity, in, mode);
}
-ir_node *new_Load (ir_node *store, ir_node *addr) {
- return new_d_Load(NULL, store, addr);
+ir_node *new_Load (ir_node *store, ir_node *addr, ir_mode *mode) {
+ return new_d_Load(NULL, store, addr, mode);
}
ir_node *new_Store (ir_node *store, ir_node *addr, ir_node *val) {
return new_d_Store(NULL, store, addr, val);
return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
- type *free_type) {
+ type *free_type) {
return new_d_Free(NULL, store, ptr, size, free_type);
}
ir_node *new_Sync (int arity, ir_node **in) {
ir_node *new_Bad (void) {
return new_d_Bad();
}
-ir_node *new_Unknown(void) {
- return new_d_Unknown();
+ir_node *new_Confirm (ir_node *val, ir_node *bound, pn_Cmp cmp) {
+ return new_d_Confirm (NULL, val, bound, cmp);
+}
+ir_node *new_Unknown(ir_mode *m) {
+ return new_d_Unknown(m);
}
ir_node *new_CallBegin (ir_node *callee) {
return new_d_CallBegin(NULL, callee);
ir_node *new_Filter (ir_node *arg, ir_mode *mode, long proj) {
return new_d_Filter(NULL, arg, mode, proj);
}
+ir_node *new_FuncCall (ir_node *callee, int arity, ir_node **in, type *tp) {
+ return new_d_FuncCall(NULL, callee, arity, in, tp);
+}