static uninitialized_local_variable_func_t *default_initialize_local_variable = NULL;
/* creates a bd constructor for a binop */
-#define NEW_BD_BINOP(instr, float_support) \
+#define NEW_BD_BINOP(instr) \
static ir_node * \
new_bd_##instr(dbg_info *db, ir_node *block, \
ir_node *op1, ir_node *op2, ir_mode *mode) \
in[0] = op1; \
in[1] = op2; \
res = new_ir_node(db, irg, block, op_##instr, mode, 2, in); \
- if (float_support && mode_is_float(mode) && \
- (get_irg_fp_model(irg) & fp_exceptions)) \
- res->pinned = 1; \
res = optimize_node(res); \
IRN_VRFY_IRG(res, irg); \
return res; \
}
/* creates a bd constructor for an unop */
-#define NEW_BD_UNOP(instr, float_support) \
+#define NEW_BD_UNOP(instr) \
static ir_node * \
new_bd_##instr(dbg_info *db, ir_node *block, \
ir_node *op, ir_mode *mode) \
ir_node *res; \
ir_graph *irg = current_ir_graph; \
res = new_ir_node(db, irg, block, op_##instr, mode, 1, &op); \
- if (float_support && mode_is_float(mode) && \
- (get_irg_fp_model(irg) & fp_exceptions)) \
- res->pinned = 1; \
res = optimize_node(res); \
IRN_VRFY_IRG(res, irg); \
return res; \
}
/* creates a bd constructor for a divop */
-#define NEW_BD_DIVOP(instr, float_support) \
+#define NEW_BD_DIVOP(instr) \
static ir_node * \
new_bd_##instr(dbg_info *db, ir_node *block, \
ir_node *memop, ir_node *op1, ir_node *op2) \
ir_node *in[3]; \
ir_node *res; \
ir_graph *irg = current_ir_graph; \
- ir_mode *mode = get_irn_mode(op1); \
in[0] = memop; \
in[1] = op1; \
in[2] = op2; \
res = new_ir_node(db, irg, block, op_##instr, mode_T, 3, in); \
- if (float_support && mode_is_float(mode) && \
- (get_irg_fp_model(irg) & fp_exceptions)) \
- res->pinned = 1; \
res = optimize_node(res); \
IRN_VRFY_IRG(res, irg); \
return res; \
res = new_ir_node(db, irg, block, op_Conv, mode, 1, &op);
res->attr.conv.strict = strict_flag;
- if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_exceptions)
- res->pinned = 1;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
return res;
} /* new_bd_Tuple */
-#define supports_float 1
-#define only_integer 0
-
-NEW_BD_BINOP(Add, supports_float)
-NEW_BD_BINOP(Sub, supports_float)
-NEW_BD_UNOP(Minus, supports_float)
-NEW_BD_BINOP(Mul, supports_float)
-NEW_BD_DIVOP(Quot, supports_float)
-NEW_BD_DIVOP(DivMod, only_integer)
-NEW_BD_DIVOP(Div, only_integer)
-NEW_BD_DIVOP(Mod, only_integer)
-NEW_BD_BINOP(And, only_integer)
-NEW_BD_BINOP(Or, only_integer)
-NEW_BD_BINOP(Eor, only_integer)
-NEW_BD_UNOP(Not, only_integer)
-NEW_BD_BINOP(Shl, only_integer)
-NEW_BD_BINOP(Shr, only_integer)
-NEW_BD_BINOP(Shrs, only_integer)
-NEW_BD_BINOP(Rot, only_integer)
-NEW_BD_UNOP(Abs, supports_float)
-NEW_BD_BINOP(Carry, only_integer)
-NEW_BD_BINOP(Borrow, only_integer)
+NEW_BD_BINOP(Add)
+NEW_BD_BINOP(Sub)
+NEW_BD_UNOP(Minus)
+NEW_BD_BINOP(Mul)
+NEW_BD_DIVOP(Quot)
+NEW_BD_DIVOP(DivMod)
+NEW_BD_DIVOP(Div)
+NEW_BD_DIVOP(Mod)
+NEW_BD_BINOP(And)
+NEW_BD_BINOP(Or)
+NEW_BD_BINOP(Eor)
+NEW_BD_UNOP(Not)
+NEW_BD_BINOP(Shl)
+NEW_BD_BINOP(Shr)
+NEW_BD_BINOP(Shrs)
+NEW_BD_BINOP(Rot)
+NEW_BD_UNOP(Abs)
+NEW_BD_BINOP(Carry)
+NEW_BD_BINOP(Borrow)
static ir_node *
new_bd_Cmp(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2)
ir_node *in[2];
ir_node *res;
ir_graph *irg = current_ir_graph;
- ir_mode *mode = get_irn_mode(op1);
in[0] = op1;
in[1] = op2;
res = new_ir_node(db, irg, block, op_Cmp, mode_T, 2, in);
- if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_exceptions)
- res->pinned = 1;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
assert((get_unknown_type() == tp) || is_Method_type(tp));
set_Call_type(res, tp);
- res->attr.call.callee_arr = NULL;
+ res->attr.call.exc.pin_state = op_pin_state_pinned;
+ res->attr.call.callee_arr = NULL;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
in[0] = store;
in[1] = adr;
res = new_ir_node(db, irg, block, op_Load, mode_T, 2, in);
- res->attr.load.load_mode = mode;
- res->attr.load.volatility = volatility_non_volatile;
+ res->attr.load.exc.pin_state = op_pin_state_pinned;
+ res->attr.load.load_mode = mode;
+ res->attr.load.volatility = volatility_non_volatile;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
in[1] = adr;
in[2] = val;
res = new_ir_node(db, irg, block, op_Store, mode_T, 3, in);
- res->attr.store.volatility = volatility_non_volatile;
+ res->attr.store.exc.pin_state = op_pin_state_pinned;
+ res->attr.store.volatility = volatility_non_volatile;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
in[0] = store;
in[1] = size;
res = new_ir_node(db, irg, block, op_Alloc, mode_T, 2, in);
- res->attr.alloc.where = where;
- res->attr.alloc.type = alloc_type;
+ res->attr.alloc.exc.pin_state = op_pin_state_pinned;
+ res->attr.alloc.where = where;
+ res->attr.alloc.type = alloc_type;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
} /* new_bd_Sync */
static ir_node *
-new_bd_Confirm(dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
+new_bd_Confirm (dbg_info *db, ir_node *block, ir_node *val, ir_node *bound, pn_Cmp cmp)
{
ir_node *in[2], *res;
ir_graph *irg = current_ir_graph;
in[2] = src;
res = new_ir_node(db, irg, block, op_CopyB, mode_T, 3, in);
- res->attr.copyb.data_type = data_type;
+
+ res->attr.copyb.exc.pin_state = op_pin_state_pinned;
+ res->attr.copyb.data_type = data_type;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
in[2] = lower;
in[3] = upper;
res = new_ir_node(db, irg, block, op_Bound, mode_T, 4, in);
+ res->attr.bound.exc.pin_state = op_pin_state_pinned;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
return res;
new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
ir_node *res;
res = new_bd_Quot (db, current_ir_graph->current_block, memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
new_d_DivMod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
ir_node *res;
res = new_bd_DivMod (db, current_ir_graph->current_block, memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
{
ir_node *res;
res = new_bd_Div (db, current_ir_graph->current_block, memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
new_d_Mod(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2) {
ir_node *res;
res = new_bd_Mod (db, current_ir_graph->current_block, memop, op1, op2);
+ res->attr.except.pin_state = op_pin_state_pinned;
#if PRECISE_EXC_CONTEXT
allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
#endif
* File name: ir/ir/irnode.c
* Purpose: Representation of an intermediate operation.
* Author: Martin Trapp, Christian Schaefer
- * Modified by: Goetz Lindenmaier
+ * Modified by: Goetz Lindenmaier, Michael Beck
* Created:
* CVS-ID: $Id$
 * Copyright: (c) 1998-2003 Universität Karlsruhe
}
/*
- * IR node constructor.
- * Create a new IR node in irg, with an op, mode, arity and
- * some incoming IR nodes.
+ * irnode constructor.
+ * Create a new irnode in irg, with an op, mode, arity and
+ * some incoming irnodes.
* If arity is negative, a node with a dynamic array is created.
*/
ir_node *
-new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
+new_ir_node (dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
int arity, ir_node **in)
{
ir_node *res;
size_t node_size = offsetof(ir_node, attr) + op->attr_size + firm_add_node_size;
char *p;
int i, is_bl;
- op_pin_state state;
assert(irg && op && mode);
p = obstack_alloc (irg->obst, node_size);
memset(p, 0, node_size);
res = (ir_node *) (p + firm_add_node_size);
- state = get_op_pinned(op);
-
- res->kind = k_ir_node;
- res->op = op;
- res->mode = mode;
- res->visited = 0;
- res->node_idx = irg_register_node_idx(irg, res);
- res->pinned = state != op_pin_state_floats;
- res->link = NULL;
+ res->kind = k_ir_node;
+ res->op = op;
+ res->mode = mode;
+ res->visited = 0;
+ res->node_idx = irg_register_node_idx(irg, res);
+ res->link = NULL;
if (arity < 0) {
res->in = NEW_ARR_F (ir_node *, 1); /* 1: space for block */
} else {
if (get_irn_op(node) == op_Tuple)
return;
- assert(node);
- assert(
- /* the node is exception/memory pinned OR */
- (get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned) ||
- /* a floating point node can be pinned if fp_exceptions are enabled */
- (mode_is_float(get_irn_mode(node)) && get_irg_fp_model(get_irn_irg(node)) & fp_exceptions)
- );
+ assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
assert(state == op_pin_state_pinned || state == op_pin_state_floats);
- node->pinned = state != op_pin_state_floats;
+ node->attr.except.pin_state = state;
}
#ifdef DO_HEAPANALYSIS
#include "irextbb_t.h"
-/** IR node attributes **/
+/** ir node attributes **/
/** Block attributes */
typedef struct {
/** Exception attributes. */
typedef struct {
+ op_pin_state pin_state; /**< the pin state for operations that might generate an exception:
+ If it's known that no exception will be generated, could be set to
+ op_pin_state_floats. */
#if PRECISE_EXC_CONTEXT
struct ir_node **frag_arr; /**< For Phi node construction in case of exception */
#endif
struct ir_node **in; /**< The array of predecessors / operands. */
unsigned long visited; /**< Visited counter for walks of the graph. */
unsigned node_idx; /**< The node index of this node in its graph. */
- unsigned pinned : 1; /**< A node is either pinned or not. */
void *link; /**< To attach additional information to the node, e.g.
used while construction to link Phi0 nodes and
during optimization to link to nodes that
state = _get_op_pinned(_get_irn_op(node));
if (state >= op_pin_state_exc_pinned)
- return get_opt_fragile_ops() ? (op_pin_state)node->pinned : op_pin_state_pinned;
- return (op_pin_state)node->pinned;
+ return get_opt_fragile_ops() ? node->attr.except.pin_state : op_pin_state_pinned;
+ return state;
}
static INLINE op_pin_state
_is_irn_pinned_in_irg(const ir_node *node) {
- if (_get_irg_pinned(get_irn_irg(node)) == op_pin_state_floats)
- return _get_irn_pinned(node);
+ if (get_irg_pinned(get_irn_irg(node)) == op_pin_state_floats)
+ return get_irn_pinned(node);
return op_pin_state_pinned;
}
}
}
-static INLINE tarval *_get_Const_tarval (ir_node *node) {
- assert (_get_irn_op(node) == op_Const);
+static INLINE tarval *_get_Const_tarval(ir_node *node) {
+ assert(_get_irn_op(node) == op_Const);
return node->attr.con.tv;
}
}
static INLINE cond_jmp_predicate _get_Cond_jmp_pred(ir_node *node) {
- assert (_get_irn_op(node) == op_Cond);
+ assert(_get_irn_op(node) == op_Cond);
return node->attr.cond.pred;
}
static INLINE void _set_Cond_jmp_pred(ir_node *node, cond_jmp_predicate pred) {
- assert (_get_irn_op(node) == op_Cond);
+ assert(_get_irn_op(node) == op_Cond);
node->attr.cond.pred = pred;
}
#define get_Psi_n_conds(node) _get_Psi_n_conds(node)
#define get_irn_idx(node) _get_irn_idx(node)
-# endif /* _IRNODE_T_H_ */
+#endif /* _IRNODE_T_H_ */