{
libFIRM_opt = *state;
}
+
+/* repeat 'inline' methods here */
+
+# ifndef USE_GCC_INLINE
+
+/** Returns common subexpression elimination setting. */
+int get_opt_cse(void) /* iropt.c */
+{
+ return libFIRM_opt & OPT_CSE;
+}
+
+/** Returns global common subexpression elimination setting. */
+int get_opt_global_cse(void) /* irgopt.c iropt.c */
+{
+ return libFIRM_opt & OPT_GLOBAL_CSE;
+}
+
+/** Returns constant folding setting. */
+int get_opt_constant_folding(void) /* iropt.c */
+{
+ return libFIRM_opt & OPT_CONSTANT_FOLDING;
+}
+
+/** Returns unreachable code elimination setting. */
+int get_opt_unreachable_code(void) /* iropt.c */
+{
+ return libFIRM_opt & OPT_UNREACHABLE_CODE;
+}
+
+/** Returns Straightening setting. */
+int get_opt_control_flow_straightening(void) /* iropt.c, irgopt.c */
+{
+ return libFIRM_opt & OPT_CONTROL_FLOW_STRAIGHTENING;
+}
+
+/** Returns if simplifications in local optimizations setting. */
+int get_opt_control_flow_weak_simplification(void) /* iropt.c, irgopt.c */
+{
+ return libFIRM_opt & OPT_CONTROL_FLOW_WEAK_SIMPLIFICATION;
+}
+
+/** Returns strong if and loop simplification setting */
+int get_opt_control_flow_strong_simplification(void) /* irgopt.c */
+{
+ return libFIRM_opt & OPT_CONTROL_FLOW_STRONG_SIMPLIFICATION;
+}
+
+/** Returns whether critical edges are removed */
+int get_opt_critical_edges(void) /* irgopt.c */
+{
+ return libFIRM_opt & OPT_CRITICAL_EDGES;
+}
+
+/** Returns reassociation setting. */
+int get_opt_reassociation(void) /* iropt.c */
+{
+ return libFIRM_opt & OPT_REASSOCIATION;
+}
+
+/** Returns dead node elimination setting. */
+int get_opt_dead_node_elimination(void) /* irgopt.c */
+{
+ return libFIRM_opt & OPT_DEAD_NODE_ELIMINATION;
+}
+
+/** Returns global optimization setting */
+int get_opt_optimize(void) /* iropt.c, irgopt.c */
+{
+ return libFIRM_opt & OPT_OPTIMIZED;
+}
+
+/** Returns inlining setting. */ /* how appropriate */
+int get_opt_inline(void) /* irgopt.c */
+{
+ return libFIRM_opt & OPT_INLINE;
+}
+
+int get_opt_dyn_meth_dispatch(void) /* cgana.c */
+{
+ return libFIRM_opt & OPT_DYN_METH_DISPATCH;
+}
+
+int get_opt_normalize(void) /* irgopt.c, irnode.c, iropt.c */
+{
+ return libFIRM_opt & OPT_NORMALIZE;
+}
+
+# endif /* not defined USE_GCC_INLINE */
OPT_NORMALIZE = 0x00001000,
/** Turn off all optimizations. */
- OPT_OPTIMIZED = 0x40000000,
+ OPT_OPTIMIZED = 0x40000000
} libfirm_opts_t;
extern optimization_state_t libFIRM_opt;
+/* take care of the INLINE/USE_GCC_INLINE mess */
+
+# ifndef INLINE
+# ifdef USE_GCC_INLINE
+# define INLINE __extension__ ((__inline__))
+# else /* defined USE_GCC_INLINE */
+# define INLINE
+# endif /* defined USE_GCC_INLINE */
+# endif /* defined INLINE */
+
+
/** Returns constant folding optimization setting. */
-static INLINE int get_opt_cse(void)
+INLINE int get_opt_cse(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_CSE;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns constant subexpression elimination setting. */
-static INLINE int get_opt_global_cse(void)
+INLINE int get_opt_global_cse(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_GLOBAL_CSE;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns global constant subexpression elimination setting. */
-static INLINE int get_opt_constant_folding(void)
+INLINE int get_opt_constant_folding(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_CONSTANT_FOLDING;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns unreachable code elimination setting. */
-static INLINE int get_opt_unreachable_code(void)
+INLINE int get_opt_unreachable_code(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_UNREACHABLE_CODE;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns Straightening setting. */
-static INLINE int get_opt_control_flow_straightening(void)
+INLINE int get_opt_control_flow_straightening(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_CONTROL_FLOW_STRAIGHTENING;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns if simplifications in local optimizations setting. */
-static INLINE int get_opt_control_flow_weak_simplification(void)
+INLINE int get_opt_control_flow_weak_simplification(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_CONTROL_FLOW_WEAK_SIMPLIFICATION;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns strong if and loop simplification setting */
-static INLINE int get_opt_control_flow_strong_simplification(void)
+INLINE int get_opt_control_flow_strong_simplification(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_CONTROL_FLOW_STRONG_SIMPLIFICATION;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns whether critical edges are removed */
-static INLINE int get_opt_critical_edges(void)
+INLINE int get_opt_critical_edges(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_CRITICAL_EDGES;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns reassociation setting. */
-static INLINE int get_opt_reassociation(void)
+INLINE int get_opt_reassociation(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_REASSOCIATION;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns dead node elimination setting. */
-static INLINE int get_opt_dead_node_elimination(void)
+INLINE int get_opt_dead_node_elimination(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_DEAD_NODE_ELIMINATION;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns global optimization setting */
-static INLINE int get_opt_optimize(void)
+INLINE int get_opt_optimize(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_OPTIMIZED;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns inlining setting. */
-static INLINE int get_opt_inline(void)
+INLINE int get_opt_inline(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_INLINE;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
-static INLINE int get_opt_dyn_meth_dispatch(void)
+INLINE int get_opt_dyn_meth_dispatch(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_DYN_METH_DISPATCH;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
-static INLINE int get_opt_normalize(void)
+INLINE int get_opt_normalize(void)
+# ifdef USE_GCC_INLINE
{
return libFIRM_opt & OPT_NORMALIZE;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
#endif /* _IRFLAG_T_H_ */
# include "irnode_t.h"
# include "firmstat.h"
+# include "iropt.h" /* for firm_set_default_operations */
+
# include "xmalloc.h"
ir_op *op_Block; ir_op *get_op_Block (void) { return op_Block; }
-ir_op *op_Start; ir_op *get_op_Start (void) { return op_Start; }
-ir_op *op_End; ir_op *get_op_End (void) { return op_End; }
-ir_op *op_Jmp; ir_op *get_op_Jmp (void) { return op_Jmp; }
-ir_op *op_Cond; ir_op *get_op_Cond (void) { return op_Cond; }
-ir_op *op_Return; ir_op *get_op_Return (void) { return op_Return; }
-ir_op *op_Raise; ir_op *get_op_Raise (void) { return op_Raise; }
-
-ir_op *op_Sel; ir_op *get_op_Sel (void) { return op_Sel; }
-ir_op *op_InstOf; ir_op *get_op_InstOf (void) { return op_InstOf; }
-
-ir_op *op_Const; ir_op *get_op_Const (void) { return op_Const; }
-ir_op *op_SymConst; ir_op *get_op_SymConst (void) { return op_SymConst; }
-
-ir_op *op_Call; ir_op *get_op_Call (void) { return op_Call; }
-ir_op *op_Add; ir_op *get_op_Add (void) { return op_Add; }
-ir_op *op_Sub; ir_op *get_op_Sub (void) { return op_Sub; }
-ir_op *op_Minus; ir_op *get_op_Minus (void) { return op_Minus; }
-ir_op *op_Mul; ir_op *get_op_Mul (void) { return op_Mul; }
-ir_op *op_Quot; ir_op *get_op_Quot (void) { return op_Quot; }
-ir_op *op_DivMod; ir_op *get_op_DivMod (void) { return op_DivMod; }
-ir_op *op_Div; ir_op *get_op_Div (void) { return op_Div; }
-ir_op *op_Mod; ir_op *get_op_Mod (void) { return op_Mod; }
-ir_op *op_Abs; ir_op *get_op_Abs (void) { return op_Abs; }
-ir_op *op_And; ir_op *get_op_And (void) { return op_And; }
-ir_op *op_Or; ir_op *get_op_Or (void) { return op_Or; }
-ir_op *op_Eor; ir_op *get_op_Eor (void) { return op_Eor; }
-ir_op *op_Not; ir_op *get_op_Not (void) { return op_Not; }
-ir_op *op_Cmp; ir_op *get_op_Cmp (void) { return op_Cmp; }
-ir_op *op_Shl; ir_op *get_op_Shl (void) { return op_Shl; }
-ir_op *op_Shr; ir_op *get_op_Shr (void) { return op_Shr; }
-ir_op *op_Shrs; ir_op *get_op_Shrs (void) { return op_Shrs; }
-ir_op *op_Rot; ir_op *get_op_Rot (void) { return op_Rot; }
-ir_op *op_Conv; ir_op *get_op_Conv (void) { return op_Conv; }
+ir_op *op_Start; ir_op *get_op_Start (void) { return op_Start; }
+ir_op *op_End; ir_op *get_op_End (void) { return op_End; }
+ir_op *op_Jmp; ir_op *get_op_Jmp (void) { return op_Jmp; }
+ir_op *op_Cond; ir_op *get_op_Cond (void) { return op_Cond; }
+ir_op *op_Return; ir_op *get_op_Return (void) { return op_Return; }
+ir_op *op_Raise; ir_op *get_op_Raise (void) { return op_Raise; }
+
+ir_op *op_Sel; ir_op *get_op_Sel (void) { return op_Sel; }
+ir_op *op_InstOf; ir_op *get_op_InstOf (void) { return op_InstOf; }
+
+ir_op *op_Const; ir_op *get_op_Const (void) { return op_Const; }
+ir_op *op_SymConst; ir_op *get_op_SymConst (void) { return op_SymConst; }
+
+ir_op *op_Call; ir_op *get_op_Call (void) { return op_Call; }
+ir_op *op_Add; ir_op *get_op_Add (void) { return op_Add; }
+ir_op *op_Sub; ir_op *get_op_Sub (void) { return op_Sub; }
+ir_op *op_Minus; ir_op *get_op_Minus (void) { return op_Minus; }
+ir_op *op_Mul; ir_op *get_op_Mul (void) { return op_Mul; }
+ir_op *op_Quot; ir_op *get_op_Quot (void) { return op_Quot; }
+ir_op *op_DivMod; ir_op *get_op_DivMod (void) { return op_DivMod; }
+ir_op *op_Div; ir_op *get_op_Div (void) { return op_Div; }
+ir_op *op_Mod; ir_op *get_op_Mod (void) { return op_Mod; }
+ir_op *op_Abs; ir_op *get_op_Abs (void) { return op_Abs; }
+ir_op *op_And; ir_op *get_op_And (void) { return op_And; }
+ir_op *op_Or; ir_op *get_op_Or (void) { return op_Or; }
+ir_op *op_Eor; ir_op *get_op_Eor (void) { return op_Eor; }
+ir_op *op_Not; ir_op *get_op_Not (void) { return op_Not; }
+ir_op *op_Cmp; ir_op *get_op_Cmp (void) { return op_Cmp; }
+ir_op *op_Shl; ir_op *get_op_Shl (void) { return op_Shl; }
+ir_op *op_Shr; ir_op *get_op_Shr (void) { return op_Shr; }
+ir_op *op_Shrs; ir_op *get_op_Shrs (void) { return op_Shrs; }
+ir_op *op_Rot; ir_op *get_op_Rot (void) { return op_Rot; }
+ir_op *op_Conv; ir_op *get_op_Conv (void) { return op_Conv; }
ir_op *op_Cast; ir_op *get_op_Cast (void) { return op_Cast; }
-ir_op *op_Phi; ir_op *get_op_Phi (void) { return op_Phi; }
+ir_op *op_Phi; ir_op *get_op_Phi (void) { return op_Phi; }
-ir_op *op_Load; ir_op *get_op_Load (void) { return op_Load; }
-ir_op *op_Store; ir_op *get_op_Store (void) { return op_Store; }
-ir_op *op_Alloc; ir_op *get_op_Alloc (void) { return op_Alloc; }
-ir_op *op_Free; ir_op *get_op_Free (void) { return op_Free; }
-ir_op *op_Sync; ir_op *get_op_Sync (void) { return op_Sync; }
+ir_op *op_Load; ir_op *get_op_Load (void) { return op_Load; }
+ir_op *op_Store; ir_op *get_op_Store (void) { return op_Store; }
+ir_op *op_Alloc; ir_op *get_op_Alloc (void) { return op_Alloc; }
+ir_op *op_Free; ir_op *get_op_Free (void) { return op_Free; }
+ir_op *op_Sync; ir_op *get_op_Sync (void) { return op_Sync; }
-ir_op *op_Tuple; ir_op *get_op_Tuple (void) { return op_Tuple; }
-ir_op *op_Proj; ir_op *get_op_Proj (void) { return op_Proj; }
-ir_op *op_Id; ir_op *get_op_Id (void) { return op_Id; }
-ir_op *op_Bad; ir_op *get_op_Bad (void) { return op_Bad; }
-ir_op *op_Confirm; ir_op *get_op_Confirm (void) { return op_Confirm; }
+ir_op *op_Tuple; ir_op *get_op_Tuple (void) { return op_Tuple; }
+ir_op *op_Proj; ir_op *get_op_Proj (void) { return op_Proj; }
+ir_op *op_Id; ir_op *get_op_Id (void) { return op_Id; }
+ir_op *op_Bad; ir_op *get_op_Bad (void) { return op_Bad; }
+ir_op *op_Confirm; ir_op *get_op_Confirm (void) { return op_Confirm; }
-ir_op *op_Unknown; ir_op *get_op_Unknown (void) { return op_Unknown; }
-ir_op *op_Filter; ir_op *get_op_Filter (void) { return op_Filter; }
-ir_op *op_Break; ir_op *get_op_Break (void) { return op_Break; }
-ir_op *op_CallBegin; ir_op *get_op_CallBegin (void) { return op_CallBegin; }
-ir_op *op_EndReg; ir_op *get_op_EndReg (void) { return op_EndReg; }
-ir_op *op_EndExcept; ir_op *get_op_EndExcept (void) { return op_EndExcept; }
+ir_op *op_Unknown; ir_op *get_op_Unknown (void) { return op_Unknown; }
+ir_op *op_Filter; ir_op *get_op_Filter (void) { return op_Filter; }
+ir_op *op_Break; ir_op *get_op_Break (void) { return op_Break; }
+ir_op *op_CallBegin; ir_op *get_op_CallBegin (void) { return op_CallBegin; }
+ir_op *op_EndReg; ir_op *get_op_EndReg (void) { return op_EndReg; }
+ir_op *op_EndExcept; ir_op *get_op_EndExcept (void) { return op_EndExcept; }
-ir_op *op_FuncCall; ir_op *get_op_FuncCall (void) { return op_FuncCall; }
+ir_op *op_FuncCall; ir_op *get_op_FuncCall (void) { return op_FuncCall; }
ir_op *
void
init_op(void)
{
-#define L irop_flag_labeled
-#define C irop_flag_commutative
-#define X irop_flag_cfopcode
-#define I irop_flag_ip_cfopcode
-#define F irop_flag_fragile
+#define L irop_flag_labeled
+#define C irop_flag_commutative
+#define X irop_flag_cfopcode
+#define I irop_flag_ip_cfopcode
+#define F irop_flag_fragile
op_Block = new_ir_op(iro_Block, "Block", pinned, L, oparity_variable, -1, sizeof(block_attr));
if (op == op_Block || op == op_Phi || is_cfopcode(op)) return;
op->pinned = pinned;
}
+
+/* repeat 'inline' methods here */
+
+# ifndef USE_GCC_INLINE
+/** Returns the attribute size of nodes of this opcode.
+ @note Use not encouraged, internal feature. */
+int get_op_attr_size (const ir_op *op) { /* used in irnode.c */
+ return op->attr_size;
+}
+
+/** Returns non-zero if op is one of Start, End, Jmp, Cond, Return, Raise or Bad. */
+int is_cfopcode(const ir_op *op) { /* used in irnode.c */
+ return op->flags & irop_flag_cfopcode;
+}
+
+/** Returns true if the operation manipulates interprocedural control flow:
+ CallBegin, EndReg, EndExcept */
+int is_ip_cfopcode(const ir_op *op) { /* used in irnode.c */
+ return op->flags & irop_flag_ip_cfopcode;
+}
+
+/* Returns non-zero if operation is commutative */
+int is_op_commutative(const ir_op *op) { /* used in iropt.c */
+ return op->flags & irop_flag_commutative;
+}
+
+/* Returns non-zero if operation is fragile */
+int is_op_fragile(const ir_op *op) { /* used in irnode.c */
+ return op->flags & irop_flag_fragile;
+}
+# endif /* not defined USE_GCC_INLINE */
oparity_trinary, /**< an trinary operator -- considering 'numeric' arguments.*/
oparity_zero, /**< no operators, as e.g. Const. */
oparity_variable, /**< arity not fixed by opcode, but statically
- known. E.g., number of arguments to call. */
+ known. E.g., number of arguments to call. */
oparity_dynamic, /**< arity depends on state of firm representation.
- Can change by optimizations...
- We must allocate a dynamic in array for the node! */
- oparity_any, /**< other arity */
+ Can change by optimizations...
+ We must allocate a dynamic in array for the node! */
+ oparity_any /**< other arity */
} op_arity;
/** The irop flags */
typedef enum {
- irop_flag_labeled = 0x00000001, /**< if set, Output edge labels on in-edges in vcg graph */
- irop_flag_commutative = 0x00000002, /**< operation is commutative */
+ irop_flag_labeled = 0x00000001, /**< if set, Output edge labels on in-edges in vcg graph */
+ irop_flag_commutative = 0x00000002, /**< operation is commutative */
irop_flag_cfopcode = 0x00000004, /**< is a control flow operation */
- irop_flag_ip_cfopcode = 0x00000008, /**< operation manipulates interprocedural control flow */
- irop_flag_fragile = 0x00000010, /**< set if the operation can change the control flow because
+ irop_flag_ip_cfopcode = 0x00000008, /**< operation manipulates interprocedural control flow */
+ irop_flag_fragile = 0x00000010 /**< set if the operation can change the control flow because
of an exception */
} irop_flags;
unsigned flags; /**< flags describing the behavior of the ir_op, a bitmaks of irop_flags */
/* CallBacks */
- computed_value_func computed_value; /**< evaluates a node into a tarval if possible. */
- equivalent_node_func equivalent_node; /**< optimizes the node by returning an equivalent one. */
- transform_node_func transform_node; /**< optimizes the node by transforming it. */
- node_cmp_attr_func node_cmp_attr; /**< compares two node attributes. */
+ computed_value_func computed_value; /**< evaluates a node into a tarval if possible. */
+ equivalent_node_func equivalent_node; /**< optimizes the node by returning an equivalent one. */
+ transform_node_func transform_node; /**< optimizes the node by transforming it. */
+ node_cmp_attr_func node_cmp_attr; /**< compares two node attributes. */
};
/**
* @return The genenerated ir operation.
*/
ir_op * new_ir_op(opcode code, const char *name, op_pinned p,
- unsigned flags, op_arity opar, int op_index, size_t attr_size);
+ unsigned flags, op_arity opar, int op_index, size_t attr_size);
/**
* Frees a newly created ir operation.
/** Free memory used by irop module. */
void finish_op(void);
+# ifndef INLINE
+# ifdef USE_GCC_INLINE
+# define INLINE __extension__ ((__inline__))
+# else /* defined USE_GCC_INLINE */
+# define INLINE
+# endif /* defined USE_GCC_INLINE */
+# endif /* defined INLINE */
+
/** Returns the attribute size of nodes of this opcode.
@note Use not encouraged, internal feature. */
-static INLINE int get_op_attr_size (const ir_op *op) {
+INLINE int get_op_attr_size (const ir_op *op)
+# ifdef USE_GCC_INLINE
+{
return op->attr_size;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns non-zero if op is one of Start, End, Jmp, Cond, Return, Raise or Bad. */
-static INLINE int is_cfopcode(const ir_op *op) {
+INLINE int is_cfopcode(const ir_op *op)
+# ifdef USE_GCC_INLINE
+{
return op->flags & irop_flag_cfopcode;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/** Returns true if the operation manipulates interprocedural control flow:
CallBegin, EndReg, EndExcept */
-static INLINE int is_ip_cfopcode(const ir_op *op) {
+INLINE int is_ip_cfopcode(const ir_op *op)
+# ifdef USE_GCC_INLINE
+{
return op->flags & irop_flag_ip_cfopcode;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/* Returns non-zero if operation is commutative */
-static INLINE int is_op_commutative(const ir_op *op) {
+INLINE int is_op_commutative(const ir_op *op)
+# ifdef USE_GCC_INLINE
+{
return op->flags & irop_flag_commutative;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
/* Returns non-zero if operation is fragile */
-static INLINE int is_op_fragile(const ir_op *op) {
+INLINE int is_op_fragile(const ir_op *op)
+# ifdef USE_GCC_INLINE
+{
return op->flags & irop_flag_fragile;
}
+# else /* defined USE_GCC_INLINE */
+;
+# endif /* not defined USE_GCC_INLINE */
#endif /* _IROP_T_H_ */
/** If the expression referenced can be evaluated statically
* computed_value returns a tarval representing the result.
* Else returns tarval_bad. */
-tarval *computed_value (ir_node *n);
+tarval *computed_value (ir_node*);
/** Applies all optimizations to n that are expressible as a pattern
* in Firm, i.e., they need not a walk of the graph.
* An equivalent optimization is applied in the constructors defined in
* ircons.ch. There n is freed if a better node could be found.
*/
-ir_node *optimize_in_place (ir_node *n);
+ir_node *optimize_in_place (ir_node*);
+
+/**
+ * set the default ir op operations
+ */
+ir_op *firm_set_default_operations(ir_op*);
# endif /* _IROPT_H_ */
* in RELEASE mode, returns ret if the expression expr evaluates to zero
* in ASSERT mode, asserts the expression expr (and the string string).
*/
-#define ASSERT_AND_RET(expr, string, ret) if (!(expr)) return (ret)
+#define ASSERT_AND_RET(expr, string, ret) if (!(expr)) return (ret)
/*
* in RELEASE mode, returns ret if the expression expr evaluates to zero
* in ASSERT mode, executes blk if the expression expr evaluates to zero and asserts
*/
-#define ASSERT_AND_RET_DBG(expr, string, ret, blk) if (!(expr)) return (ret)
+#define ASSERT_AND_RET_DBG(expr, string, ret, blk) if (!(expr)) return (ret)
#else
#define ASSERT_AND_RET(expr, string, ret) \
do { \
case iro_Start:
ASSERT_AND_RET_DBG(
(
- (proj == pns_initial_exec && mode == mode_X) ||
+ (proj == pns_initial_exec && mode == mode_X) ||
(proj == pns_global_store && mode == mode_M) ||
(proj == pns_frame_base && mode_is_reference(mode)) ||
(proj == pns_globals && mode_is_reference(mode)) ||
(proj == pns_args && mode == mode_T) ||
- (proj == pns_value_arg_base && mode_is_reference(mode))
- ),
+ (proj == pns_value_arg_base && mode_is_reference(mode))
+ ),
"wrong Proj from Start", 0,
- show_proj_failure(p);
+ show_proj_failure(p);
);
break;
case iro_Cond:
ASSERT_AND_RET_DBG(
(proj >= 0 && mode == mode_X),
- "wrong Proj from Cond", 0,
- show_proj_failure(p);
+ "wrong Proj from Cond", 0,
+ show_proj_failure(p);
);
break;
ASSERT_AND_RET_DBG(
((proj == pn_Raise_X && mode == mode_X) || (proj == pn_Raise_M && mode == mode_M)),
"wrong Proj from Raise", 0,
- show_proj_failure(p);
+ show_proj_failure(p);
);
break;
case iro_InstOf:
ASSERT_AND_RET_DBG(
- (proj >= 0 && mode == mode_X),
- "wrong Proj from InstOf", 0,
- show_proj_failure(p);
+ (proj >= 0 && mode == mode_X),
+ "wrong Proj from InstOf", 0,
+ show_proj_failure(p);
);
break;
(proj == pn_Call_X_except && mode == mode_X) ||
(proj == pn_Call_T_result && mode == mode_T) ||
(proj == pn_Call_M_except && mode == mode_M) ||
- (proj == pn_Call_P_value_res_base && mode == mode_P)),
+ (proj == pn_Call_P_value_res_base && mode == mode_P)),
"wrong Proj from Call", 0,
show_proj_failure(p);
);
(proj == pn_Call_X_except && mode == mode_X) ||
(proj == pn_Call_T_result && mode == mode_T) ||
(proj == pn_Call_M_except && mode == mode_M) ||
- (proj == pn_Call_P_value_res_base && mode == mode_P)),
+ (proj == pn_Call_P_value_res_base && mode == mode_P)),
"wrong Proj from FuncCall", 0,
show_proj_failure(p);
);
(proj == pn_Quot_X_except && mode == mode_X) ||
(proj == pn_Quot_res && mode_is_float(mode))),
"wrong Proj from Quot", 0,
- show_proj_failure(p);
+ show_proj_failure(p);
);
break;
(proj == pn_DivMod_res_div && mode_is_int(mode)) ||
(proj == pn_DivMod_res_mod && mode_is_int(mode))),
"wrong Proj from DivMod", 0,
- show_proj_failure(p);
+ show_proj_failure(p);
);
break;
(proj == pn_Div_X_except && mode == mode_X) ||
(proj == pn_Div_res && mode_is_int(mode))),
"wrong Proj from Div or Mod", 0,
- show_proj_failure(p);
+ show_proj_failure(p);
);
break;
(proj == pn_Mod_X_except && mode == mode_X) ||
(proj == pn_Mod_res && mode_is_int(mode))),
"wrong Proj from Div or Mod", 0,
- show_proj_failure(p);
+ show_proj_failure(p);
);
break;
ASSERT_AND_RET_DBG(
(proj >= 0 && proj <= 15 && mode == mode_b),
"wrong Proj from Cmp", 0,
- show_proj_failure(p);
+ show_proj_failure(p);
);
break;
case iro_Load:
if (proj == pn_Load_res) {
- ir_node *ptr = get_Load_ptr(pred);
- entity *ent = NULL;
- if (get_irn_op(ptr) == op_Sel) {
- ent = get_Sel_entity(ptr);
- } /*
+ ir_node *ptr = get_Load_ptr(pred);
+ entity *ent = NULL;
+ if (get_irn_op(ptr) == op_Sel) {
+ ent = get_Sel_entity(ptr);
+ } /*
We may not test this, after lowering and optimization the Const can
have an unexpected type.
else if ((get_irn_op(ptr) == op_Const) &&
- tarval_is_entity(get_Const_tarval(ptr))) {
- ent = get_tarval_entity(get_Const_tarval(ptr));
- } */
- if (ent) {
- ASSERT_AND_RET_DBG(
- (mode == get_type_mode(get_entity_type(ent))),
- "wrong data Proj from Load, entity type_mode failed", 0,
- show_proj_failure_ent(p, ent);
- );
- }
- else {
- ASSERT_AND_RET_DBG(
- mode_is_data(mode),
- "wrong data Proj from Load", 0,
- show_proj_failure(p);
- );
- }
+ tarval_is_entity(get_Const_tarval(ptr))) {
+ ent = get_tarval_entity(get_Const_tarval(ptr));
+ } */
+ if (ent) {
+ ASSERT_AND_RET_DBG(
+ (mode == get_type_mode(get_entity_type(ent))),
+ "wrong data Proj from Load, entity type_mode failed", 0,
+ show_proj_failure_ent(p, ent);
+ );
+ }
+ else {
+ ASSERT_AND_RET_DBG(
+ mode_is_data(mode),
+ "wrong data Proj from Load", 0,
+ show_proj_failure(p);
+ );
+ }
} else {
- ASSERT_AND_RET_DBG(
- ((proj == pn_Load_M && mode == mode_M) ||
- (proj == pn_Load_X_except && mode == mode_X)),
+ ASSERT_AND_RET_DBG(
+ ((proj == pn_Load_M && mode == mode_M) ||
+ (proj == pn_Load_X_except && mode == mode_X)),
"wrong Proj from Load", 0,
- show_proj_failure(p);
- );
+ show_proj_failure(p);
+ );
}
break;
((proj == pn_Store_M && mode == mode_M) ||
(proj == pn_Store_X_except && mode == mode_X)),
"wrong Proj from Store", 0,
- show_proj_failure(p);
+ show_proj_failure(p);
);
break;
ASSERT_AND_RET(
(proj < get_method_n_params(mt)),
"More Projs for args than args in type", 0
- );
+ );
if ((mode_is_reference(mode)) && is_compound_type(get_method_param_type(mt, proj)))
/* value argument */ break;
int opcode, opcode1;
ir_mode *mymode, *op1mode = NULL, *op2mode, *op3mode;
int op_is_symmetric = 1; /* 0: asymmetric
- 1: operands have identical modes
- 2: modes of operands == mode of this node */
+ 1: operands have identical modes
+ 2: modes of operands == mode of this node */
type *mt; /* A method type */
entity *ent;
case iro_Block:
for (i = 0; i < get_Block_n_cfgpreds(n); ++i) {
- ir_node *pred = get_Block_cfgpred(n, i);
- ASSERT_AND_RET(
- (is_Bad(pred) ||
- is_Unknown(pred) ||
- (get_irn_mode(pred) == mode_X)
- ), "Block node", 0);
+ ir_node *pred = get_Block_cfgpred(n, i);
+ ASSERT_AND_RET(
+ (is_Bad(pred) ||
+ is_Unknown(pred) ||
+ (get_irn_mode(pred) == mode_X)
+ ), "Block node", 0);
}
// End block may only have Return, Raise or fragile ops as preds.
if (n == get_irg_end_block(irg))
- for (i = 0; i < get_Block_n_cfgpreds(n); ++i) {
- ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
- if (is_Proj(pred) || get_irn_op(pred) == op_Tuple)
- break; // We can not test properly. How many tuples are there?
- ASSERT_AND_RET(((get_irn_op(pred) == op_Return) ||
- is_Bad(pred) ||
- (get_irn_op(pred) == op_Raise) ||
- is_fragile_op(pred) ),
- "End Block node", 0);
- }
+ for (i = 0; i < get_Block_n_cfgpreds(n); ++i) {
+ ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
+ if (is_Proj(pred) || get_irn_op(pred) == op_Tuple)
+ break; // We can not test properly. How many tuples are there?
+ ASSERT_AND_RET(((get_irn_op(pred) == op_Return) ||
+ is_Bad(pred) ||
+ (get_irn_op(pred) == op_Raise) ||
+ is_fragile_op(pred) ),
+ "End Block node", 0);
+ }
// irg attr must == graph we are in.
if (! interprocedural_view) {
- ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
+ ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
}
break;
mt = get_entity_type(get_irg_ent(irg));
ASSERT_AND_RET_DBG( get_Return_n_ress(n) == get_method_n_ress(mt),
"Number of results for Return doesn't match number of results in type.", 0,
- show_return_nres(irg, n, mt););
+ show_return_nres(irg, n, mt););
for (i = 0; i < get_Return_n_ress(n); i++)
ASSERT_AND_RET_DBG(
get_irn_mode(get_Return_res(n, i)) == get_type_mode(get_method_res_type(mt, i)),
"Mode of result for Return doesn't match mode of result type.", 0,
- show_return_modes(irg, n, mt, i););
+ show_return_modes(irg, n, mt, i););
break;
case iro_Raise:
ASSERT_AND_RET_DBG(
/* Sel: BB x M x ref x int^n --> ref */
(op1mode == mode_M && op2mode == mymode && mode_is_reference(mymode)),
- "Sel node", 0, show_node_failure(n)
+ "Sel node", 0, show_node_failure(n)
);
for (i=3; i < get_irn_arity(n); i++)
{
get_Call_n_params(n) >= get_method_n_params(mt),
"Number of args for Call doesn't match number of args in variadic type.",
0,
- fprintf(stderr, "Call has %d params, method %s type %d\n",
- get_Call_n_params(n), get_type_name(mt), get_method_n_params(mt));
- );
+ fprintf(stderr, "Call has %d params, method %s type %d\n",
+ get_Call_n_params(n), get_type_name(mt), get_method_n_params(mt));
+ );
}
else {
ASSERT_AND_RET(
ASSERT_AND_RET_DBG(
get_irn_mode(get_Call_param(n, i)) == get_type_mode(get_method_param_type(mt, i)),
"Mode of arg for Call doesn't match mode of arg type.", 0,
- show_call_param(n, mt);
- );
+ show_call_param(n, mt);
+ );
}
break;
ASSERT_AND_RET_DBG(
(
/* common Add: BB x numP x numP --> numP */
- (op1mode == mymode && op2mode == op1mode && mode_is_numP(mymode)) ||
+ (op1mode == mymode && op2mode == op1mode && mode_is_numP(mymode)) ||
/* Pointer Add: BB x ref x int --> ref */
(mode_is_reference(op1mode) && mode_is_int(op2mode) && op1mode == mymode) ||
/* Pointer Add: BB x int x ref --> ref */
(mode_is_int(op1mode) && op2mode == mymode && mode_is_reference(mymode))
- ),
+ ),
"Add node", 0,
- show_binop_failure(n, "/* common Add: BB x numP x numP --> numP */ |\n"
- "/* Pointer Add: BB x ref x int --> ref */ |\n"
+ show_binop_failure(n, "/* common Add: BB x numP x numP --> numP */ |\n"
+ "/* Pointer Add: BB x ref x int --> ref */ |\n"
"/* Pointer Add: BB x int x ref --> ref */");
);
if (mode_is_reference(op1mode) != mode_is_reference(op2mode)) {
/* Pointer Sub: BB x ref x ref --> int */
(op1mode == op2mode && mode_is_reference(op2mode) && mode_is_int(mymode))),
"Sub node", 0,
- show_binop_failure(n, "/* common Sub: BB x numP x numP --> numP */ |\n"
- "/* Pointer Sub: BB x ref x int --> ref */ |\n"
- "/* Pointer Sub: BB x int x ref --> ref */ |\n"
- "/* Pointer Sub: BB x ref x ref --> int */" );
+ show_binop_failure(n, "/* common Sub: BB x numP x numP --> numP */ |\n"
+ "/* Pointer Sub: BB x ref x int --> ref */ |\n"
+ "/* Pointer Sub: BB x int x ref --> ref */ |\n"
+ "/* Pointer Sub: BB x ref x ref --> int */" );
);
if (mode_is_reference(op1mode) != mode_is_reference(op2mode)) {
op_is_symmetric = 0;
ASSERT_AND_RET_DBG(
/* Minus: BB x float --> float */
op1mode == mymode && get_mode_sort(op1mode) == irms_float_number, "Minus node", 0,
- show_unop_failure(n , "/* Minus: BB x float --> float */");
+ show_unop_failure(n , "/* Minus: BB x float --> float */");
);
op_is_symmetric = 2;
break;
ASSERT_AND_RET_DBG(
/* Mul: BB x int1 x int1 --> int2 */
((mode_is_int(op1mode) && op2mode == op1mode && mode_is_int(mymode)) ||
- (mode_is_float(op1mode) && op2mode == op1mode && mymode == op1mode)),
+ (mode_is_float(op1mode) && op2mode == op1mode && mymode == op1mode)),
"Mul node",0,
- show_binop_failure(n, "/* Mul: BB x int1 x int1 --> int2 */");
+ show_binop_failure(n, "/* Mul: BB x int1 x int1 --> int2 */");
);
op_is_symmetric = 2;
break;
get_mode_sort(op2mode) == irms_float_number &&
mymode == mode_T,
"Quot node",0,
- show_binop_failure(n, "/* Quot: BB x M x float x float --> M x X x float */");
+ show_binop_failure(n, "/* Quot: BB x M x float x float --> M x X x float */");
);
op_is_symmetric = 2;
break;
case iro_Abs:
op1mode = get_irn_mode(in[1]);
ASSERT_AND_RET_DBG(
- /* Abs: BB x num --> num */
- op1mode == mymode &&
- mode_is_num (op1mode),
- "Abs node", 0,
- show_unop_failure(n, "/* Abs: BB x num --> num */");
+ /* Abs: BB x num --> num */
+ op1mode == mymode &&
+ mode_is_num (op1mode),
+ "Abs node", 0,
+ show_unop_failure(n, "/* Abs: BB x num --> num */");
);
op_is_symmetric = 2;
break;
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
ASSERT_AND_RET_DBG(
- /* And or Or or Eor: BB x int x int --> int */
- mode_is_int(mymode) &&
- op2mode == op1mode &&
- mymode == op2mode,
- "And, Or or Eor node", 0,
- show_binop_failure(n, "/* And or Or or Eor: BB x int x int --> int */");
+ /* And or Or or Eor: BB x int x int --> int */
+ mode_is_int(mymode) &&
+ op2mode == op1mode &&
+ mymode == op2mode,
+ "And, Or or Eor node", 0,
+ show_binop_failure(n, "/* And or Or or Eor: BB x int x int --> int */");
);
op_is_symmetric = 2;
break;
case iro_Not:
op1mode = get_irn_mode(in[1]);
ASSERT_AND_RET_DBG(
- /* Not: BB x int --> int */
- mode_is_int(mymode) &&
- mymode == op1mode,
- "Not node", 0,
- show_unop_failure(n, "/* Not: BB x int --> int */");
+ /* Not: BB x int --> int */
+ mode_is_int(mymode) &&
+ mymode == op1mode,
+ "Not node", 0,
+ show_unop_failure(n, "/* Not: BB x int --> int */");
);
op_is_symmetric = 2;
break;
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
ASSERT_AND_RET_DBG(
- /* Cmp: BB x datab x datab --> b16 */
- mode_is_data (op1mode) &&
- op2mode == op1mode &&
- mymode == mode_T,
- "Cmp node", 0,
- show_binop_failure(n, "/* Cmp: BB x datab x datab --> b16 */");
+ /* Cmp: BB x datab x datab --> b16 */
+ mode_is_data (op1mode) &&
+ op2mode == op1mode &&
+ mymode == mode_T,
+ "Cmp node", 0,
+ show_binop_failure(n, "/* Cmp: BB x datab x datab --> b16 */");
);
break;
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
ASSERT_AND_RET_DBG(
- /* Shl, Shr or Shrs: BB x int x int_u --> int */
- mode_is_int(op1mode) &&
- mode_is_int(op2mode) &&
- !mode_is_signed(op2mode) &&
- mymode == op1mode,
- "Shl, Shr, Shr or Rot node", 0,
- show_binop_failure(n, "/* Shl, Shr or Shrs: BB x int x int_u --> int */");
+ /* Shl, Shr or Shrs: BB x int x int_u --> int */
+ mode_is_int(op1mode) &&
+ mode_is_int(op2mode) &&
+ !mode_is_signed(op2mode) &&
+ mymode == op1mode,
+ "Shl, Shr, Shr or Rot node", 0,
+ show_binop_failure(n, "/* Shl, Shr or Shrs: BB x int x int_u --> int */");
);
break;
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
ASSERT_AND_RET_DBG(
- /* Rot: BB x int x int --> int */
- mode_is_int(op1mode) &&
- mode_is_int(op2mode) &&
- mymode == op1mode,
- "Rot node", 0,
- show_binop_failure(n, "/* Rot: BB x int x int --> int */");
+ /* Rot: BB x int x int --> int */
+ mode_is_int(op1mode) &&
+ mode_is_int(op2mode) &&
+ mymode == op1mode,
+ "Rot node", 0,
+ show_binop_failure(n, "/* Rot: BB x int x int --> int */");
);
break;
case iro_Conv:
op1mode = get_irn_mode(in[1]);
ASSERT_AND_RET_DBG(
- /* Conv: BB x datab1 --> datab2 */
- mode_is_datab(op1mode) && mode_is_data(mymode),
- "Conv node", 0,
- show_unop_failure(n, "/* Conv: BB x datab1 --> datab2 */");
+ /* Conv: BB x datab1 --> datab2 */
+ mode_is_datab(op1mode) && mode_is_data(mymode),
+ "Conv node", 0,
+ show_unop_failure(n, "/* Conv: BB x datab1 --> datab2 */");
);
break;
case iro_Cast:
op1mode = get_irn_mode(in[1]);
ASSERT_AND_RET_DBG(
- /* Conv: BB x datab1 --> datab2 */
- mode_is_data(op1mode) && op1mode == mymode,
- "Cast node", 0,
- show_unop_failure(n, "/* Conv: BB x datab1 --> datab2 */");
+ /* Conv: BB x datab1 --> datab2 */
+ mode_is_data(op1mode) && op1mode == mymode,
+ "Cast node", 0,
+ show_unop_failure(n, "/* Conv: BB x datab1 --> datab2 */");
);
break;
for (i = 1; i < get_irn_arity(n); i++) {
if (!is_Bad(in[i]) && (get_irn_op(in[i]) != op_Unknown))
ASSERT_AND_RET_DBG(
- get_irn_mode(in[i]) == mymode,
- "Phi node", 0,
- show_phi_failure(n, in[i], i);
- );
+ get_irn_mode(in[i]) == mymode,
+ "Phi node", 0,
+ show_phi_failure(n, in[i], i);
+ );
};
ASSERT_AND_RET( mode_is_dataM(mymode), "Phi node", 0 );
break;
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
ASSERT_AND_RET_DBG(
- /* Alloc: BB x M x int_u --> M x X x ref */
- op1mode == mode_M &&
- mode_is_int(op2mode) &&
- !mode_is_signed(op2mode) &&
- mymode == mode_T,
- "Alloc node", 0,
- show_binop_failure(n, "/* Alloc: BB x M x int_u --> M x X x ref */");
+ /* Alloc: BB x M x int_u --> M x X x ref */
+ op1mode == mode_M &&
+ mode_is_int(op2mode) &&
+ !mode_is_signed(op2mode) &&
+ mymode == mode_T,
+ "Alloc node", 0,
+ show_binop_failure(n, "/* Alloc: BB x M x int_u --> M x X x ref */");
);
break;
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
ASSERT_AND_RET_DBG(
- /* Free: BB x M x ref --> M */
- op1mode == mode_M && mode_is_reference(op2mode) &&
- mymode == mode_M,
- "Free node", 0,
- show_binop_failure(n, "/* Free: BB x M x ref --> M */");
+ /* Free: BB x M x ref --> M */
+ op1mode == mode_M && mode_is_reference(op2mode) &&
+ mymode == mode_M,
+ "Free node", 0,
+ show_binop_failure(n, "/* Free: BB x M x ref --> M */");
);
break;
op1mode = get_irn_mode(in[1]);
op2mode = get_irn_mode(in[2]);
ASSERT_AND_RET_DBG(
- /* Confirm: BB x T x T --> T */
- op1mode == mymode &&
- op2mode == mymode,
- "Confirm node", 0,
- show_binop_failure(n, "/* Confirm: BB x T x T --> T */");
+ /* Confirm: BB x T x T --> T */
+ op1mode == mymode &&
+ op2mode == mymode,
+ "Confirm node", 0,
+ show_binop_failure(n, "/* Confirm: BB x T x T --> T */");
);
break;
/* Verify the whole graph. */
/*******************************************************************/
+/* This *is* used, except gcc doesn't notice that */
static void vrfy_wrap(ir_node *node, void *env)
{
int *res = env;