(Used so far to indicate whether muxes are lowered and should not be touched anymore and whether architecture dependent mul and div with constant implementations are used)
We should use this to indicate dom/loop/... consistency in the future
- fixed a bug where lower_mode_b was creating a "set" instruction with the help of a Mux. (Maybe we should create a new Node for this and not use a 0/1 mux?)
[r25937]
#define ir_resources_reserved(irg) 0
#endif
+/**
+ * Graph State
+ */
+typedef enum {
+ IR_GRAPH_STATE_KEEP_MUX = 1 << 0, /**< should perform no further optimisations on Mux nodes */
+ IR_GRAPH_STATE_ARCH_DEP = 1 << 1, /**< should not construct more nodes which irarch potentially breaks down */
+} ir_graph_state_t;
+
+/** set some state flags on the graph (this does not clear the other flags) */
+void set_irg_state(ir_graph *irg, ir_graph_state_t state);
+/** clear some state flags of the graph */
+void clear_irg_state(ir_graph *irg, ir_graph_state_t state);
+/** query whether a set of graph state flags is activated */
+int is_irg_state(const ir_graph *irg, ir_graph_state_t state);
+
/** Normalization: Move Proj nodes into the same block as its predecessors */
void normalize_proj_nodes(ir_graph *irg);
/* Replace Muls with Shifts and Add/Subs. */
ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) {
- ir_node *res = irn;
+ ir_graph *irg;
+ ir_node *res = irn;
ir_mode *mode = get_irn_mode(irn);
+ ir_node *left;
+ ir_node *right;
+ ir_node *operand;
+ tarval *tv;
+
/* If the architecture dependent optimizations were not initialized
or this optimization was not enabled. */
if (params == NULL || (opts & arch_dep_mul_to_shift) == 0)
return irn;
- set_arch_dep_running(1);
- {
- if (is_Mul(irn) && mode_is_int(mode)) {
- ir_node *left = get_binop_left(irn);
- ir_node *right = get_binop_right(irn);
- tarval *tv = NULL;
- ir_node *operand = NULL;
-
- /* Look, if one operand is a constant. */
- if (is_Const(left)) {
- tv = get_Const_tarval(left);
- operand = right;
- } else if (is_Const(right)) {
- tv = get_Const_tarval(right);
- operand = left;
- }
+ if (!is_Mul(irn) || !mode_is_int(mode))
+ return res;
+
+ /* we should never do the reverse transformations again
+ (like x+x -> 2*x) */
+ irg = get_irn_irg(irn);
+ set_irg_state(irg, IR_GRAPH_STATE_ARCH_DEP);
+
+ left = get_binop_left(irn);
+ right = get_binop_right(irn);
+ tv = NULL;
+ operand = NULL;
+
+ /* Look, if one operand is a constant. */
+ if (is_Const(left)) {
+ tv = get_Const_tarval(left);
+ operand = right;
+ } else if (is_Const(right)) {
+ tv = get_Const_tarval(right);
+ operand = left;
+ }
- if (tv != NULL) {
- res = do_decomposition(irn, operand, tv);
+ if (tv != NULL) {
+ res = do_decomposition(irn, operand, tv);
- if (res != irn) {
- hook_arch_dep_replace_mul_with_shifts(irn);
- exchange(irn, res);
- }
- }
+ if (res != irn) {
+ hook_arch_dep_replace_mul_with_shifts(irn);
+ exchange(irn, res);
}
}
- //set_arch_dep_running(0);
return res;
}
/** This flag is set while the reassociation optimizations are running */
R_FLAG(reassoc , 0)
-
-/** This flag is set while architecture dependent optimizations are running */
-R_FLAG(arch_dep , 0)
return additional_graph_data_size += size;
}
+
+void (set_irg_state)(ir_graph *irg, ir_graph_state_t state)
+{
+ _set_irg_state(irg, state);
+}
+
+void (clear_irg_state)(ir_graph *irg, ir_graph_state_t state)
+{
+ _clear_irg_state(irg, state);
+}
+
+int (is_irg_state)(const ir_graph *irg, ir_graph_state_t state)
+{
+ return _is_irg_state(irg, state);
+}
return irg->fp_model;
}
+static inline void _set_irg_state(ir_graph *irg, ir_graph_state_t state)
+{
+ irg->state |= state;
+}
+
+static inline void _clear_irg_state(ir_graph *irg, ir_graph_state_t state)
+{
+ irg->state &= ~state;
+}
+
+static inline int _is_irg_state(const ir_graph *irg, ir_graph_state_t state)
+{
+ return (irg->state & state) == state;
+}
+
/**
* Allocates a new idx in the irg for the node and adds the irn to the idx -> irn map.
* @param irg The graph.
#define get_irg_estimated_node_cnt(irg) _get_irg_estimated_node_cnt(irg)
#define get_irg_fp_model(irg) _get_irg_fp_model(irg)
#define get_idx_irn(irg, idx) _get_idx_irn(irg, idx)
+#define set_irg_state(irg, state) _set_irg_state(irg, state)
+#define clear_irg_state(irg, state) _clear_irg_state(irg, state)
+#define is_irg_state(irg, state) _is_irg_state(irg, state)
#endif
if (mode_is_num(mode)) {
/* the following code leads to endless recursion when Mul are replaced by a simple instruction chain */
- if (!is_arch_dep_running() && a == b && mode_is_int(mode)) {
+ if (!is_irg_state(current_ir_graph, IR_GRAPH_STATE_ARCH_DEP)
+ && a == b && mode_is_int(mode)) {
ir_node *block = get_nodes_block(n);
n = new_rd_Mul(
ir_node *f = get_Mux_false(n);
ir_graph *irg = current_ir_graph;
+ if (is_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX))
+ return n;
+
if (is_Mux(t)) {
ir_node* block = get_nodes_block(n);
ir_node* c0 = sel;
if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
tarval *a = get_Const_tarval(t);
tarval *b = get_Const_tarval(f);
- tarval *null = get_tarval_null(mode);
tarval *diff, *min;
if (tarval_is_one(a) && tarval_is_null(b)) {
*/
do {
oldn = n;
- if (n->op->ops.transform_node)
+ if (n->op->ops.transform_node != NULL)
n = n->op->ops.transform_node(n);
} while (oldn != n);
unsigned additional_properties; /**< Additional graph properties. */
/* -- Fields indicating different states of irgraph -- */
+ unsigned state;
irg_phase_state phase_state; /**< Compiler phase. */
op_pin_state irg_pinned_state; /**< Flag for status of nodes. */
irg_outs_state outs_state; /**< Out edges. */
tarval *tv_zero = get_tarval_null(mode);
ir_node *zero = new_d_Const(dbgi, tv_zero);
+ /* ensure no optimisation touches muxes anymore */
+ set_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX);
+
ir_node *set = new_rd_Mux(dbgi, irg, block, node, zero, one, mode);
if (mode != config.lowered_mode) {