ir_node *exc_block; /**< the exception block if available */
int exc_idx; /**< predecessor index in the exception block */
unsigned visited; /**< visited counter for breaking loops */
ir_node *exc_block; /**< the exception block if available */
int exc_idx; /**< predecessor index in the exception block */
unsigned visited; /**< visited counter for breaking loops */
{
compound_graph_path *res = NULL;
ir_entity *root, *field, *ent;
{
compound_graph_path *res = NULL;
ir_entity *root, *field, *ent;
assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
} else {
assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
} else {
for (i = 0; i < n_members; ++i) {
if (get_compound_member(tp, i) == field)
break;
for (i = 0; i < n_members; ++i) {
if (get_compound_member(tp, i) == field)
break;
- Stored value into the needed one. */
- if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+ Stored value into the needed one. (on big endian machines we currently
+ only support this for modes of same size) */
+ if (old_size >= new_size &&
- get_mode_arithmetic(new_mode) == irma_twos_complement)
- return 1;
- return 0;
-} /* can_use_stored_value */
+ get_mode_arithmetic(new_mode) == irma_twos_complement &&
+ (!be_get_backend_param()->byte_order_big_endian
+ || old_size == new_size)) {
+ return true;
+ }
+ return false;
+}
- if (delta < 0 || delta + load_mode_len > store_mode_len)
+ /* TODO: implement for big-endian */
+ if (delta < 0 || delta + load_mode_len > store_mode_len
+ || (be_get_backend_param()->byte_order_big_endian
+ && load_mode_len != store_mode_len))
cnst = new_r_Const_long(irg, mode_Iu, delta * 8);
store_value = new_r_Shr(get_nodes_block(load),
store_value, cnst, store_mode);
cnst = new_r_Const_long(irg, mode_Iu, delta * 8);
store_value = new_r_Shr(get_nodes_block(load),
store_value, cnst, store_mode);
* Here, there is no need to check if the previous Load has an
* exception handler because they would have exactly the same
* exception...
* Here, there is no need to check if the previous Load has an
* exception handler because they would have exactly the same
* exception...
if (is_reinterpret_cast(c_mode, l_mode)) {
/* copy the value from the const code irg and cast it */
res = new_rd_Conv(dbgi, block, res, l_mode);
if (is_reinterpret_cast(c_mode, l_mode)) {
/* copy the value from the const code irg and cast it */
res = new_rd_Conv(dbgi, block, res, l_mode);
/* no exception, clear the info field as it might be checked later again */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
/* no exception, clear the info field as it might be checked later again */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
/* new style initializer */
value = find_compound_ent_value(ptr);
} else if (entity_has_compound_ent_values(ent)) {
/* new style initializer */
value = find_compound_ent_value(ptr);
} else if (entity_has_compound_ent_values(ent)) {
/* we completely replace the load by this value */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
/* we completely replace the load by this value */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+ ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+#ifdef DO_CACHEOPT
+ ir_node *old_store;
+#endif
/* check if the block is post dominated by Phi-block
and has no exception exit */
bl_info = (block_info_t*)get_irn_link(block);
/* check if the block is post dominated by Phi-block
and has no exception exit */
bl_info = (block_info_t*)get_irn_link(block);
/* check if the block is post dominated by Phi-block
and has no exception exit. Note that block must be different from
/* check if the block is post dominated by Phi-block
and has no exception exit. Note that block must be different from
typedef struct loop_env {
ir_phase ph; /**< the phase object */
ir_node **stack; /**< the node stack */
typedef struct loop_env {
ir_phase ph; /**< the phase object */
ir_node **stack; /**< the node stack */
unsigned nextDFSnum; /**< the current DFS number */
unsigned POnum; /**< current post order number */
unsigned nextDFSnum; /**< the current DFS number */
unsigned POnum; /**< current post order number */
ARR_RESIZE(ir_node *, env->stack, nlen);
}
env->stack[env->tos++] = n;
ARR_RESIZE(ir_node *, env->stack, nlen);
}
env->stack[env->tos++] = n;
static void move_loads_out_of_loops(scc *pscc, loop_env *env)
{
ir_node *phi, *load, *next, *other, *next_other;
static void move_loads_out_of_loops(scc *pscc, loop_env *env)
{
ir_node *phi, *load, *next, *other, *next_other;
load_mode = get_Load_mode(load);
for (other = pscc->head; other != NULL; other = next_other) {
node_entry *ne = get_irn_ne(other, env);
load_mode = get_Load_mode(load);
for (other = pscc->head; other != NULL; other = next_other) {
node_entry *ne = get_irn_ne(other, env);
ninfo = get_ldst_info(irn, phase_obst(&env->ph));
ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
ninfo = get_ldst_info(irn, phase_obst(&env->ph));
ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
- set_Phi_pred(phi, pos, mem);
+ if (res == NULL) {
+ /* irn is from cache, so do not set phi pred again.
+ * There might be other Loads between phi and irn already.
+ */
+ set_Phi_pred(phi, pos, mem);
+ }