-static inline ir_node *
-new_rd_Phi0(ir_graph *irg, ir_node *block, ir_mode *mode) {
- ir_node *res;
-
- res = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
- IRN_VRFY_IRG(res, irg);
- return res;
-} /* new_rd_Phi0 */
-
-
/**
 * Internal constructor of a Phi node by a phi_merge operation.
 *
 * Builds a Phi with the given predecessors and immediately simplifies it:
 * if all predecessors turn out to be the same value the freshly allocated
 * node is discarded again and that value is returned instead.
 *
 * @param irg    the graph on which the Phi will be constructed
 * @param block  the block in which the Phi will be constructed
 * @param mode   the mode of the Phi node
 * @param in     the input array of the phi node
 * @param ins    number of elements in the input array
 * @param phi0   if non-NULL: the Phi0 node in the same block that represents
 *               the value for which the new Phi is constructed
 */
static inline ir_node *
new_rd_Phi_in(ir_graph *irg, ir_node *block, ir_mode *mode,
              ir_node **in, int ins, ir_node *phi0) {
  int i;
  ir_node *res, *known;

  /* Allocate a new node on the obstack.  The allocation copies the in
     array. */
  res = new_ir_node(NULL, irg, block, op_Phi, mode, ins, in);
  res->attr.phi.u.backedge = new_backedge_arr(irg->obst, ins);

  /* This loop checks whether the Phi has more than one predecessor.
     If so, it is a real Phi node and we break the loop.  Else the
     Phi node merges the same definition on several paths and therefore
     is not needed.
     Note: We MUST consider Bad nodes, else we might get data flow cycles
     in dead loops! */
  known = res;
  for (i = ins - 1; i >= 0; --i) {
    assert(in[i]);

    in[i] = skip_Id(in[i]);  /* increases the number of freed Phis. */

    /* Optimize self referencing Phis:  We can't detect them yet properly, as
       they still refer to the Phi0 they will replace.  So replace right now. */
    if (phi0 && in[i] == phi0)
      in[i] = res;

    /* Predecessors equal to the node itself or to the one distinct value
       seen so far do not make this a "real" Phi. */
    if (in[i] == res || in[i] == known)
      continue;

    if (known == res)
      known = in[i];   /* first distinct predecessor found */
    else
      break;           /* second distinct predecessor: a real Phi */
  }

  /* i < 0: there is at most one predecessor, we don't need a phi node. */
  if (i < 0) {
    if (res != known) {
      /* Undo the speculative allocation: res is the newest node on the
         obstack, so it can simply be freed again. */
      edges_node_deleted(res, current_ir_graph);
      obstack_free(current_ir_graph->obst, res);
      if (is_Phi(known)) {
        /* If pred is a phi node we want to optimize it: If loops are matured in a bad
           order, an enclosing Phi node may become superfluous. */
        res = optimize_in_place_2(known);
        if (res != known)
          exchange(known, res);
      }
      else
        res = known;
    } else {
      /* An undefined value, e.g., in unreachable code. */
      res = new_Bad();
    }
  } else {
    res = optimize_node(res);  /* This is necessary to add the node to the hash table for cse. */
    IRN_VRFY_IRG(res, irg);
    /* Memory Phis in endless loops must be kept alive.
       As we can't distinguish these easily we keep all of them alive. */
    if (is_Phi(res) && mode == mode_M)
      add_End_keepalive(get_irg_end(irg), res);
  }

  return res;
}  /* new_rd_Phi_in */
-
-static ir_node *
-get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
-
-static ir_node *
-phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
-
-/**
- * Construct a new frag_array for node n.
- * Copy the content from the current graph_arr of the corresponding block:
- * this is the current state.
- * Set ProjM(n) as current memory state.
- * Further the last entry in frag_arr of current block points to n. This
- * constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
- */
-static inline ir_node **new_frag_arr(ir_node *n) {
- ir_node **arr;
- int opt;
-
- arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
- memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
- sizeof(ir_node *)*current_ir_graph->n_loc);
-
- /* turn off optimization before allocating Proj nodes, as res isn't
- finished yet. */
- opt = get_opt_optimize(); set_optimize(0);
- /* Here we rely on the fact that all frag ops have Memory as first result! */
- if (is_Call(n)) {
- arr[0] = new_Proj(n, mode_M, pn_Call_M);
- } else if (is_CopyB(n)) {
- arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
- } else {
- assert((pn_Quot_M == pn_DivMod_M) &&
- (pn_Quot_M == pn_Div_M) &&
- (pn_Quot_M == pn_Mod_M) &&
- (pn_Quot_M == pn_Load_M) &&
- (pn_Quot_M == pn_Store_M) &&
- (pn_Quot_M == pn_Alloc_M) &&
- (pn_Quot_M == pn_Bound_M));
- arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
- }
- set_optimize(opt);
-
- current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
- return arr;
-} /* new_frag_arr */
-
-/**
- * Returns the frag_arr from a node.
- */
-static inline ir_node **get_frag_arr(ir_node *n) {
- switch (get_irn_opcode(n)) {
- case iro_Call:
- return n->attr.call.exc.frag_arr;
- case iro_Alloc:
- return n->attr.alloc.exc.frag_arr;
- case iro_Load:
- return n->attr.load.exc.frag_arr;
- case iro_Store:
- return n->attr.store.exc.frag_arr;
- default:
- return n->attr.except.frag_arr;
- }
-} /* get_frag_arr */
-
/**
 * Store val at position pos in frag_arr (if that slot is still empty) and
 * propagate it along the chain of frag arrays of later fragile ops in the
 * same block: the last slot (n_loc - 1) of each array points to the next
 * fragile op whose own frag_arr must be updated as well.
 *
 * @param frag_arr  the frag array to start from
 * @param pos       the variable slot to fill
 * @param val       the value to store
 */
static void
set_frag_value(ir_node **frag_arr, int pos, ir_node *val) {
#ifdef DEBUG_libfirm
  int i;

  /* In debug builds bound the chain walk so a cyclic chain trips the
     assertion below instead of looping forever. */
  for (i = 1024; i >= 0; --i)
#else
  for (;;)
#endif
  {
    /* Only fill empty slots: an existing entry is a later definition. */
    if (frag_arr[pos] == NULL)
      frag_arr[pos] = val;
    if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
      /* Follow the chain to the next fragile op's frag array. */
      ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
      assert(arr != frag_arr && "Endless recursion detected");
      frag_arr = arr;
    } else
      return;
  }
  /* Reached only in DEBUG_libfirm builds when the bounded loop exhausts. */
  assert(!"potential endless recursion in set_frag_value");
}  /* set_frag_value */
-
-static ir_node *
-get_r_frag_value_internal(ir_node *block, ir_node *cfOp, int pos, ir_mode *mode) {
- ir_node *res;
- ir_node **frag_arr;
-
- assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
-
- frag_arr = get_frag_arr(cfOp);
- res = frag_arr[pos];
- if (res == NULL) {
- if (block->attr.block.graph_arr[pos] != NULL) {
- /* There was a set_value() after the cfOp and no get_value() before that
- set_value(). We must build a Phi node now. */
- if (block->attr.block.is_matured) {
- int ins = get_irn_arity(block);
- ir_node **nin;
- NEW_ARR_A(ir_node *, nin, ins);
- res = phi_merge(block, pos, mode, nin, ins);
- } else {
- res = new_rd_Phi0(current_ir_graph, block, mode);
- res->attr.phi.u.pos = pos;
- res->attr.phi.next = block->attr.block.phis;
- block->attr.block.phis = res;
- }
- assert(res != NULL);
- /* It's a Phi, we can write this into all graph_arrs with NULL */
- set_frag_value(block->attr.block.graph_arr, pos, res);
- } else {
- res = get_r_value_internal(block, pos, mode);
- set_frag_value(block->attr.block.graph_arr, pos, res);
- }
- }