skip_barrier(start_blk, irg);
}
+/*
+ * Tries to optimize a be_IncSP node with its previous IncSP node.
+ * Must be run from a be_peephole_opt() context.
+ */
+void be_peephole_IncSP_IncSP(ir_node *node)
+{
+ int pred_offs;
+ int curr_offs;
+ int offs;
+ ir_node *pred = be_get_IncSP_pred(node);
+ ir_node *predpred;
+
+ if (!be_is_IncSP(pred))
+ return;
+
+ if (get_irn_n_edges(pred) > 1)
+ return;
+
+ pred_offs = be_get_IncSP_offset(pred);
+ curr_offs = be_get_IncSP_offset(node);
+
+ if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
+ if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
+ return;
+ }
+ offs = 0;
+ } else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
+ if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
+ return;
+ }
+ offs = 0;
+ } else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND ||
+ curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
+ return;
+ } else {
+ offs = curr_offs + pred_offs;
+ }
+
+ /* add pred offset to ours and remove pred IncSP */
+ be_set_IncSP_offset(node, offs);
+
+ predpred = be_get_IncSP_pred(pred);
+ be_peephole_before_exchange(pred, predpred);
+
+ /* rewire dependency edges */
+ edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);
+ be_set_IncSP_pred(node, predpred);
+ sched_remove(pred);
+ be_kill_node(pred);
+
+ be_peephole_after_exchange(predpred);
+}
void be_peephole_opt(be_irg_t *birg)
{
*/
void be_peephole_after_exchange(ir_node *new_node);
+/**
+ * Tries to optimize a be_IncSP node with its previous IncSP node.
+ * Must be run from a be_peephole_opt() context.
+ *
+ * @param node a be_IncSP node
+ */
+void be_peephole_IncSP_IncSP(ir_node *node);
+
/**
* Do peephole optimisations. It traverses the schedule of all blocks in
* backward direction. The register_values variable indicates which (live)
be_set_IncSP_pred(irn, curr_sp);
}
-/**
- * Tries to optimize two following IncSP.
- */
-static void peephole_IncSP_IncSP(ir_node *node)
-{
- int pred_offs;
- int curr_offs;
- int offs;
- ir_node *pred = be_get_IncSP_pred(node);
- ir_node *predpred;
-
- if(!be_is_IncSP(pred))
- return;
-
- if(get_irn_n_edges(pred) > 1)
- return;
-
- pred_offs = be_get_IncSP_offset(pred);
- curr_offs = be_get_IncSP_offset(node);
-
- if(pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
- if(curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
- return;
- }
- offs = 0;
- } else if(pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
- if(curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
- return;
- }
- offs = 0;
- } else if(curr_offs == BE_STACK_FRAME_SIZE_EXPAND
- || curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
- return;
- } else {
- offs = curr_offs + pred_offs;
- }
-
- /* add pred offset to ours and remove pred IncSP */
- be_set_IncSP_offset(node, offs);
-
- predpred = be_get_IncSP_pred(pred);
- be_peephole_before_exchange(pred, predpred);
-
- /* rewire dependency edges */
- edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);
- be_set_IncSP_pred(node, predpred);
- sched_remove(pred);
- be_kill_node(pred);
-
- be_peephole_after_exchange(predpred);
-}
-
/**
* Find a free GP register if possible, else return NULL.
*/
int offset;
/* first optimize incsp->incsp combinations */
- peephole_IncSP_IncSP(node);
+ be_peephole_IncSP_IncSP(node);
/* transform IncSP->Store combinations to Push where possible */
peephole_IncSP_Store_to_push(node);