projects
/
libfirm
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Correct a subtle bug in the ia32 backend: Sub(x, x) triggered that the Neg+Add trick...
[libfirm]
/
ir
/
be
/
beschedtrace.c
diff --git a/ir/be/beschedtrace.c b/ir/be/beschedtrace.c
index
9340693
..
7362abb
100644
(file)
--- a/ir/be/beschedtrace.c
+++ b/ir/be/beschedtrace.c
@@
-24,17
+24,16
@@
* @date 28.08.2006
* @version $Id$
*/
* @date 28.08.2006
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
#include "config.h"
-#endif
#include <stdlib.h>
#include "iredges_t.h"
#include <stdlib.h>
#include "iredges_t.h"
-#include "besched_t.h"
+#include "besched.h"
#include "belistsched.h"
#include "belistsched.h"
-#include "benode_t.h"
+#include "benode.h"
+#include "belive.h"
/* we need a special mark */
static char _mark;
/* we need a special mark */
static char _mark;
@@
-52,7
+51,6
@@
typedef struct _trace_irn {
typedef struct _trace_env {
trace_irn_t *sched_info; /**< trace scheduling information about the nodes */
typedef struct _trace_env {
trace_irn_t *sched_info; /**< trace scheduling information about the nodes */
- const arch_env_t *arch_env; /**< the arch environment */
sched_timestep_t curr_time; /**< current time of the scheduler */
void *selector_env; /**< the backend selector environment */
const list_sched_selector_t *selector; /**< the actual backend selector */
sched_timestep_t curr_time; /**< current time of the scheduler */
void *selector_env; /**< the backend selector environment */
const list_sched_selector_t *selector; /**< the actual backend selector */
@@
-74,7
+72,7
@@
static ir_node *get_nodeset_node(const ir_nodeset_t *nodeset)
/**
* Returns non-zero if the node is a root node
*/
/**
* Returns non-zero if the node is a root node
*/
-static INLINE unsigned is_root_node(trace_env_t *env, ir_node *n)
+static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
{
int idx = get_irn_idx(n);
@@
-85,7
+83,7
@@
static INLINE unsigned is_root_node(trace_env_t *env, ir_node *n)
/**
* Mark a node as root node
*/
/**
* Mark a node as root node
*/
-static INLINE void mark_root_node(trace_env_t *env, ir_node *n)
+static inline void mark_root_node(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
{
int idx = get_irn_idx(n);
@@
-96,7
+94,8
@@
static INLINE void mark_root_node(trace_env_t *env, ir_node *n)
/**
* Get the current delay.
*/
/**
* Get the current delay.
*/
-static INLINE sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-106,7
+105,8
@@
static INLINE sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
/**
* Set the current delay.
*/
/**
* Set the current delay.
*/
-static INLINE void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay) {
+static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-116,7
+116,8
@@
static INLINE void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t
/**
* Get the current etime.
*/
/**
* Get the current etime.
*/
-static INLINE sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
+static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-126,7
+127,8
@@
static INLINE sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
/**
* Set the current etime.
*/
/**
* Set the current etime.
*/
-static INLINE void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime) {
+static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-136,7
+138,8
@@
static INLINE void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t
/**
* Get the number of users.
*/
/**
* Get the number of users.
*/
-static INLINE unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-146,7
+149,8
@@
static INLINE unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
/**
* Set the number of users.
*/
/**
* Set the number of users.
*/
-static INLINE void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user) {
+static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-156,7
+160,8
@@
static INLINE void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_u
/**
* Get the register difference.
*/
/**
* Get the register difference.
*/
-static INLINE int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
+static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-166,7
+171,8
@@
static INLINE int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
/**
* Set the register difference.
*/
/**
* Set the register difference.
*/
-static INLINE void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff) {
+static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-176,7
+182,8
@@
static INLINE void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
/**
* Get the pre-order position.
*/
/**
* Get the pre-order position.
*/
-static INLINE int get_irn_preorder(trace_env_t *env, ir_node *n) {
+static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-186,7
+193,8
@@
static INLINE int get_irn_preorder(trace_env_t *env, ir_node *n) {
/**
* Set the pre-order position.
*/
/**
* Set the pre-order position.
*/
-static INLINE void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
+static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-196,7
+204,8
@@
static INLINE void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
/**
* Get the pre-order position.
*/
/**
* Get the pre-order position.
*/
-static INLINE unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
+static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-206,7
+215,8
@@
static INLINE unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
/**
* Set the pre-order position.
*/
/**
* Set the pre-order position.
*/
-static INLINE void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len) {
+static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len)
+{
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
int idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
@@
-216,7
+226,8
@@
static INLINE void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsig
/**
* returns the exec-time for node n.
*/
/**
* returns the exec-time for node n.
*/
-static sched_timestep_t exectime(trace_env_t *env, ir_node *n) {
+static sched_timestep_t exectime(trace_env_t *env, ir_node *n)
+{
if (be_is_Keep(n) || is_Proj(n))
return 0;
if (env->selector->exectime)
if (be_is_Keep(n) || is_Proj(n))
return 0;
if (env->selector->exectime)
@@
-227,7
+238,8
@@
static sched_timestep_t exectime(trace_env_t *env, ir_node *n) {
/**
* Calculates the latency for between two ops
*/
/**
* Calculates the latency for between two ops
*/
-static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
+static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle)
+{
/* a Keep hides a root */
if (be_is_Keep(curr))
return exectime(env, pred);
/* a Keep hides a root */
if (be_is_Keep(curr))
return exectime(env, pred);
@@
-248,7
+260,8
@@
static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle,
/**
* Returns the number of users of a node having mode datab.
*/
/**
* Returns the number of users of a node having mode datab.
*/
-static int get_num_successors(ir_node *irn) {
+static int get_num_successors(ir_node *irn)
+{
int sum = 0;
const ir_edge_t *edge;
int sum = 0;
const ir_edge_t *edge;
@@
-278,14
+291,15
@@
static int get_num_successors(ir_node *irn) {
/**
* Returns the difference of regs_output - regs_input;
*/
/**
* Returns the difference of regs_output - regs_input;
*/
-static int get_reg_difference(trace_env_t *env, ir_node *irn) {
+static int get_reg_difference(trace_env_t *env, ir_node *irn)
+{
int num_out = 0;
int num_in = 0;
int i;
ir_node *block = get_nodes_block(irn);
if (be_is_Call(irn)) {
int num_out = 0;
int num_in = 0;
int i;
ir_node *block = get_nodes_block(irn);
if (be_is_Call(irn)) {
- /* we want calls prefered */
+ /* we want calls preferred */
return -5;
}
return -5;
}
@@
-305,13
+319,16
@@
static int get_reg_difference(trace_env_t *env, ir_node *irn) {
for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
ir_node *in = get_irn_n(irn, i);
for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
ir_node *in = get_irn_n(irn, i);
- if (mode_is_datab(get_irn_mode(in)) && /* must be data node */
- ! arch_irn_is(env->arch_env, in, ignore) && /* ignore "ignore" nodes :) */
- ! be_is_live_end(env->liveness, block, in) /* if the value lives outside of block: do not count */
+ if (!mode_is_datab(get_irn_mode(in)))
+ continue;
- ) {
- num_in++;
- }
+ if (arch_irn_is_ignore(in))
+ continue;
+
+ if (be_is_live_end(env->liveness, block, in))
+ continue;
+
+ num_in++;
}
return num_out - num_in;
}
return num_out - num_in;
@@
-320,7
+337,8
@@
static int get_reg_difference(trace_env_t *env, ir_node *irn) {
/**
* descent into a dag and create a pre-order list.
*/
/**
* descent into a dag and create a pre-order list.
*/
-static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len) {
+static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len)
+{
int i;
if (! is_Phi(root)) {
int i;
if (! is_Phi(root)) {
@@
-364,7
+382,8
@@
static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *
/**
* Returns non-zero if root is a root in the block block.
*/
/**
* Returns non-zero if root is a root in the block block.
*/
-static int is_root(ir_node *root, ir_node *block) {
+static int is_root(ir_node *root, ir_node *block)
+{
const ir_edge_t *edge;
foreach_out_edge(root, edge) {
const ir_edge_t *edge;
foreach_out_edge(root, edge) {
@@
-384,7
+403,8
@@
static int is_root(ir_node *root, ir_node *block) {
/**
* Performs initial block calculations for trace scheduling.
*/
/**
* Performs initial block calculations for trace scheduling.
*/
-static void trace_preprocess_block(trace_env_t *env, ir_node *block) {
+static void trace_preprocess_block(trace_env_t *env, ir_node *block)
+{
ir_node *root = NULL, *preord = NULL;
ir_node *curr, *irn;
int cur_pos;
ir_node *root = NULL, *preord = NULL;
ir_node *curr, *irn;
int cur_pos;
@@
-428,7
+448,7
@@
static void trace_preprocess_block(trace_env_t *env, ir_node *block) {
for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
sched_timestep_t d;
for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
sched_timestep_t d;
- if (arch_irn_class_is(curr, branch)) {
+ if (is_cfop(curr)) {
/* assure, that branches can be executed last */
d = 0;
}
/* assure, that branches can be executed last */
d = 0;
}
@@
-462,7
+482,8
@@
static void trace_preprocess_block(trace_env_t *env, ir_node *block) {
/**
* This functions gets called after a node finally has been made ready.
*/
/**
* This functions gets called after a node finally has been made ready.
*/
-static void trace_node_ready(void *data, ir_node *irn, ir_node *pred) {
+static void trace_node_ready(void *data, ir_node *irn, ir_node *pred)
+{
trace_env_t *env = data;
sched_timestep_t etime_p, etime;
trace_env_t *env = data;
sched_timestep_t etime_p, etime;
@@
-480,9
+501,10
@@
static void trace_node_ready(void *data, ir_node *irn, ir_node *pred) {
/**
* Update the current time after irn has been selected.
*/
/**
* Update the current time after irn has been selected.
*/
-static void trace_update_time(void *data, ir_node *irn) {
+static void trace_update_time(void *data, ir_node *irn)
+{
trace_env_t *env = data;
trace_env_t *env = data;
- if (is_Phi(irn) || get_irn_opcode(irn) == iro_Start) {
+ if (is_Phi(irn) || get_irn_opcode(irn) == beo_Start) {
env->curr_time += get_irn_etime(env, irn);
}
else {
env->curr_time += get_irn_etime(env, irn);
}
else {
@@
-495,15
+517,15
@@
static void trace_update_time(void *data, ir_node *irn) {
* @param birg The backend irg object
* @return The environment
*/
* @param birg The backend irg object
* @return The environment
*/
-static trace_env_t *trace_init(const be_irg_t *birg) {
+static trace_env_t *trace_init(const be_irg_t *birg)
+{
trace_env_t *env = XMALLOCZ(trace_env_t);
ir_graph *irg = be_get_birg_irg(birg);
int nn = get_irg_last_idx(irg);
trace_env_t *env = XMALLOCZ(trace_env_t);
ir_graph *irg = be_get_birg_irg(birg);
int nn = get_irg_last_idx(irg);
- env->arch_env = be_get_birg_arch_env(birg);
env->curr_time = 0;
env->sched_info = NEW_ARR_F(trace_irn_t, nn);
env->curr_time = 0;
env->sched_info = NEW_ARR_F(trace_irn_t, nn);
- env->liveness = be_liveness(birg);
+ env->liveness = be_liveness(irg);
FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.trace");
be_liveness_assure_chk(env->liveness);
FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.trace");
be_liveness_assure_chk(env->liveness);
@@
-516,7
+538,8
@@
static trace_env_t *trace_init(const be_irg_t *birg) {
* Frees all memory allocated for trace scheduling environment.
* @param env The environment
*/
* Frees all memory allocated for trace scheduling environment.
* @param env The environment
*/
-static void trace_free(void *data) {
+static void trace_free(void *data)
+{
trace_env_t *env = data;
be_liveness_free(env->liveness);
DEL_ARR_F(env->sched_info);
trace_env_t *env = data;
be_liveness_free(env->liveness);
DEL_ARR_F(env->sched_info);
@@
-533,7
+556,7
@@
static ir_node *basic_selection(ir_nodeset_t *ready_set)
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
- if (!arch_irn_class_is(irn, branch)) {
+ if (!is_cfop(irn)) {
return irn;
}
}
return irn;
}
}
@@
-585,7
+608,7
@@
static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set, ir_node
if (cnt == 1) {
irn = get_nodeset_node(&ecands);
if (cnt == 1) {
irn = get_nodeset_node(&ecands);
- if (arch_irn_class_is(irn, branch)) {
+ if (is_cfop(irn)) {
/* BEWARE: don't select a JUMP if others are still possible */
goto force_mcands;
}
/* BEWARE: don't select a JUMP if others are still possible */
goto force_mcands;
}
@@
-661,7
+684,7
@@
static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns, ir_nodeset_t
/* priority based selection, heuristic inspired by mueller diss */
foreach_ir_nodeset(ns, irn, iter) {
/* make sure that branches are scheduled last */
/* priority based selection, heuristic inspired by mueller diss */
foreach_ir_nodeset(ns, irn, iter) {
/* make sure that branches are scheduled last */
- if (!arch_irn_class_is(irn, branch)) {
+ if (!is_cfop(irn)) {
int rdiff = get_irn_reg_diff(trace_env, irn);
int sign = rdiff < 0;
int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
int rdiff = get_irn_reg_diff(trace_env, irn);
int sign = rdiff < 0;
int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;