# include "src/heapanal.h"
# include "src/interprete.h"
-// boilerplate stuff:
+/* boilerplate stuff: */
#include "libfirm/irvrfy.h"
#include "libfirm/trvrfy.h"
#include "libfirm/irdump.h"
-// I/O:
+/* I/O: */
# include <stdio.h>
/*
*/
void Java_firmjni_Heapanal_initAnal (JNIEnv *env, jclass clazz)
{
- // from interprete.c:
+ /* from interprete.c: */
init_interprete ();
}
fprintf (stdout, "Ajacs Boilerplate:\n");
{
- entity **free_methods = 0;
- int arr_len = 0;
+ entity **free_methods = 0;
+ int arr_len = 0;
/* replace static constant fields by constants
@@@ This corrects some code that is invalid Firm!!!
Execute before irg_vrfy(). */
- // ???
- // opt_load_const_static();
+ /* ??? */
+ /* opt_load_const_static(); */
- /* dump graphs as they come out of the front end */
- dump_file_suffix = "-fe";
- dump_all_types ();
- dump_class_hierarchy (true);
- dump_all_ir_graphs(dump_ir_block_graph);
- dump_all_ir_graphs(dump_ir_block_graph_w_types);
- dump_all_ir_graphs(dump_cfg);
+ /* dump graphs as they come out of the front end */
+ dump_file_suffix = "-fe";
+ dump_all_types ();
+ dump_class_hierarchy (true);
+ dump_all_ir_graphs(dump_ir_block_graph);
+ dump_all_ir_graphs(dump_ir_block_graph_w_types);
+ dump_all_ir_graphs(dump_cfg);
/* verify constructed graphs */
for (i = 0; i < get_irp_n_irgs(); i++)
- irg_vrfy(get_irp_irg(i));
+ irg_vrfy(get_irp_irg(i));
- /* verify something */
+ /* verify something */
tr_vrfy();
* test loop construction intraprocedural
*/
for (i = 0; i < get_irp_n_irgs(); i++) {
- construct_backedges(get_irp_irg(i));
+ construct_backedges(get_irp_irg(i));
if (1) {
- dump_loop_information();
- dump_file_suffix = "-1.2-intra-loop";
- dump_ir_block_graph(get_irp_irg(i));
- dont_dump_loop_information();
- dump_loop_tree(get_irp_irg(i), "-1.2-intra");
+ dump_loop_information();
+ dump_file_suffix = "-1.2-intra-loop";
+ dump_ir_block_graph(get_irp_irg(i));
+ dont_dump_loop_information();
+ dump_loop_tree(get_irp_irg(i), "-1.2-intra");
}
}
- DDMG (get_irp_main_irg ());
- assert(get_irp_main_irg());
- assert(get_irg_ent(get_irp_main_irg()));
+ DDMG (get_irp_main_irg ());
+ assert(get_irp_main_irg());
+ assert(get_irg_ent(get_irp_main_irg()));
/** Do interprocedural optimizations **/
Optimizes polymorphic calls.*/
cgana(&arr_len, &free_methods);
/* Remove methods that are never called. */
- // gc_irgs(arr_len, free_methods);
+ /* gc_irgs(arr_len, free_methods); */
/* Build the interprocedural dataflow representation */
cg_construct(arr_len, free_methods);
/* Test construction of interprocedural loop information */
- // construct_ip_backedges();
+ /* construct_ip_backedges(); */
- dump_loop_information();
- dump_file_suffix = "-1.2-inter-loop";
- dump_all_cg_block_graph();
- dont_dump_loop_information();
- dump_loop_tree(get_irp_main_irg(), "-1.2-inter");
+ dump_loop_information();
+ dump_file_suffix = "-1.2-inter-loop";
+ dump_all_cg_block_graph();
+ dont_dump_loop_information();
+ dump_loop_tree(get_irp_main_irg(), "-1.2-inter");
- fprintf (stdout, "HA:\n");
- DDMG (get_irp_main_irg ());
+ fprintf (stdout, "HA:\n");
+ DDMG (get_irp_main_irg ());
- set_max_chi_height(8); /* change ad lib */
- set_initial_context_depth(4); /* change as needed */
- ha_analyse_heap(get_irg_ent(get_irp_main_irg()), 1);
+ set_max_chi_height(8); /* change ad lib */
+ set_initial_context_depth(4); /* change as needed */
+ ha_analyse_heap(get_irg_ent(get_irp_main_irg()), 1);
/* Remove the interprocedural dataflow representation */
- free(free_methods);
- cg_destruct();
+ free(free_methods);
+ cg_destruct();
- /* verify optimized graphs */
- for (i = 0; i < get_irp_n_irgs(); i++) {
- irg_vrfy(get_irp_irg(i));
- }
+ /* verify optimized graphs */
+ for (i = 0; i < get_irp_n_irgs(); i++) {
+ irg_vrfy(get_irp_irg(i));
+ }
- tr_vrfy();
+ tr_vrfy();
}
set_opt_dump_abstvals (1);
- // ToDo: Dump ???
+ /* ToDo: Dump ??? */
fprintf (stdout, "Bye, Heap!\n");
}
/*
* $Log$
+ * Revision 1.2 2004/04/29 13:59:11 liekweg
+ * Removed C++-Style comments --flo
+ *
* Revision 1.1 2004/04/29 12:11:36 liekweg
* Moved ... to aux --flo
*
bool in_stack; /* Marks whether node is on the stack. */
int dfn; /* Depth first search number. */
int uplink; /* dfn number of ancestor. */
- // ir_loop *loop; /* Refers to the containing loop. */
+  /* ir_loop *loop; */ /* Refers to the containing loop. */
/*
struct section *section;
xset def;
static INLINE void
mark_irn_in_stack (ir_node *n) {
assert(get_irn_link(n));
- // to slow
- //((scc_info *)get_irn_link(n))->in_stack = true;
+  /* too slow */
+ /* ((scc_info *)get_irn_link(n))->in_stack = true; */
((scc_info *)n->link)->in_stack = true;
}
static INLINE void
mark_irn_not_in_stack (ir_node *n) {
assert(get_irn_link(n));
- // to slow
- //((scc_info *)get_irn_link(n))->in_stack = false;
+  /* too slow */
+ /* ((scc_info *)get_irn_link(n))->in_stack = false; */
((scc_info *)n->link)->in_stack = false;
}
static INLINE bool
irn_is_in_stack (ir_node *n) {
assert(get_irn_link(n));
- // to slow
- //return ((scc_info *)get_irn_link(n))->in_stack;
+  /* too slow */
+ /* return ((scc_info *)get_irn_link(n))->in_stack; */
return ((scc_info *)n->link)->in_stack;
}
static INLINE void
set_irn_uplink (ir_node *n, int uplink) {
assert(get_irn_link(n));
- // to slow
- //((scc_info *)get_irn_link(n))->uplink = uplink;
+  /* too slow */
+ /* ((scc_info *)get_irn_link(n))->uplink = uplink; */
((scc_info *)n->link)->uplink = uplink;
}
static INLINE int
get_irn_uplink (ir_node *n) {
assert(get_irn_link(n));
- // to slow
- //return ((scc_info *)get_irn_link(n))->uplink;
+  /* too slow */
+ /* return ((scc_info *)get_irn_link(n))->uplink; */
return ((scc_info *)n->link)->uplink;
}
static INLINE void
set_irn_dfn (ir_node *n, int dfn) {
assert(get_irn_link(n));
- // to slow
- //((scc_info *)get_irn_link(n))->dfn = dfn;
+  /* too slow */
+ /* ((scc_info *)get_irn_link(n))->dfn = dfn; */
((scc_info *)n->link)->dfn = dfn;
}
static INLINE int
get_irn_dfn (ir_node *n) {
assert(get_irn_link(n));
- // to slow
- //return ((scc_info *)get_irn_link(n))->dfn;
+  /* too slow */
+ /* return ((scc_info *)get_irn_link(n))->dfn; */
return ((scc_info *)n->link)->dfn;
}
The mem is not lost as its on the obstack. */
ir_node *cb = get_Proj_pred(n);
if ((intern_get_irn_op(cb) == op_CallBegin) ||
- (intern_get_irn_op(cb) == op_EndReg) ||
- (intern_get_irn_op(cb) == op_EndExcept)) {
+ (intern_get_irn_op(cb) == op_EndReg) ||
+ (intern_get_irn_op(cb) == op_EndExcept)) {
init_node(cb, NULL);
init_node(get_nodes_Block(cb), NULL);
}
m = stack[i];
/*printf(" Visiting %d ", i); DDMN(m);*/
if (is_ip_cfop(m)) {
- current_ir_graph = get_irn_irg(m);
- break;
+ current_ir_graph = get_irn_irg(m);
+ break;
}
if (intern_get_irn_op(m) == op_Filter) {
- /* Find the corresponding ip_cfop */
- ir_node *pred = stack[i+1];
- int j;
- for (j = 0; j < get_Filter_n_cg_preds(m); j++)
- if (get_Filter_cg_pred(m, j) == pred) break;
- if (j >= get_Filter_n_cg_preds(m))
- /* It is a filter we didn't pass as the predecessors are marked. */
- continue;
- assert(get_Filter_cg_pred(m, j) == pred);
- switch_irg(m, j);
- break;
+ /* Find the corresponding ip_cfop */
+ ir_node *pred = stack[i+1];
+ int j;
+ for (j = 0; j < get_Filter_n_cg_preds(m); j++)
+ if (get_Filter_cg_pred(m, j) == pred) break;
+ if (j >= get_Filter_n_cg_preds(m))
+ /* It is a filter we didn't pass as the predecessors are marked. */
+ continue;
+ assert(get_Filter_cg_pred(m, j) == pred);
+ switch_irg(m, j);
+ break;
}
}
}
if (is_backedge(n, i)) continue;
m = intern_get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
- //if ((!m) || (intern_get_irn_op(m) == op_Unknown)) continue;
+ /* if ((!m) || (intern_get_irn_op(m) == op_Unknown)) continue; */
scc (m);
if (irn_is_in_stack(m)) {
/* Uplink of m is smaller if n->m is a backedge.
pop_scc_unmark_visit (n);
/* and recompute it in a better order; and so that it goes into
the new loop. */
- // GL @@@ remove experimental stuff rem = find_irg_on_stack(tail);
+ /* GL @@@ remove experimental stuff rem = find_irg_on_stack(tail); */
scc (tail);
- // GL @@@ remove experimental stuff current_ir_graph = rem;
+ /* GL @@@ remove experimental stuff current_ir_graph = rem; */
assert (irn_visited(n));
#if NO_LOOPS_WITHOUT_HEAD
tp = find_pointer_type_to(get_glob_type());
else if (get_Proj_proj(n) == pns_value_arg_base) {
VERBOSE_UNKNOWN_TYPE(("Value arg base proj %ld from Start: unknown type\n", get_irn_node_nr(n)));
- tp = unknown_type; //find_pointer_type_to(get....(get_entity_type(get_irg_ent(get_Start_irg(pred)))));
+ tp = unknown_type; /* find_pointer_type_to(get....(get_entity_type(get_irg_ent(get_Start_irg(pred))))); */
} else {
VERBOSE_UNKNOWN_TYPE(("Proj %ld %ld from Start: unknown type\n", get_Proj_proj(n), get_irn_node_nr(n)));
tp = unknown_type;
/* value args pointer */
if (get_Proj_proj(n) == pncl_value_res_base) {
VERBOSE_UNKNOWN_TYPE(("Value res base Proj %ld from Call: unknown type\n", get_irn_node_nr(n)));
- tp = unknown_type; //find_pointer_type_to(get....get_Call_type(pred));
+ tp = unknown_type; /* find_pointer_type_to(get....get_Call_type(pred)); */
} else {
VERBOSE_UNKNOWN_TYPE(("Proj %ld %ld from Call: unknown type\n", get_Proj_proj(n), get_irn_node_nr(n)));
tp = unknown_type;
type *tp1 = NULL, *tp2 = NULL;
ir_node *a = NULL, *b = NULL;
- //DDMN(n);
+ /* DDMN(n); */
if (is_unop(n)) {
a = get_unop_op(n);
type *ana_res_type = get_irn_type(get_Return_res(n, i));
if (ana_res_type == unknown_type) continue;
if (res_type != ana_res_type && "return value has wrong type") {
- DDMN(n);
- assert(res_type == ana_res_type && "return value has wrong type");
+ DDMN(n);
+ assert(res_type == ana_res_type && "return value has wrong type");
}
}
*/
tp1 = compute_irn_type(get_Phi_pred(n, i));
assert(tp1 != initial_type);
if ((tp1 != phi_cycle_type) && (tp1 != none_type))
- break;
+ break;
}
/* find a second real type */
for (; (i < n_preds); ++i) {
tp2 = compute_irn_type(get_Phi_pred(n, i));
if ((tp2 == phi_cycle_type) || (tp2 == none_type)) {
- tp2 = tp1;
- continue;
+ tp2 = tp1;
+ continue;
}
if (tp2 != tp1) break;
}
- //printf("Types in Phi %s and %s \n", get_type_name(tp1), get_type_name(tp2));
+ /* printf("Types in Phi %s and %s \n", get_type_name(tp1), get_type_name(tp2)); */
if (tp1 == tp2) { tp = tp1; break; }
VERBOSE_UNKNOWN_TYPE(("Phi %ld with two different types: %s, %s: unknown type.\n", get_irn_node_nr(n),
- get_type_name(tp1), get_type_name(tp2)));
+ get_type_name(tp1), get_type_name(tp2)));
tp = unknown_type;
} break;
case iro_Load: {
if (intern_get_irn_op(a) == op_Sel)
tp = get_entity_type(get_Sel_entity(a));
else if ((intern_get_irn_op(a) == op_Const) &&
- (tarval_is_entity(get_Const_tarval(a))))
+ (tarval_is_entity(get_Const_tarval(a))))
tp = get_entity_type(tarval_to_entity(get_Const_tarval(a)));
else if (is_pointer_type(compute_irn_type(a))) {
tp = get_pointer_points_to_type(get_irn_type(a));
/* catch special cases with fallthrough to binop/unop cases in default. */
case iro_Sub: {
if (mode_is_int(intern_get_irn_mode(n)) &&
- mode_is_reference(intern_get_irn_mode(a)) &&
- mode_is_reference(intern_get_irn_mode(b)) ) {
+ mode_is_reference(intern_get_irn_mode(a)) &&
+ mode_is_reference(intern_get_irn_mode(b)) ) {
VERBOSE_UNKNOWN_TYPE(("Sub %ld ptr - ptr = int: unknown type\n", get_irn_node_nr(n)));
tp = unknown_type; break;
}
} /* fall through to Add. */
case iro_Add: {
if (mode_is_reference(intern_get_irn_mode(n)) &&
- mode_is_reference(intern_get_irn_mode(a)) &&
- mode_is_int(intern_get_irn_mode(b)) ) {
+ mode_is_reference(intern_get_irn_mode(a)) &&
+ mode_is_int(intern_get_irn_mode(b)) ) {
tp = tp1; break;
}
if (mode_is_reference(intern_get_irn_mode(n)) &&
- mode_is_int(intern_get_irn_mode(a)) &&
- mode_is_reference(intern_get_irn_mode(b)) ) {
+ mode_is_int(intern_get_irn_mode(a)) &&
+ mode_is_reference(intern_get_irn_mode(b)) ) {
tp = tp2; break;
}
goto default_code;
if (is_binop(n)) {
if (tp1 == tp2) {
- tp = tp1;
- break;
+ tp = tp1;
+ break;
}
if((tp1 == phi_cycle_type) || (tp2 == phi_cycle_type)) {
- tp = phi_cycle_type;
- break;
+ tp = phi_cycle_type;
+ break;
}
VERBOSE_UNKNOWN_TYPE(("Binop %ld with two different types: %s, %s: unknown type \n", get_irn_node_nr(n),
- get_type_name(tp1), get_type_name(tp2)));
+ get_type_name(tp1), get_type_name(tp2)));
tp = unknown_type;
break;
}
} break; /* default */
} /* end switch */
- //printf (" found %s ", get_type_name(tp)); DDM;
+ /* printf (" found %s ", get_type_name(tp)); DDM; */
return tp;
}
static type* compute_irn_type(ir_node *n) {
- //DDMN(n);
+ /* DDMN(n); */
type *tp = get_irn_type(n);
set_irn_type(n, tp);
}
- //printf (" found %s ", get_type_name(tp)); DDM;
+ /* printf (" found %s ", get_type_name(tp)); DDM; */
return tp;
}
type *tp = get_irn_type(n);
if (tp == phi_cycle_type) {
- //printf(" recomputing for phi_cycle_type "); DDMN(n);
+ /* printf(" recomputing for phi_cycle_type "); DDMN(n); */
set_irn_type(n, initial_type);
}
* Not yet implemented, but I guess we want this for iropt, to find the
* type for newly allocated constants.
*/
-//type *analyse_irn_type(ir_node *node);
+/* type *analyse_irn_type(ir_node *node); */
#endif /* _IRSIMPLETYPE_H_ */
get_firm_walk_link. */
} fw_data;
-//@{
+/* @{ */
/** Access macros to fw_data structure */
#define FW_GET_DATA_LIST(s) ((s)->list)
#define FW_SET_DATA_LIST(s, t) ((s)->list = (t))
#define FW_GET_DATA_LINK(s) ((s)->link)
#define FW_SET_DATA_LINK(s, t) ((s)->link = (t))
-//@}
+/* @} */
/** Returns own data struct of the firm walker.
*
}
}
break;
- default: {} // other kinds of firm nodes
+ default: {} /* other kinds of firm nodes */
}
return data;
}
}
break;
- default: {} // other kinds of firm nodes
+ default: {} /* other kinds of firm nodes */
}
}
-// documentation in header file
+/* documentation in header file */
void set_firm_walk_link(void *thing, void *link)
{
fw_data *data;
case k_ir_mode:
set_mode_link(thing, link);
break;
- default: {} // other kinds of firm nodes
+ default: {} /* other kinds of firm nodes */
}
}
-// documentation in header file
+/* documentation in header file */
void *get_firm_walk_link(void *thing)
{
fw_data *data;
switch (get_kind(tore)) {
case k_entity:
ent = (entity *)tore;
- // append entity to list
+ /* append entity to list */
set_entity_link(ent, NULL);
if (!pmap_contains(entity_map, ent))
pmap_insert(entity_map, ent, env);
case k_type:
tp = (type *)tore;
mode = get_type_mode(tp);
- // append type to list
+ /* append type to list */
set_type_link(tp, NULL);
if (!pmap_contains(type_map, tp))
pmap_insert(type_map, tp, env);
pmap_insert(mode_map, mode_b, NULL);
*/
- // Collect all types (also unused types) if flag is set
+ /* Collect all types (also unused types) if flag is set */
if (FW_WITH_ALL_TYPES & flags)
type_walk(fw_collect_tore, NULL, NULL);
- // for each ir graph
+ /* for each ir graph */
for (i = 0; i < get_irp_n_irgs(); i++)
{
ir_graph *irg = get_irp_irg(i);
pmap_destroy(entity_map);
entity_map = NULL;
- // free all collected data from ir graphs and nodes
+ /* free all collected data from ir graphs and nodes */
for (i = 0; i < get_irp_n_irgs(); i++)
{
ir_graph *irg = get_irp_irg(i);
irn_list = FW_GET_DATA_LIST(data);
irn_list_len = ARR_LEN(irn_list);
- // call block as prefix ir node
+ /* call block as prefix ir node */
if ((wif->do_node) &&
(wif->flags & FW_DUMP_BLOCK_AS_IRN & !FW_DUMP_IRN_IN_PREFIX))
wif->do_node(block, wif->env);
- // do ir nodes in prefix or postfix order?
+ /* do ir nodes in prefix or postfix order? */
if (wif->flags & FW_DUMP_IRN_IN_PREFIX)
irn_i = irn_list_len-1;
else
{
if (wif->do_node) wif->do_node((ir_node *)irn_list[irn_i], wif->env);
- // do ir nodes in prefix or postfix order?
+ /* do ir nodes in prefix or postfix order? */
if (wif->flags & FW_DUMP_IRN_IN_PREFIX)
irn_i--;
else
irn_i++;
}
- // call block as postfix ir node
+ /* call block as postfix ir node */
if ((wif->do_node) &&
(wif->flags & (FW_DUMP_BLOCK_AS_IRN | FW_DUMP_IRN_IN_PREFIX)))
wif->do_node(block, wif->env);
/* wall over all block's ir nodes nested end =============== */
if (wif->do_block_finalize) wif->do_block_finalize(current_ir_graph, wif->env);
- } // for each block
+ } /* for each block */
/* walk over all irg's block nested end ====================== */
- } // for each ir graph irg
+ } /* for each ir graph irg */
if (wif->do_graph_finalize) wif->do_graph_finalize(wif->env);
/** ### ToDo: Dump const_code_irg ?? No! Dump const code with entities, types etc. */
typedef void firm_walk_entity_func(entity *ent, void *env);
/** Graph callback function definition */
typedef void firm_walk_graph_func(ir_graph *irg, void *env);
-//@{
+/* @{ */
/** Block callback function definition */
typedef void firm_walk_block_init_func(ir_graph *irg, void *env);
typedef void firm_walk_block_func(ir_node *block, void *env);
typedef void firm_walk_block_finalize_func(ir_graph *irg, void *env);
-//@}
+/* @} */
/** Node callback function definition */
typedef void firm_walk_node_func (ir_node *irn, void *env);
/** Interface of the firm walker */
typedef struct
{
- //@{
+ /* @{ */
/** Interface function to dump all used and internal modes.
Internal modes are: BB, X, M and T */
firm_walk_init_func *do_mode_init;
firm_walk_mode_func *do_mode;
firm_walk_finalize_func *do_mode_finalize;
- //@}
+ /* @} */
- //@{
+ /* @{ */
/** Interface to dump all collected types.
*
* @node To dump all (not only used types by default) a special walk
firm_walk_init_func *do_type_init;
firm_walk_type_func *do_type;
firm_walk_finalize_func *do_type_finalize;
- //@}
+ /* @} */
- //@{
+ /* @{ */
/** Dumping interface for entities */
firm_walk_init_func *do_entity_init;
firm_walk_entity_func *do_entity;
firm_walk_finalize_func *do_entity_finalize;
- //@}
+ /* @} */
/** Dumps all graphs and subnodes.
*
firm_walk_graph_func *do_graph;
firm_walk_finalize_func *do_graph_finalize;
- //@{
+ /* @{ */
/** Dumping interface for blocks. If blocks should be handled like
* like a normal ir node, a special walker flag could be set.
* @see do_graph */
firm_walk_block_init_func *do_block_init;
firm_walk_block_func *do_block;
firm_walk_block_finalize_func *do_block_finalize;
- //@}
+ /* @} */
/** Dumping interface for ir nodes
* @see do_graph */
*
* @see new_get_id_str(), id_from_str(), get_id_str(), id_is_prefix()
*/
-//int id_contains(ident *infix, ident *id);
+/* int id_contains(ident *infix, ident *id); */
/**
* Return true if an ident contains a given character.
#include "irflag_t.h"
-// # define CATE_jni
+/* # define CATE_jni */
/* Datenstruktur für jede Methode */
typedef struct {
int count; /* GL: anzahl aufrufer */
- bool open; /* offene Methode (mit unbekanntem Aufrufer) */
+ bool open; /* offene Methode (mit unbekanntem Aufrufer) */
ir_node * reg, * mem, ** res; /* EndReg, Mem und Rückgabewerte */
ir_node * except, * except_mem; /* EndExcept und Mem für Ausnahmeabbruch */
} irg_data_t;
for (call = get_irn_link(get_irg_end(irg)); call; call = get_irn_link(call)) {
if (intern_get_irn_op(call) != op_Call) continue;
for (j = get_Call_n_callees(call) - 1; j >= 0; --j) {
- entity * ent = get_Call_callee(call, j);
- if (ent) {
- irg_data_t * data = get_entity_link(ent);
+ entity * ent = get_Call_callee(call, j);
+ if (ent) {
+ irg_data_t * data = get_entity_link(ent);
# ifndef CATE_jni
- assert(get_entity_irg(ent) && data);
- ++data->count;
+ assert(get_entity_irg(ent) && data);
+ ++data->count;
# endif /* ndef CATE_jni */
- }
+ }
}
}
}
* block has no predecessors. */
static INLINE ir_node *get_cg_Unknown(ir_mode *m) {
assert((get_Block_n_cfgpreds(get_irg_start_block(get_irp_main_irg())) == 1) &&
- (get_nodes_block(get_Block_cfgpred(get_irg_start_block(get_irp_main_irg()), 0)) ==
- get_irg_start_block(get_irp_main_irg())));
+ (get_nodes_block(get_Block_cfgpred(get_irg_start_block(get_irp_main_irg()), 0)) ==
+ get_irg_start_block(get_irp_main_irg())));
return new_r_Unknown(get_irp_main_irg(), m);
}
* den Start-Block verschieben. */
for (proj = get_irn_link(get_irg_start(irg)); proj; proj = get_irn_link(proj)) {
if (get_Proj_pred(proj) != get_irg_start(irg)
- || (get_Proj_proj(proj) != pns_initial_exec && get_Proj_proj(proj) != pns_args)) {
+ || (get_Proj_proj(proj) != pns_initial_exec && get_Proj_proj(proj) != pns_args)) {
ir_node * filter = exchange_proj(proj);
set_Filter_cg_pred_arr(filter, n_callers, in);
} else {
ir_node * filter = get_Id_pred(proj);
assert(get_irn_op(filter) == op_Filter);
if (filter != link && get_irn_link(filter) == NULL) {
- set_irn_link(link, filter);
- link = filter;
+ set_irn_link(link, filter);
+ link = filter;
}
}
}
set_Block_cg_cfgpred(start_block, 0, get_cg_Unknown(mode_X));
for (proj = get_irn_link(get_irg_start(irg)); proj; proj = get_irn_link(proj)) {
if (intern_get_irn_op(proj) == op_Filter) {
- set_Filter_cg_pred(proj, 0, get_cg_Unknown(intern_get_irn_mode(proj)));
+ set_Filter_cg_pred(proj, 0, get_cg_Unknown(intern_get_irn_mode(proj)));
}
}
data->count = 1;
for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
if (intern_get_irn_op(cfgpred_arr[i]) == op_Return) {
if (ret_arr) {
- ARR_APP1(ir_node *, ret_arr, cfgpred_arr[i]);
+ ARR_APP1(ir_node *, ret_arr, cfgpred_arr[i]);
} else {
- ret_arr = NEW_ARR_F(ir_node *, 1);
- ret_arr[0] = cfgpred_arr[i];
+ ret_arr = NEW_ARR_F(ir_node *, 1);
+ ret_arr[0] = cfgpred_arr[i];
}
++n_ret;
}
ir_mode *mode = NULL;
/* In[0] could be a Bad node with wrong mode. */
for (i = n_ret - 1; i >= 0; --i) {
- in[i] = get_Return_res(ret_arr[i], j);
- if (!mode && intern_get_irn_mode(in[i]) != mode_T)
- mode = intern_get_irn_mode(in[i]);
+ in[i] = get_Return_res(ret_arr[i], j);
+ if (!mode && intern_get_irn_mode(in[i]) != mode_T)
+ mode = intern_get_irn_mode(in[i]);
}
if (mode)
- data->res[j] = new_Phi(n_ret, in, mode);
+ data->res[j] = new_Phi(n_ret, in, mode);
else /* All preds are Bad */
- data->res[j] = new_Bad();
+ data->res[j] = new_Bad();
}
DEL_ARR_F(in);
for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
if (intern_get_irn_op(cfgpred_arr[i]) != op_Return) {
if (except_arr) {
- ARR_APP1(ir_node *, except_arr, cfgpred_arr[i]);
+ ARR_APP1(ir_node *, except_arr, cfgpred_arr[i]);
} else {
- except_arr = NEW_ARR_F(ir_node *, 1);
- except_arr[0] = cfgpred_arr[i];
+ except_arr = NEW_ARR_F(ir_node *, 1);
+ except_arr[0] = cfgpred_arr[i];
}
++n_except;
}
for (i = n_except - 1; i >= 0; --i) {
ir_node * node = skip_Proj(except_arr[i]);
if (intern_get_irn_op(node) == op_Call) {
- in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 3);
+ in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 3);
} else if (intern_get_irn_op(node) == op_Raise) {
- in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 1);
+ in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 1);
} else {
- assert(is_fragile_op(node));
- /* We rely that all cfops have the memory output at the same position. */
- in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 0);
+ assert(is_fragile_op(node));
+ /* We rely that all cfops have the memory output at the same position. */
+ in[i] = new_r_Proj(irg, get_nodes_Block(node), node, mode_M, 0);
}
}
data->except_mem = new_Phi(n_except, in, mode_M);
/* This Phi is a merge, therefor needs not be kept alive.
It might be optimized away, though. */
if (get_End_keepalive(end, get_End_n_keepalives(end)-1 )
- == data->except_mem)
+ == data->except_mem)
set_End_keepalive(end, get_End_n_keepalives(end)-1, new_Bad());
DEL_ARR_F(in);
}
/* Abhängigkeiten vom Start-Block und den Filter-Operationen im
* Start-Block auf den Aufrufer hinzufügen. */
static void construct_start(entity * caller, entity * callee,
- ir_node * call, ir_node * exec) {
+ ir_node * call, ir_node * exec) {
irg_data_t *data = get_entity_link(callee);
ir_graph *irg = get_entity_irg(callee);
ir_node *start = get_irg_start(irg);
assert(irg);
assert(get_entity_peculiarity(callee) == peculiarity_existent); /* Else data is not initalized. */
assert((0 <= data->count) &&
- (data->count < get_Block_cg_n_cfgpreds(get_nodes_Block(start))));
+ (data->count < get_Block_cg_n_cfgpreds(get_nodes_Block(start))));
set_Block_cg_cfgpred(get_nodes_Block(start), data->count, exec);
for (filter = get_irn_link(start); filter; filter = get_irn_link(filter)) {
if (get_Proj_pred(filter) == start) {
switch ((int) get_Proj_proj(filter)) {
case pns_global_store:
- set_Filter_cg_pred(filter, data->count, get_Call_mem(call));
- break;
+ set_Filter_cg_pred(filter, data->count, get_Call_mem(call));
+ break;
case pns_frame_base:
- /* "frame_base" wird nur durch Unknown dargestellt. Man kann ihn aber
- * auch explizit darstellen, wenn sich daraus Vorteile für die
- * Datenflussanalyse ergeben. */
- set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter)));
- break;
+ /* "frame_base" wird nur durch Unknown dargestellt. Man kann ihn aber
+ * auch explizit darstellen, wenn sich daraus Vorteile für die
+ * Datenflussanalyse ergeben. */
+ set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter)));
+ break;
case pns_globals:
- /* "globals" wird nur durch Unknown dargestellt. Man kann ihn aber auch
- * explizit darstellen, wenn sich daraus Vorteile für die
- * Datenflussanalyse ergeben. */
- set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter)));
- break;
+ /* "globals" wird nur durch Unknown dargestellt. Man kann ihn aber auch
+ * explizit darstellen, wenn sich daraus Vorteile für die
+ * Datenflussanalyse ergeben. */
+ set_Filter_cg_pred(filter, data->count, get_cg_Unknown(intern_get_irn_mode(filter)));
+ break;
default:
- /* not reached */
- assert(0 && "not reached");
- break;
+ /* not reached */
+ assert(0 && "not reached");
+ break;
}
} else {
set_Filter_cg_pred(filter, data->count, get_Call_param(call, get_Proj_proj(filter)));
for (i = 0; i < length; ++i) {
if (data[i]) { /* explicit */
if (data[i]->reg) {
- in[i] = data[i]->mem;
+ in[i] = data[i]->mem;
} else {
- in[i] = new_Bad();
+ in[i] = new_Bad();
}
} else { /* unknown */
in[i] = get_cg_Unknown(mode_M);
for (i = 0; i < length; ++i) {
if (data[i]) { /* explicit */
if (data[i]->except) {
- in[i] = data[i]->except_mem;
+ in[i] = data[i]->except_mem;
} else {
- in[i] = new_Bad();
+ in[i] = new_Bad();
}
} else { /* unknown */
in[i] = get_cg_Unknown(mode_M);
for (i = 0; i < length; ++i) {
if (data[i]) { /* explicit */
if (data[i]->reg) {
- in[i] = data[i]->res[pos];
+ in[i] = data[i]->res[pos];
} else {
- in[i] = new_Bad();
+ in[i] = new_Bad();
}
} else { /* unknown */
in[i] = get_cg_Unknown(m);
int n_callees = get_Call_n_callees(call);
ir_node * post_block = get_nodes_Block(call); /* block nach dem Aufruf */
ir_node * pre_block = create_Block(get_Block_n_cfgpreds(post_block),
- get_Block_cfgpred_arr(post_block)); /* block vor dem Aufruf (mit CallBegin) */
+ get_Block_cfgpred_arr(post_block)); /* block vor dem Aufruf (mit CallBegin) */
ir_node * except_block = NULL, * proj;
ir_node * jmp = new_Break(); /* Sprung für intraprozedurale Darstellung (in
- * pre_block) */
+ * pre_block) */
ir_node * call_begin = new_CallBegin(call); /* (in pre_block) */
ir_node ** in = NEW_ARR_F(ir_node *, n_callees);
entity * caller = get_irg_ent(current_ir_graph); /* entity des aktuellen ir_graph */
* besitzen. Diese müssen dann auch noch für den pre_block gesetzt werden. */
if (get_Block_cg_cfgpred_arr(post_block)) {
set_Block_cg_cfgpred_arr(pre_block, get_Block_cg_n_cfgpreds(post_block),
- get_Block_cg_cfgpred_arr(post_block));
+ get_Block_cg_cfgpred_arr(post_block));
remove_Block_cg_cfgpred_arr(post_block);
}
for (i = 0; i < n_callees; ++i) {
if (data[i]) { /* explicit */
if (data[i]->reg) {
- in[i] = new_r_Proj(irgs[i], get_nodes_Block(data[i]->reg),
- data[i]->reg, mode_X, data[i]->count);
+ in[i] = new_r_Proj(irgs[i], get_nodes_Block(data[i]->reg),
+ data[i]->reg, mode_X, data[i]->count);
} else {
- in[i] = new_Bad();
+ in[i] = new_Bad();
}
} else { /* unknown */
in[i] = get_cg_Unknown(mode_X);
bool exc_to_end = false;
if (exc_branches_to_end(current_ir_graph, proj)) {
/* The Call aborts the procedure if it returns with an exception.
- If this is an outermost procedure, the normal handling of exceptions
- will generate a Break that goes to the end block. This is illegal
- Frim. So directly branch to the end block with all exceptions. */
+ If this is an outermost procedure, the normal handling of exceptions
+ will generate a Break that goes to the end block. This is illegal
+	 Firm. So directly branch to the end block with all exceptions. */
exc_to_end = true;
if (is_outermost_graph(current_ir_graph)) {
- except_block = get_irg_end_block(current_ir_graph);
+ except_block = get_irg_end_block(current_ir_graph);
} else {
- irg_data_t * tmp_data = get_entity_link(get_irg_ent(current_ir_graph));
- except_block = get_nodes_block(tmp_data->except);
+ irg_data_t * tmp_data = get_entity_link(get_irg_ent(current_ir_graph));
+ except_block = get_nodes_block(tmp_data->except);
}
} else
{
for (i = 0; i < n_callees; ++i) {
entity * callee = get_Call_callee(call, i);
if (data[i]) { /* explicit */
- if (data[i]->except) {
- in[i] = new_r_Proj(get_entity_irg(callee), get_nodes_Block(data[i]->except),
- data[i]->except, mode_X, data[i]->count);
- } else {
- in[i] = new_Bad();
- }
+ if (data[i]->except) {
+ in[i] = new_r_Proj(get_entity_irg(callee), get_nodes_Block(data[i]->except),
+ data[i]->except, mode_X, data[i]->count);
+ } else {
+ in[i] = new_Bad();
+ }
} else { /* unknown */
- in[i] = get_cg_Unknown(mode_X);
+ in[i] = get_cg_Unknown(mode_X);
}
}
* (interporcedural view is set!)
* Do not add the exc pred of end we are replacing! */
for (i = get_Block_n_cfgpreds(except_block)-1; i >= 0; --i) {
- ir_node *pred = get_Block_cfgpred(except_block, i);
- if (pred != proj) {
- ARR_APP1(ir_node *, in, pred);
- preds++;
- }
+ ir_node *pred = get_Block_cfgpred(except_block, i);
+ if (pred != proj) {
+ ARR_APP1(ir_node *, in, pred);
+ preds++;
+ }
}
}
set_Block_cg_cfgpred_arr(except_block, preds, in);
if (skip_Proj(get_Proj_pred(proj)) != call) continue;
if (get_Proj_pred(proj) == call) {
if (get_Proj_proj(proj) == 0) { /* memory */
- ir_node * filter;
-
- set_nodes_Block(proj, post_block);
- filter = exchange_proj(proj);
- /* filter in die Liste der Phis aufnehmen */
- if (get_irn_link(filter) == NULL) { /* note CSE */
- set_irn_link(filter, get_irn_link(post_block));
- set_irn_link(post_block, filter);
- }
- fill_mem(n_callees, data, in);
- set_Filter_cg_pred_arr(filter, n_callees, in);
+ ir_node * filter;
+
+ set_nodes_Block(proj, post_block);
+ filter = exchange_proj(proj);
+ /* filter in die Liste der Phis aufnehmen */
+ if (get_irn_link(filter) == NULL) { /* note CSE */
+ set_irn_link(filter, get_irn_link(post_block));
+ set_irn_link(post_block, filter);
+ }
+ fill_mem(n_callees, data, in);
+ set_Filter_cg_pred_arr(filter, n_callees, in);
} else if (get_Proj_proj(proj) == 1) { /* except */
- /* nothing: siehe oben */
+ /* nothing: siehe oben */
} else if (get_Proj_proj(proj) == 2) { /* results */
- /* nothing */
+ /* nothing */
} else if (get_Proj_proj(proj) == 3) { /* except_mem */
ir_node * filter;
- set_nodes_Block(proj, post_block);
- assert(except_block);
- set_irg_current_block(current_ir_graph, except_block);
- filter = exchange_proj(proj);
- /* filter in die Liste der Phis aufnehmen */
- if (get_irn_link(filter) == NULL) { /* note CSE */
- set_irn_link(filter, get_irn_link(except_block));
- set_irn_link(except_block, filter);
- }
- set_irg_current_block(current_ir_graph, post_block);
- fill_except_mem(n_callees, data, in);
- set_Filter_cg_pred_arr(filter, n_callees, in);
+ set_nodes_Block(proj, post_block);
+ assert(except_block);
+ set_irg_current_block(current_ir_graph, except_block);
+ filter = exchange_proj(proj);
+ /* filter in die Liste der Phis aufnehmen */
+ if (get_irn_link(filter) == NULL) { /* note CSE */
+ set_irn_link(filter, get_irn_link(except_block));
+ set_irn_link(except_block, filter);
+ }
+ set_irg_current_block(current_ir_graph, post_block);
+ fill_except_mem(n_callees, data, in);
+ set_Filter_cg_pred_arr(filter, n_callees, in);
} else {
- assert(0 && "not reached");
+ assert(0 && "not reached");
}
} else { /* result */
ir_node * filter;
filter = exchange_proj(proj);
/* filter in die Liste der Phis aufnehmen */
if (get_irn_link(filter) == NULL) { /* not CSE */
- set_irn_link(filter, get_irn_link(post_block));
- set_irn_link(post_block, filter);
+ set_irn_link(filter, get_irn_link(post_block));
+ set_irn_link(post_block, filter);
}
fill_result(get_Proj_proj(filter), n_callees, data, in, intern_get_irn_mode(filter));
set_Filter_cg_pred_arr(filter, n_callees, in);
current_ir_graph = get_irp_irg(i);
for (node = get_irn_link(get_irg_end(current_ir_graph)); node; node = get_irn_link(node)) {
if (intern_get_irn_op(node) == op_Call) {
- int n_callees = get_Call_n_callees(node);
- if (n_callees > 1 || (n_callees == 1 && get_Call_callee(node, 0) != NULL)) {
- construct_call(node);
- }
+ int n_callees = get_Call_n_callees(node);
+ if (n_callees > 1 || (n_callees == 1 && get_Call_callee(node, 0) != NULL)) {
+ construct_call(node);
+ }
}
}
}
} else if (intern_get_irn_op(node) == op_Call) {
remove_Call_callee_arr(node);
} else if (intern_get_irn_op(node) == op_Proj) {
- // some ProjX end up in strage blocks.
+      /* some ProjX end up in strange blocks. */
set_nodes_block(node, get_nodes_block(get_Proj_pred(node)));
}
}
set_Block_matured(res, 1);
set_Block_block_visited(res, 0);
- //res->attr.block.exc = exc_normal;
- //res->attr.block.handler_entry = 0;
+ /* res->attr.block.exc = exc_normal; */
+ /* res->attr.block.handler_entry = 0; */
res->attr.block.irg = irg;
res->attr.block.backedge = new_backedge_arr(irg->obst, arity);
res->attr.block.in_cg = NULL;
ir_node *res;
res = new_ir_node (db, irg, block, op_Start, mode_T, 0, NULL);
- //res->attr.start.irg = irg;
+ /* res->attr.start.irg = irg; */
irn_vrfy_irg (res, irg);
return res;
INLINE ir_node *
new_rd_Proj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
- long proj)
+ long proj)
{
ir_node *in[1];
ir_node *res;
INLINE ir_node *
new_rd_defaultProj (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *arg,
- long max_proj)
+ long max_proj)
{
ir_node *res;
assert((arg->op==op_Cond) && (get_irn_mode(arg->in[1]) == mode_Iu));
INLINE ir_node *
new_rd_Add (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Sub (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Minus (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode)
+ ir_node *op, ir_mode *mode)
{
ir_node *in[1];
ir_node *res;
INLINE ir_node *
new_rd_Mul (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Quot (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2)
+ ir_node *memop, ir_node *op1, ir_node *op2)
{
ir_node *in[3] ;
ir_node *res;
INLINE ir_node *
new_rd_DivMod (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2)
+ ir_node *memop, ir_node *op1, ir_node *op2)
{
ir_node *in[3];
ir_node *res;
INLINE ir_node *
new_rd_Div (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2)
+ ir_node *memop, ir_node *op1, ir_node *op2)
{
ir_node *in[3];
ir_node *res;
INLINE ir_node *
new_rd_Mod (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2)
+ ir_node *memop, ir_node *op1, ir_node *op2)
{
ir_node *in[3];
ir_node *res;
INLINE ir_node *
new_rd_And (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Or (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Eor (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode)
+ ir_node *op1, ir_node *op2, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Not (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode)
+ ir_node *op, ir_mode *mode)
{
ir_node *in[1];
ir_node *res;
INLINE ir_node *
new_rd_Shl (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode)
+ ir_node *op, ir_node *k, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Shr (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode)
+ ir_node *op, ir_node *k, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Shrs (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode)
+ ir_node *op, ir_node *k, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Rot (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode)
+ ir_node *op, ir_node *k, ir_mode *mode)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Abs (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode)
+ ir_node *op, ir_mode *mode)
{
ir_node *in[1];
ir_node *res;
INLINE ir_node *
new_rd_Cmp (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2)
+ ir_node *op1, ir_node *op2)
{
ir_node *in[2];
ir_node *res;
ir_node *
new_rd_Call (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *callee, int arity, ir_node **in, type *tp)
+ ir_node *callee, int arity, ir_node **in, type *tp)
{
ir_node **r_in;
ir_node *res;
INLINE ir_node *
new_rd_Load (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr)
+ ir_node *store, ir_node *adr)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Store (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr, ir_node *val)
+ ir_node *store, ir_node *adr, ir_node *val)
{
ir_node *in[3];
ir_node *res;
INLINE ir_node *
new_rd_Alloc (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *size, type *alloc_type, where_alloc where)
+ ir_node *size, type *alloc_type, where_alloc where)
{
ir_node *in[2];
ir_node *res;
INLINE ir_node *
new_rd_Free (dbg_info* db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *ptr, ir_node *size, type *free_type)
+ ir_node *ptr, ir_node *size, type *free_type)
{
ir_node *in[3];
ir_node *res;
ir_node *
new_rd_InstOf (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *objptr, type *ent)
+ ir_node *objptr, type *ent)
{
ir_node **r_in;
ir_node *res;
res->attr.i.tori.ptrinfo = (ident *)value;
} else {
assert ( ( (symkind == type_tag)
- || (symkind == size))
+ || (symkind == size))
&& (is_type(value)));
res->attr.i.tori.typ = (type *)value;
}
ir_node *res;
in[0] = get_Call_ptr(call);
res = new_ir_node (db, irg, block, op_CallBegin, mode_T, 1, in);
- //res->attr.callbegin.irg = irg;
+ /* res->attr.callbegin.irg = irg; */
res->attr.callbegin.call = call;
res = optimize_node (res);
irn_vrfy_irg (res, irg);
ir_node *res;
res = new_ir_node (db, irg, block, op_EndReg, mode_T, -1, NULL);
- //res->attr.end.irg = irg;
+ /* res->attr.end.irg = irg; */
irn_vrfy_irg (res, irg);
return res;
ir_node *res;
res = new_ir_node (db, irg, block, op_EndExcept, mode_T, -1, NULL);
- //res->attr.end.irg = irg;
+ /* res->attr.end.irg = irg; */
irn_vrfy_irg (res, irg);
return res;
INLINE ir_node *
new_rd_Filter (dbg_info *db, ir_graph *irg, ir_node *block, ir_node *arg, ir_mode *mode,
- long proj)
+ long proj)
{
ir_node *in[1];
ir_node *res;
ir_node *
new_rd_FuncCall (dbg_info* db, ir_graph *irg, ir_node *block,
- ir_node *callee, int arity, ir_node **in, type *tp)
+ ir_node *callee, int arity, ir_node **in, type *tp)
{
ir_node **r_in;
ir_node *res;
return new_rd_Cond(NULL, irg, block, c);
}
INLINE ir_node *new_r_Return (ir_graph *irg, ir_node *block,
- ir_node *store, int arity, ir_node **in) {
+ ir_node *store, int arity, ir_node **in) {
return new_rd_Return(NULL, irg, block, store, arity, in);
}
INLINE ir_node *new_r_Raise (ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *obj) {
+ ir_node *store, ir_node *obj) {
return new_rd_Raise(NULL, irg, block, store, obj);
}
INLINE ir_node *new_r_Const (ir_graph *irg, ir_node *block,
- ir_mode *mode, tarval *con) {
+ ir_mode *mode, tarval *con) {
return new_rd_Const(NULL, irg, block, mode, con);
}
INLINE ir_node *new_r_SymConst (ir_graph *irg, ir_node *block,
return new_rd_SymConst(NULL, irg, block, value, symkind);
}
INLINE ir_node *new_r_Sel (ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *objptr, int n_index, ir_node **index,
- entity *ent) {
+ ir_node *objptr, int n_index, ir_node **index,
+ entity *ent) {
return new_rd_Sel(NULL, irg, block, store, objptr, n_index, index, ent);
}
INLINE ir_node *new_r_InstOf (ir_graph *irg, ir_node *block, ir_node *store, ir_node *objptr,
- type *ent) {
+ type *ent) {
return (new_rd_InstOf (NULL, irg, block, store, objptr, ent));
}
INLINE ir_node *new_r_Call (ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *callee, int arity, ir_node **in,
- type *tp) {
+ ir_node *callee, int arity, ir_node **in,
+ type *tp) {
return new_rd_Call(NULL, irg, block, store, callee, arity, in, tp);
}
INLINE ir_node *new_r_Add (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Add(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Sub (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Sub(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Minus (ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode) {
+ ir_node *op, ir_mode *mode) {
return new_rd_Minus(NULL, irg, block, op, mode);
}
INLINE ir_node *new_r_Mul (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Mul(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Quot (ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2) {
+ ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Quot(NULL, irg, block, memop, op1, op2);
}
INLINE ir_node *new_r_DivMod (ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2) {
+ ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_DivMod(NULL, irg, block, memop, op1, op2);
}
INLINE ir_node *new_r_Div (ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2) {
+ ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Div(NULL, irg, block, memop, op1, op2);
}
INLINE ir_node *new_r_Mod (ir_graph *irg, ir_node *block,
- ir_node *memop, ir_node *op1, ir_node *op2) {
+ ir_node *memop, ir_node *op1, ir_node *op2) {
return new_rd_Mod(NULL, irg, block, memop, op1, op2);
}
INLINE ir_node *new_r_Abs (ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode) {
+ ir_node *op, ir_mode *mode) {
return new_rd_Abs(NULL, irg, block, op, mode);
}
INLINE ir_node *new_r_And (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_And(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Or (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Or(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Eor (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2, ir_mode *mode) {
+ ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_rd_Eor(NULL, irg, block, op1, op2, mode);
}
INLINE ir_node *new_r_Not (ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode) {
+ ir_node *op, ir_mode *mode) {
return new_rd_Not(NULL, irg, block, op, mode);
}
INLINE ir_node *new_r_Cmp (ir_graph *irg, ir_node *block,
- ir_node *op1, ir_node *op2) {
+ ir_node *op1, ir_node *op2) {
return new_rd_Cmp(NULL, irg, block, op1, op2);
}
INLINE ir_node *new_r_Shl (ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode) {
+ ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shl(NULL, irg, block, op, k, mode);
}
INLINE ir_node *new_r_Shr (ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode) {
+ ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shr(NULL, irg, block, op, k, mode);
}
INLINE ir_node *new_r_Shrs (ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode) {
+ ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shrs(NULL, irg, block, op, k, mode);
}
INLINE ir_node *new_r_Rot (ir_graph *irg, ir_node *block,
- ir_node *op, ir_node *k, ir_mode *mode) {
+ ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Rot(NULL, irg, block, op, k, mode);
}
INLINE ir_node *new_r_Conv (ir_graph *irg, ir_node *block,
- ir_node *op, ir_mode *mode) {
+ ir_node *op, ir_mode *mode) {
return new_rd_Conv(NULL, irg, block, op, mode);
}
INLINE ir_node *new_r_Cast (ir_graph *irg, ir_node *block, ir_node *op, type *to_tp) {
return new_rd_Cast(NULL, irg, block, op, to_tp);
}
INLINE ir_node *new_r_Phi (ir_graph *irg, ir_node *block, int arity,
- ir_node **in, ir_mode *mode) {
+ ir_node **in, ir_mode *mode) {
return new_rd_Phi(NULL, irg, block, arity, in, mode);
}
INLINE ir_node *new_r_Load (ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr) {
+ ir_node *store, ir_node *adr) {
return new_rd_Load(NULL, irg, block, store, adr);
}
INLINE ir_node *new_r_Store (ir_graph *irg, ir_node *block,
- ir_node *store, ir_node *adr, ir_node *val) {
+ ir_node *store, ir_node *adr, ir_node *val) {
return new_rd_Store(NULL, irg, block, store, adr, val);
}
INLINE ir_node *new_r_Alloc (ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *size, type *alloc_type, where_alloc where) {
+ ir_node *size, type *alloc_type, where_alloc where) {
return new_rd_Alloc(NULL, irg, block, store, size, alloc_type, where);
}
INLINE ir_node *new_r_Free (ir_graph *irg, ir_node *block, ir_node *store,
- ir_node *ptr, ir_node *size, type *free_type) {
+ ir_node *ptr, ir_node *size, type *free_type) {
return new_rd_Free(NULL, irg, block, store, ptr, size, free_type);
}
INLINE ir_node *new_r_Sync (ir_graph *irg, ir_node *block, int arity, ir_node **in) {
return new_rd_Sync(NULL, irg, block, arity, in);
}
INLINE ir_node *new_r_Proj (ir_graph *irg, ir_node *block, ir_node *arg,
- ir_mode *mode, long proj) {
+ ir_mode *mode, long proj) {
return new_rd_Proj(NULL, irg, block, arg, mode, proj);
}
INLINE ir_node *new_r_defaultProj (ir_graph *irg, ir_node *block, ir_node *arg,
- long max_proj) {
+ long max_proj) {
return new_rd_defaultProj(NULL, irg, block, arg, max_proj);
}
INLINE ir_node *new_r_Tuple (ir_graph *irg, ir_node *block,
- int arity, ir_node **in) {
+ int arity, ir_node **in) {
return new_rd_Tuple(NULL, irg, block, arity, in );
}
INLINE ir_node *new_r_Id (ir_graph *irg, ir_node *block,
- ir_node *val, ir_mode *mode) {
+ ir_node *val, ir_mode *mode) {
return new_rd_Id(NULL, irg, block, val, mode);
}
INLINE ir_node *new_r_Bad (ir_graph *irg) {
return new_rd_Break(NULL, irg, block);
}
INLINE ir_node *new_r_Filter (ir_graph *irg, ir_node *block, ir_node *arg,
- ir_mode *mode, long proj) {
+ ir_mode *mode, long proj) {
return new_rd_Filter(NULL, irg, block, arg, mode, proj);
}
INLINE ir_node *new_r_FuncCall (ir_graph *irg, ir_node *block,
- ir_node *callee, int arity, ir_node **in,
- type *tp) {
+ ir_node *callee, int arity, ir_node **in,
+ type *tp) {
return new_rd_FuncCall(NULL, irg, block, callee, arity, in, tp);
}
ir_node *res;
res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
- op_Start, mode_T, 0, NULL);
- //res->attr.start.irg = current_ir_graph;
+ op_Start, mode_T, 0, NULL);
+ /* res->attr.start.irg = current_ir_graph; */
res = optimize_node (res);
irn_vrfy_irg (res, current_ir_graph);
{
ir_node *res;
res = new_ir_node (db, current_ir_graph, current_ir_graph->current_block,
- op_End, mode_X, -1, NULL);
+ op_End, mode_X, -1, NULL);
res = optimize_node (res);
irn_vrfy_irg (res, current_ir_graph);
\|/ / |/_ \
get_r_value_internal |
| |
- | |
- \|/ \|/
- new_rd_Phi0 new_rd_Phi_in
+ | |
+ \|/ \|/
+ new_rd_Phi0 new_rd_Phi_in
* *************************************************************************** */
static INLINE ir_node *
alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
- int arity, ir_node **in) {
+ int arity, ir_node **in) {
ir_node *res;
ir_node **stack = current_ir_graph->Phi_in_stack->stack;
int pos = current_ir_graph->Phi_in_stack->pos;
*/
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
- ir_node **in, int ins)
+ ir_node **in, int ins)
{
int i;
ir_node *res, *known;
* The recursion that visited this node and set the flag did not
return yet. We are computing a value in a loop and need to
break the recursion without knowing the result yet.
- @@@ strange case. Straight forward we would create a Phi before
- starting the computation of it's predecessors. In this case we will
- find a Phi here in any case. The problem is that this implementation
- only creates a Phi after computing the predecessors, so that it is
- hard to compute self references of this Phi. @@@
+ @@@ strange case. Straight forward we would create a Phi before
+	 starting the computation of its predecessors. In this case we will
+ find a Phi here in any case. The problem is that this implementation
+ only creates a Phi after computing the predecessors, so that it is
+ hard to compute self references of this Phi. @@@
There is no simple check for the second subcase. Therefore we check
for a second visit and treat all such cases as the second subcase.
Anyways, the basic situation is the same: we reached a block
implementation that relies on the fact that an obstack is a stack and
will return a node with the same address on different allocations.
Look also at phi_merge and new_rd_phi_in to understand this.
- @@@ Unfortunately this does not work, see testprogram
- three_cfpred_example.
+ @@@ Unfortunately this does not work, see testprogram
+ three_cfpred_example.
*/
printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
assert (mode->code >= irm_F && mode->code <= irm_P);
res = new_rd_Const (NULL, current_ir_graph, block, mode,
- tarval_mode_null[mode->code]);
+ tarval_mode_null[mode->code]);
}
/* The local valid value is available now. */
static INLINE ir_node *
new_rd_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
- ir_node **in, int ins)
+ ir_node **in, int ins)
{
int i;
ir_node *res, *known;
known = res;
for (i=0; i < ins; ++i)
{
- assert(in[i]);
+ assert(in[i]);
if (in[i]==res || in[i]==known || is_Bad(in[i])) continue;
int opt;
arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
- sizeof(ir_node *)*current_ir_graph->n_loc);
+ sizeof(ir_node *)*current_ir_graph->n_loc);
/* turn off optimization before allocating Proj nodes, as res isn't
finished yet. */
opt = get_opt_optimize(); set_optimize(0);
if (!res) {
if (block->attr.block.graph_arr[pos]) {
/* There was a set_value after the cfOp and no get_value before that
- set_value. We must build a Phi node now. */
+ set_value. We must build a Phi node now. */
if (block->attr.block.matured) {
- int ins = intern_get_irn_arity(block);
- ir_node **nin;
- NEW_ARR_A (ir_node *, nin, ins);
- res = phi_merge(block, pos, mode, nin, ins);
+ int ins = intern_get_irn_arity(block);
+ ir_node **nin;
+ NEW_ARR_A (ir_node *, nin, ins);
+ res = phi_merge(block, pos, mode, nin, ins);
} else {
- res = new_rd_Phi0 (current_ir_graph, block, mode);
- res->attr.phi0_pos = pos;
- res->link = block->link;
- block->link = res;
+ res = new_rd_Phi0 (current_ir_graph, block, mode);
+ res->attr.phi0_pos = pos;
+ res->link = block->link;
+ block->link = res;
}
assert(res);
/* @@@ tested by Flo: set_frag_value(frag_arr, pos, res);
- but this should be better: (remove comment if this works) */
+ but this should be better: (remove comment if this works) */
/* It's a Phi, we can write this into all graph_arrs with NULL */
set_frag_value(block->attr.block.graph_arr, pos, res);
} else {
else
block->attr.block.graph_arr[pos] = new_Const(mode, tarval_bad);
/* We don't need to care about exception ops in the start block.
- There are none by definition. */
+ There are none by definition. */
return block->attr.block.graph_arr[pos];
} else {
phi0 = new_rd_Phi0(current_ir_graph, block, mode);
block->attr.block.graph_arr[pos] = phi0;
#if PRECISE_EXC_CONTEXT
/* Set graph_arr for fragile ops. Also here we should break recursion.
- We could choose a cyclic path through an cfop. But the recursion would
- break at some point. */
+ We could choose a cyclic path through an cfop. But the recursion would
+ break at some point. */
set_frag_value(block->attr.block.graph_arr, pos, phi0);
#endif
}
assert (prevCfOp);
if (is_Bad(prevCfOp)) {
/* In case a Cond has been optimized we would get right to the start block
- with an invalid definition. */
+ with an invalid definition. */
nin[i-1] = new_Bad();
continue;
}
if (!is_Bad(prevBlock)) {
#if PRECISE_EXC_CONTEXT
if (is_fragile_op(prevCfOp) && (intern_get_irn_op (prevCfOp) != op_Bad)) {
- assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
- nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
+ assert(get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode));
+ nin[i-1] = get_r_frag_value_internal (prevBlock, prevCfOp, pos, mode);
} else
#endif
- nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
+ nin[i-1] = get_r_value_internal (prevBlock, pos, mode);
} else {
nin[i-1] = new_Bad();
}
* The recursion that visited this node and set the flag did not
return yet. We are computing a value in a loop and need to
break the recursion. This case only happens if we visited
- the same block with phi_merge before, which inserted a Phi0.
- So we return the Phi0.
+ the same block with phi_merge before, which inserted a Phi0.
+ So we return the Phi0.
*/
/* case 4 -- already visited. */
printf("Error: no value set. Use of undefined variable. Initializing to zero.\n");
assert (mode->code >= irm_F && mode->code <= irm_P);
res = new_rd_Const (NULL, current_ir_graph, block, mode,
- get_mode_null(mode));
+ get_mode_null(mode));
}
/* The local valid value is available now. */
new_d_Phi (dbg_info* db, int arity, ir_node **in, ir_mode *mode)
{
return new_rd_Phi (db, current_ir_graph, current_ir_graph->current_block,
- arity, in, mode);
+ arity, in, mode);
}
ir_node *
new_d_Const (dbg_info* db, ir_mode *mode, tarval *con)
{
return new_rd_Const (db, current_ir_graph, current_ir_graph->start_block,
- mode, con);
+ mode, con);
}
ir_node *
new_d_Const_type (dbg_info* db, ir_mode *mode, tarval *con, type *tp)
{
return new_rd_Const_type (db, current_ir_graph, current_ir_graph->start_block,
- mode, con, tp);
+ mode, con, tp);
}
new_d_Id (dbg_info* db, ir_node *val, ir_mode *mode)
{
return new_rd_Id (db, current_ir_graph, current_ir_graph->current_block,
- val, mode);
+ val, mode);
}
ir_node *
new_d_Proj (dbg_info* db, ir_node *arg, ir_mode *mode, long proj)
{
return new_rd_Proj (db, current_ir_graph, current_ir_graph->current_block,
- arg, mode, proj);
+ arg, mode, proj);
}
ir_node *
new_d_Conv (dbg_info* db, ir_node *op, ir_mode *mode)
{
return new_rd_Conv (db, current_ir_graph, current_ir_graph->current_block,
- op, mode);
+ op, mode);
}
ir_node *
new_d_Tuple (dbg_info* db, int arity, ir_node **in)
{
return new_rd_Tuple (db, current_ir_graph, current_ir_graph->current_block,
- arity, in);
+ arity, in);
}
ir_node *
new_d_Add (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_Add (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
ir_node *
new_d_Sub (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_Sub (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
new_d_Minus (dbg_info* db, ir_node *op, ir_mode *mode)
{
return new_rd_Minus (db, current_ir_graph, current_ir_graph->current_block,
- op, mode);
+ op, mode);
}
ir_node *
new_d_Mul (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_Mul (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
ir_node *
{
ir_node *res;
res = new_rd_Quot (db, current_ir_graph, current_ir_graph->current_block,
- memop, op1, op2);
+ memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(intern_get_irn_op(res) == op_Quot)) /* Could be optimized away. */
{
ir_node *res;
res = new_rd_DivMod (db, current_ir_graph, current_ir_graph->current_block,
- memop, op1, op2);
+ memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(intern_get_irn_op(res) == op_DivMod)) /* Could be optimized away. */
{
ir_node *res;
res = new_rd_Div (db, current_ir_graph, current_ir_graph->current_block,
- memop, op1, op2);
+ memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(intern_get_irn_op(res) == op_Div)) /* Could be optimized away. */
{
ir_node *res;
res = new_rd_Mod (db, current_ir_graph, current_ir_graph->current_block,
- memop, op1, op2);
+ memop, op1, op2);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(intern_get_irn_op(res) == op_Mod)) /* Could be optimized away. */
new_d_And (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_And (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
ir_node *
new_d_Or (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_Or (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
ir_node *
new_d_Eor (dbg_info* db, ir_node *op1, ir_node *op2, ir_mode *mode)
{
return new_rd_Eor (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2, mode);
+ op1, op2, mode);
}
ir_node *
new_d_Not (dbg_info* db, ir_node *op, ir_mode *mode)
{
return new_rd_Not (db, current_ir_graph, current_ir_graph->current_block,
- op, mode);
+ op, mode);
}
ir_node *
new_d_Shl (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
return new_rd_Shl (db, current_ir_graph, current_ir_graph->current_block,
- op, k, mode);
+ op, k, mode);
}
ir_node *
new_d_Shr (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
return new_rd_Shr (db, current_ir_graph, current_ir_graph->current_block,
- op, k, mode);
+ op, k, mode);
}
ir_node *
new_d_Shrs (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
return new_rd_Shrs (db, current_ir_graph, current_ir_graph->current_block,
- op, k, mode);
+ op, k, mode);
}
ir_node *
new_d_Rot (dbg_info* db, ir_node *op, ir_node *k, ir_mode *mode)
{
return new_rd_Rot (db, current_ir_graph, current_ir_graph->current_block,
- op, k, mode);
+ op, k, mode);
}
ir_node *
new_d_Abs (dbg_info* db, ir_node *op, ir_mode *mode)
{
return new_rd_Abs (db, current_ir_graph, current_ir_graph->current_block,
- op, mode);
+ op, mode);
}
ir_node *
new_d_Cmp (dbg_info* db, ir_node *op1, ir_node *op2)
{
return new_rd_Cmp (db, current_ir_graph, current_ir_graph->current_block,
- op1, op2);
+ op1, op2);
}
ir_node *
ir_node *
new_d_Call (dbg_info* db, ir_node *store, ir_node *callee, int arity, ir_node **in,
- type *tp)
+ type *tp)
{
ir_node *res;
res = new_rd_Call (db, current_ir_graph, current_ir_graph->current_block,
- store, callee, arity, in, tp);
+ store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(intern_get_irn_op(res) == op_Call)) /* Could be optimized away. */
new_d_Return (dbg_info* db, ir_node* store, int arity, ir_node **in)
{
return new_rd_Return (db, current_ir_graph, current_ir_graph->current_block,
- store, arity, in);
+ store, arity, in);
}
ir_node *
new_d_Raise (dbg_info* db, ir_node *store, ir_node *obj)
{
return new_rd_Raise (db, current_ir_graph, current_ir_graph->current_block,
- store, obj);
+ store, obj);
}
ir_node *
{
ir_node *res;
res = new_rd_Load (db, current_ir_graph, current_ir_graph->current_block,
- store, addr);
+ store, addr);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(intern_get_irn_op(res) == op_Load)) /* Could be optimized away. */
{
ir_node *res;
res = new_rd_Store (db, current_ir_graph, current_ir_graph->current_block,
- store, addr, val);
+ store, addr, val);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(intern_get_irn_op(res) == op_Store)) /* Could be optimized away. */
{
ir_node *res;
res = new_rd_Alloc (db, current_ir_graph, current_ir_graph->current_block,
- store, size, alloc_type, where);
+ store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
if ((current_ir_graph->phase_state == phase_building) &&
(intern_get_irn_op(res) == op_Alloc)) /* Could be optimized away. */
new_d_Free (dbg_info* db, ir_node *store, ir_node *ptr, ir_node *size, type *free_type)
{
return new_rd_Free (db, current_ir_graph, current_ir_graph->current_block,
- store, ptr, size, free_type);
+ store, ptr, size, free_type);
}
ir_node *
as the operand could as well be a pointer to a dynamic object. */
{
return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
- store, objptr, 0, NULL, ent);
+ store, objptr, 0, NULL, ent);
}
ir_node *
new_d_Sel (dbg_info* db, ir_node *store, ir_node *objptr, int n_index, ir_node **index, entity *sel)
{
return new_rd_Sel (db, current_ir_graph, current_ir_graph->current_block,
- store, objptr, n_index, index, sel);
+ store, objptr, n_index, index, sel);
}
ir_node *
new_d_InstOf (dbg_info *db, ir_node *store, ir_node *objptr, type *ent)
{
return (new_rd_InstOf (db, current_ir_graph, current_ir_graph->current_block,
- store, objptr, ent));
+ store, objptr, ent));
}
ir_node *
new_d_Sync (dbg_info* db, int arity, ir_node** in)
{
return new_rd_Sync (db, current_ir_graph, current_ir_graph->current_block,
- arity, in);
+ arity, in);
}
new_d_Confirm (dbg_info *db, ir_node *val, ir_node *bound, pn_Cmp cmp)
{
return new_rd_Confirm (db, current_ir_graph, current_ir_graph->current_block,
- val, bound, cmp);
+ val, bound, cmp);
}
ir_node *
new_d_Filter (dbg_info *db, ir_node *arg, ir_mode *mode, long proj)
{
return new_rd_Filter (db, current_ir_graph, current_ir_graph->current_block,
- arg, mode, proj);
+ arg, mode, proj);
}
ir_node *
new_d_FuncCall (dbg_info* db, ir_node *callee, int arity, ir_node **in,
- type *tp)
+ type *tp)
{
ir_node *res;
res = new_rd_FuncCall (db, current_ir_graph, current_ir_graph->current_block,
- callee, arity, in, tp);
+ callee, arity, in, tp);
return res;
}
res = new_ir_node (db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
current_ir_graph->current_block = res;
res->attr.block.matured = 0;
- //res->attr.block.exc = exc_normal;
- //res->attr.block.handler_entry = 0;
+ /* res->attr.block.exc = exc_normal; */
+ /* res->attr.block.handler_entry = 0; */
res->attr.block.irg = current_ir_graph;
res->attr.block.backedge = NULL;
res->attr.block.in_cg = NULL;
return new_d_InstOf (NULL, store, objptr, ent);
}
ir_node *new_Call (ir_node *store, ir_node *callee, int arity, ir_node **in,
- type *tp) {
+ type *tp) {
return new_d_Call(NULL, store, callee, arity, in, tp);
}
ir_node *new_Add (ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_Alloc(NULL, store, size, alloc_type, where);
}
ir_node *new_Free (ir_node *store, ir_node *ptr, ir_node *size,
- type *free_type) {
+ type *free_type) {
return new_d_Free(NULL, store, ptr, size, free_type);
}
ir_node *new_Sync (int arity, ir_node **in) {
* mature_block(loop_header);
* mature_block(loop_body);
*
- * get_value(loop_body, x); // gets the Phi in loop_header
- * set_value(loop_header, x); // sets the value the above get_value should
- * // have returned!!!
+ * get_value(loop_body, x); // gets the Phi in loop_header
+ * set_value(loop_header, x); // sets the value the above get_value should
+ * // have returned!!!
*
* Mature_block also fixes the number of inputs to the Phi nodes. Mature_block
* should be called as early as possible, as afterwards the generation of Phi
ir_loop *loop = get_irn_loop(n);
assert(loop);
fprintf(F, " in loop %d with depth %d\n",
- get_loop_loop_nr(loop), get_loop_depth(loop));
+ get_loop_loop_nr(loop), get_loop_depth(loop));
}
*/
/** open file for vcg graph */
fname = malloc (len * 2 + strlen(suffix1) + strlen(suffix2) + 5);
- //strncpy (fname, nm, len); /* copy the filename */
+ /* strncpy (fname, nm, len); */ /* copy the filename */
j = 0;
for (i = 0; i < len; ++i) { /* replase '/' in the name: escape by @. */
if (nm[i] == '/') {
/** open file for vcg graph */
fname = malloc (len * 2 + 5 + strlen(suffix));
- //strcpy (fname, name); /* copy the filename */
+ /* strcpy (fname, name);*/ /* copy the filename */
j = 0;
for (i = 0; i < len; ++i) { /* replase '/' in the name: escape by @. */
if (name[i] == '/') {
char *suffix;
rem = current_ir_graph;
- //printf("comparing %s %s\n", get_irg_dump_name(irg), dump_file_filter);
+ /* printf("comparing %s %s\n", get_irg_dump_name(irg), dump_file_filter); */
if(strncmp(get_irg_dump_name(irg),dump_file_filter,strlen(dump_file_filter))!=0) return;
int i;
char *suffix;
- //printf("comparing %s %s\n", get_irg_dump_name(irg), dump_file_filter);
+ /* printf("comparing %s %s\n", get_irg_dump_name(irg), dump_file_filter); */
if(strncmp(get_irg_dump_name(irg),dump_file_filter,strlen(dump_file_filter))!=0) return;
if (interprocedural_view) suffix = "-ip";
}
#if 0 /* Old version. Avoids Ids.
- This is not necessary: we do a postwalk, and get_irn_n
- removes ids anyways. So it's much cheaper to call the
- optimization less often and use the exchange() algorithm. */
+ This is not necessary: we do a postwalk, and get_irn_n
+ removes ids anyways. So it's much cheaper to call the
+ optimization less often and use the exchange() algorithm. */
static void
optimize_in_place_wrapper (ir_node *n, void *env) {
int i, irn_arity;
/* The end node looses it's flexible in array. This doesn't matter,
as dead node elimination builds End by hand, inlineing doesn't use
the End node. */
- //assert(n->op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC));
+ /* assert(n->op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC)); */
if (intern_get_irn_opcode(n) == iro_Block) {
block = NULL;
}
}
nn = new_ir_node(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- intern_get_irn_op(n),
- intern_get_irn_mode(n),
- new_arity,
- get_irn_in(n));
+ current_ir_graph,
+ block,
+ intern_get_irn_op(n),
+ intern_get_irn_mode(n),
+ new_arity,
+ get_irn_in(n));
/* Copy the attributes. These might point to additional data. If this
was allocated on the old obstack the pointers now are dangling. This
frees e.g. the memory of the graph_arr allocated in new_immBlock. */
irn_arity = intern_get_irn_arity(n);
for (i = 0; i < irn_arity; i++)
if (intern_get_irn_opcode(intern_get_irn_n(n, i)) != iro_Bad) {
- set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i)));
- /*if (is_backedge(n, i)) set_backedge(nn, j);*/
- j++;
+ set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i)));
+ /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+ j++;
}
/* repair the block visited flag from above misuse. Repair it in both
graphs so that the old one can still be used. */
We don't call optimize_in_place as it requires
that the fields in ir_graph are set properly. */
if ((get_opt_control_flow_straightening()) &&
- (get_Block_n_cfgpreds(nn) == 1) &&
- (intern_get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
+ (get_Block_n_cfgpreds(nn) == 1) &&
+ (intern_get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
} else if (intern_get_irn_opcode(n) == iro_Phi) {
/* Don't copy node if corresponding predecessor in block is Bad.
irn_arity = intern_get_irn_arity(n);
for (i = 0; i < irn_arity; i++)
if (intern_get_irn_opcode(intern_get_irn_n(block, i)) != iro_Bad) {
- set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i)));
- /*if (is_backedge(n, i)) set_backedge(nn, j);*/
- j++;
+ set_irn_n (nn, j, get_new_node(intern_get_irn_n(n, i)));
+ /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+ j++;
}
/* If the pre walker reached this Phi after the post walker visited the
block block_visited is > 0. */
oe = get_irg_end(current_ir_graph);
/* copy the end node by hand, allocate dynamic in array! */
ne = new_ir_node(get_irn_dbg_info(oe),
- current_ir_graph,
- NULL,
- op_End,
- mode_X,
- -1,
- NULL);
+ current_ir_graph,
+ NULL,
+ op_End,
+ mode_X,
+ -1,
+ NULL);
/* Copy the attributes. Well, there might be some in the future... */
copy_attrs(oe, ne);
set_new_node(oe, ne);
for (i = 0; i < irn_arity; i++) {
ka = intern_get_irn_intra_n(oe, i);
if ((intern_get_irn_op(ka) == op_Block) &&
- (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
+ (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
/* We must keep the block alive and copy everything reachable */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
irg_walk(ka, copy_node, copy_preds, NULL);
ka = intern_get_irn_intra_n(oe, i);
if ((intern_get_irn_op(ka) == op_Phi)) {
if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
- /* We didn't copy the Phi yet. */
- set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
- irg_walk(ka, copy_node, copy_preds, NULL);
+ /* We didn't copy the Phi yet. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+ irg_walk(ka, copy_node, copy_preds, NULL);
}
add_End_keepalive(ne, get_new_node(ka));
}
set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
set_irg_start_block(current_ir_graph,
- get_new_node(get_irg_start_block(current_ir_graph)));
+ get_new_node(get_irg_start_block(current_ir_graph)));
set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
new_in[0] = NULL;
new_irn_n = 1;
for (i = 1; i < old_irn_arity; i++) {
- irn = intern_get_irn_n(n, i);
- if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
+ irn = intern_get_irn_n(n, i);
+ if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
}
n->in = new_in;
} /* ir node has bad predecessors */
/* Relink Phi predeseccors if count of predeseccors changed */
if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
/* set new predeseccors in array
- n->in[0] remains the same block */
+ n->in[0] remains the same block */
new_irn_arity = 1;
for(i = 1; i < old_irn_arity; i++)
- if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];
+ if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];
ARR_SETLEN(ir_node *, n->in, new_irn_arity);
}
for the Call node, or do we branch directly to End on an exception?
exc_handling: 0 There is a handler.
1 Branches to End.
- 2 Exception handling not represented in Firm. -- */
+ 2 Exception handling not represented in Firm. -- */
{
ir_node *proj, *Mproj = NULL, *Xproj = NULL;
for (proj = (ir_node *)get_irn_link(call); proj; proj = (ir_node *)get_irn_link(proj)) {
if (get_Proj_proj(proj) == pn_Call_X_except) Xproj = proj;
if (get_Proj_proj(proj) == pn_Call_M_except) Mproj = proj;
}
- if (Mproj) { assert(Xproj); exc_handling = 0; } // Mproj
- else if (Xproj) { exc_handling = 1; } //!Mproj && Xproj
- else { exc_handling = 2; } //!Mproj && !Xproj
+ if (Mproj) { assert(Xproj); exc_handling = 0; } /* Mproj */
+ else if (Xproj) { exc_handling = 1; } /* !Mproj && Xproj */
+ else { exc_handling = 2; } /* !Mproj && !Xproj */
}
copying. */
set_irn_link(get_irg_start(called_graph), pre_call);
set_irn_visited(get_irg_start(called_graph),
- get_irg_visited(current_ir_graph));
+ get_irg_visited(current_ir_graph));
set_irn_link(get_irg_start_block(called_graph),
- get_nodes_Block(pre_call));
+ get_nodes_Block(pre_call));
set_irn_visited(get_irg_start_block(called_graph),
- get_irg_visited(current_ir_graph));
+ get_irg_visited(current_ir_graph));
/* Initialize for compaction of in arrays */
inc_irg_block_visited(current_ir_graph);
entities. */
/* @@@ endless loops are not copied!! -- they should be, I think... */
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
- get_irg_frame_type(called_graph));
+ get_irg_frame_type(called_graph));
/* Repair called_graph */
set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
-1: Block of Tuple.
0: Phi of all Memories of Return statements.
1: Jmp from new Block that merges the control flow from all exception
- predecessors of the old end block.
+ predecessors of the old end block.
2: Tuple of all arguments.
3: Phi of Exception memories.
In case the old Call directly branches to End on an exception we don't
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = intern_get_irn_n(end_bl, i);
- if (intern_get_irn_op(ret) == op_Return) {
- cf_pred[n_ret] = get_Return_res(ret, j);
- n_ret++;
- }
+ ret = intern_get_irn_n(end_bl, i);
+ if (intern_get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;
+ }
}
phi = new_Phi(n_ret, cf_pred, intern_get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_Block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
+ set_irn_link(phi, get_irn_link(post_bl));
+ set_irn_link(post_bl, phi);
}
}
set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
ir_node *ret;
ret = intern_get_irn_n(end_bl, i);
if (is_fragile_op(skip_Proj(ret)) || (intern_get_irn_op(skip_Proj(ret)) == op_Raise)) {
- cf_pred[n_exc] = ret;
- n_exc++;
+ cf_pred[n_exc] = ret;
+ n_exc++;
}
}
if (n_exc > 0) {
/* The Phi for the memories with the exception objects */
n_exc = 0;
for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = skip_Proj(intern_get_irn_n(end_bl, i));
- if (intern_get_irn_op(ret) == op_Call) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely that all cfops have the memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (intern_get_irn_op(ret) == op_Raise) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
- n_exc++;
- }
+ ir_node *ret;
+ ret = skip_Proj(intern_get_irn_n(end_bl, i));
+ if (intern_get_irn_op(ret) == op_Call) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
+ n_exc++;
+ } else if (is_fragile_op(ret)) {
+ /* We rely that all cfops have the memory output at the same position. */
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
+ n_exc++;
+ } else if (intern_get_irn_op(ret) == op_Raise) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
+ n_exc++;
+ }
}
set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
} else {
for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
cf_op = get_Block_cfgpred(end_bl, i);
if (intern_get_irn_op(cf_op) == op_Proj) {
- cf_op = get_Proj_pred(cf_op);
- if ((intern_get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
- // There are unoptimized tuples from inlineing before when no exc
- assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
- cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
- assert(intern_get_irn_op(cf_op) == op_Jmp);
- break;
- }
+ cf_op = get_Proj_pred(cf_op);
+ if ((intern_get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
+ /* There are unoptimized tuples from inlining before when no exc */
+ assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
+ cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
+ assert(intern_get_irn_op(cf_op) == op_Jmp);
+ break;
+ }
}
}
/* repair */
arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
for (j = 0; j < i; j++)
- cf_pred[j] = get_Block_cfgpred(end_bl, j);
+ cf_pred[j] = get_Block_cfgpred(end_bl, j);
for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
- cf_pred[j] = get_Block_cfgpred(bl, j-i);
+ cf_pred[j] = get_Block_cfgpred(bl, j-i);
for (j = j; j < arity; j++)
- cf_pred[j] = get_Block_cfgpred(end_bl, j-get_Block_n_cfgpreds(bl) +1);
+ cf_pred[j] = get_Block_cfgpred(end_bl, j-get_Block_n_cfgpreds(bl) +1);
set_irn_in(end_bl, arity, cf_pred);
free(cf_pred);
- // Remove the exception pred from post-call Tuple.
+ /* Remove the exception pred from post-call Tuple. */
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
}
}
tv = get_Const_tarval(get_Call_ptr(calls[i]));
callee = get_entity_irg(tarval_to_entity(tv));
if (((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) ||
- (get_irg_inline_property(callee) == irg_inline_forced)) {
+ (get_irg_inline_property(callee) == irg_inline_forced)) {
inline_method(calls[i], callee);
}
}
assert(get_irg_callee_info_state(current_ir_graph) == irg_callee_info_none);
irg_walk(get_irg_end(current_ir_graph), NULL, collect_calls2,
- get_irg_link(current_ir_graph));
+ get_irg_link(current_ir_graph));
env = (inline_irg_env *)get_irg_link(current_ir_graph);
}
/* and now inline.
Inline leaves recursively -- we might construct new leaves. */
- //int itercnt = 1;
+ /* int itercnt = 1; */
while (did_inline) {
- //printf("iteration %d\n", itercnt++);
+ /* printf("iteration %d\n", itercnt++); */
did_inline = 0;
for (i = 0; i < n_irgs; ++i) {
ir_node *call;
if (env->n_nodes > maxsize) break;
if (callee &&
- ((is_leave(callee) && is_smaller(callee, leavesize)) ||
- (get_irg_inline_property(callee) == irg_inline_forced))) {
+ ((is_leave(callee) && is_smaller(callee, leavesize)) ||
+ (get_irg_inline_property(callee) == irg_inline_forced))) {
if (!phiproj_computed) {
phiproj_computed = 1;
collect_phiprojs(current_ir_graph);
}
callee_env = (inline_irg_env *)get_irg_link(callee);
-// printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)),
-// get_entity_name(get_irg_entity(callee)));
+/* printf(" %s: Inlining %s.\n", get_entity_name(get_irg_entity(current_ir_graph)), */
+/* get_entity_name(get_irg_entity(callee))); */
inline_method(call, callee);
did_inline = 1;
env->n_call_nodes--;
}
}
- //printf("Non leaves\n");
+ /* printf("Non leaves\n"); */
/* inline other small functions. */
for (i = 0; i < n_irgs; ++i) {
ir_node *call;
if (env->n_nodes > maxsize) break;
if (callee && is_smaller(callee, size)) {
if (!phiproj_computed) {
- phiproj_computed = 1;
- collect_phiprojs(current_ir_graph);
+ phiproj_computed = 1;
+ collect_phiprojs(current_ir_graph);
}
callee_env = (inline_irg_env *)get_irg_link(callee);
-// printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)),
-// get_entity_name(get_irg_entity(callee)));
+/* printf(" %s: Inlining %s.\n", get_entity_name(get_irg_entity(current_ir_graph)), */
+/* get_entity_name(get_irg_entity(callee))); */
inline_method(call, callee);
did_inline = 1;
env->n_call_nodes--;
#if 0
env = (inline_irg_env *)get_irg_link(current_ir_graph);
if ((env->n_call_nodes_orig != env->n_call_nodes) ||
- (env->n_callers_orig != env->n_callers))
+ (env->n_callers_orig != env->n_callers))
printf("Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
- env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
- env->n_callers_orig, env->n_callers,
- get_entity_name(get_irg_entity(current_ir_graph)));
+ env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
+ env->n_callers_orig, env->n_callers,
+ get_entity_name(get_irg_entity(current_ir_graph)));
#endif
free_inline_irg_env((inline_irg_env *)get_irg_link(current_ir_graph));
}
assert(intern_get_irn_op(n) != op_Block);
if ((intern_get_irn_op(n) == op_Const) ||
- (intern_get_irn_op(n) == op_SymConst) ||
- (is_Bad(n)) ||
- (intern_get_irn_op(n) == op_Unknown)) {
+ (intern_get_irn_op(n) == op_SymConst) ||
+ (is_Bad(n)) ||
+ (intern_get_irn_op(n) == op_Unknown)) {
/* These nodes will not be placed by the loop below. */
b = get_irg_start_block(current_ir_graph);
depth = 1;
ir_node *dep = intern_get_irn_n(n, i);
ir_node *dep_block;
if ((irn_not_visited(dep)) &&
- (get_op_pinned(intern_get_irn_op(dep)) == floats)) {
- place_floats_early(dep, worklist);
+ (get_op_pinned(intern_get_irn_op(dep)) == floats)) {
+ place_floats_early(dep, worklist);
}
/* Because all loops contain at least one pinned node, now all
our inputs are either pinned or place_early has already
been finished on them. We do not have any unfinished inputs! */
dep_block = get_nodes_Block(dep);
if ((!is_Bad(dep_block)) &&
- (get_Block_dom_depth(dep_block) > depth)) {
- b = dep_block;
- depth = get_Block_dom_depth(dep_block);
+ (get_Block_dom_depth(dep_block) > depth)) {
+ b = dep_block;
+ depth = get_Block_dom_depth(dep_block);
}
/* Avoid that the node is placed in the Start block */
if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
- b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
- assert(b != get_irg_start_block(current_ir_graph));
- depth = 2;
+ b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
+ assert(b != get_irg_start_block(current_ir_graph));
+ depth = 2;
}
}
set_nodes_Block(n, b);
irn_arity = intern_get_irn_arity(consumer);
for (i = 0; i < irn_arity; i++) {
if (intern_get_irn_n(consumer, i) == producer) {
- block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
+ block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
}
}
} else {
for (i = 0; i < get_irn_n_outs(n); i++) {
ir_node *succ = get_irn_out(n, i);
if (irn_not_visited(succ) && (intern_get_irn_op(succ) != op_Phi))
- place_floats_late(succ, worklist);
+ place_floats_late(succ, worklist);
}
/* We have to determine the final block of this node... except for
constants. */
if ((get_op_pinned(intern_get_irn_op(n)) == floats) &&
- (intern_get_irn_op(n) != op_Const) &&
- (intern_get_irn_op(n) != op_SymConst)) {
- ir_node *dca = NULL; /* deepest common ancestor in the
- dominator tree of all nodes'
- blocks depending on us; our final
- placement has to dominate DCA. */
+ (intern_get_irn_op(n) != op_Const) &&
+ (intern_get_irn_op(n) != op_SymConst)) {
+ ir_node *dca = NULL; /* deepest common ancestor in the
+ dominator tree of all nodes'
+ blocks depending on us; our final
+ placement has to dominate DCA. */
for (i = 0; i < get_irn_n_outs(n); i++) {
- dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
+ dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
}
set_nodes_Block(n, dca);
/* Remove Tuples */
for (i = 0; i < get_Block_n_cfgpreds(n); i++)
/* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
- A different order of optimizations might cause problems. */
+ A different order of optimizations might cause problems. */
if (get_opt_normalize())
- set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
+ set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
} else if (get_opt_optimize() && (intern_get_irn_mode(n) == mode_X)) {
/* We will soon visit a block. Optimize it before visiting! */
ir_node *b = get_nodes_Block(n);
ir_node *new_node = equivalent_node(b);
while (irn_not_visited(b) && (!is_Bad(new_node)) && (new_node != b)) {
/* We would have to run gigo if new is bad, so we
- promote it directly below. */
+ promote it directly below. */
assert(((b == new_node) ||
- get_opt_control_flow_straightening() ||
- get_opt_control_flow_weak_simplification()) &&
- ("strange flag setting"));
+ get_opt_control_flow_straightening() ||
+ get_opt_control_flow_weak_simplification()) &&
+ ("strange flag setting"));
exchange (b, new_node);
b = new_node;
new_node = equivalent_node(b);
n_preds = get_Block_n_cfgpreds(pred);
} else {
/* b's pred blocks and pred's pred blocks must be pairwise disjunct.
- Work preds < pos as if they were already removed. */
+ Work preds < pos as if they were already removed. */
for (i = 0; i < pos; i++) {
- ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (get_Block_block_visited(b_pred) + 1
- < get_irg_block_visited(current_ir_graph)) {
- for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
- ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
- if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
- }
- } else {
- if (is_pred_of(b_pred, pred)) dispensable = 0;
- }
+ ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (get_Block_block_visited(b_pred) + 1
+ < get_irg_block_visited(current_ir_graph)) {
+ for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
+ ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
+ if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
+ }
+ } else {
+ if (is_pred_of(b_pred, pred)) dispensable = 0;
+ }
}
for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
- ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (is_pred_of(b_pred, pred)) dispensable = 0;
+ ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_pred_of(b_pred, pred)) dispensable = 0;
}
if (!dispensable) {
- set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
- n_preds = 1;
+ set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
+ n_preds = 1;
} else {
- n_preds = get_Block_n_cfgpreds(pred);
+ n_preds = get_Block_n_cfgpreds(pred);
}
}
}
if (is_Bad(get_Block_cfgpred(b, i))) {
printf(" removing Bad %i\n ", i);
} else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
+ < get_irg_block_visited(current_ir_graph)) {
printf(" removing pred %i ", i); DDMN(pred);
} else { printf(" Nothing to do for "); DDMN(pred); }
}
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
if (is_Bad(get_Block_cfgpred(b, i))) {
- /* Do nothing */
+ /* Do nothing */
} else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- ir_node *phi_pred = get_Phi_pred(phi, i);
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- if (get_nodes_Block(phi_pred) == pred) {
- assert(intern_get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
- in[n_preds] = get_Phi_pred(phi_pred, j);
- } else {
- in[n_preds] = phi_pred;
- }
- n_preds++;
- }
- /* The Phi_pred node is replaced now if it is a Phi.
- In Schleifen kann offenbar der entfernte Phi Knoten legal verwendet werden.
- Daher muss der Phiknoten durch den neuen ersetzt werden.
- Weiter muss der alte Phiknoten entfernt werden (durch ersetzen oder
- durch einen Bad) damit er aus den keep_alive verschwinden kann.
- Man sollte also, falls keine Schleife vorliegt, exchange mit new_Bad
- aufrufen. */
- if (get_nodes_Block(phi_pred) == pred) {
- /* remove the Phi as it might be kept alive. Further there
- might be other users. */
- exchange(phi_pred, phi); /* geht, ist aber doch semantisch falsch! Warum?? */
- }
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ ir_node *phi_pred = get_Phi_pred(phi, i);
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ if (get_nodes_Block(phi_pred) == pred) {
+ assert(intern_get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
+ in[n_preds] = get_Phi_pred(phi_pred, j);
} else {
- in[n_preds] = get_Phi_pred(phi, i);
- n_preds ++;
+ in[n_preds] = phi_pred;
+ }
+ n_preds++;
+ }
+ /* The Phi_pred node is replaced now if it is a Phi.
+ In Schleifen kann offenbar der entfernte Phi Knoten legal verwendet werden.
+ Daher muss der Phiknoten durch den neuen ersetzt werden.
+ Weiter muss der alte Phiknoten entfernt werden (durch ersetzen oder
+ durch einen Bad) damit er aus den keep_alive verschwinden kann.
+ Man sollte also, falls keine Schleife vorliegt, exchange mit new_Bad
+ aufrufen. */
+ if (get_nodes_Block(phi_pred) == pred) {
+ /* remove the Phi as it might be kept alive. Further there
+ might be other users. */
+ exchange(phi_pred, phi); /* geht, ist aber doch semantisch falsch! Warum?? */
+ }
+ } else {
+ in[n_preds] = get_Phi_pred(phi, i);
+ n_preds ++;
}
}
/* Fix the node */
for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
pred = get_nodes_Block(get_Block_cfgpred(b, k));
if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
+ < get_irg_block_visited(current_ir_graph)) {
phi = get_irn_link(pred);
while (phi) {
- if (intern_get_irn_op(phi) == op_Phi) {
- set_nodes_Block(phi, b);
-
- n_preds = 0;
- for (i = 0; i < k; i++) {
- pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (is_Bad(get_Block_cfgpred(b, i))) {
- /* Do nothing */
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante
- muss Rueckwaertskante sein! (An allen vier in[n_preds] = phi
- Anweisungen.) Trotzdem tuts bisher!! */
- in[n_preds] = phi;
- n_preds++;
- }
- } else {
- in[n_preds] = phi;
- n_preds++;
- }
- }
- for (i = 0; i < get_Phi_n_preds(phi); i++) {
- in[n_preds] = get_Phi_pred(phi, i);
- n_preds++;
- }
- for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
- pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (is_Bad(get_Block_cfgpred(b, i))) {
- /* Do nothing */
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- in[n_preds] = phi;
- n_preds++;
- }
- } else {
- in[n_preds] = phi;
- n_preds++;
- }
- }
- set_irn_in(phi, n_preds, in);
- }
- phi = get_irn_link(phi);
+ if (intern_get_irn_op(phi) == op_Phi) {
+ set_nodes_Block(phi, b);
+
+ n_preds = 0;
+ for (i = 0; i < k; i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante
+ muss Rueckwaertskante sein! (An allen vier in[n_preds] = phi
+ Anweisungen.) Trotzdem tuts bisher!! */
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ } else {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ }
+ for (i = 0; i < get_Phi_n_preds(phi); i++) {
+ in[n_preds] = get_Phi_pred(phi, i);
+ n_preds++;
+ }
+ for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ } else {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ }
+ set_irn_in(phi, n_preds, in);
+ }
+ phi = get_irn_link(phi);
}
}
}
if (is_Bad(get_Block_cfgpred(b, i))) {
/* Do nothing */
} else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
+ < get_irg_block_visited(current_ir_graph)) {
/* It's an empty block and not yet visited. */
assert(get_Block_n_cfgpreds(b) > 1);
/* Else it should be optimized by equivalent_node. */
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- in[n_preds] = get_Block_cfgpred(pred, j);
- n_preds++;
+ in[n_preds] = get_Block_cfgpred(pred, j);
+ n_preds++;
}
/* Remove block as it might be kept alive. */
exchange(pred, b/*new_Bad()*/);
ir_node *ka = get_End_keepalive(end, i);
if (irn_not_visited(ka)) {
if ((intern_get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
- set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
- get_irg_block_visited(current_ir_graph)-1);
- irg_block_walk(ka, optimize_blocks, NULL, NULL);
- mark_irn_visited(ka);
- ARR_APP1 (ir_node *, in, ka);
+ set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
+ get_irg_block_visited(current_ir_graph)-1);
+ irg_block_walk(ka, optimize_blocks, NULL, NULL);
+ mark_irn_visited(ka);
+ ARR_APP1 (ir_node *, in, ka);
} else if (intern_get_irn_op(ka) == op_Phi) {
- mark_irn_visited(ka);
- ARR_APP1 (ir_node *, in, ka);
+ mark_irn_visited(ka);
+ ARR_APP1 (ir_node *, in, ka);
}
}
}
arity = intern_get_irn_arity(n);
if (n == get_irg_end_block(current_ir_graph))
- return; // No use to add a block here.
+ return; /* No use to add a block here. */
for (i=0; i<arity; i++) {
pre = intern_get_irn_n(n, i);
/* Predecessor has multiple successors. Insert new flow edge */
if ((NULL != pre) &&
- (op_Proj == intern_get_irn_op(pre)) &&
- op_Raise != intern_get_irn_op(skip_Proj(pre))) {
-
- /* set predecessor array for new block */
- in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
- /* set predecessor of new block */
- in[0] = pre;
- block = new_Block(1, in);
- /* insert new jmp node to new block */
- switch_block(block);
- jmp = new_Jmp();
- switch_block(n);
- /* set successor of new block */
- set_irn_n(n, i, jmp);
+ (op_Proj == intern_get_irn_op(pre)) &&
+ op_Raise != intern_get_irn_op(skip_Proj(pre))) {
+
+ /* set predecessor array for new block */
+ in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
+ /* set predecessor of new block */
+ in[0] = pre;
+ block = new_Block(1, in);
+ /* insert new jmp node to new block */
+ switch_block(block);
+ jmp = new_Jmp();
+ switch_block(n);
+ /* set successor of new block */
+ set_irn_n(n, i, jmp);
} /* predecessor has multiple successors */
} /* for all predecessors */
#if PRECISE_EXC_CONTEXT
res->n_loc = n_loc + 1 + 1; /* number of local variables that are never
dereferenced in this graph plus one for
- the store plus one for links to fragile
- operations. n_loc is not the number of
- parameters to the procedure! */
+ the store plus one for links to fragile
+ operations. n_loc is not the number of
+ parameters to the procedure! */
#else
res->n_loc = n_loc + 1; /* number of local variables that are never
dereferenced in this graph plus one for
- the store. This is not the number of parameters
+ the store. This is not the number of parameters
to the procedure! */
#endif
res->obst = (struct obstack *) xmalloc (sizeof (struct obstack));
obstack_init (res->obst);
res->value_table = new_identities (); /* value table for global value
- numbering for optimizing use in
- iropt.c */
+ numbering for optimizing use in
+ iropt.c */
res->outs = NULL;
res->phase_state = phase_building;
res->start_block = new_immBlock ();
res->start = new_Start ();
res->bad = new_ir_node (NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
- //res->unknown = new_ir_node (NULL, res, res->start_block, op_Unknown, mode_T, 0, NULL);
+ /* res->unknown = new_ir_node (NULL, res, res->start_block, op_Unknown, mode_T, 0, NULL); */
/* Proj results of start node */
projX = new_Proj (res->start, mode_X, pns_initial_exec);
res->phase_state = phase_building;
res->pinned = pinned;
res->value_table = new_identities (); /* value table for global value
- numbering for optimizing use in
- iropt.c */
+ numbering for optimizing use in
+ iropt.c */
res->ent = NULL;
res->frame_type = NULL;
res->start_block = new_immBlock ();
res->end = new_End ();
mature_block(get_cur_block());
res->bad = new_ir_node (NULL, res, res->start_block, op_Bad, mode_T, 0, NULL);
- //res->unknown = new_ir_node (NULL, res, res->start_block, op_Unknown, mode_T, 0, NULL);
+ /* res->unknown = new_ir_node (NULL, res, res->start_block, op_Unknown, mode_T, 0, NULL); */
res->start = new_Start ();
/* Proj results of start node */
*/
int node_is_in_irgs_storage(ir_graph *irg, ir_node *n)
{
- struct _obstack_chunk *p;
+ struct _obstack_chunk *p;
/*
* checks wheater the ir_node pointer i on the obstack.
firm_kind kind; /**< always set to k_ir_graph*/
/* -- Basics of the representation -- */
struct entity *ent; /**< The entity of this procedure, i.e.,
- the type of the procedure and the
- class it belongs to. */
+ the type of the procedure and the
+ class it belongs to. */
struct type *frame_type; /**< A class type representing the stack frame.
- Can include "inner" methods. */
+ Can include "inner" methods. */
struct ir_node *start_block; /**< block the start node will belong to */
- struct ir_node *start; /**< start node of this ir_graph */
+ struct ir_node *start; /**< start node of this ir_graph */
struct ir_node *end_block; /**< block the end node will belong to */
- struct ir_node *end; /**< end node of this ir_graph */
- struct ir_node *cstore; /**< constant store -- no more needed!! */
+ struct ir_node *end; /**< end node of this ir_graph */
+ struct ir_node *cstore; /**< constant store -- no more needed!! */
struct ir_node *frame; /**< method's frame */
struct ir_node *globals; /**< pointer to the data segment containing all
- globals as well as global procedures. */
+ globals as well as global procedures. */
struct ir_node *args; /**< methods arguments */
- struct ir_node *bad; /**< bad node of this ir_graph, the one and
+ struct ir_node *bad; /**< bad node of this ir_graph, the one and
only in this graph */
/* GL removed: we need unknown with mode for analyses. */
- // struct ir_node *unknown; /**< unknown node of this ir_graph */
- struct obstack *obst; /**< obstack where all of the ir_nodes live */
+ /* struct ir_node *unknown;*/ /**< unknown node of this ir_graph */
+ struct obstack *obst; /**< obstack where all of the ir_nodes live */
struct ir_node *current_block; /**< block for newly gen_*()-erated
- ir_nodes */
+ ir_nodes */
/* -- Fields indicating different states of irgraph -- */
irg_phase_state phase_state; /**< compiler phase */
struct Phi_in_stack *Phi_in_stack; /**< needed for automatic Phi construction */
#endif
int n_loc; /**< number of local variable in this
- procedure including procedure parameters. */
+ procedure including procedure parameters. */
/* -- Fields for optimizations / analysis information -- */
pset *value_table; /**< hash table for global value numbering (cse)
- for optimizing use in iropt.c */
+ for optimizing use in iropt.c */
struct ir_node **outs; /**< Space for the out arrays. */
struct ir_loop *loop; /**< The outermost loop */
void *link; /**< A void* field to link any information to
- the node. */
+ the node. */
/* -- Fields for Walking the graph -- */
unsigned long visited; /**< this flag is an identifier for
- ir walk. it will be incremented
- every time someone walks through
- the graph */
+ ir walk. it will be incremented
+ every time someone walks through
+ the graph */
unsigned long block_visited; /**< same as visited, for a complete block */
#ifdef DEBUG_libfirm
int graph_nr; /**< a unique graph number for each graph to make output
- readable. */
+ readable. */
#endif
};
/**
* Set the pinned state of a graph.
*
- * @irg the IR graph
- * @p new pin state
+ * @irg the IR graph
+ * @p new pin state
*/
INLINE void
set_irg_pinned (ir_graph *irg, op_pinned p);
/**
* Returns true if the node n is allocated on the storage of graph irg.
*
- * @param irg the IR graph
- * @param n the IR node
+ * @param irg the IR graph
+ * @param n the IR node
*/
int node_is_in_irgs_storage(ir_graph *irg, ir_node *n);
# include "irmode.h"
# include "type.h"
# include "dbginfo.h"
-//# include "exc.h"
+/* # include "exc.h" */
/**
* @file irnode.h
pn_Cmp_True = 15 /**< true */
/* not_mask = Leg*/ /* bits to flip to negate comparison * @@ hack for jni interface */
} pn_Cmp; /* Projection numbers for Cmp */
-//#define not_mask pn_Cmp_Leg
+/* #define not_mask pn_Cmp_Leg */
const char *get_pnc_string(int pnc);
int get_negated_pnc(int pnc);
struct ir_node **graph_arr; /**< array to store all parameters */
/* Attributes holding analyses information */
struct dom_info dom; /**< Datastructure that holds information about dominators.
- @@@ @todo
- Eventually overlay with graph_arr as only valid
- in different phases. Eventually inline the whole
- datastructure. */
- // exc_t exc; /**< role of this block for exception handling */
- // ir_node *handler_entry; /**< handler entry block iff this block is part of a region */
+ @@@ @todo
+ Eventually overlay with graph_arr as only valid
+ in different phases. Eventually inline the whole
+ datastructure. */
+ /* exc_t exc; */ /**< role of this block for exception handling */
+ /* ir_node *handler_entry; */ /**< handler entry block iff this block is part of a region */
ir_node ** in_cg; /**< array with predecessors in
- * interprocedural_view, if they differ
- * from intraprocedural predecessors */
+ * interprocedural_view, if they differ
+ * from intraprocedural predecessors */
int *backedge; /**< Field n set to true if pred n is backedge.
- @@@ @todo Ev. replace by bitfield! */
+ @@@ @todo Ev. replace by bitfield! */
int *cg_backedge; /**< Field n set to true if pred n is interprocedural backedge.
- @@@ @todo Ev. replace by bitfield! */
+ @@@ @todo Ev. replace by bitfield! */
} block_attr;
/** Start attributes */
typedef struct {
char dummy;
- // ir_graph *irg; @@@ now in block
+ /* ir_graph *irg; @@@ now in block */
} start_attr;
/** Cond attributes */
typedef struct {
cond_kind kind; /**< flavor of Cond */
long default_proj; /**< for optimization: biggest Proj number, i.e. the one
- used for default. */
+ used for default. */
} cond_attr;
/** Const attributes */
long proj; /**< contains the result position to project (Proj) */
ir_node ** in_cg; /**< array with interprocedural predecessors (Phi) */
int *backedge; /**< Field n set to true if pred n is backedge.
- @todo Ev. replace by bitfield! */
+ @todo Ev. replace by bitfield! */
} filter_attr;
/** EndReg/EndExcept attributes */
typedef struct {
char dummy;
- // ir_graph * irg; /**< ir_graph this node belongs to (for
- // * navigating in interprocedural graphs)
- // @@@ now in block */
+ /* ir_graph * irg; */ /**< ir_graph this node belongs to (for */
+ /* * navigating in interprocedural graphs) */
+ /* @@@ now in block */
} end_attr;
/** CallBegin attributes */
typedef struct {
- // ir_graph * irg; / **< ir_graph this node belongs to (for
- // * navigating in interprocedural graphs) */
- // @@@ now in block
+ /* ir_graph * irg; */ /**< ir_graph this node belongs to (for */
+ /* * navigating in interprocedural graphs) */
+ /* @@@ now in block */
ir_node * call; /**< associated Call-operation */
} callbegin_attr;
call_attr call; /**< For Call: pointer to the type of the method to call */
callbegin_attr callbegin; /**< For CallBegin */
alloc_attr a; /**< For Alloc. */
- io_attr io; /**< For InstOf */
+ io_attr io; /**< For InstOf */
type *f; /**< For Free. */
cast_attr cast; /**< For Cast. */
int phi0_pos; /**< For Phi. Used to remember the value defined by
- this Phi node. Needed when the Phi is completed
- to call get_r_internal_value to find the
- predecessors. If this attribute is set, the Phi
- node takes the role of the obsolete Phi0 node,
- therefore the name. */
+ this Phi node. Needed when the Phi is completed
+ to call get_r_internal_value to find the
+ predecessors. If this attribute is set, the Phi
+ node takes the role of the obsolete Phi0 node,
+ therefore the name. */
int *phi_backedge; /**< For Phi after construction.
- Field n set to true if pred n is backedge.
- @todo Ev. replace by bitfield! */
+ Field n set to true if pred n is backedge.
+ @todo Ev. replace by bitfield! */
long proj; /**< For Proj: contains the result position to project */
confirm_attr confirm_cmp; /**< For Confirm: compare operation */
filter_attr filter; /**< For Filter */
end_attr end; /**< For EndReg, EndExcept */
#if PRECISE_EXC_CONTEXT
struct ir_node **frag_arr; /**< For Phi node construction in case of exceptions
- for nodes Store, Load, Div, Mod, Quot, DivMod. */
+ for nodes Store, Load, Div, Mod, Quot, DivMod. */
#endif
} attr;
struct ir_node **in; /**< array with predecessors / operands */
void *link; /**< to attach additional information to the node, e.g.
used while construction to link Phi0 nodes and
- during optimization to link to nodes that
- shall replace a node. */
+ during optimization to link to nodes that
+ shall replace a node. */
/* ------- Fields for optimizations / analysis information ------- */
struct ir_node **out; /**< array of out edges */
struct dbg_info* dbi; /**< A pointer to information for debug support. */
/* ------- For debugging ------- */
#ifdef DEBUG_libfirm
int node_nr; /**< a unique node number for each node to make output
- readable. */
+ readable. */
#endif
/* ------- For analyses -------- */
ir_loop *loop; /**< the loop the node is in. Access routines in irloop.h */
(get_irn_mode(pred) == mode_X)
), "Block node", 0);
}
- // End block may only have Return, Raise or fragile ops as preds.
+ /* End block may only have Return, Raise or fragile ops as preds. */
if (n == get_irg_end_block(irg))
for (i = 0; i < get_Block_n_cfgpreds(n); ++i) {
ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
if (is_Proj(pred) || get_irn_op(pred) == op_Tuple)
- break; // We can not test properly. How many tuples are there?
+ break; /* We can not test properly. How many tuples are there? */
ASSERT_AND_RET(((get_irn_op(pred) == op_Return) ||
is_Bad(pred) ||
(get_irn_op(pred) == op_Raise) ||
is_fragile_op(pred) ),
"End Block node", 0);
}
- // irg attr must == graph we are in.
+ /* irg attr must == graph we are in. */
if (! interprocedural_view) {
ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
}
#define get_ident_of_mode get_mode_ident
#define get_size_of_mode get_mode_size
#define get_ld_align_of_mode get_mode_ld_align
-#define get_min_of_mode get_mode_min
-#define get_max_of_mode get_mode_max
+#define get_min_of_mode get_mode_min
+#define get_max_of_mode get_mode_max
#define get_null_of_mode get_mode_null
#define get_fsigned_of_mode get_mode_fsigned
#define get_ffloat_of_mode get_mode_ffloat
#define tarval_P_from_entity(X) new_tarval_from_entity(X, mode_P_mach)
#define get_tarval_entity(X) tarval_to_entity(X)
-/* ident.h */ // @@@ Attention: still in ident.h
-//#define id_to_str get_id_str
-//#define id_to_strlen get_id_strlen
+/* ident.h */ /* @@@ Attention: still in ident.h */
+/* #define id_to_str get_id_str */
+/* #define id_to_strlen get_id_strlen */
#endif
DEL_ARR_F(ent->overwrites); ent->overwrites = NULL;
DEL_ARR_F(ent->overwrittenby); ent->overwrittenby = NULL;
}
- //if (ent->values) DEL_ARR_F(ent->values); /* @@@ warum nich? */
+ /* if (ent->values) DEL_ARR_F(ent->values); *//* @@@ warum nich? */
if (ent->val_paths) {
if (is_compound_entity(ent))
for (i = 0; i < get_compound_ent_n_values(ent); i++)
- if (ent->val_paths[i]) ;
- /* free_compound_graph_path(ent->val_paths[i]) ; * @@@ warum nich? */
- /* Geht nich: wird mehrfach verwendet!!! ==> mehrfach frei gegeben. */
- //DEL_ARR_F(ent->val_paths);
+ if (ent->val_paths[i]) ;
+ /* free_compound_graph_path(ent->val_paths[i]) ; * @@@ warum nich? */
+ /* Geht nich: wird mehrfach verwendet!!! ==> mehrfach frei gegeben. */
+ /* DEL_ARR_F(ent->val_paths); */
}
ent->val_paths = NULL;
ent->values = NULL;
new->overwrites = DUP_ARR_F(entity *, old->overwrites);
new->overwrittenby = DUP_ARR_F(entity *, old->overwrittenby);
} else if ((get_type_tpop(get_entity_owner(old)) != type_class) &&
- (get_type_tpop(new_owner) == type_class)) {
+ (get_type_tpop(new_owner) == type_class)) {
new->overwrites = NEW_ARR_F(entity *, 0);
new->overwrittenby = NEW_ARR_F(entity *, 0);
}
assert (get_type_tpop_code(owner) == tpo_class ||
get_type_tpop_code(owner) == tpo_union ||
get_type_tpop_code(owner) == tpo_struct ||
- get_type_tpop_code(owner) == tpo_array); /* Yes, array has an entity
- -- to select fields! */
+ get_type_tpop_code(owner) == tpo_array); /* Yes, array has an entity
+ -- to select fields! */
}
ident *
/* return the name of the visibility */
const char *get_allocation_name(ent_allocation all)
{
-#define X(a) case a: return #a
+#define X(a) case a: return #a
switch (all) {
X(allocation_automatic);
X(allocation_parameter);
set_entity_visibility (entity *ent, ent_visibility vis) {
if (vis != visibility_local)
assert((ent->allocation == allocation_static) ||
- (ent->allocation == allocation_automatic));
+ (ent->allocation == allocation_automatic));
/* @@@ Test that the owner type is not local, but how??
&& get_class_visibility(get_entity_owner(ent)) != local));*/
ent->visibility = vis;
/* return the name of the visibility */
const char *get_visibility_name(ent_visibility vis)
{
-#define X(a) case a: return #a
+#define X(a) case a: return #a
switch (vis) {
X(visibility_local);
X(visibility_external_visible);
/* return the name of the variablity */
const char *get_variability_name(ent_variability var)
{
-#define X(a) case a: return #a
+#define X(a) case a: return #a
switch (var) {
X(variability_uninitialized);
X(variability_initialized);
/* return the name of the volatility */
const char *get_volatility_name(ent_volatility var)
{
-#define X(a) case a: return #a
+#define X(a) case a: return #a
switch (var) {
X(volatility_non_volatile);
X(volatility_is_volatile);
/* return the name of the peculiarity */
const char *get_peculiarity_name(peculiarity var)
{
-#define X(a) case a: return #a
+#define X(a) case a: return #a
switch (var) {
X(peculiarity_description);
X(peculiarity_inherited);
nn = new_SymConst(get_SymConst_type_or_id(n), get_SymConst_kind(n)); break;
case iro_Add:
nn = new_Add(copy_const_value(get_Add_left(n)),
- copy_const_value(get_Add_right(n)), m); break;
+ copy_const_value(get_Add_right(n)), m); break;
case iro_Cast:
nn = new_Cast(copy_const_value(get_Cast_op(n)), get_Cast_type(n)); break;
case iro_Conv:
compound_graph_path *path = ent->val_paths[i];
if (path->nodes[path->len-1] == value_ent) {
for(; i < (ARR_LEN (ent->val_paths))-1; i++) {
- ent->val_paths[i] = ent->val_paths[i+1];
- ent->values[i] = ent->values[i+1];
+ ent->val_paths[i] = ent->val_paths[i+1];
+ ent->values[i] = ent->values[i+1];
}
ARR_SETLEN(entity*, ent->val_paths, ARR_LEN(ent->val_paths) - 1);
ARR_SETLEN(ir_node*, ent->values, ARR_LEN(ent->values) - 1);
for (i = 0; i < (ARR_LEN (ent->overwrites)); i++)
if (ent->overwrites[i] == overwritten) {
for(; i < (ARR_LEN (ent->overwrites))-1; i++)
- ent->overwrites[i] = ent->overwrites[i+1];
+ ent->overwrites[i] = ent->overwrites[i+1];
ARR_SETLEN(entity*, ent->overwrites, ARR_LEN(ent->overwrites) - 1);
break;
}
for (i = 0; i < (ARR_LEN (ent->overwrittenby)); i++)
if (ent->overwrittenby[i] == overwrites) {
for(; i < (ARR_LEN (ent->overwrittenby))-1; i++)
- ent->overwrittenby[i] = ent->overwrittenby[i+1];
+ ent->overwrittenby[i] = ent->overwrittenby[i+1];
ARR_SETLEN(entity*, ent->overwrittenby, ARR_LEN(ent->overwrittenby) - 1);
break;
}
int is_atomic_entity(entity *ent) {
type* t = get_entity_type(ent);
return (is_primitive_type(t) || is_pointer_type(t) ||
- is_enumeration_type(t) || is_method_type(t));
+ is_enumeration_type(t) || is_method_type(t));
}
int is_compound_entity(entity *ent) {
type* t = get_entity_type(ent);
return (is_class_type(t) || is_struct_type(t) ||
- is_array_type(t) || is_union_type(t));
+ is_array_type(t) || is_union_type(t));
}
/* @@@ not implemnted!!! */
#if 1 || DEBUG_libfirm
int dump_node_opcode(FILE *F, ir_node *n); /* from irdump.c */
-#define X(a) case a: printf(#a); break
+#define X(a) case a: printf(#a); break
void dump_entity (entity *ent) {
int i, j;
type *owner = get_entity_owner(ent);
} else {
printf("\n compound values:");
for (i = 0; i < get_compound_ent_n_values(ent); ++i) {
- compound_graph_path *path = get_compound_ent_value_path(ent, i);
- entity *ent0 = get_compound_graph_path_node(path, 0);
- printf("\n %2d %s", get_entity_offset(ent0), get_entity_name(ent0));
- for (j = 1; j < get_compound_graph_path_length(path); ++j)
- printf(".%s", get_entity_name(get_compound_graph_path_node(path, j)));
- printf("\t = ");
- dump_node_opcode(stdout, get_compound_ent_value(ent, i));
+ compound_graph_path *path = get_compound_ent_value_path(ent, i);
+ entity *ent0 = get_compound_graph_path_node(path, 0);
+ printf("\n %2d %s", get_entity_offset(ent0), get_entity_name(ent0));
+ for (j = 1; j < get_compound_graph_path_length(path); ++j)
+ printf(".%s", get_entity_name(get_compound_graph_path_node(path, j)));
+ printf("\t = ");
+ dump_node_opcode(stdout, get_compound_ent_value(ent, i));
}
}
}
# with the principles of the IEEE 754-854 floating-point standards. #
# You can find out more about the testing tool IeeeCC754 at #
# #
-# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
+# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
# #
# This tool is in parts based on and greatly benefited from the #
# the program FPTEST developed by Jerome Coonen. For a full #
class Bitstring
{
protected:
- ///the bitstring
+ /* * the bitstring */
unsigned long * bitstr;
- ///length of bitstring,
+ /* * length of bitstring, */
long length;
- /// number of array elments
+ /* * number of array elments */
long lengthBlock;
- ///number of bits in class T
+ /* * number of bits in class T */
long lengthT;
- /// returns a unsigned long with the first res bits set.
+ /* * returns a unsigned long with the first res bits set. */
unsigned long GetPattern(unsigned long rest);
/** returns the first block of bitstr. It's only defined
unsigned long Convert();
public:
- /// Constructor, creates an empty bitstring
+ /* * Constructor, creates an empty bitstring */
Bitstring();
/**Constructor, creates a bitstring of size size.
@param size the default size*/
/**Constructor,initiate the bitstring with the str as input
@param str the default value for the bitstring */
Bitstring(char * str);
- /// Copy constructor.
+ /* * Copy constructor. */
Bitstring(const Bitstring& copy);
- ///Destructor
+ /* * Destructor */
~Bitstring();
/**Get the length of the bitstring.
/** Replace the byte value at position "byte" with the new "bytevalue"
@param byte position of the byte
@param bytevalue new value
- @return: void // Previous byte value */
+ @return: void (Previous byte value) */
void PutByte(unsigned long byte,Bitstring bytevalue);
@param begin the begining of the substring
@param count the length of the substring
@return substring from bitstring */
- // Bitstring SubBitstring (unsigned long begin, unsigned long count) const;
+ /* Bitstring SubBitstring (unsigned long begin, unsigned long count) const; */
void SubBitstring(unsigned long begin, Bitstring &sub) const;
/** Overloads the array operator. Returns/change block "n"
friend istream& operator >> (istream& ins, Bitstring &instr);
};
-//@Include: Hex.h
+/* @Include: Hex.h */
#endif
# with the principles of the IEEE 754-854 floating-point standards. #
# You can find out more about the testing tool IeeeCC754 at #
# #
-# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
+# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
# #
# This tool is in parts based on and greatly benefited from the #
# the program FPTEST developed by Jerome Coonen. For a full #
*/
-// MORE INFORMATION ON THE FUNCTIONS IN THIS HEADER FILE CAN BE FOUND
-// ON THE WEBPAGE
-// http://win-www.uia.ac.be/u/cant/ieeecc754.html
-// ``Adapt IeeeCC754 to test your floating-point implementation''
+/* MORE INFORMATION ON THE FUNCTIONS IN THIS HEADER FILE CAN BE FOUND */
+/* ON THE WEBPAGE */
+/* http://win-www.uia.ac.be/u/cant/ieeecc754.html */
+/* ``Adapt IeeeCC754 to test your floating-point implementation'' */
-// ----
-// Includes
-// ----
+/* ---- */
+/* Includes */
+/* ---- */
#include <Fp.h>
#include <Bitstring.h>
-// ----
-// Includes and Defines specificly for testing MpIeee
-// ----
+/* ---- */
+/* Includes and Defines specificly for testing MpIeee */
+/* ---- */
#ifdef MPIEEE_TEST
#include <MpIeee.hh>
#endif
-// ----
-// Includes and Defines specificly for testing FMLib
-// ----
+/* ---- */
+/* Includes and Defines specificly for testing FMLib */
+/* ---- */
#ifdef FMLIB_TEST
#include <Sdefs.h>
#endif
-// ----
-// CLASS DEFINITION : DriverFloatRepr
-// ----
+/* ---- */
+/* CLASS DEFINITION : DriverFloatRepr */
+/* ---- */
class DriverFloatRepr: public FP
protected:
void SetLibRound( );
-// call the appropriate functions to set the rounding mode
-// on the target platform - see item (a) on webpage
+/* call the appropriate functions to set the rounding mode */
+/* on the target platform - see item (a) on webpage */
void SetLibEnvironment( );
-// call the appropriate functions to clear all floating-point
-// exceptions on the target platform - see item (b) on webpage
+/* call the appropriate functions to clear all floating-point */
+/* exceptions on the target platform - see item (b) on webpage */
void GetLibExceptions( );
-// call the appropriate functions to read out exceptions generated
-// by the target platform - see item (b) on webpage
+/* call the appropriate functions to read out exceptions generated */
+/* by the target platform - see item (b) on webpage */
public:
#ifndef FMLIB_TEST
-// For testing FMLib we use other definitions
-// --
+/* For testing FMLib we use other definitions */
+/* -- */
DriverFloatRepr::DriverFloatRepr( ) :FP( )
{}
;
#endif
-// If your target implementation is implemented in hardware,
-// provide an implementation of the functions listed below for
-// conversion between your hardware data types and DriverFloatRepr
-// see item (c) on webpage and
-// ftp://win-ftp.uia.ac.be/pub/cant/IeeeCC754/converting.pdf
+/* If your target implementation is implemented in hardware, */
+/* provide an implementation of the functions listed below for */
+/* conversion between your hardware data types and DriverFloatRepr */
+/* see item (c) on webpage and */
+/* ftp://win-ftp.uia.ac.be/pub/cant/IeeeCC754/converting.pdf */
DriverFloatRepr( float f );
DriverFloatRepr( double d );
long double tolongdouble( );
-// If your target implementation is implemented in software,
-// provide an implementation of the functions listed below for
-// conversion between your floating-point datatype and
+/* If your target implementation is implemented in software, */
+/* provide an implementation of the functions listed below for */
+/* conversion between your floating-point datatype and */
-// DriverFloatRepr (don't forget to replace MyDatatype by the
+/* DriverFloatRepr (don't forget to replace MyDatatype by the */
-// appropriate identifier in this declaration) - see item (c) on
-// webpage and
-// ftp://win-ftp.uia.ac.be/pub/cant/IeeeCC754/converting.pdf
+/* appropriate identifier in this declaration) - see item (c) on */
+/* webpage and */
+/* ftp://win-ftp.uia.ac.be/pub/cant/IeeeCC754/converting.pdf */
DriverFloatRepr (void *val);
void* to(void *val);
-//
-// Here is an example to test the multiprecision floating-point
-// implementation MpIeee:
+/* */
+/* Here is an example to test the multiprecision floating-point */
+/* implementation MpIeee: */
#ifdef MPIEEE_TEST
-// Functions that Convert MpIeee to DriverFloatRepr and vice-versa
-//--
+/* Functions that Convert MpIeee to DriverFloatRepr and vice-versa */
+/* -- */
DriverFloatRepr ( const MpIeee &M );
MpIeee to( );
#endif
#ifdef FMLIB_TEST
-// Functions specifically for Testing FMLib
-// --
+/* Functions specifically for Testing FMLib */
+/* -- */
DriverFloatRepr::DriverFloatRepr( );
DriverFloatRepr::DriverFloatRepr( int m,int e,int h );
#endif
-// provide implementaton of the functions listed below in the file
-// BasicOperations/$PLATFORM/BasicOperationstest.cc
-// see item (d) on webpage
-// --
+/* provide implementaton of the functions listed below in the file */
+/* BasicOperations/$PLATFORM/BasicOperationstest.cc */
+/* see item (d) on webpage */
+/* -- */
DriverFloatRepr operator + ( DriverFloatRepr &T );
DriverFloatRepr operator - ( DriverFloatRepr &T );
DriverFloatRepr operator % ( DriverFloatRepr &T );
DriverFloatRepr sqrt( );
-// provide implementation of the functions listed below in the file
-// Conversions/$PLATFORM/Conversionstest.cc
-// see item (d) on webpage
-// --
+/* provide implementation of the functions listed below in the file */
+/* Conversions/$PLATFORM/Conversionstest.cc */
+/* see item (d) on webpage */
+/* -- */
DriverFloatRepr roundto ( int, int, int );
DriverFloatRepr copyto ( int, int, int );
DriverFloatRepr b2d ( int );
DriverFloatRepr d2b ( );
-// conversions between DriverFloatRepr and hardware integer
-// data types; implementation is provided in the files
-// BasicOperations/$PLATFORM/fpenv_$PLATFORM.cc
-// and
-// Conversions/$PLATFORM/fpenv_$PLATFORM.cc
-// and should not be modified
-// --
+/* conversions between DriverFloatRepr and hardware integer */
+/* data types; implementation is provided in the files */
+/* BasicOperations/$PLATFORM/fpenv_$PLATFORM.cc */
+/* and */
+/* Conversions/$PLATFORM/fpenv_$PLATFORM.cc */
+/* and should not be modified */
+/* -- */
DriverFloatRepr( long i );
DriverFloatRepr( unsigned long i );
# with the principles of the IEEE 754-854 floating-point standards. #
# You can find out more about the testing tool IeeeCC754 at #
# #
-# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
+# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
# #
# This tool is in parts based on and greatly benefited from the #
# the program FPTEST developed by Jerome Coonen. For a full #
#endif
#define maxstr 5000
-// maximum chars for binary to decimal conversion
+/* maximum chars for binary to decimal conversion */
/**This is an abstract class defining floating-points as described in the IEEE 754 standard. It uses the Bitstring class to represent the mantissa and exponent
separately. The hidden bit is always present in the mantissa and needs to be defined for input and output in a hexadecimal form.\\
{
protected:
- ///Little or Big Endian
+ /* * Little or Big Endian */
static int Endian;
/**The representation of a floating-point environment. \\
\end{itemize} */
static Bitstring fpEnv;
- ///The sign of the floating-point number
+ /* The sign of the floating-point number */
int sign;
- ///Hidden bit (yes/no)
+ /* Hidden bit (yes/no) */
int hidden;
- ///The size of the mantissa
+ /* * The size of the mantissa */
int sizeMant;
- ///The mantissa
+ /* * The mantissa */
Bitstring mant;
- ///The exponent
+ /* * The exponent */
Bitstring exp;
*/
int GetFPRound();
- ///Sets the divide by zero exception in fpEnv
+ /* * Sets the divide by zero exception in fpEnv */
void SetFPDivByZero(){fpEnv.Set(0);}
- ///Sets the invalid exception in fpEnv
+ /* * Sets the invalid exception in fpEnv */
void SetFPInvalid() {fpEnv.Set(1);}
- ///Sets the underflow exception in fpEnv
+ /* * Sets the underflow exception in fpEnv */
void SetFPUnderflow(){fpEnv.Set(2);}
- ///Sets the overflow exception in fpEnv
+ /* * Sets the overflow exception in fpEnv */
void SetFPOverflow() {fpEnv.Set(3);}
- ///Sets the inexact exception in fpEnv
+ /* * Sets the inexact exception in fpEnv */
void SetFPInexact() {fpEnv.Set(4);}
public:
- ///The size of the exponent
+ /* * The size of the exponent */
int sizeExp;
- ///decimal
- char *decimal; // binary to decimal conversion
+ /* * decimal */
+ char *decimal; /* binary to decimal conversion */
- ///Constructor
+ /* * Constructor */
FP();
/**Constructor setting the size of the exponent, the mantissa and the
hidden bit
@param copy an other FP object*/
FP(FP & copy);
- //Destructor
+ /* Destructor */
virtual~FP(){};
- ///returns true (1) if there is a hidden bit
+ /* * returns true (1) if there is a hidden bit */
int Hidden(){return hidden;};
/**returns or sets the sign
@param sgn the new sign \\ \begin{tabular}{rcl} 1 &:& negative \\
\end{tabular}
*/
void SetFPRound (int rm);
- ///Clears the environment (fpEnv)
+ /* * Clears the environment (fpEnv) */
void ClearFPEnvironment();
/**Returns the exceptions stored in fpEnv as a Bitstring
@return a Bitstring with next bit positions set, depending the exception \\\begin{tabular}{rcl}
@return\begin{tabular}{rcl} 1 &:& invalid exception \\
0 &:& no invalid exception
\end{tabular}*/
- int GetFPInvalid() {//cout << "fpEnv.GetBit(1)" << fpEnv.GetBit(1);
+ int GetFPInvalid() {/* cout << "fpEnv.GetBit(1)" << fpEnv.GetBit(1); */
return fpEnv.GetBit(1);}
/**chekcs if there has occured a underflow exception
@return\begin{tabular}{rcl} 1 &:& underflow exception \\
int isInf();
int isNan();
- ///Overloaded output operator
+ /* * Overloaded output operator */
friend ostream& operator << (ostream& outs, FP &outstr);
friend istream& operator >> (istream& ins, FP &instr);
};
-//@Include: MyFloat.h MyDouble.h MyQuad.h FpSim.h Bitstring.h UCB.h dlist.h stack.h ../Calculator/FPcalculator.h
+/* @Include: MyFloat.h MyDouble.h MyQuad.h FpSim.h Bitstring.h UCB.h dlist.h stack.h ../Calculator/FPcalculator.h */
#endif
# with the principles of the IEEE 754-854 floating-point standards. #
# You can find out more about the testing tool IeeeCC754 at #
# #
-# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
+# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
# #
# This tool is in parts based on and greatly benefited from the #
# the program FPTEST developed by Jerome Coonen. For a full #
public:
- /// Constructor, creates empty Bitstring
+ /* * Constructor, creates empty Bitstring */
Hex();
/** Constructor
@param copy a Bitstring object */
Hex(const Bitstring ©);
- ///Deconstructor
+ /* * Deconstructor */
~Hex(){};
# with the principles of the IEEE 754-854 floating-point standards. #
# You can find out more about the testing tool IeeeCC754 at #
# #
-# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
+# http://win-www.uia.ac.be/u/cant/ieeecc754.html #
# #
# This tool is in parts based on and greatly benefited from the #
# the program FPTEST developed by Jerome Coonen. For a full #
#ifndef _UCB_H
#define _UCB_H
-// ----
-// Includes
-// ----
+/* ---- */
+/* Includes */
+/* ---- */
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include <fstream.h>
#include <Fp.h>
-// ----
-// Defines
-// ----
+/* ---- */
+/* Defines */
+/* ---- */
#define BUF_LEN 1024
#define FP_STR 256
UCB<T>::UCB( )
{
line = 0;
- // result.SetEnvironment(); change BV
+ /* result.SetEnvironment(); change BV */
signalu = 0;
signalv = 0;
signalw = 0;
logfile << "Summary: " << endl;
logfile << "-------- " << endl;
- if ( tiny ) { // conclusion underflow
+ if ( tiny ) { /* conclusion underflow */
if ( ucb.nou ) {
if ( ucb.nov ) {
if ( !( ucb.now ) )
logfile << "Warning: only 'v' underflow cases in the testset" << endl;
else
logfile << "Warning: only 'v' and 'w' underflow cases in the testset" << endl;
- } // if
+ } /* if */
else {
if ( ucb.nov ) {
if ( ucb.now ) {
logfile << "Implementation signals underflow in case the result" << endl << "(1) is tiny after rounding and" << endl << "(2) raises the inexact exception"<< endl << "('v' - underflow)" << endl;
else
logfile << "Implementation signals underflow in case the result" << endl << "(1) is tiny after rounding and" << endl << "(2) suffers denormalization loss" << endl << "('u' - underflow)" << endl;
- } // else
- } // else
- } // else
- } // if tiny
+ } /* else */
+ } /* else */
+ } /* else */
+ } /* if tiny */
logfile << "Errors: " << errors << "/" << allops << endl;
logfile << "Warnings: " << warnings << "/" << allops << endl;
}
} else {
- ieeeVector=0; // Cannot be ieeeVector!
+ ieeeVector=0; /* Cannot be ieeeVector! */
i--;
- while ( isdigit( buf[ i ] ) ) i--; // rewind
+ while ( isdigit( buf[ i ] ) ) i--; /* rewind */
i++;
- operation[ i ] = '\0'; // ignore digit after operation
+ operation[ i ] = '\0'; /* ignore digit after operation */
j = 0;
while ( buf[ i ] != ' ' )
tmp[ j ] ='\0';
sizeM = atoi( tmp );
- sizeM++ ; // +1 for the sign
+ sizeM++ ; /* +1 for the sign */
}
while ( buf[ i ] == ' ' ) i++;
- // read destination format
+ /* read destination format */
if ( ( strncmp( operation,"rt",2 ) == 0 ) || ( strncmp( operation,"ct",2 ) == 0 ) ) {
if ( !isdigit( buf[ i ] ) ) {
prec = buf[ i ];
dsizeM=240 + 1;
dhidden = 0;
break;
- } // switch
+ } /* switch */
i++;
while ( buf[ i ] == ' ' ) i++;
- } // if
+ } /* if */
else {
j = 0;
dsizeM = atoi( tmp );
dsizeM++;
- } // else
+ } /* else */
while ( buf[ i ] == ' ' ) i++;
- } // if
+ } /* if */
else {
dsizeE = sizeE;
dhidden = hidden;
dsizeM = sizeM;
- } // else
+ } /* else */
rounding=buf[ i ];
if ( ( strncmp( operation,"ci",2 ) ==0 ) ||
( strncmp( operation,"cu",2 ) ==0 ) ) {
- count = 32; // 32 bit integer
- i += 2; // avoid 0x
+ count = 32; /* 32 bit integer */
+ i += 2; /* avoid 0x */
for ( j=0; j<count;j++ ) {
if ( isdigit( buf[ i ] ) || ( ( buf[ i ] >= 'a' ) && ( buf[ i ] <= 'f' ) ) )
tmp[ j ] = 0;
break;
}
- } // for
+ } /* for */
for ( ; j<count;j++ )
tmp[ j ] = 0;
} else if ( ( strncmp( operation,"cI",2 ) ==0 ) ||
( strncmp( operation,"cU",2 ) ==0 ) ) {
- count = 64; // 64 bit integer
- i += 2; // avoid 0x
+ count = 64; /* 64 bit integer */
+ i += 2; /* avoid 0x */
for ( j=0; j<count;j++ ) {
if ( isdigit( buf[ i ] ) || ( ( buf[ i ] >= 'a' ) && ( buf[ i ] <= 'f' ) ) )
tmp[ j ] = buf[ i++ ];
tmp[ j ] ='\0';
if ( strncmp( operation,"d2b",3 ) == 0 ) {
- operand1 = T( sizeM-1, sizeE, hidden ); // sets Mantissa and exp right
+ operand1 = T( sizeM-1, sizeE, hidden ); /* sets Mantissa and exp right */
operand1.decimal = new char[ maxstr ];
for ( k = 0; k <= j; k++ )
operand1.decimal[ k ] = tmp[ k ];
}
i++;
- count =( int ) ceil( ( double ) ( sizeM+sizeE ) /32.0 ) *8; // reset count!
+ count =( int ) ceil( ( double ) ( sizeM+sizeE ) /32.0 ) *8; /* reset count! */
for ( j=0; j<count ;j++ ) {
if ( buf[ i ] ==' ' )
} else {
if ( ( strncmp( operation,"ri",2 ) ==0 ) ||
( strncmp( operation,"ru",2 ) ==0 ) ) {
- count = 32; // 32 bit integer
- i += 3; // avoid 0x
+ count = 32; /* 32 bit integer */
+ i += 3; /* avoid 0x */
for ( j=0; j<count;j++ ) {
if ( isdigit( buf[ i ] ) || ( ( buf[ i ] >= 'a' ) && ( buf[ i ] <= 'f' ) ) )
tmp[ j ] = buf[ i++ ];
tmp[ j ] = 0;
break;
}
- } // for
+ } /* for */
for ( ; j<count;j++ )
tmp[ j ] = 0;
}
else if ( ( strncmp( operation,"rI",2 ) ==0 ) ||
( strncmp( operation,"rU",2 ) ==0 ) ) {
- count = 64; // 64 bit integer
- i += 3; // avoid 0x
+ count = 64; /* 64 bit integer */
+ i += 3; /* avoid 0x */
for ( j=0; j<count;j++ ) {
if ( isdigit( buf[ i ] ) || ( ( buf[ i ] >= 'a' ) && ( buf[ i ] <= 'f' ) ) )
tmp[ j ] = buf[ i++ ];
tmp[ j ] = 0;
break;
}
- } // for
+ } /* for */
for ( ; j<count;j++ )
tmp[ j ] = 0;
} else if ( strncmp( operation,"b2d",3 ) == 0 ) {
int i = 0;
if ( !( tiny ) && ( operand1.istiny( ) || operand2.istiny( ) || result.istiny( ) ) )
- return NULL; // do not test tiny denormalized numbers
+ return NULL; /* do not test tiny denormalized numbers */
else if ( !( inf ) && ( operand1.isInf( ) || operand2.isInf( ) || result.isInf( ) ) )
- return NULL; // do not test infinities
+ return NULL; /* do not test infinities */
else if ( !( nan ) && ( operand1.isNan( ) || operand2.isNan( ) || result.isNan( ) ) )
- return NULL; // do not test NaNs
- // logstream.seekp(0,ios::beg);
+ return NULL; /* do not test NaNs */
+ /* logstream.seekp(0,ios::beg); */
allops++;
SetFPRound( );
else if( strncmp( operation,"mul",3 ) ==0 )
res = operand1 * operand2;
else if( strncmp( operation,"div",3 ) ==0 )
- res = operand1 / operand2; // debug
+ res = operand1 / operand2; /* debug */
else if( strncmp( operation,"rem",3 ) ==0 )
res = operand1 % operand2;
else if( strncmp( operation,"sqrt",4 ) ==0 )
{
unsigned int i;
if ( (!ieeeVector) || (ieee) )
- errors++; // total number of errors encountered
+ errors++; /* total number of errors encountered */
else
warnings++;
logfile<<"Operation: " << operation << endl;
case 'm':
logfile<<"Round down" << endl;
break;
- } // switch
+ } /* switch */
logfile<<"Operand 1: " << operand1 << endl;
logfile<<"Operand 2: " << operand2 << endl;
logfile<< "Flags expected: ";
case 'd':
logfile<<"z ";
break;
- } // switch
+ } /* switch */
logfile << endl;
logfile <<"Flags returned: ";
if ( res.GetFPDivByZero( ) )
if ( notsignalv ) {
logfile<<((ieeeVector) && !(ieee) ? "Warning " : "Error ") <<"Line "<<line<< ": underflow without denormalization loss previously not detected"<< endl;
check = 0;
- } // end if
+ } /* end if */
}
else if ( strchr( exceptions,'b' ) ) {
signalw = 1;
if ( notsignalw ) {
logfile <<((ieeeVector) && !(ieee) ? "Warning " : "Error ")<<"Line "<<line<< ": underflow before rounding previously not detected"<< endl;
check = 0;
- } // end if
+ } /* end if */
}
else {
logfile<<((ieeeVector) && !(ieee) ? "Warning " : "Error ") <<"Line "<<line<< ": underflow not expected"<<endl;
check = 0;
- } // end if
+ } /* end if */
}
else
signalu = 1;
- } // end if
+ } /* end if */
for( i=0 ; i < strlen( exceptions );i++ ) {
switch ( exceptions[ i ] ) {
if ( ( noFlags & NO_FLAGS_UNDERFLOW ) == 0 ) {
if( !reslt.GetFPUnderflow( ) ) {
notsignalv = 1;
- //PrintError(reslt);
+ /* PrintError(reslt); */
if ( signalv ) {
check = 0;
- } // end if
- } // end if
- } // end if
+ } /* end if */
+ } /* end if */
+ } /* end if */
break;
case 'b':
if ( signalw ) {
logfile<<((ieeeVector) && !(ieee) ? "Warning " : "Error ") <<"Line "<<line<< ": underflow before rounding previously detected"<< endl;
check = 0;
- } // end if
- } // end if
- } // end if
+ } /* end if */
+ } /* end if */
+ } /* end if */
break;
case 'v':
if (strcmp(resultdummy, reslt.decimal) != 0)
{
- logfile <<((ieeeVector) && !(ieee) ? "Warning " : "Error ") << "Line "<<line<< ": different decimal representation"<< endl;
- check =0;
- }
- delete[] resultdummy;
+ logfile <<((ieeeVector) && !(ieee) ? "Warning " : "Error ") << "Line "<<line<< ": different decimal representation"<< endl;
+ check =0;
+ }
+ delete[] resultdummy;
}
} else if ( result.IsNaN( ) ) {
if( !reslt.IsNaN( ) ) {
check =0;
}
else if ( signedZero ) {
- // In this case result is a zero and there is signedzero
+ /* In this case result is a zero and there is signedzero */
logfile<<((ieeeVector) && !(ieee) ? "Warning " : "Error ") <<"Line "<<line<< ": Different sign"<< endl;
check =0;
}
* value is represented as a pseudo-struct char array, addressed
* by macros
* struct {
- * char sign; // 0 for positive, 1 for negative
+ * char sign; // 0 for positive, 1 for negative
* char exp[VALUE_SIZE];
* char mant[VALUE_SIZE];
* descriptor_t desc;
_fail_char(old_str, len, str - old_str);
}
}
- } // switch(state)
+ } /* switch(state) */
done:
sc_val_from_str(mant_str, strlen(mant_str), _mant(result));
#endif
#ifdef HAVE_LONG_DOUBLE
- TRACEPRINTF(("val_from_float(%.8X%.8X%.8X)\n", ((int*)&l)[2], ((int*)&l)[1], ((int*)&l)[0]));//srcval.val.high, srcval.val.mid, srcval.val.low));
+ TRACEPRINTF(("val_from_float(%.8X%.8X%.8X)\n", ((int*)&l)[2], ((int*)&l)[1], ((int*)&l)[0]));/* srcval.val.high, srcval.val.mid, srcval.val.low)); */
DEBUGPRINTF(("(%d-%.4X-%.8X%.8X)\n", sign, exponent, mantissa0, mantissa1));
#else
TRACEPRINTF(("val_from_float(%.8X%.8X)\n", srcval.val.high, srcval.val.low));
#include "tv_t.h"
#include "set.h" /* to store tarvals in */
-//#include "tune.h" /* some constants */
+/* #include "tune.h" */ /* some constants */
#include "entity_t.h" /* needed to store pointers to entities */
#include "irmode.h" /* defines modes etc */
#include "irmode_t.h"
/** Size of hash tables. Should correspond to average number of distinct constant
target values */
-#define N_CONSTANTS 2048
+#define N_CONSTANTS 2048
/* XXX hack until theres's a proper interface */
#define BAD 1
else if (strcasecmp(str, "false")) return tarval_b_true;
else
/* XXX This is C semantics */
- return atoi(str) ? tarval_b_true : tarval_b_false;
+ return atoi(str) ? tarval_b_true : tarval_b_false;
case irms_float_number:
switch(get_mode_size_bits(mode)) {
assert(tv);
/* tv->value == NULL means dereferencing a null pointer */
return ((get_mode_sort(tv->mode) == irms_reference) && (tv->value != NULL) && (tv->length == 0)
- && (tv != tarval_P_void));
+ && (tv != tarval_P_void));
}
entity *tarval_to_entity(tarval *tv)
case irms_int_number:
if (!mode_is_signed(a->mode)) return 0;
else
- return sc_comp(a->value, get_mode_null(a->mode)->value) == -1 ? 1 : 0;
+ return sc_comp(a->value, get_mode_null(a->mode)->value) == -1 ? 1 : 0;
case irms_float_number:
return fc_comp(a->value, get_mode_null(a->mode)->value) == -1 ? 1 : 0;
/* cast float to something */
case irms_float_number:
switch (get_mode_sort(m)) {
- case irms_float_number:
+ case irms_float_number:
switch (get_mode_size_bits(m))
{
case 32:
break;
}
return get_tarval(fc_get_buffer(), fc_get_buffer_length(), m);
- break;
+ break;
case irms_int_number:
switch (GET_FLOAT_TO_INT_MODE())
* an intermediate representation is needed here first. */
/* return get_tarval(); */
return tarval_bad;
- break;
+ break;
default:
/* the rest can't be converted */
case TVO_DECIMAL:
str = sc_print(tv->value, get_mode_size_bits(tv->mode), SC_DEC);
- break;
+ break;
case TVO_OCTAL:
str = sc_print(tv->value, get_mode_size_bits(tv->mode), SC_OCT);
- break;
+ break;
case TVO_HEX:
case TVO_NATIVE:
default:
str = sc_print(tv->value, get_mode_size_bits(tv->mode), SC_HEX);
- break;
+ break;
}
return snprintf(buf, len, "%s%s%s", prefix, str, suffix);
if (get_entity_peculiarity((entity *)tv->value) != peculiarity_description)
return snprintf(buf, len, "%s%s%s", prefix, get_entity_ld_name((entity *)tv->value), suffix);
else {
- if (mode_info->mode_output == TVO_NATIVE)
+ if (mode_info->mode_output == TVO_NATIVE)
return snprintf(buf, len, "NULL");
- else
+ else
return snprintf(buf, len, "0");
- }
- }
+ }
+ }
else {
- if (size > tv->length) {
- memcpy(buf, tv->value, tv->length);
- buf[tv->length] = '\0';
- }
- else {
- /* truncated */
- memcpy(buf, tv->value, size-1);
- buf[size-1] = '\0';
- }
+ if (size > tv->length) {
+ memcpy(buf, tv->value, tv->length);
+ buf[tv->length] = '\0';
+ }
+ else {
+ /* truncated */
+ memcpy(buf, tv->value, size-1);
+ buf[size-1] = '\0';
+ }
return tv->length;
- }
+ }
else
return snprintf(buf, len, "void");
* |/_ _\|
* Block1 Block2 deadBlock
* \ | /
-* \ | /
-* _\| \ / |/_
+* \ | /
+* _\| \ / |/_
* nextBlock
*
*
* This is a program as, e.g.,
*
* if () then
-* { Jmp label1; } // happens anyways
+* { Jmp label1; } // happens anyways
* else
-* { Jmp label1; } // happens anyways
+* { Jmp label1; } // happens anyways
* label1:
* return();
* Jmp label1;
*
* VAR_A is some extern variable.
*
-* main(int a) { // pos 0
-* int b = 1; // pos 1
-* int h; // pos 2
+* main(int a) { // pos 0
+* int b = 1; // pos 1
+* int h; // pos 2
*
* while (0 == 0) loop {
* h = a;
r = new_immBlock ();
add_in_edge (r, x);
x = new_Cond (new_Proj(new_Cmp(new_Const (mode_Is, new_tarval_from_long (0, mode_Is)),
- new_Const (mode_Is, new_tarval_from_long (0, mode_Is))),
- mode_b, Eq));
+ new_Const (mode_Is, new_tarval_from_long (0, mode_Is))),
+ mode_b, Eq));
f = new_Proj (x, mode_X, 0);
t = new_Proj (x, mode_X, 1);
/* output the vcg file */
printf("Done building the graph. Dumping it.\n");
- //turn_of_edge_labels();
+ /* turn_of_edge_labels(); */
dump_keepalive_edges(true);
dump_all_types();
dump_ir_block_graph (irg);
* This file constructs the ir for the following pseudo-program:
*
* main() {
-* int a = 0; // pos 0
-* int b = 1; // pos 1
-* int h; // pos 2
+* int a = 0; // pos 0
+* int b = 1; // pos 1
+* int h; // pos 2
*
* if (0 == 0)
* { a = 2; }
* Block1 scnCondBlock
* | | |
* | | |
- * | \ / \ /
+ * | \ / \ /
* | Block2 Block3
* \ | /
* \ | /
* This is a program as, e.g.,
*
* if () then
- * { Jmp label1; } // happens anyways
+ * { Jmp label1; } // happens anyways
* else
- * { Jmp label1; } // happens anyways
+ * { Jmp label1; } // happens anyways
* label1:
* return();
* Jmp label1;
owner = get_glob_type();
proc_main = new_type_method(id_from_str(METHODNAME, strlen(METHODNAME)),
- NRARGS, NRES);
+ NRARGS, NRES);
set_method_param_type(proc_main, 0, prim_t_int);
set_method_res_type(proc_main, 0, prim_t_int);
/**
* This file constructs the ir for the following pseudo-program:
*
-* main(int a) { // pos 0
-* int b = 1; // pos 1
-* int h; // pos 2
+* main(int a) { // pos 0
+* int b = 1; // pos 1
+* int h; // pos 2
*
* while (0 == 2) loop {
* h = a;
r = new_immBlock ();
add_in_edge (r, x);
x = new_Cond (new_Proj(new_Cmp(new_Const (mode_Is, new_tarval_from_long (0, mode_Is)),
- get_value(1, mode_Is)),
+ get_value(1, mode_Is)),
mode_b, Eq));
f = new_Proj (x, mode_X, 0);
t = new_Proj (x, mode_X, 1);