/** Creates a new pset of pointers using the default pointer compare function.
 *  @param slots  initial number of hash slots passed through to new_pset() —
 *                NOTE(review): presumably a size hint, confirm against new_pset docs */
#define pset_new_ptr(slots) new_pset(pset_default_ptr_cmp, slots)
/** Creates a new pset of pointers with a default initial size of 64 slots. */
#define pset_new_ptr_default() pset_new_ptr(64)
-/** The entry of a pset, representing an element pointer in the set and it's meta-information */
+/** The entry of a pset, representing an element pointer in the set and its meta-information */
typedef struct {
unsigned hash;
void *dptr;
* @param pset the pset
*
* @note
- * This does NOT delete the elements of this pset, just it's pointers!
+ * This does NOT delete the elements of this pset, just its pointers!
*/
FIRM_API void del_pset(pset *pset);
*/
typedef struct set set;
-/** The entry of a set, representing an element in the set and it's meta-information */
+/** The entry of a set, representing an element in the set and its meta-information */
typedef struct set_entry {
unsigned hash; /**< the hash value of the element */
size_t size; /**< the size of the element */
* Parameter
* arity number of predecessors
* **in array with predecessors
- * *mode The mode of it's inputs and output.
+ * *mode The mode of its inputs and output.
* Inputs:
* A Phi node has as many inputs as the block it belongs to.
* Each input points to a definition of the same value on a
* replaced by the Tuple operation so that the following Proj nodes have not to
* be changed. (They are hard to find due to the implementation with pointers
* in only one direction.) The Tuple node is smaller than any other
- * node, so that a node can be changed into a Tuple by just changing it's
+ * node, so that a node can be changed into a Tuple by just changing its
* opcode and giving it a new in array.
*
* Parameters
* ir_node *new_Id (ir_node *val, ir_mode *mode)
* ---------------------------------------------
*
- * The single output of the Id operation is it's input. Also needed
+ * The single output of the Id operation is its input. Also needed
* for optimizations.
*
*
* e.g. if there is only one definition of this value, but this
* definition reaches the current block on several different
* paths. This Phi node will be eliminated if optimizations are
- * turned on right after it's creation.
+ * turned on right after its creation.
* Requires current_block to be set correctly.
*
* There are two special routines for the global store:
* visited
* @param env - environment, passed to pre and post
*
- * This function Walks only over Block nodes in the graph. Has it's own visited
+ * This function Walks only over Block nodes in the graph. Has its own visited
* flag, so that it can be interleaved with the other walker.
* If a non-block node is passed, the walk starts at the block this node belongs to.
* If end is passed also visits kept alive blocks. Does not use the link field.
* with a Sel node the pointer to a thread local variable.
*
* - args The ir_node that produces the arguments of the method as
- * it's result. This is a Proj node on the fourth output of
+ * its result. This is a Proj node on the fourth output of
* the start node. This output is tagged as pn_Start_T_args.
*
* - proj_args The proj nodes of the args node.
* to point to this graph. Further it allocates the following nodes needed
* for every procedure:
*
- * - The start block containing a start node and Proj nodes for it's
+ * - The start block containing a start node and Proj nodes for its
* seven results (X, M, P, P, P, T, P).
* - The end block containing an end node. This block is not matured
* after executing new_ir_graph() as predecessors need to be added to it.
- * (Maturing a block means fixing it's number of predecessors.)
+ * (Maturing a block means fixing its number of predecessors.)
* - The current block, which is empty and also not matured.
*
* Further it enters the global store into the data structure of the start
* @param post walker function, executed after the predecessor of a node are visited
* @param env environment, passed to pre and post
*
- * This function Walks only over Block nodes in the graph. Has it's own visited
+ * This function Walks only over Block nodes in the graph. Has its own visited
* flag, so that it can be interleaved with the other walker.
* If a non-block node is passed, the walk starts at the block this node belongs to.
* If end is passed also visits kept alive blocks. Does not use the link field.
FIRM_API const char *get_irn_opname(const ir_node *node);
/** Get the ident for a string representation of the opcode. */
FIRM_API ident *get_irn_opident(const ir_node *node);
-/** If arg is an argument of the node, returns it's position, -1 otherwise */
+/** If arg is an argument of the node, returns its position, -1 otherwise */
FIRM_API int get_irn_pred_pos(ir_node *node, ir_node *arg);
/** Gets the visited counter of a node. */
FIRM_API ir_visited_t get_irn_visited(const ir_node *node);
*
* @param irg The graph whose loops will be processed
*
- * This function did not change the graph, only it's frame type.
+ * This function does not change the graph, only its frame type.
* The layout state of the frame type will be set to layout_undefined
* if entities were removed.
*/
*
* @param irg The graph whose frame type will be optimized
*
- * This function did not change the graph, only it's frame type.
+ * This function does not change the graph, only its frame type.
* The layout state of the frame type will be set to layout_undefined
* if entities were removed.
*/
FIRM_API void irg_out_walk(ir_node *node, irg_walk_func *pre,
irg_walk_func *post, void *env);
-/** Walks only over Block nodes in the graph. Has it's own visited
+/** Walks only over Block nodes in the graph. Has its own visited
flag, so that it can be interleaved with the other walker.
node must be either op_Block or mode_X. */
FIRM_API void irg_out_block_walk(ir_node *node, irg_walk_func *pre,
* @version $Id$
*
* This implements a set of pointers which allows to specify custom callbacks
- * for comparing and hashing it's elements.
+ * for comparing and hashing its elements.
*/
#include "config.h"
x[i] = i;
/* triangularize A */
- /* ie A has zeros below it's diagonal */
+ /* ie A has zeros below its diagonal */
for (col = 0; col < nsize - 1; ++col) {
big = 0;
/* find the largest left in LRH box */
return irg->caller_isbe != NULL ? rbitset_is_set(irg->caller_isbe, pos) : 0;
}
-/** Search the caller in the list of all callers and set it's backedge property. */
+/** Search the caller in the list of all callers and set its backedge property. */
static void set_irg_caller_backedge(ir_graph *irg, const ir_graph *caller)
{
size_t i, n_callers = get_irg_n_callers(irg);
* Add all method addresses in global new style initializers to the set.
*
* @note
- * We do NOT check the type here, just it it's an entity address.
+ * We do NOT check the type here, just if it's an entity address.
* The reason for this is code like:
*
* void *p = function;
* Add all method addresses in global initializers to the set.
*
* @note
- * We do NOT check the type here, just it it's an entity address.
+ * We do NOT check the type here, just if it's an entity address.
* The reason for this is code like:
*
* void *p = function;
{
ir_type *tp;
- /* ignore methods: these of course reference it's address
+ /* ignore methods: these of course reference their addresses
* TODO: remove this later once this incorrect self-initialisation is gone
*/
tp = get_entity_type(ent);
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/**
- * Return the effective use block of a node and it's predecessor on
+ * Return the effective use block of a node and its predecessor on
* position pos.
*
* @param node the node
/*
* The user of the user is dominated by our true/false
* block. So, create a copy of user WITH the constant
- * replacing it's pos'th input.
+ * replacing its pos'th input.
*
* This is always good for unop's and might be good
* for binops.
}
}
-/* walks only over extended Block nodes in the graph. Has it's own visited
+/* walks only over extended Block nodes in the graph. Has its own visited
flag, so that it can be interleaved with the other walker. */
void irg_extblock_walk(ir_extblk *blk, extbb_walk_func *pre, extbb_walk_func *post, void *env)
{
/*
* Bitfields can be constructed as Sels from its base address.
* As they have different entities, the disambiguator would find that they are
- * alias free. While this is true for it's values, it is false for the addresses
+ * alias free. While this is true for its values, it is false for the addresses
* (strictly speaking, the Sel's are NOT the addresses of the bitfields).
* So, skip those bitfield selecting Sel's.
*/
}
}
-/* Walks only over Block nodes in the graph. Has it's own visited
+/* Walks only over Block nodes in the graph. Has its own visited
flag, so that it can be interleaved with the other walker. */
void irg_out_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
void *env)
}
/**
- * Tries to bring node @p node and all it's neighbours to color @p tgt_col.
+ * Tries to bring node @p node and all its neighbours to color @p tgt_col.
* @return 1 if color @p col could be applied, 0 otherwise
*/
static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
/*
Node has not yet a fixed color and target color is admissible
- -> try to recolor node and it's affinity neighbours
+ -> try to recolor node and its affinity neighbours
*/
if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
col_cost_t *costs = env->single_cols[tgt_col];
case iro_Const:
tv = get_Const_tarval(init);
- /* it's a arithmetic value */
+ /* it's an arithmetic value */
emit_arith_tarval(tv, bytes);
return;
/**
* This type describes the stack layout.
* The stack is divided into 3 parts:
- * - arg_type: A struct type describing the stack arguments and it's order.
+ * - arg_type: A struct type describing the stack arguments and their order.
* - between_type: A struct type describing the stack layout between arguments
* and frame type. In architectures that put the return address
* automatically on the stack, the return address is put here.
}
/**
- * Compute the highest register pressure in a loop and it's sub-loops.
+ * Compute the highest register pressure in a loop and its sub-loops.
* @param loop_ana The loop ana object.
* @param loop The loop to compute pressure for.
* @param cls The register class to compute pressure for.
DEBUG_ONLY(static firm_dbg_module_t *dbg_constr;)
DEBUG_ONLY(static firm_dbg_module_t *dbg_permmove;)
-/** Associates an ir_node with it's copy and CopyKeep. */
+/** Associates an ir_node with its copy and CopyKeep. */
typedef struct {
ir_nodeset_t copies; /**< all non-spillable copies of this irn */
const arch_register_class_t *cls;
sched_add_before(skip_Proj(irn), cpy);
sched_add_after(skip_Proj(irn), keep);
- /* insert the other different and it's copies into the map */
+ /* insert the other different and its copies into the map */
entry = (op_copy_assoc_t*)ir_nodemap_get(op_set, other_different);
if (! entry) {
entry = OALLOC(&env->obst, op_copy_assoc_t);
DB((dbg_constr, LEVEL_1, "\n"));
- /* introduce the copies for the operand and it's copies */
+ /* introduce the copies for the operand and its copies */
be_ssa_construction_init(&senv, irg);
be_ssa_construction_add_copy(&senv, map_entry.node);
be_ssa_construction_add_copies(&senv, nodes, n);
/**
* must be called from peephole optimisations before a node will be killed
* and its users will be redirected to new_node.
- * so bepeephole can update it's internal state.
+ * so bepeephole can update its internal state.
*
- * Note: killing a node and rewiring os only allowed if new_node produces
+ * Note: killing a node and rewiring is only allowed if new_node produces
* the same registers as old_node.
*/
static void be_peephole_before_exchange(const ir_node *old_node,
void be_peephole_exchange(ir_node *old, ir_node *nw);
/**
- * Tries to optimize a beIncSp node with it's previous IncSP node.
+ * Tries to optimize a be_IncSP node with its previous IncSP node.
* Must be run from a be_peephole_opt() context.
*
* @param node a be_IncSP node
/* no predecessor -> empty set */
workset_clear(ws);
} else if (arity == 1) {
- /* one predecessor, copy it's end workset */
+ /* one predecessor, copy its end workset */
ir_node *pred_block = get_Block_cfgpred_block(block, 0);
block_info_t *pred_info = get_block_info(pred_block);
void be_insert_spills_reloads(spill_env_t *senv);
/**
- * There are 2 possibilities to spill a phi node: Only it's value, or replacing
+ * There are 2 possibilities to spill a phi node: Only its value, or replacing
* the whole phi-node with a memory phi. Normally only the value of a phi will
* be spilled unless you mark the phi with be_spill_phi.
* (Remember that each phi needs a register, so you have to spill phis when
* to their closest copy while introducing phis as necessary.
*
* Algorithm: Mark all blocks in the iterated dominance frontiers of the value
- * and it's copies. Link the copies ordered by dominance to the blocks. Then
+ * and its copies. Link the copies ordered by dominance to the blocks. Then
* we search for each use all definitions in the current block, if none is
* found, then we search one in the immediate dominator. If we are in a block
* of the dominance frontier, create a phi and do the same search for all
* to their closest copy while introducing phis as necessary.
*
* Algorithm: Mark all blocks in the iterated dominance frontiers of the value
- * and it's copies. Link the copies ordered by dominance to the blocks. Then
+ * and its copies. Link the copies ordered by dominance to the blocks. Then
* we search for each use all definitions in the current block, if none is
* found, then we search one in the immediate dominator. If we are in a block
* of the dominance frontier, create a phi and do the same search for all
/**
* Removes node from schedule if it is not used anymore. If irn is a mode_T node
- * all it's Projs are removed as well.
+ * all its Projs are removed as well.
* @param irn The irn to be removed from schedule
*/
static inline void try_kill(ir_node *node)
}
###
-# Returns the index of a given register within it's register class.
+# Returns the index of a given register within its register class.
# @return index or undef
###
sub get_reg_index {
};
/**
- * Compares two modules by comparing it's names
+ * Compares two modules by comparing their names
*/
static int module_cmp(const void *p1, const void *p2, size_t size)
{
return id_mangle_3(first, '.', scnd);
}
-/* returns a mangled name for a Win32 function using it's calling convention */
+/* returns a mangled name for a Win32 function using its calling convention */
ident *id_decorate_win32_c_fkt(const ir_entity *ent, ident *id)
{
ir_type *tp = get_entity_type(ent);
}
/**
- * Walker, allocates an array for all blocks and puts it's nodes non-floating
+ * Walker, allocates an array for all blocks and puts their non-floating
* nodes into this array.
*/
static void collect_node(ir_node *node, void *env)
fprintf(F, INTER_MEM_EDGE_ATTR);
}
-/** Print the vcg attributes for the edge from node from to it's to's input */
+/** Print the vcg attributes for the edge from node "from" to its "to"th input */
static void print_edge_vcgattr(FILE *F, ir_node *from, int to)
{
assert(from);
(void) ent;
}
-/** Dumps a type or entity and it's edges. */
+/** Dumps a type or entity and its edges. */
static void dump_type_info(type_or_ent tore, void *env)
{
FILE *F = (FILE*)env;
ir_graph *irg = get_irn_irg(tgt);
assert(info->out_count >= 0);
if (info->out_count == 0 && kind == EDGE_KIND_NORMAL) {
- /* tgt lost it's last user */
+ /* tgt lost its last user */
int i;
for (i = get_irn_arity(tgt) - 1; i >= -1; --i) {
/**
* Data flow optimization walker.
- * Optimizes all nodes and enqueue it's users
+ * Optimizes all nodes and enqueues their users
* if done.
*/
static void opt_walker(ir_node *n, void *env)
}
/* Allocates a list of nodes:
- - The start block containing a start node and Proj nodes for it's four
+ - The start block containing a start node and Proj nodes for its four
results (X, M, P, Tuple).
- The end block containing an end node. This block is not matured after
new_ir_graph as predecessors need to be added to it.
}
-/* walks only over Block nodes in the graph. Has it's own visited
+/* walks only over Block nodes in the graph. Has its own visited
flag, so that it can be interleaved with the other walker. */
void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
{
ir_tarval *tv;
if (a == b) {
- n = a; /* Or has it's own neutral element */
+ n = a; /* idempotence */
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_OR);
return n;
}
- /* constants are cormalized to right, check this site first */
+ /* constants are normalized to right, check this side first */
tv = value_of(b);
if (tarval_is_null(tv)) {
n = a;
ir_tarval *tv;
if (a == b) {
- n = a; /* And has it's own neutral element */
+ n = a; /* idempotence */
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_AND);
return n;
}
- /* constants are normalized to right, check this site first */
+ /* constants are normalized to right, check this side first */
tv = value_of(b);
if (tarval_is_all_one(tv)) {
n = a;
first_val = get_Phi_pred(n, i);
if ( (first_val != n) /* not self pointer */
#if 0
- /* BEWARE: when the if is changed to 1, Phi's will ignore it's Bad
- * predecessors. Then, Phi nodes in dead code might be removed, causing
- * nodes pointing to themself (Add's for instance).
- * This is really bad and causes endless recursions in several
- * code pathes, so we do NOT optimize such a code.
+ /* BEWARE: when the if is changed to 1, Phis will ignore their Bad
+ * predecessors. Then, Phi nodes in unreachable code might be removed,
+ * causing nodes pointing to themselves (Adds for instance).
+ * This is really bad and causes endless recursion on several
+ * code paths, so we do NOT optimize such code.
* This is not that bad as it sounds, optimize_cf() removes bad control flow
* (and bad Phi predecessors), so live code is optimized later.
*/
} /* equivalent_node_Proj_Store */
/**
- * Does all optimizations on nodes that must be done on it's Proj's
+ * Does all optimizations on nodes that must be done on its Projs
* because of creating new nodes.
*/
static ir_node *equivalent_node_Proj(ir_node *proj)
} /* transform_node_Proj_Bound */
/**
- * Does all optimizations on nodes that must be done on it's Proj's
+ * Does all optimizations on nodes that must be done on its Projs
* because of creating new nodes.
*/
static ir_node *transform_node_Proj(ir_node *proj)
#if 0
/* Propagating Unknowns here seems to be a bad idea, because
sometimes we need a node as an input and did not want that
- it kills it's user.
+ it kills its user.
However, it might be useful to move this into a later phase
(if you think that optimizing such code is useful). */
if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
size_t node_size;
/*
- * we MUST copy the node here temporary, because it's still
+ * we MUST copy the node here temporarily, because it's still
* needed for DBG_OPT_CSTEVAL
*/
node_size = offsetof(ir_node, attr) + n->op->attr_size;
}
/* normally, we would create a Bad block here, but this must be
- * prevented, so just set it's cf to Bad.
+ * prevented, so just set its cf to Bad.
*/
if (is_Block_dead(new_block)) {
ir_graph *irg = get_irn_irg(node);
* all "living" nodes into a living block. That's why we must
* move nodes in dead block with "live" successors into a valid
* block.
- * We move them just into the same block as it's successor (or
+ * We move them just into the same block as its successor (or
* in case of a Phi into the effective use block). For Phi successors,
* this may still be a dead block, but then there is no real use, as
* the control flow will be dead later.
}
/**
- * Move n to a block with less loop depth than it's current block. The
+ * Move n to a block with less loop depth than its current block. The
* new block must be dominated by early.
*
* @param n the node that should be moved
irn = y->node;
if (get_irn_mode(irn) == mode_T) {
/* mode_T nodes always produce tarval_bottom, so we must explicitly
- add it's Proj's to get constant evaluation to work */
+ * add its Projs to get constant evaluation to work */
int i;
for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
dump_partition("split_by", X);
if (X->n_leader == 1) {
- /* we have only one leader, no need to split, just check it's type */
+ /* we have only one leader, no need to split, just check its type */
node_t *x = get_first_node(X);
X->type_is_T_or_C = x->type.tv == tarval_top || is_con(x->type);
return;
list = (ir_entity*)get_entity_link(ent);
free_entity(ent);
}
- /* we changed the frame type, it's layout should be redefined */
+ /* we changed the frame type, its layout should be redefined */
set_type_state(frame_tp, layout_undefined);
}
irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
/* call was inlined, Phi/Projs for current graph must be recomputed */
phiproj_computed = 0;
- /* callee was inline. Append it's call list. */
+ /* callee was inlined. Append its call list. */
env->got_inline = 1;
--env->n_call_nodes;
append_call_list(env, callee_env, entry->loop_depth);
/* remove it from the caller list */
list_del(&curr_call->list);
- /* callee was inline. Append it's call list. */
+ /* callee was inlined. Append its call list. */
env->got_inline = 1;
--env->n_call_nodes;
*/
static void update_Phi_memop(memop_t *m)
{
- /* the Phi is it's own mem */
+ /* the Phi is its own mem */
m->mem = m->node;
} /* update_Phi_memop */
env.process_scc = process_scc;
/* Clear all links and move Proj nodes into the
- the same block as it's predecessors.
- This can improve the placement of new nodes.
+ * the same block as its predecessors.
+ * This can improve the placement of new nodes.
*/
projs_moved = 0;
irg_walk_graph(irg, NULL, clear_and_fix, &projs_moved);
* with the Return, otherwise they are dead (because the Return leaves
* the graph, so no more users of the other nodes can exist.)
*
- * We can move a Return, if it's predecessors are Phi nodes or
+ * We can move a Return, if its predecessors are Phi nodes or
* comes from another block. In the latter case, it is always possible
* to move the Return one block up, because the predecessor block must
* dominate the Return block (SSA) and then it dominates the predecessor
/*
* Returns non-zero, if the address of an entity
- * represented by a Sel node (or it's successor Sels) is taken.
+ * represented by a Sel node (or its successor Sels) is taken.
*/
int is_address_taken(ir_node *sel)
{
*
* This function finds variables on the (members of the) frame type
* that can be scalar replaced, because their address is never taken.
- * If such a variable is found, it's entity link will hold a list of all
+ * If such a variable is found, its entity link will hold a list of all
* Sel nodes, that selects the atomic fields of this entity.
* Otherwise, the link will be ADDRESS_TAKEN or NULL.
*
}
/**
- * Return a path from the Sel node sel to it's root.
+ * Return a path from the Sel node "sel" to its root.
*
* @param sel the Sel node
* @param len the length of the path so far
/**
* Returns non-zero, if the address of an entity
- * represented by a Sel node (or it's successor Sels) is taken.
+ * represented by a Sel node (or its successor Sels) is taken.
*
* @param sel the Sel node
*/
res->attr.code_attr.label = (ir_label_t) -1;
}
- /* Remember entity in it's owner. */
+ /* Remember entity in its owner. */
if (owner != NULL)
add_compound_member(owner, res);
/**
* Returns a new type opcode.
*
- * Allocates a new tp_op struct and initializes it's fields with
+ * Allocates a new tp_op struct and initializes its fields with
* the passed values. This function is only to be used during
* initialization of the library.
*
FIRM_DBG_REGISTER(dbg, "firm.tr.finalization");
- /* types must be visited before it's entities */
+ /* types must be visited before their entities */
type_walk(do_finalization, NULL, get_glob_type());
}
when fixing the layout of this class. Size must be
given in bytes. */
unsigned align; /**< Alignment of an ir_entity of this type. This should be
- set according to the source language needs. If not set it's
+ set according to the source language needs. If not set, it's
calculated automatically by get_type_alignment().
Alignment must be given in bytes. */
ir_mode *mode; /**< The mode for atomic types */
following Proj nodes have not to be changed. (They are hard to find due to
the implementation with pointers in only one direction.) The Tuple node is
smaller than any other node, so that a node can be changed into a Tuple by
- just changing it's opcode and giving it a new in array."""
+ just changing its opcode and giving it a new in array."""
arity = "variable"
mode = "mode_T"
pinned = "no"