/*
- * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
- *
* This file is part of libFirm.
- *
- * This file may be distributed and/or modified under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation and appearing in the file LICENSE.GPL included in the
- * packaging of this file.
- *
- * Licensees holding valid libFirm Professional Edition licenses may use
- * this file in accordance with the libFirm Commercial License.
- * Agreement provided with the Software.
- *
- * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
- * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE.
+ * Copyright (C) 2012 University of Karlsruhe.
*/
/**
#include "iredges_t.h"
#include "irgmod.h"
#include "irgwalk.h"
-#include "irprintf_t.h"
+#include "irprintf.h"
#include "irgopt.h"
#include "iropt_t.h"
#include "irtools.h"
#include "be.h"
#include "beabi.h"
+#include "beabihelper.h"
#include "bearch.h"
#include "benode.h"
#include "belive_t.h"
}
/**
- * Check if the given register is callee save, ie. will be saved by the callee.
+ * Check if the given register is callee save, i.e. will be saved by the callee.
*/
static bool arch_register_is_callee_save(
const arch_env_t *arch_env,
}
/**
- * Check if the given register is caller save, ie. must be saved by the caller.
+ * Check if the given register is caller save, i.e. must be saved by the caller.
*/
static bool arch_register_is_caller_save(
const arch_env_t *arch_env,
* checking */
continue;
}
- if (arch_register_is_caller_save(arch_env, reg)) {
- if (!(reg->type & arch_register_type_ignore)) {
- ARR_APP1(const arch_register_t*, destroyed_regs, reg);
- }
- }
+ if (arch_register_is_caller_save(arch_env, reg))
+ ARR_APP1(const arch_register_t*, destroyed_regs, reg);
}
}
continue;
foreach_out_edge(irn, res_edge) {
- int proj;
- ir_node *res = get_edge_src_irn(res_edge);
-
- assert(is_Proj(res));
-
- proj = get_Proj_proj(res);
+ ir_node *const res = get_edge_src_irn(res_edge);
+ long const proj = get_Proj_proj(res);
assert(proj < n_res);
assert(res_projs[proj] == NULL);
res_projs[proj] = res;
throws_exception = ir_throws_exception(irn);
if (env->call->flags.call_has_imm && is_SymConst(call_ptr)) {
/* direct call */
- low_call = be_new_Call(dbgi, irg, bl, curr_mem, sp->single_req, curr_sp,
+ low_call = be_new_Call(dbgi, bl, curr_mem, sp->single_req, curr_sp,
sp->single_req, curr_sp,
n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
n_ins, in, get_Call_type(irn));
be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
} else {
/* indirect call */
- low_call = be_new_Call(dbgi, irg, bl, curr_mem, sp->single_req, curr_sp,
+ low_call = be_new_Call(dbgi, bl, curr_mem, sp->single_req, curr_sp,
sp->reg_class->class_req, call_ptr,
n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
n_ins, in, get_Call_type(irn));
foreach_out_edge(alloc, edge) {
ir_node *irn = get_edge_src_irn(edge);
- assert(is_Proj(irn));
switch (get_Proj_proj(irn)) {
case pn_Alloc_M:
alloc_mem = irn;
return res;
}
-typedef struct {
- const arch_register_t *reg;
- ir_node *irn;
-} reg_node_map_t;
-
static int cmp_regs(const void *a, const void *b)
{
- const reg_node_map_t *p = (const reg_node_map_t*)a;
- const reg_node_map_t *q = (const reg_node_map_t*)b;
+ arch_register_t const *const p = *(arch_register_t const**)a;
+ arch_register_t const *const q = *(arch_register_t const**)b;
- if (p->reg->reg_class == q->reg->reg_class)
- return p->reg->index - q->reg->index;
+ if (p->reg_class == q->reg_class)
+ return p->index - q->index;
else
- return p->reg->reg_class < q->reg->reg_class ? -1 : +1;
+ return p->reg_class < q->reg_class ? -1 : +1;
}
-static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
+static void reg_map_to_arr(arch_register_t const **const res, pmap *const reg_map)
{
pmap_entry *ent;
size_t n = pmap_count(reg_map);
size_t i = 0;
foreach_pmap(reg_map, ent) {
- res[i].reg = (const arch_register_t*)ent->key;
- res[i].irn = (ir_node*)ent->value;
- i++;
+ res[i++] = (arch_register_t const*)ent->key;
}
qsort(res, n, sizeof(res[0]), cmp_regs);
pmap *reg_map = pmap_create();
ir_node *keep = pmap_get(ir_node, env->keep_map, bl);
size_t in_max;
- ir_node *ret;
int i, n;
- unsigned pop;
ir_node **in;
ir_node *stack;
const arch_register_t **regs;
/* Add uses of the callee save registers. */
foreach_pmap(env->regs, ent) {
const arch_register_t *reg = (const arch_register_t*)ent->key;
- if ((reg->type & arch_register_type_ignore) || arch_register_is_callee_save(arch_env, reg))
+ if (arch_register_is_callee_save(arch_env, reg))
pmap_insert(reg_map, ent->key, ent->value);
}
/* The in array for the new back end return is now ready. */
dbg_info *const dbgi = get_irn_dbg_info(irn);
- /* we have to pop the shadow parameter in in case of struct returns */
- pop = call->pop;
- ret = be_new_Return(dbgi, irg, bl, n_res, pop, n, in);
+ ir_node *const ret = be_new_Return(dbgi, bl, n_res, call->pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
for (i = 0; i < n; ++i) {
int i, n;
unsigned j;
- reg_node_map_t *rm;
const arch_register_t *fp_reg;
ir_node *frame_pointer;
ir_node *start_bl;
* Note, that if a register corresponds to an argument, the regs map
* contains the old Proj from start for that argument.
*/
- rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
- reg_map_to_arr(rm, env->regs);
+ arch_register_t const **const regs = ALLOCAN(arch_register_t const*, pmap_count(env->regs));
+ reg_map_to_arr(regs, env->regs);
for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
- const arch_register_t *reg = rm[i].reg;
+ const arch_register_t *reg = regs[i];
ir_mode *mode = reg->reg_class->mode;
long nr = i;
arch_register_req_type_t add_type = arch_register_req_type_none;
pmap_destroy(env.regs);
}
-void be_put_allocatable_regs(const ir_graph *irg,
- const arch_register_class_t *cls, bitset_t *bs)
-{
- be_irg_t *birg = be_birg_from_irg(irg);
- unsigned *allocatable_regs = birg->allocatable_regs;
- unsigned i;
-
- assert(bitset_size(bs) == cls->n_regs);
- bitset_clear_all(bs);
- for (i = 0; i < cls->n_regs; ++i) {
- const arch_register_t *reg = &cls->regs[i];
- if (rbitset_is_set(allocatable_regs, reg->global_index))
- bitset_set(bs, i);
- }
-}
-
/**
 * Count how many registers of the given class are allocatable in the
 * given graph.
 *
 * Builds a temporary raw bitset of the allocatable registers on the stack
 * and returns its population count.
 *
 * @param irg  the graph whose allocatable-register set is queried
 * @param cls  the register class to count within
 * @return     number of allocatable registers of @p cls for @p irg
 */
unsigned be_get_n_allocatable_regs(const ir_graph *irg,
                                   const arch_register_class_t *cls)
{
	unsigned *const bs = rbitset_alloca(cls->n_regs);
	be_get_allocatable_regs(irg, cls, bs);
	return rbitset_popcount(bs, cls->n_regs);
}
-void be_set_allocatable_regs(const ir_graph *irg,
- const arch_register_class_t *cls,
- unsigned *raw_bitset)
+void be_get_allocatable_regs(ir_graph const *const irg, arch_register_class_t const *const cls, unsigned *const raw_bitset)
{
be_irg_t *birg = be_birg_from_irg(irg);
unsigned *allocatable_regs = birg->allocatable_regs;