/**
* Transform generic IR-nodes into TEMPLATE machine instructions
*/
-void TEMPLATE_transform_graph(TEMPLATE_code_gen_t *cg)
+void TEMPLATE_transform_graph(ir_graph *irg)
{
TEMPLATE_register_transformers();
- be_transform_graph(cg->irg, NULL);
+ be_transform_graph(irg, NULL);
}
void TEMPLATE_init_transform(void)
void TEMPLATE_init_transform(void);
-void TEMPLATE_transform_graph(TEMPLATE_code_gen_t *cg);
+void TEMPLATE_transform_graph(ir_graph *irg);
#endif
* Transforms the standard firm graph into
* a TEMPLATE firm graph
*/
-static void TEMPLATE_prepare_graph(void *self)
+static void TEMPLATE_prepare_graph(ir_graph *irg)
{
- TEMPLATE_code_gen_t *cg = self;
-
/* transform nodes into assembler instructions */
- TEMPLATE_transform_graph(cg);
+ TEMPLATE_transform_graph(irg);
}
/**
* Called immediately before emit phase.
*/
-static void TEMPLATE_finish_irg(void *self)
+static void TEMPLATE_finish_irg(ir_graph *irg)
{
- (void) self;
+ (void) irg;
}
-static void TEMPLATE_before_ra(void *self)
+static void TEMPLATE_before_ra(ir_graph *irg)
{
- (void) self;
+ (void) irg;
/* Some stuff you need to do after scheduling but before register allocation */
}
-static void TEMPLATE_after_ra(void *self)
+static void TEMPLATE_after_ra(ir_graph *irg)
{
- (void) self;
+ (void) irg;
/* Some stuff you need to do immediately after register allocation */
}
-
-
-/**
- * Emits the code, closes the output file and frees
- * the code generator interface.
- */
-static void TEMPLATE_emit_and_done(void *self)
+static void TEMPLATE_init_graph(ir_graph *irg)
{
- TEMPLATE_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
- TEMPLATE_emit_routine(irg);
-
- /* de-allocate code generator */
- free(cg);
-}
-
-static void *TEMPLATE_cg_init(ir_graph *irg);
-
-static const arch_code_generator_if_t TEMPLATE_code_gen_if = {
- TEMPLATE_cg_init,
- NULL, /* get_pic_base hook */
- NULL, /* before abi introduce hook */
- TEMPLATE_prepare_graph,
- NULL, /* spill hook */
- TEMPLATE_before_ra, /* before register allocation hook */
- TEMPLATE_after_ra, /* after register allocation hook */
- TEMPLATE_finish_irg,
- TEMPLATE_emit_and_done
-};
-
-/**
- * Initializes the code generator.
- */
-static void *TEMPLATE_cg_init(ir_graph *irg)
-{
- const arch_env_t *arch_env = be_get_irg_arch_env(irg);
- TEMPLATE_isa_t *isa = (TEMPLATE_isa_t *) arch_env;
- TEMPLATE_code_gen_t *cg = XMALLOC(TEMPLATE_code_gen_t);
-
- cg->impl = &TEMPLATE_code_gen_if;
- cg->irg = irg;
- cg->isa = isa;
-
- return (arch_code_generator_t *)cg;
+ (void) irg;
}
return 1;
}
-/**
- * Initializes the code generator interface.
- */
-static const arch_code_generator_if_t *TEMPLATE_get_code_generator_if(
- void *self)
-{
- (void) self;
- return &TEMPLATE_code_gen_if;
-}
-
list_sched_selector_t TEMPLATE_sched_selector;
/**
TEMPLATE_get_reg_class,
TEMPLATE_get_reg_class_for_mode,
TEMPLATE_get_call_abi,
- TEMPLATE_get_code_generator_if,
TEMPLATE_get_list_sched_selector,
TEMPLATE_get_ilp_sched_selector,
TEMPLATE_get_reg_class_alignment,
TEMPLATE_get_backend_irg_list,
NULL, /* mark remat */
TEMPLATE_parse_asm_constraint,
- TEMPLATE_is_valid_clobber
+ TEMPLATE_is_valid_clobber,
+
+ TEMPLATE_init_graph,
+ NULL, /* get_pic_base */
+ NULL, /* before_abi */
+ TEMPLATE_prepare_graph,
+ TEMPLATE_before_ra,
+ TEMPLATE_after_ra,
+ TEMPLATE_finish_irg,
+ TEMPLATE_emit_routine,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_TEMPLATE);
#include "../beemitter.h"
#include "set.h"
-typedef struct TEMPLATE_isa_t TEMPLATE_isa_t;
-typedef struct TEMPLATE_code_gen_t TEMPLATE_code_gen_t;
-typedef struct TEMPLATE_transform_env_t TEMPLATE_transform_env_t;
-
-struct TEMPLATE_code_gen_t {
- const arch_code_generator_if_t *impl; /**< implementation */
- ir_graph *irg; /**< current irg */
- TEMPLATE_isa_t *isa; /**< the isa instance */
-};
-
-struct TEMPLATE_isa_t {
+typedef struct TEMPLATE_isa_t {
arch_env_t base; /**< must be derived from arch_isa */
-};
-
-/**
- * this is a struct to minimize the number of parameters
- * for transformation walker
- */
-struct TEMPLATE_transform_env_t {
- dbg_info *dbg; /**< The node debug info */
- ir_graph *irg; /**< The irg, the node should be created in */
- ir_node *block; /**< The block, the node should belong to */
- ir_node *irn; /**< The irn, to be transformed */
- ir_mode *mode; /**< The mode of the irn */
-};
+} TEMPLATE_isa_t;
#endif
/**
* Main driver
*/
-void amd64_gen_routine(const amd64_code_gen_t *cg, ir_graph *irg)
+void amd64_gen_routine(ir_graph *irg)
{
ir_entity *entity = get_irg_entity(irg);
ir_node **blk_sched;
int i, n;
- (void)cg;
/* register all emitter functions */
amd64_register_emitters();
int get_amd64_reg_nr(ir_node *irn, int posi, int in_out);
const char *get_amd64_in_reg_name(ir_node *irn, int pos);
-
-void amd64_gen_routine(const amd64_code_gen_t *cg, ir_graph *irg);
+void amd64_gen_routine(ir_graph *irg);
#endif
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-/** holds the current code generator during transformation */
-static amd64_code_gen_t *env_cg;
-
-///* its enough to have those once */
-//static ir_node *nomem, *noreg_GP;
-
/* Some support functions: */
static inline int mode_needs_gp_reg(ir_mode *mode)
/* Boilerplate code for transformation: */
-static void amd64_pretransform_node(void)
-{
- amd64_code_gen_t *cg = env_cg;
- (void) cg;
-
-// nomem = get_irg_no_mem(current_ir_graph);
-}
-
static void amd64_register_transformers(void)
{
be_start_transform_setup();
be_set_transform_function(op_Minus, gen_Minus);
}
-
-void amd64_transform_graph(amd64_code_gen_t *cg)
+void amd64_transform_graph(ir_graph *irg)
{
amd64_register_transformers();
- env_cg = cg;
- be_transform_graph(cg->irg, amd64_pretransform_node);
+ be_transform_graph(irg, NULL);
}
void amd64_init_transform(void)
void amd64_init_transform(void);
-void amd64_transform_graph(amd64_code_gen_t *cg);
+void amd64_transform_graph(ir_graph *irg);
#endif
* Transforms the standard firm graph into
* an amd64 firm graph
*/
-static void amd64_prepare_graph(void *self)
+static void amd64_prepare_graph(ir_graph *irg)
{
- amd64_code_gen_t *cg = self;
+ amd64_irg_data_t *irg_data = amd64_get_irg_data(irg);
+ amd64_transform_graph(irg);
- amd64_transform_graph (cg);
-
- if (cg->dump)
- dump_ir_graph(cg->irg, "transformed");
+ if (irg_data->dump)
+ dump_ir_graph(irg, "transformed");
}
/**
* Called immediately before emit phase.
*/
-static void amd64_finish_irg(void *self)
+static void amd64_finish_irg(ir_graph *irg)
{
- amd64_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
- dump_ir_graph(irg, "amd64-finished");
+ (void) irg;
}
-static void amd64_before_ra(void *self)
+static void amd64_before_ra(ir_graph *irg)
{
- amd64_code_gen_t *cg = self;
-
- be_sched_fix_flags(cg->irg, &amd64_reg_classes[CLASS_amd64_flags],
- NULL, NULL);
+ be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}
}
}
-static void amd64_after_ra(void *self)
+static void amd64_after_ra(ir_graph *irg)
{
- amd64_code_gen_t *cg = self;
- be_coalesce_spillslots(cg->irg);
+ be_coalesce_spillslots(irg);
- irg_block_walk_graph(cg->irg, NULL, amd64_after_ra_walker, NULL);
+ irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);
}
-
-/**
- * Emits the code, closes the output file and frees
- * the code generator interface.
- */
-static void amd64_emit_and_done(void *self)
-{
- amd64_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
- amd64_gen_routine(cg, irg);
-
- /* de-allocate code generator */
- free(cg);
-}
-
-static void *amd64_cg_init(ir_graph *irg);
-
-static const arch_code_generator_if_t amd64_code_gen_if = {
- amd64_cg_init,
- NULL, /* get_pic_base hook */
- NULL, /* before abi introduce hook */
- amd64_prepare_graph,
- NULL, /* spill hook */
- amd64_before_ra, /* before register allocation hook */
- amd64_after_ra, /* after register allocation hook */
- amd64_finish_irg,
- amd64_emit_and_done
-};
-
/**
* Initializes the code generator.
*/
-static void *amd64_cg_init(ir_graph *irg)
+static void amd64_init_graph(ir_graph *irg)
{
- const arch_env_t *arch_env = be_get_irg_arch_env(irg);
- amd64_isa_t *isa = (amd64_isa_t *) arch_env;
- amd64_code_gen_t *cg = XMALLOC(amd64_code_gen_t);
-
- cg->impl = &amd64_code_gen_if;
- cg->irg = irg;
- cg->isa = isa;
- cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
+ struct obstack *obst = be_get_be_obst(irg);
+ amd64_irg_data_t *irg_data = OALLOCZ(obst, amd64_irg_data_t);
+ irg_data->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
- return (arch_code_generator_t *)cg;
+ be_birg_from_irg(irg)->isa_link = irg_data;
}
/**
* Used to create per-graph unique pseudo nodes.
*/
-static inline ir_node *create_const(amd64_code_gen_t *cg, ir_node **place,
+static inline ir_node *create_const(ir_graph *irg, ir_node **place,
create_const_node_func func,
const arch_register_t* reg)
{
if (*place != NULL)
return *place;
- block = get_irg_start_block(cg->irg);
+ block = get_irg_start_block(irg);
res = func(NULL, block);
arch_set_irn_register(res, reg);
*place = res;
return 1;
}
-/**
- * Initializes the code generator interface.
- */
-static const arch_code_generator_if_t *amd64_get_code_generator_if(
- void *self)
-{
- (void) self;
- return &amd64_code_gen_if;
-}
-
list_sched_selector_t amd64_sched_selector;
/**
amd64_get_reg_class,
amd64_get_reg_class_for_mode,
amd64_get_call_abi,
- amd64_get_code_generator_if,
amd64_get_list_sched_selector,
amd64_get_ilp_sched_selector,
amd64_get_reg_class_alignment,
amd64_get_backend_irg_list,
NULL, /* mark remat */
amd64_parse_asm_constraint,
- amd64_is_valid_clobber
+ amd64_is_valid_clobber,
+
+ amd64_init_graph,
+ NULL, /* get_pic_base */
+ NULL, /* before_abi */
+ amd64_prepare_graph,
+ amd64_before_ra,
+ amd64_after_ra,
+ amd64_finish_irg,
+ amd64_gen_routine,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64);
#include "set.h"
typedef struct amd64_isa_t amd64_isa_t;
-typedef struct amd64_code_gen_t amd64_code_gen_t;
typedef struct amd64_transform_env_t amd64_transform_env_t;
-struct amd64_code_gen_t {
- const arch_code_generator_if_t *impl; /**< implementation */
- ir_graph *irg; /**< current irg */
- amd64_isa_t *isa; /**< the isa instance */
- char dump; /**< set to 1 if graphs should be dumped */
- ir_node *noreg_gp; /**< unique NoReg_GP node */
-};
+typedef struct amd64_irg_data_t {
+ ir_graph *irg; /**< current irg */
+ amd64_isa_t *isa; /**< the isa instance */
+ char dump; /**< set to 1 if graphs should be dumped */
+ ir_node *noreg_gp; /**< unique NoReg_GP node */
+} amd64_irg_data_t;
struct amd64_isa_t {
arch_env_t base; /**< must be derived from arch_isa */
ir_mode *mode; /**< The mode of the irn */
};
-ir_node *amd64_new_NoReg_gp(amd64_code_gen_t *cg);
+static inline amd64_irg_data_t *amd64_get_irg_data(const ir_graph *irg)
+{
+ return (amd64_irg_data_t*) be_birg_from_irg(irg)->isa_link;
+}
+
+ir_node *amd64_new_NoReg_gp(ir_graph *irg);
#endif
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-static const arm_code_gen_t *cg;
-static set *sym_or_tv;
+static set *sym_or_tv;
+static arm_isa_t *isa;
/**
* Returns the register at in position pos.
}
if (mode_is_float(mode)) {
- if (USE_FPA(cg->isa)) {
+ if (USE_FPA(isa)) {
be_emit_cstring("\tmvf");
be_emit_char(' ');
arm_emit_dest_register(irn, 0);
int n_cfgpreds;
int need_label;
int i, arity;
- ir_exec_freq *exec_freq = be_get_irg_exec_freq(cg->irg);
+ ir_graph *irg = get_irn_irg(block);
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
need_label = 0;
n_cfgpreds = get_Block_n_cfgpreds(block);
return p1->u.generic != p2->u.generic;
}
-void arm_gen_routine(const arm_code_gen_t *arm_cg, ir_graph *irg)
+void arm_gen_routine(ir_graph *irg)
{
- ir_node **blk_sched;
- int i, n;
- ir_node *last_block = NULL;
- ir_entity *entity = get_irg_entity(irg);
+ ir_node *last_block = NULL;
+ ir_entity *entity = get_irg_entity(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ir_node **blk_sched;
+ int i, n;
- cg = arm_cg;
+ isa = (arm_isa_t*) arch_env;
sym_or_tv = new_set(cmp_sym_or_tv, 8);
be_gas_elf_type_char = '%';
void arm_emit_load_mode(const ir_node *node);
void arm_emit_store_mode(const ir_node *node);
-void arm_gen_routine(const arm_code_gen_t *cg, ir_graph *irg);
+void arm_gen_routine(ir_graph *irg);
void arm_init_emitter(void);
#include "arm_nodes_attr.h"
#include "arm_new_nodes.h"
-static arm_code_gen_t *cg;
-
static unsigned arm_ror(unsigned v, unsigned ror)
{
return (v << (32 - ror)) | (v >> ror);
}
/* Perform peephole-optimizations. */
-void arm_peephole_optimization(arm_code_gen_t *new_cg)
+void arm_peephole_optimization(ir_graph *irg)
{
- cg = new_cg;
-
/* register peephole optimizations */
clear_irp_opcodes_generic_func();
register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
register_peephole_optimisation(op_arm_Ldr, peephole_arm_Str_Ldr);
register_peephole_optimisation(op_arm_FrameAddr, peephole_arm_FrameAddr);
- be_peephole_opt(cg->irg);
+ be_peephole_opt(irg);
}
* Performs Peephole Optimizations an a graph.
*
* @param irg the graph
- * @param cg the code generator object
*/
-void arm_peephole_optimization(arm_code_gen_t *cg);
+void arm_peephole_optimization(ir_graph *irg);
-#endif /* FIRM_BE_ARM_ARM_OPTIMIZE_H */
+#endif
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-/** hold the current code generator during transformation */
-static arm_code_gen_t *env_cg;
-
static const arch_register_t *sp_reg = &arm_gp_regs[REG_SP];
static ir_mode *mode_gp;
static ir_mode *mode_fp;
static beabi_helper_env_t *abihelper;
static calling_convention_t *cconv = NULL;
+static arm_isa_t *isa;
static pmap *node_to_stack;
return new_op;
if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
if (mode_is_float(src_mode)) {
if (mode_is_float(dst_mode)) {
/* from float to float */
return new_bd_arm_FltX(dbg, block, new_op, dst_mode);
}
}
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
panic("VFP not supported yet");
} else {
panic("Softfloat not supported yet");
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *new_op1 = be_transform_node(op1);
ir_node *new_op2 = be_transform_node(op2);
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
return new_bd_arm_Adf(dbgi, block, new_op1, new_op2, mode);
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
dbg_info *dbg = get_irn_dbg_info(node);
if (mode_is_float(mode)) {
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
return new_bd_arm_Muf(dbg, block, new_op1, new_op2, mode);
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
assert(mode != mode_E && "IEEE Extended FP not supported");
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
return new_bd_arm_Dvf(dbg, block, new_op1, new_op2, mode);
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
dbg_info *dbgi = get_irn_dbg_info(node);
if (mode_is_float(mode)) {
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
return new_bd_arm_Suf(dbgi, block, new_op1, new_op2, mode);
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
ir_mode *mode = get_irn_mode(node);
if (mode_is_float(mode)) {
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
return new_bd_arm_Mvf(dbgi, block, op, mode);
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
ir_node *new_load = NULL;
if (mode_is_float(mode)) {
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
new_load = new_bd_arm_Ldf(dbgi, block, new_ptr, new_mem, mode,
NULL, 0, 0, false);
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
ir_node *new_store = NULL;
if (mode_is_float(mode)) {
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
new_store = new_bd_arm_Stf(dbgi, block, new_ptr, new_val,
new_mem, mode, NULL, 0, 0, false);
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
dbg_info *dbg = get_irn_dbg_info(node);
if (mode_is_float(mode)) {
- if (USE_FPA(env_cg->isa)) {
+ if (USE_FPA(isa)) {
tarval *tv = get_Const_tarval(node);
node = new_bd_arm_fConst(dbg, block, tv);
be_dep_on_frame(node);
return node;
- } else if (USE_VFP(env_cg->isa)) {
+ } else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
typedef ir_node *(*create_const_node_func)(dbg_info *db, ir_node *block);
-static inline ir_node *create_const(ir_node **place,
+static inline ir_node *create_const(ir_graph *irg, ir_node **place,
create_const_node_func func,
const arch_register_t* reg)
{
if (*place != NULL)
return *place;
- block = get_irg_start_block(env_cg->irg);
+ block = get_irg_start_block(irg);
res = func(NULL, block);
arch_set_irn_register(res, reg);
*place = res;
/**
* Transform a Firm graph into an ARM graph.
*/
-void arm_transform_graph(arm_code_gen_t *cg)
+void arm_transform_graph(ir_graph *irg)
{
static int imm_initialized = 0;
- ir_graph *irg = cg->irg;
ir_entity *entity = get_irg_entity(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
ir_type *frame_type;
mode_gp = mode_Iu;
imm_initialized = 1;
}
arm_register_transformers();
- env_cg = cg;
+
+ isa = (arm_isa_t*) arch_env;
node_to_stack = pmap_create();
cconv = arm_decide_calling_convention(get_entity_type(entity));
create_stacklayout(irg);
- be_transform_graph(cg->irg, NULL);
+ be_transform_graph(irg, NULL);
be_abihelper_finish(abihelper);
abihelper = NULL;
/**
* Transform a Firm graph into an ARM graph.
*/
-void arm_transform_graph(arm_code_gen_t *cg);
+void arm_transform_graph(ir_graph *irg);
void arm_init_transform(void);
* Transforms the standard Firm graph into
* an ARM firm graph.
*/
-static void arm_prepare_graph(void *self)
+static void arm_prepare_graph(ir_graph *irg)
{
- arm_code_gen_t *cg = self;
-
/* transform nodes into assembler instructions */
- arm_transform_graph(cg);
+ arm_transform_graph(irg);
/* do local optimizations (mainly CSE) */
- local_optimize_graph(cg->irg);
-
- if (cg->dump)
- dump_ir_graph(cg->irg, "transformed");
+ local_optimize_graph(irg);
/* do code placement, to optimize the position of constants */
- place_code(cg->irg);
-
- if (cg->dump)
- dump_ir_graph(cg->irg, "place");
+ place_code(irg);
}
/**
* Called immediately before emit phase.
*/
-static void arm_finish_irg(void *self)
+static void arm_finish_irg(ir_graph *irg)
{
- arm_code_gen_t *cg = self;
-
/* do peephole optimizations and fix stack offsets */
- arm_peephole_optimization(cg);
+ arm_peephole_optimization(irg);
}
-static void arm_before_ra(void *self)
+static void arm_before_ra(ir_graph *irg)
{
- arm_code_gen_t *cg = self;
-
- be_sched_fix_flags(cg->irg, &arm_reg_classes[CLASS_arm_flags],
- NULL, NULL);
+ be_sched_fix_flags(irg, &arm_reg_classes[CLASS_arm_flags], NULL, NULL);
}
static void transform_Reload(ir_node *node)
}
}
-static void arm_after_ra(void *self)
+static void arm_after_ra(ir_graph *irg)
{
- arm_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
irg_walk_graph(irg, NULL, arm_collect_frame_entity_nodes, fec_env);
be_assign_entities(fec_env, arm_set_frame_entity);
be_free_frame_entity_coalescer(fec_env);
- irg_block_walk_graph(cg->irg, NULL, arm_after_ra_walker, NULL);
+ irg_block_walk_graph(irg, NULL, arm_after_ra_walker, NULL);
}
-/**
- * Emits the code, closes the output file and frees
- * the code generator interface.
- */
-static void arm_emit_and_done(void *self)
-{
- arm_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
- arm_gen_routine(cg, irg);
-
- /* de-allocate code generator */
- free(self);
-}
-
-/* forward */
-static void *arm_cg_init(ir_graph *irg);
-
-static const arch_code_generator_if_t arm_code_gen_if = {
- arm_cg_init,
- NULL, /* get_pic_base */
- NULL, /* before abi introduce */
- arm_prepare_graph,
- NULL, /* spill */
- arm_before_ra, /* before register allocation hook */
- arm_after_ra,
- arm_finish_irg,
- arm_emit_and_done,
-};
-
/**
* Initializes the code generator.
*/
-static void *arm_cg_init(ir_graph *irg)
+static void arm_init_graph(ir_graph *irg)
{
- arm_isa_t *isa = (arm_isa_t*) be_get_irg_arch_env(irg);
- arm_code_gen_t *cg;
-
- cg = XMALLOCZ(arm_code_gen_t);
- cg->impl = &arm_code_gen_if;
- cg->irg = irg;
- cg->isa = isa;
- cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
-
- /* enter the current code generator */
- isa->cg = cg;
-
- return (arch_code_generator_t *)cg;
+ (void) irg;
}
true, /* we do have custom abi handling */
},
ARM_FPU_ARCH_FPE, /* FPU architecture */
- NULL, /* current code generator */
};
/**
arm_register_init();
- isa->cg = NULL;
be_emit_init(file_handle);
arm_create_opcodes(&arm_irn_ops);
return 1;
}
-/**
- * Initializes the code generator interface.
- */
-static const arch_code_generator_if_t *arm_get_code_generator_if(void *self)
-{
- (void) self;
- return &arm_code_gen_if;
-}
-
list_sched_selector_t arm_sched_selector;
/**
arm_get_reg_class,
arm_get_reg_class_for_mode,
NULL,
- arm_get_code_generator_if,
arm_get_list_sched_selector,
arm_get_ilp_sched_selector,
arm_get_reg_class_alignment,
arm_get_irg_list,
NULL, /* mark remat */
arm_parse_asm_constraint,
- arm_is_valid_clobber
+ arm_is_valid_clobber,
+
+ arm_init_graph,
+ NULL, /* get_pic_base */
+ NULL, /* before_abi */
+ arm_prepare_graph,
+ arm_before_ra,
+ arm_after_ra,
+ arm_finish_irg,
+ arm_gen_routine,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_arm);
ARM_STRONG = ARM_ARCH_V4,
};
-typedef struct arm_code_gen_t {
- const arch_code_generator_if_t *impl; /**< implementation */
- ir_graph *irg; /**< current irg */
- arm_isa_t *isa; /**< the isa instance */
- char dump; /**< set to 1 if graphs should be dumped */
-} arm_code_gen_t;
-
-
struct arm_isa_t {
arch_env_t base; /**< must be derived from arch_env_t */
int fpu_arch; /**< FPU architecture */
- arm_code_gen_t *cg; /**< current code generator */
};
#endif
};
struct be_main_env_t {
- arch_env_t *arch_env;
- be_options_t *options; /**< backend options */
- arch_code_generator_t *cg;
- const char *cup_name; /**< name of the compilation unit */
- pmap *ent_trampoline_map; /**< A map containing PIC trampolines for methods. */
- ir_type *pic_trampolines_type; /**< Class type containing all trampolines */
- pmap *ent_pic_symbol_map;
- ir_type *pic_symbols_type;
+ arch_env_t *arch_env;
+ be_options_t *options; /**< backend options */
+ const char *cup_name; /**< name of the compilation unit */
+ pmap *ent_trampoline_map; /**< A map containing PIC trampolines for methods. */
+ ir_type *pic_trampolines_type; /**< Class type containing all trampolines */
+ pmap *ent_pic_symbol_map;
+ ir_type *pic_symbols_type;
};
extern unsigned short asm_constraint_flags[256];
typedef struct arch_inverse_t arch_inverse_t;
typedef struct arch_isa_if_t arch_isa_if_t;
typedef struct arch_env_t arch_env_t;
-typedef struct arch_code_generator_t arch_code_generator_t;
-typedef struct arch_code_generator_if_t arch_code_generator_if_t;
/**
* Some flags describing a node in more detail.
&& !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}
+static ir_node *get_pic_base(ir_graph *irg)
+{
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ if (arch_env->impl->get_pic_base == NULL)
+ return NULL;
+ return arch_env->impl->get_pic_base(irg);
+}
+
/** patches SymConsts to work in position independent code */
static void fix_pic_symconsts(ir_node *node, void *data)
{
- ir_node *pic_base;
- ir_node *add;
- ir_node *block;
- ir_mode *mode;
- ir_node *load;
- ir_node *load_res;
- ir_graph *irg = get_irn_irg(node);
- int arity, i;
- be_main_env_t *be = be_get_irg_main_env(irg);
+ ir_graph *irg = get_irn_irg(node);
+ be_main_env_t *be = be_get_irg_main_env(irg);
+ ir_node *pic_base;
+ ir_node *add;
+ ir_node *block;
+ ir_mode *mode;
+ ir_node *load;
+ ir_node *load_res;
+ int arity, i;
(void) data;
arity = get_irn_arity(node);
/* everything else is accessed relative to EIP */
mode = get_irn_mode(pred);
- pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(irg));
+ pic_base = get_pic_base(irg);
/* all ok now for locally constructed stuff */
if (can_address_relative(entity)) {
unsigned int i);
};
-/**
- * The code generator interface.
- */
-struct arch_code_generator_if_t {
- /**
- * Initialize the code generator.
- * @param irg A graph
- * @return A newly created code generator.
- */
- void *(*init)(ir_graph *irg);
-
- /**
- * return node used as base in pic code addresses
- */
- ir_node* (*get_pic_base)(void *self);
-
- /**
- * Called before abi introduce.
- */
- void (*before_abi)(void *self);
-
- /**
- * Called, when the graph is being normalized.
- */
- void (*prepare_graph)(void *self);
-
- /**
- * Backend may provide an own spiller.
- * This spiller needs to spill all register classes.
- */
- void (*spill)(void *self, ir_graph *irg);
-
- /**
- * Called before register allocation.
- */
- void (*before_ra)(void *self);
-
- /**
- * Called after register allocation.
- */
- void (*after_ra)(void *self);
-
- /**
- * Called directly before done is called. This should be the last place
- * where the irg is modified.
- */
- void (*finish)(void *self);
-
- /**
- * Called after everything happened. This call should emit the final
- * assembly code but avoid changing the irg.
- * The code generator must also be de-allocated here.
- */
- void (*done)(void *self);
-};
-
-/**
- * helper macro: call function func from the code generator
- * if it's implemented.
- */
-#define _arch_cg_call(cg, func) \
-do { \
- if((cg)->impl->func) \
- (cg)->impl->func(cg); \
-} while(0)
-
-#define _arch_cg_call_env(cg, env, func) \
-do { \
- if((cg)->impl->func) \
- (cg)->impl->func(cg, env); \
-} while(0)
-
-#define arch_code_generator_before_abi(cg) _arch_cg_call(cg, before_abi)
-#define arch_code_generator_prepare_graph(cg) _arch_cg_call(cg, prepare_graph)
-#define arch_code_generator_before_ra(cg) _arch_cg_call(cg, before_ra)
-#define arch_code_generator_after_ra(cg) _arch_cg_call(cg, after_ra)
-#define arch_code_generator_finish(cg) _arch_cg_call(cg, finish)
-#define arch_code_generator_done(cg) _arch_cg_call(cg, done)
-#define arch_code_generator_spill(cg, irg) _arch_cg_call_env(cg, irg, spill)
-#define arch_code_generator_has_spiller(cg) ((cg)->impl->spill != NULL)
-#define arch_code_generator_get_pic_base(cg) \
- ((cg)->impl->get_pic_base != NULL ? (cg)->impl->get_pic_base(cg) : NULL)
-
-/**
- * Code generator base class.
- */
-struct arch_code_generator_t {
- const arch_code_generator_if_t *impl;
-};
-
/**
* Architecture interface.
*/
void (*get_call_abi)(const void *self, ir_type *call_type,
be_abi_call_t *abi);
- /**
- * Get the code generator interface.
- * @param self The this pointer.
- * @return Some code generator interface.
- */
- const arch_code_generator_if_t *(*get_code_generator_if)(void *self);
-
/**
* Get the list scheduler to use. There is already a selector given, the
* backend is free to modify and/or ignore it.
* backend
*/
int (*is_valid_clobber)(const char *clobber);
+
+ /**
+ * Initialize the code generator.
+ * @param irg A graph
+ * @return A newly created code generator.
+ */
+ void (*init_graph)(ir_graph *irg);
+
+ /**
+ * return node used as base in pic code addresses
+ */
+ ir_node* (*get_pic_base)(ir_graph *irg);
+
+ /**
+ * Called before abi introduce.
+ */
+ void (*before_abi)(ir_graph *irg);
+
+ /**
+ * Called, when the graph is being normalized.
+ */
+ void (*prepare_graph)(ir_graph *irg);
+
+ /**
+ * Called before register allocation.
+ */
+ void (*before_ra)(ir_graph *irg);
+
+ /**
+ * Called after register allocation.
+ */
+ void (*after_ra)(ir_graph *irg);
+
+ /**
+ * Called directly before done is called. This should be the last place
+ * where the irg is modified.
+ */
+ void (*finish)(ir_graph *irg);
+
+ /**
+ * Called after everything happened. This call should emit the final
+ * assembly code but avoid changing the irg.
+ * The code generator must also be de-allocated here.
+ */
+ void (*emit)(ir_graph *irg);
};
#define arch_env_done(env) ((env)->impl->done(env))
#define arch_env_get_reg_class(env,i) ((env)->impl->get_reg_class(i))
#define arch_env_get_reg_class_for_mode(env,mode) ((env)->impl->get_reg_class_for_mode((mode)))
#define arch_env_get_call_abi(env,tp,abi) ((env)->impl->get_call_abi((env), (tp), (abi)))
-#define arch_env_get_code_generator_if(env) ((env)->impl->get_code_generator_if((env)))
#define arch_env_get_list_sched_selector(env,selector) ((env)->impl->get_list_sched_selector((env), (selector)))
#define arch_env_get_ilp_sched_selector(env) ((env)->impl->get_ilp_sched_selector(env))
#define arch_env_get_reg_class_alignment(env,cls) ((env)->impl->get_reg_class_alignment((cls)))
be_collect_node_stats(&last_node_stats, irg);
}
- if (! arch_code_generator_has_spiller(be_get_irg_cg(irg))) {
- /* use one of the generic spiller */
+ /* use one of the generic spiller */
- /* Perform the following for each register class. */
- for (j = 0, m = arch_env_get_n_reg_class(arch_env); j < m; ++j) {
- post_spill_env_t pse;
- const arch_register_class_t *cls
- = arch_env_get_reg_class(arch_env, j);
+ /* Perform the following for each register class. */
+ for (j = 0, m = arch_env_get_n_reg_class(arch_env); j < m; ++j) {
+ post_spill_env_t pse;
+ const arch_register_class_t *cls
+ = arch_env_get_reg_class(arch_env, j);
- if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
- continue;
+ if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
+ continue;
- stat_ev_ctx_push_str("bechordal_cls", cls->name);
+ stat_ev_ctx_push_str("bechordal_cls", cls->name);
- stat_ev_if {
- be_do_stat_reg_pressure(irg, cls);
- }
-
- memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
- pse.irg = irg;
- pre_spill(&pse, cls);
-
- be_timer_push(T_RA_SPILL);
- be_do_spill(irg, cls);
- be_timer_pop(T_RA_SPILL);
-
- dump(BE_CH_DUMP_SPILL, irg, pse.cls, "spill");
-
- post_spill(&pse, 0);
-
- stat_ev_if {
- be_node_stats_t node_stats;
+ stat_ev_if {
+ be_do_stat_reg_pressure(irg, cls);
+ }
- be_collect_node_stats(&node_stats, irg);
- be_subtract_node_stats(&node_stats, &last_node_stats);
- be_emit_node_stats(&node_stats, "bechordal_");
+ memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
+ pse.irg = irg;
+ pre_spill(&pse, cls);
- be_copy_node_stats(&last_node_stats, &node_stats);
- stat_ev_ctx_pop("bechordal_cls");
- }
- }
- } else {
- post_spill_env_t *pse;
+ be_timer_push(T_RA_SPILL);
+ be_do_spill(irg, cls);
+ be_timer_pop(T_RA_SPILL);
- /* the backend has its own spiller */
- m = arch_env_get_n_reg_class(arch_env);
+ dump(BE_CH_DUMP_SPILL, irg, pse.cls, "spill");
- pse = ALLOCAN(post_spill_env_t, m);
+ post_spill(&pse, 0);
- for (j = 0; j < m; ++j) {
- memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
- pse[j].irg = irg;
- pre_spill(&pse[j], pse[j].cls);
- }
+ stat_ev_if {
+ be_node_stats_t node_stats;
- be_timer_push(T_RA_SPILL);
- arch_code_generator_spill(be_get_irg_cg(irg), irg);
- be_timer_pop(T_RA_SPILL);
- dump(BE_CH_DUMP_SPILL, irg, NULL, "spill");
+ be_collect_node_stats(&node_stats, irg);
+ be_subtract_node_stats(&node_stats, &last_node_stats);
+ be_emit_node_stats(&node_stats, "bechordal_");
- for (j = 0; j < m; ++j) {
- post_spill(&pse[j], j);
+ be_copy_node_stats(&last_node_stats, &node_stats);
+ stat_ev_ctx_pop("bechordal_cls");
}
}
ir_graph *irg;
be_main_env_t *main_env;
be_abi_irg_t *abi;
- arch_code_generator_t *cg;
ir_exec_freq *exec_freq;
be_dom_front_info_t *dom_front;
be_lv_t *lv;
nodes. */
struct obstack obst; /**< birg obstack (mainly used to keep
register constraints which we can't keep
- in the irg obst, because it gets replace
+ in the irg obst, because it gets replaced
during code selection) */
+ void *isa_link; /**< architecture-specific per-graph data */
} be_irg_t;
static inline be_irg_t *be_birg_from_irg(const ir_graph *irg)
return be_birg_from_irg(irg)->main_env->options;
}
-static inline arch_code_generator_t *be_get_irg_cg(const ir_graph *irg)
-{
- return be_birg_from_irg(irg)->cg;
-}
-
/** deprecated */
static inline ir_graph *be_get_birg_irg(const be_irg_t *birg)
{
be_irg_t *birg = &birgs[i];
ir_graph *irg = birg->irg;
optimization_state_t state;
- const arch_code_generator_if_t *cg_if;
/* set the current graph (this is important for several firm functions) */
current_ir_graph = irg;
}
be_timer_pop(T_VERIFY);
- /* Get the code generator interface. */
- cg_if = arch_env_get_code_generator_if(arch_env);
-
/* get a code generator for this graph. */
- birg->cg = cg_if->init(irg);
+ arch_env->impl->init_graph(irg);
/* some transformations need to be done before abi introduce */
- assert(birg->cg->impl->before_abi == NULL || !arch_env->custom_abi);
- arch_code_generator_before_abi(birg->cg);
+ if (arch_env->impl->before_abi != NULL)
+ arch_env->impl->before_abi(irg);
/* implement the ABI conventions. */
be_timer_push(T_ABI);
/* perform codeselection */
be_timer_push(T_CODEGEN);
- arch_code_generator_prepare_graph(birg->cg);
+ if (arch_env->impl->prepare_graph != NULL)
+ arch_env->impl->prepare_graph(irg);
be_timer_pop(T_CODEGEN);
if (be_options.verify_option == BE_VERIFY_WARN) {
/* stuff needs to be done after scheduling but before register allocation */
be_timer_push(T_RA_PREPARATION);
- arch_code_generator_before_ra(birg->cg);
+ if (arch_env->impl->before_ra != NULL)
+ arch_env->impl->before_ra(irg);
be_timer_pop(T_RA_PREPARATION);
/* connect all stack modifying nodes together (see beabi.c) */
/* let the code generator prepare the graph for emitter */
be_timer_push(T_FINISH);
- arch_code_generator_after_ra(birg->cg);
+ if (arch_env->impl->after_ra != NULL)
+ arch_env->impl->after_ra(irg);
be_timer_pop(T_FINISH);
/* fix stack offsets */
dump(DUMP_SCHED, irg, "fix_stack_after_ra");
be_timer_push(T_FINISH);
- arch_code_generator_finish(birg->cg);
+ if (arch_env->impl->finish != NULL)
+ arch_env->impl->finish(irg);
be_timer_pop(T_FINISH);
dump(DUMP_FINAL, irg, "finish");
/* emit assembler code */
be_timer_push(T_EMIT);
- arch_code_generator_done(birg->cg);
+ if (arch_env->impl->emit != NULL)
+ arch_env->impl->emit(irg);
be_timer_pop(T_EMIT);
dump(DUMP_FINAL, irg, "end");
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
ir_mode *mode_fpcw = NULL;
-ia32_code_gen_t *ia32_current_cg = NULL;
/** The current omit-fp state */
static unsigned ia32_curr_fp_ommitted = 0;
/**
* Used to create per-graph unique pseudo nodes.
*/
-static inline ir_node *create_const(ia32_code_gen_t *cg, ir_node **place,
+static inline ir_node *create_const(ir_graph *irg, ir_node **place,
create_const_node_func func,
const arch_register_t* reg)
{
if (*place != NULL)
return *place;
- block = get_irg_start_block(cg->irg);
+ block = get_irg_start_block(irg);
res = func(NULL, block);
arch_set_irn_register(res, reg);
*place = res;
}
/* Creates the unique per irg GP NoReg node. */
-ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg)
+ir_node *ia32_new_NoReg_gp(ir_graph *irg)
{
- return create_const(cg, &cg->noreg_gp, new_bd_ia32_NoReg_GP,
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ return create_const(irg, &irg_data->noreg_gp, new_bd_ia32_NoReg_GP,
&ia32_gp_regs[REG_GP_NOREG]);
}
-ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg)
+ir_node *ia32_new_NoReg_vfp(ir_graph *irg)
{
- return create_const(cg, &cg->noreg_vfp, new_bd_ia32_NoReg_VFP,
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ return create_const(irg, &irg_data->noreg_vfp, new_bd_ia32_NoReg_VFP,
&ia32_vfp_regs[REG_VFP_NOREG]);
}
-ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg)
+ir_node *ia32_new_NoReg_xmm(ir_graph *irg)
{
- return create_const(cg, &cg->noreg_xmm, new_bd_ia32_NoReg_XMM,
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ return create_const(irg, &irg_data->noreg_xmm, new_bd_ia32_NoReg_XMM,
&ia32_xmm_regs[REG_XMM_NOREG]);
}
-ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg)
+ir_node *ia32_new_Fpu_truncate(ir_graph *irg)
{
- return create_const(cg, &cg->fpu_trunc_mode, new_bd_ia32_ChangeCW,
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ return create_const(irg, &irg_data->fpu_trunc_mode, new_bd_ia32_ChangeCW,
&ia32_fp_cw_regs[REG_FPCW]);
}
/**
* Returns the admissible noreg register node for input register pos of node irn.
*/
-static ir_node *ia32_get_admissible_noreg(ia32_code_gen_t *cg, ir_node *irn, int pos)
+static ir_node *ia32_get_admissible_noreg(ir_node *irn, int pos)
{
+ ir_graph *irg = get_irn_irg(irn);
const arch_register_req_t *req = arch_get_register_req(irn, pos);
assert(req != NULL && "Missing register requirements");
if (req->cls == &ia32_reg_classes[CLASS_ia32_gp])
- return ia32_new_NoReg_gp(cg);
+ return ia32_new_NoReg_gp(irg);
if (ia32_cg_config.use_sse2) {
- return ia32_new_NoReg_xmm(cg);
+ return ia32_new_NoReg_xmm(irg);
} else {
- return ia32_new_NoReg_vfp(cg);
+ return ia32_new_NoReg_vfp(irg);
}
}
static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
{
ia32_abi_env_t *env = self;
- ia32_code_gen_t *cg = ia32_current_cg;
- const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
+ ir_graph *irg = env->irg;
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
ia32_curr_fp_ommitted = env->flags.try_omit_fp;
if (! env->flags.try_omit_fp) {
ir_node *bl = get_irg_start_block(env->irg);
ir_node *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
ir_node *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *push;
/* mark bp register as ignore */
set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
set_irn_n(irn, n_ia32_mem, spill);
- set_irn_n(irn, i, ia32_get_admissible_noreg(ia32_current_cg, irn, i));
+ set_irn_n(irn, i, ia32_get_admissible_noreg(irn, i));
set_ia32_is_reload(irn);
}
static ir_entity *mcount = NULL;
-#define ID(s) new_id_from_chars(s, sizeof(s) - 1)
-
-static void ia32_before_abi(void *self)
+static void ia32_before_abi(ir_graph *irg)
{
- ia32_code_gen_t *cg = self;
- if (cg->gprof) {
+ if (be_get_irg_options(irg)->gprof) {
if (mcount == NULL) {
ir_type *tp = new_type_method(0, 0);
- mcount = new_entity(get_glob_type(), ID("mcount"), tp);
+ ident *id = new_id_from_str("mcount");
+ mcount = new_entity(get_glob_type(), id, tp);
/* FIXME: enter the right ld_ident here */
set_entity_ld_ident(mcount, get_entity_ident(mcount));
set_entity_visibility(mcount, ir_visibility_external);
}
- instrument_initcall(cg->irg, mcount);
+ instrument_initcall(irg, mcount);
}
}
* Transforms the standard firm graph into
* an ia32 firm graph
*/
-static void ia32_prepare_graph(void *self)
+static void ia32_prepare_graph(ir_graph *irg)
{
- ia32_code_gen_t *cg = self;
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
#ifdef FIRM_GRGEN_BE
switch (be_transformer) {
case TRANSFORMER_DEFAULT:
/* transform remaining nodes into assembler instructions */
- ia32_transform_graph(cg);
+ ia32_transform_graph(irg);
break;
case TRANSFORMER_PBQP:
case TRANSFORMER_RAND:
/* transform nodes into assembler instructions by PBQP magic */
- ia32_transform_graph_by_pbqp(cg);
+ ia32_transform_graph_by_pbqp(irg);
break;
default:
panic("invalid transformer");
}
#else
- ia32_transform_graph(cg);
+ ia32_transform_graph(irg);
#endif
/* do local optimizations (mainly CSE) */
- optimize_graph_df(cg->irg);
+ optimize_graph_df(irg);
- if (cg->dump)
- dump_ir_graph(cg->irg, "transformed");
+ if (irg_data->dump)
+ dump_ir_graph(irg, "transformed");
/* optimize address mode */
- ia32_optimize_graph(cg);
+ ia32_optimize_graph(irg);
/* do code placement, to optimize the position of constants */
- place_code(cg->irg);
+ place_code(irg);
- if (cg->dump)
- dump_ir_graph(cg->irg, "place");
+ if (irg_data->dump)
+ dump_ir_graph(irg, "place");
}
ir_node *turn_back_am(ir_node *node)
default:
panic("Unknown AM type");
}
- noreg = ia32_new_NoReg_gp(ia32_current_cg);
+ noreg = ia32_new_NoReg_gp(current_ir_graph);
set_irn_n(node, n_ia32_base, noreg);
set_irn_n(node, n_ia32_index, noreg);
set_ia32_am_offs_int(node, 0);
/**
* Called before the register allocator.
*/
-static void ia32_before_ra(void *self)
+static void ia32_before_ra(ir_graph *irg)
{
- ia32_code_gen_t *cg = self;
-
/* setup fpu rounding modes */
- ia32_setup_fpu_mode(cg);
+ ia32_setup_fpu_mode(irg);
/* fixup flags */
- be_sched_fix_flags(cg->irg, &ia32_reg_classes[CLASS_ia32_flags],
+ be_sched_fix_flags(irg, &ia32_reg_classes[CLASS_ia32_flags],
&flags_remat, NULL);
- be_add_missing_keeps(cg->irg);
+ be_add_missing_keeps(irg);
}
/**
* Transforms a be_Reload into a ia32 Load.
*/
-static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node)
+static void transform_to_Load(ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_entity *ent = be_get_frame_entity(node);
ir_mode *mode = get_irn_mode(node);
ir_mode *spillmode = get_spill_mode(node);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *sched_point = NULL;
ir_node *ptr = get_irg_frame(irg);
ir_node *mem = get_irn_n(node, be_pos_Reload_mem);
/**
* Transforms a be_Spill node into a ia32 Store.
*/
-static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node)
+static void transform_to_Store(ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
dbg_info *dbg = get_irn_dbg_info(node);
ir_entity *ent = be_get_frame_entity(node);
const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
ir_mode *mode = get_spill_mode(spillval);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *nomem = new_NoMem();
ir_node *ptr = get_irg_frame(irg);
ir_node *val = get_irn_n(node, be_pos_Spill_val);
exchange(node, store);
}
-static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
+static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
{
dbg_info *dbg = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_graph *irg = get_irn_irg(node);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *frame = get_irg_frame(irg);
ir_node *push = new_bd_ia32_Push(dbg, block, frame, noreg, mem, noreg, sp);
return push;
}
-static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
+static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_entity *ent)
{
- dbg_info *dbg = get_irn_dbg_info(node);
- ir_node *block = get_nodes_block(node);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
- ir_graph *irg = get_irn_irg(node);
- ir_node *frame = get_irg_frame(irg);
+ dbg_info *dbg = get_irn_dbg_info(node);
+ ir_node *block = get_nodes_block(node);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
+ ir_node *frame = get_irg_frame(irg);
ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_NoMem(), sp);
* push/pop into/from memory cascades. This is possible without using
* any registers.
*/
-static void transform_MemPerm(ia32_code_gen_t *cg, ir_node *node)
+static void transform_MemPerm(ir_node *node)
{
ir_node *block = get_nodes_block(node);
- ir_node *sp = be_abi_get_ignore_irn(be_get_irg_abi(cg->irg), &ia32_gp_regs[REG_ESP]);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *sp = be_abi_get_ignore_irn(be_get_irg_abi(irg), &ia32_gp_regs[REG_ESP]);
int arity = be_get_MemPerm_entity_arity(node);
ir_node **pops = ALLOCAN(ir_node*, arity);
ir_node *in[1];
entsize = entsize2;
assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
- push = create_push(cg, node, node, sp, mem, inent);
+ push = create_push(node, node, sp, mem, inent);
sp = create_spproj(node, push, pn_ia32_Push_stack);
if (entsize == 8) {
/* add another push after the first one */
- push = create_push(cg, node, node, sp, mem, inent);
+ push = create_push(node, node, sp, mem, inent);
add_ia32_am_offs_int(push, 4);
sp = create_spproj(node, push, pn_ia32_Push_stack);
}
entsize = entsize2;
assert( (entsize == 4 || entsize == 8) && "spillslot on x86 should be 32 or 64 bit");
- pop = create_pop(cg, node, node, sp, outent);
+ pop = create_pop(node, node, sp, outent);
sp = create_spproj(node, pop, pn_ia32_Pop_stack);
if (entsize == 8) {
add_ia32_am_offs_int(pop, 4);
/* add another pop after the first one */
- pop = create_pop(cg, node, node, sp, outent);
+ pop = create_pop(node, node, sp, outent);
sp = create_spproj(node, pop, pn_ia32_Pop_stack);
}
static void ia32_after_ra_walker(ir_node *block, void *env)
{
ir_node *node, *prev;
- ia32_code_gen_t *cg = env;
+ (void) env;
/* beware: the schedule is changed here */
for (node = sched_last(block); !sched_is_begin(node); node = prev) {
prev = sched_prev(node);
if (be_is_Reload(node)) {
- transform_to_Load(cg, node);
+ transform_to_Load(node);
} else if (be_is_Spill(node)) {
- transform_to_Store(cg, node);
+ transform_to_Store(node);
} else if (be_is_MemPerm(node)) {
- transform_MemPerm(cg, node);
+ transform_MemPerm(node);
}
}
}
* We transform Spill and Reload here. This needs to be done before
* stack biasing otherwise we would miss the corrected offset for these nodes.
*/
-static void ia32_after_ra(void *self)
+static void ia32_after_ra(ir_graph *irg)
{
- ia32_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
- be_fec_env_t *fec_env = be_new_frame_entity_coalescer(cg->irg);
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
/* create and coalesce frame entities */
irg_walk_graph(irg, NULL, ia32_collect_frame_entity_nodes, fec_env);
be_assign_entities(fec_env, ia32_set_frame_entity);
be_free_frame_entity_coalescer(fec_env);
- irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, cg);
+ irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, NULL);
}
/**
* virtual with real x87 instructions, creating a block schedule and peephole
* optimisations.
*/
-static void ia32_finish(void *self)
+static void ia32_finish(ir_graph *irg)
{
- ia32_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
- ia32_finish_irg(irg, cg);
+ ia32_finish_irg(irg);
/* we might have to rewrite x87 virtual registers */
- if (cg->do_x87_sim) {
- x87_simulate_graph(cg->irg);
+ if (irg_data->do_x87_sim) {
+ x87_simulate_graph(irg);
}
/* do peephole optimisations */
- ia32_peephole_optimization(cg);
+ ia32_peephole_optimization(irg);
/* create block schedule, this also removes empty blocks which might
* produce critical edges */
- cg->blk_sched = be_create_block_schedule(irg);
+ irg_data->blk_sched = be_create_block_schedule(irg);
}
/**
* Emits the code, closes the output file and frees
* the code generator interface.
*/
-static void ia32_codegen(void *self)
+static void ia32_emit(ir_graph *irg)
{
- ia32_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
if (ia32_cg_config.emit_machcode) {
- ia32_gen_binary_routine(cg, irg);
+ ia32_gen_binary_routine(irg);
} else {
- ia32_gen_routine(cg, irg);
+ ia32_gen_routine(irg);
}
-
- /* remove it from the isa */
- cg->isa->cg = NULL;
-
- assert(ia32_current_cg == cg);
- ia32_current_cg = NULL;
-
- /* de-allocate code generator */
- free(cg);
}
/**
* Returns the node representing the PIC base.
*/
-static ir_node *ia32_get_pic_base(void *self)
+static ir_node *ia32_get_pic_base(ir_graph *irg)
{
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
ir_node *block;
- ia32_code_gen_t *cg = self;
- ir_node *get_eip = cg->get_eip;
+ ir_node *get_eip = irg_data->get_eip;
if (get_eip != NULL)
return get_eip;
- block = get_irg_start_block(cg->irg);
- get_eip = new_bd_ia32_GetEIP(NULL, block);
- cg->get_eip = get_eip;
+ block = get_irg_start_block(irg);
+ get_eip = new_bd_ia32_GetEIP(NULL, block);
+ irg_data->get_eip = get_eip;
be_dep_on_frame(get_eip);
return get_eip;
}
-static void *ia32_cg_init(ir_graph *irg);
-
-static const arch_code_generator_if_t ia32_code_gen_if = {
- ia32_cg_init,
- ia32_get_pic_base, /* return node used as base in pic code addresses */
- ia32_before_abi, /* before abi introduce hook */
- ia32_prepare_graph,
- NULL, /* spill */
- ia32_before_ra, /* before register allocation hook */
- ia32_after_ra, /* after register allocation hook */
- ia32_finish, /* called before codegen */
- ia32_codegen /* emit && done */
-};
-
/**
* Initializes a IA32 code generator.
*/
-static void *ia32_cg_init(ir_graph *irg)
+static void ia32_init_graph(ir_graph *irg)
{
- ia32_isa_t *isa = (ia32_isa_t *)be_get_irg_arch_env(irg);
- ia32_code_gen_t *cg = XMALLOCZ(ia32_code_gen_t);
+ struct obstack *obst = be_get_be_obst(irg);
+ ia32_irg_data_t *irg_data = OALLOCZ(obst, ia32_irg_data_t);
- cg->impl = &ia32_code_gen_if;
- cg->irg = irg;
- cg->isa = isa;
- cg->blk_sched = NULL;
- cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
- cg->gprof = (be_get_irg_options(irg)->gprof) ? 1 : 0;
+ irg_data->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
- if (cg->gprof) {
+ if (be_get_irg_options(irg)->gprof) {
/* Linux gprof implementation needs base pointer */
be_get_irg_options(irg)->omit_fp = 0;
}
- /* enter it */
- isa->cg = cg;
-
-#ifndef NDEBUG
- if (isa->name_obst) {
- obstack_free(isa->name_obst, NULL);
- obstack_init(isa->name_obst);
- }
-#endif /* NDEBUG */
-
- assert(ia32_current_cg == NULL);
- ia32_current_cg = cg;
-
- return (arch_code_generator_t *)cg;
+ be_birg_from_irg(irg)->isa_link = irg_data;
}
NULL, /* 8bit register names high */
NULL, /* types */
NULL, /* tv_ents */
- NULL, /* current code generator */
NULL, /* abstract machine */
-#ifndef NDEBUG
- NULL, /* name obstack */
-#endif
};
static void init_asm_constraints(void)
ia32_build_8bit_reg_map(isa->regs_8bit);
ia32_build_8bit_reg_map_high(isa->regs_8bit_high);
-#ifndef NDEBUG
- isa->name_obst = XMALLOC(struct obstack);
- obstack_init(isa->name_obst);
-#endif /* NDEBUG */
-
/* enter the ISA object into the intrinsic environment */
intrinsic_env.isa = isa;
pmap_destroy(isa->tv_ent);
pmap_destroy(isa->types);
-#ifndef NDEBUG
- obstack_free(isa->name_obst, NULL);
-#endif /* NDEBUG */
-
be_emit_exit();
free(self);
return 1;
}
-/**
- * Initializes the code generator interface.
- */
-static const arch_code_generator_if_t *ia32_get_code_generator_if(void *self)
-{
- (void) self;
- return &ia32_code_gen_if;
-}
-
/**
* Returns the estimated execution time of an ia32 irn.
*/
ia32_get_reg_class,
ia32_get_reg_class_for_mode,
ia32_get_call_abi,
- ia32_get_code_generator_if,
ia32_get_list_sched_selector,
ia32_get_ilp_sched_selector,
ia32_get_reg_class_alignment,
ia32_get_irg_list,
ia32_mark_remat,
ia32_parse_asm_constraint,
- ia32_is_valid_clobber
+ ia32_is_valid_clobber,
+
+ ia32_init_graph,
+ ia32_get_pic_base, /* return node used as base in pic code addresses */
+ ia32_before_abi, /* before abi introduce hook */
+ ia32_prepare_graph,
+ ia32_before_ra, /* before register allocation hook */
+ ia32_after_ra, /* after register allocation hook */
+ ia32_finish, /* called before codegen */
+ ia32_emit, /* emit && done */
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);
typedef enum fp_support fp_support;
typedef struct ia32_isa_t ia32_isa_t;
-typedef struct ia32_code_gen_t ia32_code_gen_t;
typedef struct ia32_irn_ops_t ia32_irn_ops_t;
typedef struct ia32_intrinsic_env_t ia32_intrinsic_env_t;
-/**
- * IA32 code generator
- */
-struct ia32_code_gen_t {
- const arch_code_generator_if_t *impl; /**< implementation */
- ir_graph *irg; /**< current irg */
- ia32_isa_t *isa; /**< for fast access to the isa object */
- ir_node **blk_sched; /**< an array containing the scheduled blocks */
- unsigned do_x87_sim:1; /**< set to 1 if x87 simulation should be enforced */
- unsigned dump:1; /**< set to 1 if graphs should be dumped */
- unsigned gprof:1; /**< set to 1 grof profiling is in use */
- ir_node *noreg_gp; /**< unique NoReg_GP node */
- ir_node *noreg_vfp; /**< unique NoReg_VFP node */
- ir_node *noreg_xmm; /**< unique NoReg_XMM node */
-
- ir_node *fpu_trunc_mode; /**< truncate fpu mode */
- ir_node *get_eip; /**< get eip node */
-
- struct obstack *obst;
-};
+typedef struct ia32_irg_data_t {
+ ir_node **blk_sched; /**< an array containing the scheduled blocks */
+ unsigned do_x87_sim:1; /**< set to 1 if x87 simulation should be enforced */
+ unsigned dump:1; /**< set to 1 if graphs should be dumped */
+ ir_node *noreg_gp; /**< unique NoReg_GP node */
+ ir_node *noreg_vfp; /**< unique NoReg_VFP node */
+ ir_node *noreg_xmm; /**< unique NoReg_XMM node */
+
+ ir_node *fpu_trunc_mode; /**< truncate fpu mode */
+ ir_node *get_eip; /**< get eip node */
+} ia32_irg_data_t;
/**
* IA32 ISA object
pmap *regs_8bit_high; /**< contains the high part of the 8 bit names of the gp registers */
pmap *types; /**< A map of modes to primitive types */
pmap *tv_ent; /**< A map of entities that store const tarvals */
- ia32_code_gen_t *cg; /**< the current code generator */
const be_machine_t *cpu; /**< the abstract machine */
-#ifndef NDEBUG
- struct obstack *name_obst; /**< holds the original node names (for debugging) */
-#endif /* NDEBUG */
};
/**
* A helper type collecting needed info for IA32 intrinsic lowering.
*/
struct ia32_intrinsic_env_t {
- ia32_isa_t *isa; /**< the isa object */
- ir_graph *irg; /**< the irg, these entities belong to */
- ir_entity *divdi3; /**< entity for __divdi3 library call */
- ir_entity *moddi3; /**< entity for __moddi3 library call */
- ir_entity *udivdi3; /**< entity for __udivdi3 library call */
- ir_entity *umoddi3; /**< entity for __umoddi3 library call */
+ ia32_isa_t *isa; /**< the isa object */
+ ir_graph *irg; /**< the irg, these entities belong to */
+ ir_entity *divdi3; /**< entity for __divdi3 library call */
+ ir_entity *moddi3; /**< entity for __moddi3 library call */
+ ir_entity *udivdi3; /**< entity for __udivdi3 library call */
+ ir_entity *umoddi3; /**< entity for __umoddi3 library call */
};
typedef enum transformer_t {
/** The mode for the floating point control word. */
extern ir_mode *mode_fpcw;
-/** The current code generator. */
-extern ia32_code_gen_t *ia32_current_cg;
+static inline ia32_irg_data_t *ia32_get_irg_data(const ir_graph *irg)
+{
+ return (ia32_irg_data_t*) be_birg_from_irg(irg)->isa_link;
+}
/**
* Returns the unique per irg GP NoReg node.
*/
-ir_node *ia32_new_NoReg_gp(ia32_code_gen_t *cg);
-ir_node *ia32_new_NoReg_xmm(ia32_code_gen_t *cg);
-ir_node *ia32_new_NoReg_vfp(ia32_code_gen_t *cg);
+ir_node *ia32_new_NoReg_gp(ir_graph *irg);
+ir_node *ia32_new_NoReg_xmm(ir_graph *irg);
+ir_node *ia32_new_NoReg_vfp(ir_graph *irg);
/**
* Returns the unique per irg FPU truncation mode node.
*/
-ir_node *ia32_new_Fpu_truncate(ia32_code_gen_t *cg);
+ir_node *ia32_new_Fpu_truncate(ir_graph *irg);
/**
* Split instruction with source AM into Load and separate instruction.
#include "gen_ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
-/** hold the current code generator during transformation */
-ia32_code_gen_t *env_cg = NULL;
-
ir_heights_t *heights = NULL;
static int check_immediate_constraint(long val, char immediate_constraint_type)
ir_entity *create_float_const_entity(ir_node *cnst)
{
- ia32_isa_t *isa = env_cg->isa;
- tarval *tv = get_Const_tarval(cnst);
- ir_entity *res = pmap_get(isa->tv_ent, tv);
+ ir_graph *irg = get_irn_irg(cnst);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ia32_isa_t *isa = (ia32_isa_t*) arch_env;
+ tarval *tv = get_Const_tarval(cnst);
+ ir_entity *res = pmap_get(isa->tv_ent, tv);
ir_initializer_t *initializer;
ir_mode *mode;
ir_type *tp;
if (r_clobber_bits != 0) {
if (parsed_constraint.all_registers_allowed) {
parsed_constraint.all_registers_allowed = 0;
- be_abi_set_non_ignore_regs(be_get_irg_abi(env_cg->irg),
+ be_abi_set_non_ignore_regs(be_get_irg_abi(current_ir_graph),
parsed_constraint.cls,
&parsed_constraint.allowed_registers);
}
int same_as;
};
-extern ia32_code_gen_t *env_cg;
-extern ir_heights_t *heights;
-extern int no_pic_adjust;
+extern ir_heights_t *heights;
+extern int no_pic_adjust;
/**
* Get an atomic entity that is initialized with a tarval forming
#define SNPRINTF_BUF_LEN 128
static const ia32_isa_t *isa;
-static ia32_code_gen_t *cg;
static char pic_base_label[128];
static ir_label_t exc_label_id;
static int mark_spill_reload = 0;
static int should_align_block(const ir_node *block)
{
static const double DELTA = .0001;
- ir_exec_freq *exec_freq = be_get_irg_exec_freq(cg->irg);
+ ir_graph *irg = get_irn_irg(block);
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
ir_node *prev = get_prev_block_sched(block);
double block_freq;
double prev_freq = 0; /**< execfreq of the fallthrough block */
ir_graph *irg = current_ir_graph;
int need_label = block_needs_label(block);
int i, arity;
- ir_exec_freq *exec_freq = be_get_irg_exec_freq(cg->irg);
+ ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
if (block == get_irg_end_block(irg))
return;
/**
* Main driver. Emits the code for one routine.
*/
-void ia32_gen_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
+void ia32_gen_routine(ir_graph *irg)
{
- ir_entity *entity = get_irg_entity(irg);
- exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
+ ir_entity *entity = get_irg_entity(irg);
+ exc_entry *exc_list = NEW_ARR_F(exc_entry, 0);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ ir_node **blk_sched = irg_data->blk_sched;
int i, n;
- cg = ia32_cg;
- isa = cg->isa;
- do_pic = be_get_irg_options(cg->irg)->pic;
+ isa = (ia32_isa_t*) arch_env;
+ do_pic = be_get_irg_options(irg)->pic;
be_gas_elf_type_char = '@';
irg_block_walk_graph(irg, ia32_gen_labels, NULL, &exc_list);
/* initialize next block links */
- n = ARR_LEN(cg->blk_sched);
+ n = ARR_LEN(blk_sched);
for (i = 0; i < n; ++i) {
- ir_node *block = cg->blk_sched[i];
- ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;
+ ir_node *block = blk_sched[i];
+ ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
set_irn_link(block, prev);
}
for (i = 0; i < n; ++i) {
- ir_node *block = cg->blk_sched[i];
+ ir_node *block = blk_sched[i];
ia32_gen_block(block);
}
}
}
-void ia32_gen_binary_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg)
+void ia32_gen_binary_routine(ir_graph *irg)
{
- ir_entity *entity = get_irg_entity(irg);
+ ir_entity *entity = get_irg_entity(irg);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ ir_node **blk_sched = irg_data->blk_sched;
int i, n;
- cg = ia32_cg;
- isa = cg->isa;
+ isa = (ia32_isa_t*) arch_env;
ia32_register_binary_emitters();
irg_block_walk_graph(irg, ia32_gen_labels, NULL, NULL);
/* initialize next block links */
- n = ARR_LEN(cg->blk_sched);
+ n = ARR_LEN(blk_sched);
for (i = 0; i < n; ++i) {
- ir_node *block = cg->blk_sched[i];
- ir_node *prev = i > 0 ? cg->blk_sched[i-1] : NULL;
+ ir_node *block = blk_sched[i];
+ ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
set_irn_link(block, prev);
}
for (i = 0; i < n; ++i) {
- ir_node *block = cg->blk_sched[i];
+ ir_node *block = blk_sched[i];
gen_binary_block(block);
}
}
-
-
void ia32_init_emitter(void)
{
lc_opt_entry_t *be_grp;
void ia32_emit_am(const ir_node *node);
void ia32_emit_x87_binop(const ir_node *node);
-void ia32_gen_routine(ia32_code_gen_t *cg, ir_graph *irg);
-void ia32_gen_binary_routine(ia32_code_gen_t *ia32_cg, ir_graph *irg);
+void ia32_gen_routine(ir_graph *irg);
+void ia32_gen_binary_routine(ir_graph *irg);
/** Initializes the Emitter. */
void ia32_init_emitter(void);
* Transforms a Sub or xSub into Neg--Add iff OUT_REG != SRC1_REG && OUT_REG == SRC2_REG.
* THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
*/
-static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg)
+static void ia32_transform_sub_to_neg_add(ir_node *irn)
{
ir_graph *irg;
ir_node *in1, *in2, *noreg, *nomem, *res;
if (get_ia32_op_type(irn) != ia32_Normal)
return;
- noreg = ia32_new_NoReg_gp(cg);
- noreg_fp = ia32_new_NoReg_xmm(cg);
+ irg = get_irn_irg(irn);
+ noreg = ia32_new_NoReg_gp(irg);
+ noreg_fp = ia32_new_NoReg_xmm(irg);
nomem = new_NoMem();
in1 = get_irn_n(irn, n_ia32_binary_left);
in2 = get_irn_n(irn, n_ia32_binary_right);
if (out_reg != in2_reg)
return;
- irg = cg->irg;
block = get_nodes_block(irn);
dbg = get_irn_dbg_info(irn);
*/
static void ia32_finish_irg_walker(ir_node *block, void *env)
{
- ia32_code_gen_t *cg = env;
ir_node *irn, *next;
+ (void) env;
/* first: turn back AM source if necessary */
for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
/* check if there is a sub which need to be transformed */
if (is_ia32_Sub(irn) || is_ia32_xSub(irn)) {
- ia32_transform_sub_to_neg_add(irn, cg);
+ ia32_transform_sub_to_neg_add(irn);
}
}
/**
* Add Copy nodes for not fulfilled should_be_equal constraints
*/
-void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg)
+void ia32_finish_irg(ir_graph *irg)
{
waitq *wq = new_waitq();
while (! waitq_empty(wq)) {
ir_node *block = waitq_get(wq);
- ia32_finish_irg_walker(block, cg);
+ ia32_finish_irg_walker(block, NULL);
}
del_waitq(wq);
}
/**
* Check 2-Addresscode constraints and call peephole optimizations.
* @param irg The irg to finish
- * @param cg The codegenerator object for the irg
*/
-void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg);
+void ia32_finish_irg(ir_graph *irg);
/** Initialize the finisher. */
void ia32_init_finish(void);
-#endif /* FIRM_BE_IA32_IA32_FINISH_H */
+#endif
static ir_node *create_fpu_mode_spill(void *env, ir_node *state, int force,
ir_node *after)
{
- ia32_code_gen_t *cg = env;
ir_node *spill = NULL;
+ (void) env;
/* we don't spill the fpcw in unsafe mode */
if (ia32_cg_config.use_unsafe_floatconv) {
if (force == 1 || !is_ia32_ChangeCW(state)) {
ir_graph *irg = get_irn_irg(state);
ir_node *block = get_nodes_block(state);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *nomem = new_NoMem();
ir_node *frame = get_irg_frame(irg);
return spill;
}
-static ir_node *create_fldcw_ent(ia32_code_gen_t *cg, ir_node *block,
- ir_entity *entity)
+static ir_node *create_fldcw_ent(ir_node *block, ir_entity *entity)
{
+ ir_graph *irg = get_irn_irg(block);
ir_node *nomem = new_NoMem();
- ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *reload;
reload = new_bd_ia32_FldCW(NULL, block, noreg, noreg, nomem);
ir_node *spill, ir_node *before,
ir_node *last_state)
{
- ia32_code_gen_t *cg = env;
- ir_graph *irg = get_irn_irg(state);
- ir_node *block = get_nodes_block(before);
- ir_node *frame = get_irg_frame(irg);
- ir_node *noreg = ia32_new_NoReg_gp(cg);
- ir_node *reload = NULL;
+ ir_graph *irg = get_irn_irg(state);
+ ir_node *block = get_nodes_block(before);
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
+ ir_node *reload = NULL;
+ (void) env;
if (ia32_cg_config.use_unsafe_floatconv) {
if (fpcw_round == NULL) {
create_fpcw_entities();
}
if (spill != NULL) {
- reload = create_fldcw_ent(cg, block, fpcw_round);
+ reload = create_fldcw_ent(block, fpcw_round);
} else {
- reload = create_fldcw_ent(cg, block, fpcw_truncate);
+ reload = create_fldcw_ent(block, fpcw_truncate);
}
sched_add_before(before, reload);
return reload;
be_liveness_invalidate(be_get_irg_liveness(irg));
}
-void ia32_setup_fpu_mode(ia32_code_gen_t *cg)
+void ia32_setup_fpu_mode(ir_graph *irg)
{
/* do ssa construction for the fpu modes */
- rewire_fpu_mode_nodes(cg->irg);
+ rewire_fpu_mode_nodes(irg);
/* ensure correct fpu mode for operations */
- be_assure_state(cg->irg, &ia32_fp_cw_regs[REG_FPCW],
- cg, create_fpu_mode_spill, create_fpu_mode_reload);
+ be_assure_state(irg, &ia32_fp_cw_regs[REG_FPCW],
+ NULL, create_fpu_mode_spill, create_fpu_mode_reload);
}
/**
* Handle switching of fpu mode
*/
-void ia32_setup_fpu_mode(ia32_code_gen_t *cg);
+void ia32_setup_fpu_mode(ir_graph *irg);
-#endif /* FIRM_BE_IA32_IA32_FPU_H */
+#endif
static const char *ia32_get_old_node_name(const ir_node *irn)
{
- struct obstack *obst = env_cg->isa->name_obst;
+ ir_graph *irg = get_irn_irg(irn);
+ struct obstack *obst = be_get_be_obst(irg);
lc_eoprintf(firm_get_arg_env(), obst, "%+F", irn);
obstack_1grow(obst, 0);
void init_ia32_x87_attributes(ir_node *res)
{
+ ir_graph *irg = get_irn_irg(res);
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
#ifndef NDEBUG
ia32_attr_t *attr = get_ia32_attr(res);
attr->attr_type |= IA32_ATTR_ia32_x87_attr_t;
#else
(void) res;
#endif
- ia32_current_cg->do_x87_sim = 1;
+ irg_data->do_x87_sim = 1;
}
void init_ia32_asm_attributes(ir_node *res)
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-static ia32_code_gen_t *cg;
-
static void copy_mark(const ir_node *old, ir_node *new)
{
if (is_ia32_is_reload(old))
static void peephole_ia32_Cmp(ir_node *const node)
{
ir_node *right;
+ ir_graph *irg;
ia32_immediate_attr_t const *imm;
dbg_info *dbgi;
ir_node *block;
return;
dbgi = get_irn_dbg_info(node);
+ irg = get_irn_irg(node);
block = get_nodes_block(node);
- noreg = ia32_new_NoReg_gp(cg);
+ noreg = ia32_new_NoReg_gp(irg);
nomem = get_irg_no_mem(current_ir_graph);
op = get_irn_n(node, n_ia32_Cmp_left);
attr = get_irn_generic_attr(node);
/* walk through the Stores and create Pushs for them */
block = get_nodes_block(irn);
spmode = get_irn_mode(irn);
- irg = cg->irg;
+ irg = get_irn_irg(irn);
for (; i >= 0; --i) {
const arch_register_t *spreg;
ir_node *push;
ir_node *val, *mem, *mem_proj;
ir_node *store = stores[i];
- ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
val = get_irn_n(store, n_ia32_unary_op);
mem = get_irn_n(store, n_ia32_mem);
/* create a new IncSP if needed */
block = get_nodes_block(irn);
- irg = cg->irg;
+ irg = get_irn_irg(irn);
if (inc_ofs > 0) {
pred_sp = be_new_IncSP(esp, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
sched_add_before(irn, pred_sp);
be_peephole_exchange(node, xor);
}
-static inline int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
+static inline int is_noreg(const ir_node *node)
{
- return node == cg->noreg_gp;
+ return is_ia32_NoReg_GP(node);
}
ir_node *ia32_immediate_from_long(long val)
*/
static void peephole_ia32_Lea(ir_node *node)
{
+ ir_graph *irg;
ir_node *base;
ir_node *index;
const arch_register_t *base_reg;
base = get_irn_n(node, n_ia32_Lea_base);
index = get_irn_n(node, n_ia32_Lea_index);
- if (is_noreg(cg, base)) {
+ if (is_noreg(base)) {
base = NULL;
base_reg = NULL;
} else {
base_reg = arch_get_irn_register(base);
}
- if (is_noreg(cg, index)) {
+ if (is_noreg(index)) {
index = NULL;
index_reg = NULL;
} else {
make_add:
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
- noreg = ia32_new_NoReg_gp(cg);
+ irg = get_irn_irg(node);
+ noreg = ia32_new_NoReg_gp(irg);
nomem = new_NoMem();
res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, op1, op2);
arch_set_irn_register(res, out_reg);
make_shl:
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
- noreg = ia32_new_NoReg_gp(cg);
+ irg = get_irn_irg(node);
+ noreg = ia32_new_NoReg_gp(irg);
nomem = new_NoMem();
res = new_bd_ia32_Shl(dbgi, block, op1, op2);
arch_set_irn_register(res, out_reg);
}
/* Perform peephole-optimizations. */
-void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
+void ia32_peephole_optimization(ir_graph *irg)
{
- cg = new_cg;
-
/* register peephole optimisations */
clear_irp_opcodes_generic_func();
register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
if (ia32_cg_config.use_short_sex_eax)
register_peephole_optimisation(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I);
- be_peephole_opt(cg->irg);
+ be_peephole_opt(irg);
}
/**
/**
* Performs conv and address mode optimization.
*/
-void ia32_optimize_graph(ia32_code_gen_t *cg)
+void ia32_optimize_graph(ir_graph *irg)
{
- irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);
-
- if (cg->dump)
- dump_ir_graph(cg->irg, "opt");
+ irg_walk_blkwise_graph(irg, NULL, optimize_node, NULL);
}
void ia32_init_optimize(void)
/**
* Prepares irg for codegeneration. Places consts and transform reference mode
* nodes into mode_Iu nodes.
- * @param cg The ia32 codegenerator object
*/
-void ia32_pre_transform_phase(ia32_code_gen_t *cg);
+void ia32_pre_transform_phase(ir_graph *irg);
/**
* Performs conv and address mode optimizations.
- * @param cg The ia32 codegenerator object
*/
-void ia32_optimize_graph(ia32_code_gen_t *cg);
+void ia32_optimize_graph(ir_graph *irg);
/**
- * Performs Peephole Optimizations an a graph.
+ * Performs peephole optimizations on a graph.
 * @param irg the graph
- * @param cg the code generator object
*/
-void ia32_peephole_optimization(ia32_code_gen_t *cg);
+void ia32_peephole_optimization(ir_graph *irg);
/** Initialize the ia32 address mode optimizer. */
void ia32_init_optimize(void);
*/
static ir_node *get_symconst_base(void)
{
- if (be_get_irg_options(env_cg->irg)->pic) {
- return arch_code_generator_get_pic_base(env_cg);
+ ir_graph *irg = current_ir_graph;
+
+ if (be_get_irg_options(irg)->pic) {
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ return arch_env->impl->get_pic_base(irg);
}
return noreg_GP;
build_address(am, op2, 0);
new_op1 = (op1 == NULL ? NULL : be_transform_node(op1));
if (mode_is_float(mode)) {
- new_op2 = ia32_new_NoReg_vfp(env_cg);
+ new_op2 = ia32_new_NoReg_vfp(current_ir_graph);
} else {
new_op2 = noreg_GP;
}
build_address(am, op1, 0);
if (mode_is_float(mode)) {
- noreg = ia32_new_NoReg_vfp(env_cg);
+ noreg = ia32_new_NoReg_vfp(current_ir_graph);
} else {
noreg = noreg_GP;
}
if (initial_fpcw != NULL)
return initial_fpcw;
- fpcw = be_abi_get_ignore_irn(be_get_irg_abi(env_cg->irg),
+ fpcw = be_abi_get_ignore_irn(be_get_irg_abi(current_ir_graph),
&ia32_fp_cw_regs[REG_FPCW]);
initial_fpcw = be_transform_node(fpcw);
/* TODO: non-optimal... if we have many xXors, then we should
* rather create a load for the const and use that instead of
* several AM nodes... */
- ir_node *noreg_xmm = ia32_new_NoReg_xmm(env_cg);
+ ir_node *noreg_xmm = ia32_new_NoReg_xmm(current_ir_graph);
new_node = new_bd_ia32_xXor(dbgi, block, get_symconst_base(),
noreg_GP, nomem, new_op, noreg_xmm);
new_op = be_transform_node(op);
if (ia32_cg_config.use_sse2) {
- ir_node *noreg_fp = ia32_new_NoReg_xmm(env_cg);
+ ir_node *noreg_fp = ia32_new_NoReg_xmm(current_ir_graph);
new_node = new_bd_ia32_xAnd(dbgi, new_block, get_symconst_base(),
noreg_GP, nomem, new_op, noreg_fp);
new_node = new_r_Proj(vfisttp, mode_M, pn_ia32_vfisttp_M);
*fist = vfisttp;
} else {
- ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg);
+ ir_node *trunc_mode = ia32_new_Fpu_truncate(current_ir_graph);
/* do a fist */
new_node = new_bd_ia32_vfist(dbgi, block, base, index, mem, val, trunc_mode);
am.mem_proj = nomem;
am.op_type = ia32_AddrModeS;
am.new_op1 = res;
- am.new_op2 = ia32_new_NoReg_vfp(env_cg);
+ am.new_op2 = ia32_new_NoReg_vfp(current_ir_graph);
am.pinned = op_pin_state_floats;
am.commutative = 1;
am.ins_permuted = 0;
ir_mode *const res_mode = get_type_mode(res_type);
if (res_mode != NULL && mode_is_float(res_mode)) {
- env_cg->do_x87_sim = 1;
+ ir_graph *irg = current_ir_graph;
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
+ irg_data->do_x87_sim = 1;
}
}
/* special case for PIC trampoline calls */
old_no_pic_adjust = no_pic_adjust;
- no_pic_adjust = be_get_irg_options(env_cg->irg)->pic;
+ no_pic_adjust = be_get_irg_options(current_ir_graph)->pic;
match_arguments(&am, src_block, NULL, src_ptr, src_mem,
match_am | match_immediate);
*/
static void ia32_pretransform_node(void)
{
- ia32_code_gen_t *cg = env_cg;
+ ir_graph *irg = current_ir_graph;
+ ia32_irg_data_t *irg_data = ia32_get_irg_data(current_ir_graph);
- cg->noreg_gp = be_pre_transform_node(cg->noreg_gp);
- cg->noreg_vfp = be_pre_transform_node(cg->noreg_vfp);
- cg->noreg_xmm = be_pre_transform_node(cg->noreg_xmm);
+ irg_data->noreg_gp = be_pre_transform_node(irg_data->noreg_gp);
+ irg_data->noreg_vfp = be_pre_transform_node(irg_data->noreg_vfp);
+ irg_data->noreg_xmm = be_pre_transform_node(irg_data->noreg_xmm);
- nomem = get_irg_no_mem(current_ir_graph);
- noreg_GP = ia32_new_NoReg_gp(cg);
+ nomem = get_irg_no_mem(irg);
+ noreg_GP = ia32_new_NoReg_gp(irg);
get_fpcw();
}
}
/* do the transformation */
-void ia32_transform_graph(ia32_code_gen_t *cg)
+void ia32_transform_graph(ir_graph *irg)
{
int cse_last;
register_transformers();
- env_cg = cg;
initial_fpcw = NULL;
no_pic_adjust = 0;
be_timer_push(T_HEIGHTS);
- heights = heights_new(cg->irg);
+ heights = heights_new(irg);
be_timer_pop(T_HEIGHTS);
- ia32_calculate_non_address_mode_nodes(cg->irg);
+ ia32_calculate_non_address_mode_nodes(irg);
/* the transform phase is not safe for CSE (yet) because several nodes get
* attributes set after their creation */
call_list = NEW_ARR_F(ir_node *, 0);
call_types = NEW_ARR_F(ir_type *, 0);
- be_transform_graph(cg->irg, ia32_pretransform_node);
+ be_transform_graph(irg, ia32_pretransform_node);
if (ia32_cg_config.use_sse2)
postprocess_fp_call_results();
* Transform firm nodes to x86 assembler nodes, ie
* do instruction selection.
*/
-void ia32_transform_graph(ia32_code_gen_t *cg);
+void ia32_transform_graph(ir_graph *irg);
/**
* Some constants needed for code generation.
* Transforms the standard firm graph into
* a SPARC firm graph
*/
-static void sparc_prepare_graph(void *self)
+static void sparc_prepare_graph(ir_graph *irg)
{
- sparc_code_gen_t *cg = self;
-
- /* transform FIRM into SPARC asm nodes */
- sparc_transform_graph(cg);
-
- if (cg->dump)
- dump_ir_graph(cg->irg, "transformed");
+ sparc_transform_graph(irg);
}
static bool sparc_modifies_flags(const ir_node *node)
return arch_irn_get_flags(node) & sparc_arch_irn_flag_modifies_fp_flags;
}
-static void sparc_before_ra(void *self)
+static void sparc_before_ra(ir_graph *irg)
{
- sparc_code_gen_t *cg = self;
/* fixup flags register */
- be_sched_fix_flags(cg->irg, &sparc_reg_classes[CLASS_sparc_flags_class],
+ be_sched_fix_flags(irg, &sparc_reg_classes[CLASS_sparc_flags_class],
NULL, sparc_modifies_flags);
- be_sched_fix_flags(cg->irg, &sparc_reg_classes[CLASS_sparc_fpflags_class],
+ be_sched_fix_flags(irg, &sparc_reg_classes[CLASS_sparc_fpflags_class],
NULL, sparc_modifies_fp_flags);
}
}
}
-static void sparc_after_ra(void *self)
+static void sparc_after_ra(ir_graph *irg)
{
- sparc_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
- be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
+ be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
irg_walk_graph(irg, NULL, sparc_collect_frame_entity_nodes, fec_env);
be_assign_entities(fec_env, sparc_set_frame_entity);
be_free_frame_entity_coalescer(fec_env);
- irg_block_walk_graph(cg->irg, NULL, sparc_after_ra_walker, NULL);
+ irg_block_walk_graph(irg, NULL, sparc_after_ra_walker, NULL);
}
-/**
- * Emits the code, closes the output file and frees
- * the code generator interface.
- */
-static void sparc_emit_and_done(void *self)
+static void sparc_init_graph(ir_graph *irg)
{
- sparc_code_gen_t *cg = self;
- ir_graph *irg = cg->irg;
-
- sparc_emit_routine(irg);
-
- /* de-allocate code generator */
- free(cg);
-}
-
-static void *sparc_cg_init(ir_graph *irg);
-
-static const arch_code_generator_if_t sparc_code_gen_if = {
- sparc_cg_init,
- NULL, /* get_pic_base hook */
- NULL, /* before abi introduce hook */
- sparc_prepare_graph,
- NULL, /* spill hook */
- sparc_before_ra, /* before register allocation hook */
- sparc_after_ra, /* after register allocation hook */
- NULL,
- sparc_emit_and_done
-};
-
-/**
- * Initializes the code generator.
- */
-static void *sparc_cg_init(ir_graph *irg)
-{
- sparc_isa_t *isa = (sparc_isa_t *) be_get_irg_arch_env(irg);
- sparc_code_gen_t *cg = XMALLOCZ(sparc_code_gen_t);
-
- cg->impl = &sparc_code_gen_if;
- cg->irg = irg;
- cg->isa = isa;
- cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) != 0;
- cg->constants = pmap_create();
-
- /* enter the current code generator */
- isa->cg = cg;
-
- return (arch_code_generator_t*) cg;
+ (void) irg;
}
const arch_isa_if_t sparc_isa_if;
5, /* costs for a reload instruction */
true, /* custom abi handling */
},
- NULL /* current code generator */
+ NULL, /* constants */
};
/**
isa = XMALLOC(sparc_isa_t);
memcpy(isa, &sparc_isa_template, sizeof(*isa));
+ isa->constants = pmap_create();
be_emit_init(outfile);
/* emit now all global declarations */
be_gas_emit_decls(isa->base.main_env);
+ pmap_destroy(isa->constants);
be_emit_exit();
- free(self);
+ free(isa);
}
static unsigned sparc_get_n_reg_class(void)
return 1;
}
-/**
- * Initializes the code generator interface.
- */
-static const arch_code_generator_if_t *sparc_get_code_generator_if(
- void *self)
-{
- (void) self;
- return &sparc_code_gen_if;
-}
-
list_sched_selector_t sparc_sched_selector;
/**
sparc_get_reg_class,
sparc_get_reg_class_for_mode,
NULL,
- sparc_get_code_generator_if,
sparc_get_list_sched_selector,
sparc_get_ilp_sched_selector,
sparc_get_reg_class_alignment,
sparc_get_backend_irg_list,
NULL, /* mark remat */
sparc_parse_asm_constraint,
- sparc_is_valid_clobber
+ sparc_is_valid_clobber,
+
+ sparc_init_graph,
+ NULL, /* get_pic_base */
+ NULL, /* before_abi */
+ sparc_prepare_graph,
+ sparc_before_ra,
+ sparc_after_ra,
+ NULL, /* finish */
+ sparc_emit_routine,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc);
typedef struct sparc_transform_env_t sparc_transform_env_t;
typedef struct sparc_isa_t sparc_isa_t;
-typedef struct sparc_code_gen_t {
- const arch_code_generator_if_t *impl; /**< implementation */
- ir_graph *irg; /**< current irg */
- sparc_isa_t *isa; /**< the isa instance */
- bool dump; /**< set to 1 if graphs should
- be dumped */
- pmap *constants;
-} sparc_code_gen_t;
-
struct sparc_isa_t {
- arch_env_t base; /**< must be derived from arch_env_t */
- sparc_code_gen_t *cg; /**< current code generator */
+ arch_env_t base; /**< must be derived from arch_env_t */
+ pmap *constants;
};
/**
ir_mode *mode; /**< The mode of the irn */
};
-void sparc_finish_irg(sparc_code_gen_t *cg);
-
/**
* Sparc ABI requires some space which is always available at the top of
* the stack. It contains:
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-static sparc_code_gen_t *env_cg;
static beabi_helper_env_t *abihelper;
static const arch_register_t *sp_reg = &sparc_gp_regs[REG_SP];
static const arch_register_t *fp_reg = &sparc_gp_regs[REG_FRAME_POINTER];
*/
static ir_entity *create_float_const_entity(tarval *tv)
{
- ir_entity *entity = (ir_entity*) pmap_get(env_cg->constants, tv);
+ const arch_env_t *arch_env = be_get_irg_arch_env(current_ir_graph);
+ sparc_isa_t *isa = (sparc_isa_t*) arch_env;
+ ir_entity *entity = (ir_entity*) pmap_get(isa->constants, tv);
ir_initializer_t *initializer;
ir_mode *mode;
ir_type *type;
initializer = create_initializer_tarval(tv);
set_entity_initializer(entity, initializer);
- pmap_insert(env_cg->constants, tv, entity);
+ pmap_insert(isa->constants, tv, entity);
return entity;
}
/**
* configure transformation callbacks
*/
-void sparc_register_transformers(void)
+static void sparc_register_transformers(void)
{
be_start_transform_setup();
/**
* Transform a Firm graph into a SPARC graph.
*/
-void sparc_transform_graph(sparc_code_gen_t *cg)
+void sparc_transform_graph(ir_graph *irg)
{
- ir_graph *irg = cg->irg;
ir_entity *entity = get_irg_entity(irg);
ir_type *frame_type;
sparc_register_transformers();
- env_cg = cg;
node_to_stack = pmap_create();
cconv = sparc_decide_calling_convention(get_entity_type(entity), false);
create_stacklayout(irg);
- be_transform_graph(cg->irg, NULL);
+ be_transform_graph(irg, NULL);
assure_fp_keep();
be_abihelper_finish(abihelper);
be_add_missing_keeps(irg);
/* do code placement, to optimize the position of constants */
- place_code(cg->irg);
+ place_code(irg);
}
void sparc_init_transform(void)
void sparc_init_transform(void);
-void sparc_register_transformers(void);
+void sparc_transform_graph(ir_graph *irg);
-void sparc_transform_graph(sparc_code_gen_t *cg);
#endif