/* TODO: ugly */
static set *cur_reg_set = NULL;
-ir_mode *mode_fpcw = NULL;
+ir_mode *mode_fpcw = NULL;
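+/* the currently active code generator; at most one may be live at a time
+ * (see the asserts below) */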
+ia32_code_gen_t *ia32_current_cg = NULL;
typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_graph *irg, ir_node *block);
*/
static const arch_register_req_t *ia32_get_irn_reg_req(const void *self,
const ir_node *node,
- int pos) {
+ int pos)
+{
long node_pos = pos == -1 ? 0 : pos;
ir_mode *mode = is_Block(node) ? NULL : get_irn_mode(node);
(void) self;
return arch_no_register_req;
}
-static void ia32_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) {
+static void ia32_set_irn_reg(const void *self, ir_node *irn,
+ const arch_register_t *reg)
+{
int pos = 0;
(void) self;
}
}
-static const arch_register_t *ia32_get_irn_reg(const void *self, const ir_node *irn) {
+static const arch_register_t *ia32_get_irn_reg(const void *self,
+ const ir_node *irn)
+{
int pos = 0;
const arch_register_t *reg = NULL;
(void) self;
ia32_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
- /* if we do x87 code generation, rewrite all the virtual instructions and registers */
- if (cg->used_fp == fp_x87 || cg->force_sim) {
+	/* rewrite the virtual x87 instructions and registers if x87 code was generated */
+ if (cg->do_x87_sim) {
x87_simulate_graph(cg->arch_env, cg->birg);
}
/* remove it from the isa */
cg->isa->cg = NULL;
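+	/* this code generator must be the active one; clear the global reference */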
+ assert(ia32_current_cg == cg);
+ ia32_current_cg = NULL;
+
/* de-allocate code generator */
del_set(cg->reg_set);
free(cg);
cg->birg = birg;
cg->blk_sched = NULL;
cg->fp_kind = isa->fp_kind;
- cg->used_fp = fp_none;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
/* copy optimizations from isa for easier access */
ia32_irn_ops.cg = cg;
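+	/* register this code generator as the single active instance */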
+ assert(ia32_current_cg == NULL);
+ ia32_current_cg = cg;
+
return (arch_code_generator_t *)cg;
}
ir_node *load;
ir_entity *floatent;
- FP_USED(env_cg);
if (! USE_SSE2(env_cg)) {
cnst_classify_t clss = classify_Const(node);
ir_node *cnst;
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg))
cnst = new_rd_ia32_xConst(dbgi, irg, block);
else
assert((expr_op || imm_op) && "invalid operands");
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg))
return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xAdd);
else
ir_mode *mode = get_irn_mode(node);
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg))
return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xMul);
else
assert((expr_op || imm_op) && "invalid operands");
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg))
return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xSub);
else
ir_node *nomem = new_rd_NoMem(current_ir_graph);
ir_node *new_op;
- FP_USED(env_cg);
if (USE_SSE2(env_cg)) {
ir_mode *mode = get_irn_mode(op1);
if (is_ia32_xConst(new_op2)) {
if (mode_is_float(mode)) {
ir_node *new_op = be_transform_node(op);
- FP_USED(env_cg);
if (USE_SSE2(env_cg)) {
ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg);
ir_node *noreg_fp = ia32_new_NoReg_fp(env_cg);
ir_entity *ent;
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg)) {
res = new_rd_ia32_xAnd(dbgi,irg, block, noreg_gp, noreg_gp, new_op, noreg_fp, nomem);
}
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg)) {
new_op = new_rd_ia32_xLoad(dbgi, irg, block, lptr, noreg, new_mem);
res_mode = mode_xmm;
}
if (mode_is_float(mode)) {
- FP_USED(env_cg);
-
new_val = be_transform_node(val);
if (USE_SSE2(env_cg)) {
new_op = new_rd_ia32_xStore(dbgi, irg, block, sptr, noreg, new_val,
new_cmp_b = create_immediate_or_transform(cmp_b, 0);
if (mode_is_float(cmp_mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg)) {
res = new_rd_ia32_xCondJmp(dbgi, irg, block, noreg, noreg, cmp_a,
cmp_b, nomem, pnc);
return new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res);
}
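+/** Creates a Conv with the strict attribute set (the conversion must really
+ *  be performed and cannot be optimized away). */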
-static ir_node *create_Strict_conv(ir_mode *src_mode, ir_mode *tgt_mode,
+static ir_node *create_strict_conv(ir_mode *src_mode, ir_mode *tgt_mode,
ir_node *node)
{
ir_node *block = get_nodes_block(node);
} else {
// Matze: TODO what about strict convs?
if(get_Conv_strict(node)) {
- res = create_Strict_conv(src_mode, tgt_mode, new_op);
+ res = create_strict_conv(src_mode, tgt_mode, new_op);
SET_IA32_ORIG_NODE(get_Proj_pred(res), ia32_get_old_node_name(env_cg, node));
return res;
}
} else {
/* we convert from int ... */
if (mode_is_float(tgt_mode)) {
- FP_USED(env_cg);
/* ... to float */
DB((dbg, LEVEL_1, "create Conv(int, float) ..."));
if (USE_SSE2(env_cg)) {
struct obstack *obst;
constraint_t parsed_constraint;
- /* assembler could contain float statements */
- FP_USED(env_cg);
-
/* transform inputs */
arity = get_irn_arity(node);
in = alloca(arity * sizeof(in[0]));
long pn_res;
if (mode_is_float(load_mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg)) {
new_op = new_rd_ia32_xLoad(dbgi, irg, block, new_ptr, noreg, nomem);
pn_res = pn_ia32_xLoad_res;
ia32_collect_Projs(node, projs, pn_Load_max);
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg)) {
new_op = new_rd_ia32_xLoad(dbgi, irg, block, new_ptr, noreg, new_mem);
}
ir_mode *mode = get_irn_mode(val);
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg)) {
new_op = new_rd_ia32_xStore(dbgi, irg, block, new_ptr, noreg, new_val, new_mem);
} else {
ir_mode *mode = get_irn_mode(node);
if (mode_is_float(mode)) {
- FP_USED(env_cg);
if (USE_SSE2(env_cg))
return ia32_new_Unknown_xmm(env_cg);
else
/**
* Transforms a lowered Load into a "real" one.
*/
-static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func, char fp_unit) {
+static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *ptr = get_irn_n(node, 0);
ir_node *new_ptr = be_transform_node(ptr);
ir_node *noreg = ia32_new_NoReg_gp(env_cg);
ir_node *new_op;
- /*
- Could be that we have SSE2 unit, but due to 64Bit Div/Conv
- lowering we have x87 nodes, so we need to enforce simulation.
- */
- if (mode_is_float(mode)) {
- FP_USED(env_cg);
- if (fp_unit == fp_x87)
- FORCE_x87(env_cg);
- }
-
new_op = func(dbgi, irg, block, new_ptr, noreg, new_mem);
set_ia32_op_type(new_op, ia32_AddrModeS);
set_ia32_am_sc(new_op, get_ia32_am_sc(node));
if (is_ia32_am_sc_sign(node))
set_ia32_am_sc_sign(new_op);
- set_ia32_ls_mode(new_op, get_ia32_ls_mode(node));
+ set_ia32_ls_mode(new_op, mode);
if (is_ia32_use_frame(node)) {
set_ia32_frame_ent(new_op, get_ia32_frame_ent(node));
set_ia32_use_frame(new_op);
/**
* Transforms a lowered Store into a "real" one.
*/
-static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func, char fp_unit) {
+static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func)
+{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *ptr = get_irn_n(node, 0);
ir_node *new_ptr = be_transform_node(ptr);
long am_offs;
ia32_am_flavour_t am_flav = ia32_B;
- /*
- Could be that we have SSE2 unit, but due to 64Bit Div/Conv
- lowering we have x87 nodes, so we need to enforce simulation.
- */
- if (mode_is_float(mode)) {
- FP_USED(env_cg);
- if (fp_unit == fp_x87)
- FORCE_x87(env_cg);
- }
-
new_op = func(dbgi, irg, block, new_ptr, noreg, new_val, new_mem);
if ((am_offs = get_ia32_am_offs_int(node)) != 0) {
*/
#define GEN_LOWERED_OP(op) \
static ir_node *gen_ia32_l_##op(ir_node *node) { \
- ir_mode *mode = get_irn_mode(node); \
- if (mode_is_float(mode)) \
- FP_USED(env_cg); \
return gen_binop(node, get_binop_left(node), \
get_binop_right(node), new_rd_ia32_##op,0); \
}
#define GEN_LOWERED_x87_OP(op) \
static ir_node *gen_ia32_l_##op(ir_node *node) { \
ir_node *new_op; \
- FORCE_x87(env_cg); \
new_op = gen_binop_x87_float(node, get_binop_left(node), \
get_binop_right(node), new_rd_ia32_##op); \
return new_op; \
get_binop_right(node), new_rd_ia32_##op); \
}
-#define GEN_LOWERED_LOAD(op, fp_unit) \
- static ir_node *gen_ia32_l_##op(ir_node *node) {\
- return gen_lowered_Load(node, new_rd_ia32_##op, fp_unit); \
+#define GEN_LOWERED_LOAD(op) \
+ static ir_node *gen_ia32_l_##op(ir_node *node) { \
+ return gen_lowered_Load(node, new_rd_ia32_##op); \
}
-#define GEN_LOWERED_STORE(op, fp_unit) \
- static ir_node *gen_ia32_l_##op(ir_node *node) {\
- return gen_lowered_Store(node, new_rd_ia32_##op, fp_unit); \
+#define GEN_LOWERED_STORE(op) \
+ static ir_node *gen_ia32_l_##op(ir_node *node) { \
+ return gen_lowered_Store(node, new_rd_ia32_##op); \
}
GEN_LOWERED_OP(Adc)
GEN_LOWERED_UNOP(Neg)
-GEN_LOWERED_LOAD(vfild, fp_x87)
-GEN_LOWERED_LOAD(Load, fp_none)
-/*GEN_LOWERED_STORE(vfist, fp_x87)
- *TODO
- */
-GEN_LOWERED_STORE(Store, fp_none)
+GEN_LOWERED_LOAD(vfild)
+GEN_LOWERED_LOAD(Load)
+/* TODO: GEN_LOWERED_STORE(vfist) */
+GEN_LOWERED_STORE(Store)
static ir_node *gen_ia32_l_vfdiv(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
SET_IA32_ORIG_NODE(vfdiv, ia32_get_old_node_name(env_cg, node));
- FORCE_x87(env_cg);
-
return vfdiv;
}