return false;
}
+/**
+ * Returns the base register to use for address-mode references to newly
+ * created symbol entities: the PIC base register when position-independent
+ * code is enabled, noreg_GP otherwise.
+ */
+static ir_node *get_symconst_base(void)
+{
+ if (env_cg->birg->main_env->options->pic) {
+ return arch_code_generator_get_pic_base(env_cg);
+ }
+
+ return noreg_GP;
+}
+
/**
* Transforms a Const.
*/
if (mode_is_float(mode)) {
ir_node *res = NULL;
ir_node *load;
+ ir_node *base;
ir_entity *floatent;
if (ia32_cg_config.use_sse2) {
#endif /* CONSTRUCT_SSE_CONST */
floatent = create_float_const_entity(node);
- load = new_bd_ia32_xLoad(dbgi, block, noreg_GP, noreg_GP, nomem, mode);
+ base = get_symconst_base();
+ load = new_bd_ia32_xLoad(dbgi, block, base, noreg_GP, nomem,
+ mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
arch_irn_add_flags(load, arch_irn_flags_rematerializable);
set_ia32_ls_mode(load, mode);
} else {
ir_mode *ls_mode;
+ ir_node *base;
floatent = create_float_const_entity(node);
/* create_float_const_ent is smart and sometimes creates
smaller entities */
ls_mode = get_type_mode(get_entity_type(floatent));
-
- load = new_bd_ia32_vfld(dbgi, block, noreg_GP, noreg_GP, nomem,
+ base = get_symconst_base();
+ load = new_bd_ia32_vfld(dbgi, block, base, noreg_GP, nomem,
ls_mode);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_am_sc(load, floatent);
ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp);
set_entity_ld_ident(ent, get_entity_ident(ent));
- set_entity_visibility(ent, visibility_local);
- set_entity_variability(ent, variability_constant);
- set_entity_allocation(ent, allocation_static);
+ add_entity_linkage(ent, IR_LINKAGE_CONSTANT);
+ set_entity_visibility(ent, ir_visibility_local);
if (kct == ia32_ULLBIAS) {
ir_initializer_t *initializer = create_initializer_compound(2);
ir_node *mem;
ir_node *new_mem;
+ /* floating point immediates */
if (is_Const(node)) {
ir_entity *entity = create_float_const_entity(node);
- addr->base = noreg_GP;
+ addr->base = get_symconst_base();
addr->index = noreg_GP;
addr->mem = nomem;
addr->symconst_ent = entity;
* several AM nodes... */
ir_node *noreg_xmm = ia32_new_NoReg_xmm(env_cg);
- new_node = new_bd_ia32_xXor(dbgi, block, noreg_GP, noreg_GP,
- nomem, new_op, noreg_xmm);
+ new_node = new_bd_ia32_xXor(dbgi, block, get_symconst_base(),
+ noreg_GP, nomem, new_op, noreg_xmm);
size = get_mode_size_bits(mode);
ent = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
if (ia32_cg_config.use_sse2) {
ir_node *noreg_fp = ia32_new_NoReg_xmm(env_cg);
- new_node = new_bd_ia32_xAnd(dbgi, new_block, noreg_GP, noreg_GP,
- nomem, new_op, noreg_fp);
+ new_node = new_bd_ia32_xAnd(dbgi, new_block, get_symconst_base(),
+ noreg_GP, nomem, new_op, noreg_fp);
size = get_mode_size_bits(mode);
ent = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS);
}
}
}
- flags = be_transform_node(pred);
+ /* add ia32 compare flags */
+ {
+ ir_node *l = get_Cmp_left(pred);
+ ir_mode *mode = get_irn_mode(l);
+ if (mode_is_float(mode))
+ pnc |= ia32_pn_Cmp_float;
+ else if (! mode_is_signed(mode))
+ pnc |= ia32_pn_Cmp_unsigned;
+ }
*pnc_out = pnc;
- if (mode_is_float(get_irn_mode(get_Cmp_left(pred))))
- *pnc_out |= ia32_pn_Cmp_float;
+ flags = be_transform_node(pred);
return flags;
}
}
* @param new_mode IN/OUT for the mode of the constants, if NULL
* smallest possible mode will be used
*/
-static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode) {
+static ir_entity *ia32_create_const_array(ir_node *c0, ir_node *c1, ir_mode **new_mode)
+{
ir_entity *ent;
ir_mode *mode = *new_mode;
ir_type *tp;
ent = new_entity(get_glob_type(), ia32_unique_id(".LC%u"), tp);
set_entity_ld_ident(ent, get_entity_ident(ent));
- set_entity_visibility(ent, visibility_local);
- set_entity_variability(ent, variability_constant);
- set_entity_allocation(ent, allocation_static);
+ set_entity_visibility(ent, ir_visibility_local);
+ add_entity_linkage(ent, IR_LINKAGE_CONSTANT);
initializer = create_initializer_compound(2);
return ent;
}
+/**
+ * Possible transformations for creating a Setcc.
+ */
+enum setcc_transform_insn {
+ SETCC_TR_ADD, /**< add the constant steps[].val */
+ SETCC_TR_ADDxx, /**< add the value to itself (lea a, a) */
+ SETCC_TR_LEA, /**< lea: (a << steps[].scale) + steps[].val */
+ SETCC_TR_LEAxx, /**< lea: a + (a << steps[].scale) + steps[].val */
+ SETCC_TR_SHL, /**< shift left by steps[].scale */
+ SETCC_TR_NEG, /**< negate the value */
+ SETCC_TR_NOT, /**< bitwise not */
+ SETCC_TR_AND, /**< bitwise and with steps[].val */
+ SETCC_TR_SET, /**< create the 0/1 Setcc node from the flags */
+ SETCC_TR_SBB, /**< create an Sbb0 node from the flags */
+};
+
+typedef struct setcc_transform {
+ unsigned num_steps; /**< number of valid entries in steps[] */
+ unsigned permutate_cmp_ins; /**< nonzero if the Cmp inputs should be permuted */
+ pn_Cmp pnc; /**< (possibly negated) relation for the Setcc */
+ struct {
+ enum setcc_transform_insn transform; /**< instruction to emit */
+ long val; /**< immediate / lea offset, if used */
+ int scale; /**< shift amount / lea scale, if used */
+ } steps[4];
+} setcc_transform_t;
+
+/**
+ * Setcc can only handle 0 and 1 result.
+ * Find a transformation that creates 0 and 1 from
+ * tv_t and tv_f.
+ *
+ * The recipe is recorded in res->steps[]; the caller applies the steps in
+ * reverse order (step num_steps-1 first), so the SET that materialises the
+ * 0/1 value from the flags is always recorded last.
+ */
+static void find_const_transform(pn_Cmp pnc, tarval *t, tarval *f,
+ setcc_transform_t *res)
+{
+ unsigned step = 0;
+
+ res->num_steps = 0;
+ res->permutate_cmp_ins = 0;
+
+ /* normalize: make t the non-zero (and bigger) value, negating the
+ compare relation whenever t and f are swapped */
+ if (tarval_is_null(t)) {
+ tarval *tmp = t;
+ t = f;
+ f = tmp;
+ pnc = ia32_get_negated_pnc(pnc);
+ } else if (tarval_cmp(t, f) == pn_Cmp_Lt) {
+ // now, t is the bigger one
+ tarval *tmp = t;
+ t = f;
+ f = tmp;
+ pnc = ia32_get_negated_pnc(pnc);
+ }
+ res->pnc = pnc;
+
+ /* reduce to the f == 0 case: record an ADD of f as the last-applied
+ step and continue searching with t - f */
+ if (! tarval_is_null(f)) {
+ tarval *t_sub = tarval_sub(t, f, NULL);
+
+ t = t_sub;
+ res->steps[step].transform = SETCC_TR_ADD;
+
+ if (t == tarval_bad)
+ panic("constant subtract failed");
+ if (! tarval_is_long(f))
+ panic("tarval is not long");
+
+ res->steps[step].val = get_tarval_long(f);
+ ++step;
+ f = tarval_sub(f, f, NULL);
+ assert(tarval_is_null(f));
+ }
+
+ /* t == 1: a plain Setcc suffices */
+ if (tarval_is_one(t)) {
+ res->steps[step].transform = SETCC_TR_SET;
+ res->num_steps = ++step;
+ return;
+ }
+
+ /* t == -1: negate the 0/1 Setcc result */
+ if (tarval_is_minus_one(t)) {
+ res->steps[step].transform = SETCC_TR_NEG;
+ ++step;
+ res->steps[step].transform = SETCC_TR_SET;
+ res->num_steps = ++step;
+ return;
+ }
+ if (tarval_is_long(t)) {
+ long v = get_tarval_long(t);
+
+ /* a pending ADD can be folded into a LEA: after --step the slot
+ still holds the ADD's val, which the LEA cases below reuse as
+ the address-mode offset */
+ res->steps[step].val = 0;
+ switch (v) {
+ case 9:
+ if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
+ --step;
+ res->steps[step].transform = SETCC_TR_LEAxx;
+ res->steps[step].scale = 3; /* (a << 3) + a */
+ break;
+ case 8:
+ if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
+ --step;
+ res->steps[step].transform = res->steps[step].val == 0 ? SETCC_TR_SHL : SETCC_TR_LEA;
+ res->steps[step].scale = 3; /* (a << 3) */
+ break;
+ case 5:
+ if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
+ --step;
+ res->steps[step].transform = SETCC_TR_LEAxx;
+ res->steps[step].scale = 2; /* (a << 2) + a */
+ break;
+ case 4:
+ if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
+ --step;
+ res->steps[step].transform = res->steps[step].val == 0 ? SETCC_TR_SHL : SETCC_TR_LEA;
+ res->steps[step].scale = 2; /* (a << 2) */
+ break;
+ case 3:
+ if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
+ --step;
+ res->steps[step].transform = SETCC_TR_LEAxx;
+ res->steps[step].scale = 1; /* (a << 1) + a */
+ break;
+ case 2:
+ if (step > 0 && res->steps[step - 1].transform == SETCC_TR_ADD)
+ --step;
+ res->steps[step].transform = res->steps[step].val == 0 ? SETCC_TR_SHL : SETCC_TR_LEA;
+ res->steps[step].scale = 1; /* (a << 1) */
+ break;
+ case 1:
+ /* NOTE(review): t == 1 should already be handled by the
+ tarval_is_one() test above, so this case looks unreachable;
+ if it were reached no SET step would be recorded -- confirm */
+ res->num_steps = step;
+ return;
+ default:
+ if (! tarval_is_single_bit(t)) {
+ /* generic multiplier: applied in reverse this is
+ SET (0/1), NEG (0/-1), AND v (0/v) */
+ res->steps[step].transform = SETCC_TR_AND;
+ res->steps[step].val = v;
+ ++step;
+ res->steps[step].transform = SETCC_TR_NEG;
+ } else {
+ /* power of two: shift the 0/1 result left */
+ int v = get_tarval_lowest_bit(t);
+ assert(v >= 0);
+
+ res->steps[step].transform = SETCC_TR_SHL;
+ res->steps[step].scale = v;
+ }
+ }
+ ++step;
+ res->steps[step].transform = SETCC_TR_SET;
+ res->num_steps = ++step;
+ return;
+ }
+ panic("tarval is not long");
+}
+
/**
* Transforms a Mux node into some code sequence.
*
}
am.ls_mode = new_mode;
- am.addr.base = noreg_GP;
+ am.addr.base = get_symconst_base();
am.addr.index = new_node;
am.addr.mem = nomem;
am.addr.offset = 0;
if (is_Const(mux_true) && is_Const(mux_false)) {
/* both are const, good */
- if (is_Const_1(mux_true) && is_Const_0(mux_false)) {
- new_node = create_set_32bit(dbgi, new_block, flags, pnc, node);
- } else if (is_Const_0(mux_true) && is_Const_1(mux_false)) {
- pnc = ia32_get_negated_pnc(pnc);
- new_node = create_set_32bit(dbgi, new_block, flags, pnc, node);
- } else {
- /* Not that simple. */
- goto need_cmov;
+ tarval *tv_true = get_Const_tarval(mux_true);
+ tarval *tv_false = get_Const_tarval(mux_false);
+ setcc_transform_t res;
+ int step;
+
+ /* check whether flags is a Cmp node that has no users yet, i.e.
+ we would be its only user, so its inputs may safely be permuted */
+ int permutate_allowed = 0;
+ if (is_ia32_Cmp(flags) && get_irn_n_edges(flags) == 0) {
+ /* yes, we can permutate its inputs */
+ permutate_allowed = 1;
+ }
+ find_const_transform(pnc, tv_true, tv_false, &res);
+ new_node = node;
+ if (res.permutate_cmp_ins) {
+ ia32_attr_t *attr = get_ia32_attr(flags);
+ attr->data.ins_permuted ^= 1;
+ }
+ for (step = (int)res.num_steps - 1; step >= 0; --step) {
+ ir_node *imm;
+
+ switch (res.steps[step].transform) {
+ case SETCC_TR_ADD:
+ imm = ia32_immediate_from_long(res.steps[step].val);
+ new_node = new_bd_ia32_Add(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_node, imm);
+ break;
+ case SETCC_TR_ADDxx:
+ new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
+ break;
+ case SETCC_TR_LEA:
+ new_node = new_bd_ia32_Lea(dbgi, new_block, noreg_GP, new_node);
+ set_ia32_am_scale(new_node, res.steps[step].scale);
+ set_ia32_am_offs_int(new_node, res.steps[step].val);
+ break;
+ case SETCC_TR_LEAxx:
+ new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
+ set_ia32_am_scale(new_node, res.steps[step].scale);
+ set_ia32_am_offs_int(new_node, res.steps[step].val);
+ break;
+ case SETCC_TR_SHL:
+ imm = ia32_immediate_from_long(res.steps[step].scale);
+ new_node = new_bd_ia32_Shl(dbgi, new_block, new_node, imm);
+ break;
+ case SETCC_TR_NEG:
+ new_node = new_bd_ia32_Neg(dbgi, new_block, new_node);
+ break;
+ case SETCC_TR_NOT:
+ new_node = new_bd_ia32_Not(dbgi, new_block, new_node);
+ break;
+ case SETCC_TR_AND:
+ imm = ia32_immediate_from_long(res.steps[step].val);
+ new_node = new_bd_ia32_And(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_node, imm);
+ break;
+ case SETCC_TR_SET:
+ new_node = create_set_32bit(dbgi, new_block, flags, res.pnc, new_node);
+ break;
+ case SETCC_TR_SBB:
+ new_node = new_bd_ia32_Sbb0(dbgi, new_block, flags);
+ break;
+ default:
+ panic("unknown setcc transform");
+ }
}
} else {
-need_cmov:
new_node = create_CMov(node, cond, flags, pnc);
}
return new_node;
ir_node *count = ia32_create_Immediate(NULL, 0, 31);
ir_node *fadd;
- am.addr.base = noreg_GP;
+ am.addr.base = get_symconst_base();
am.addr.index = new_bd_ia32_Shr(dbgi, block, new_val_high, count);
am.addr.mem = nomem;
am.addr.offset = 0;
/**
* Transform Builtin trap
*/
-static ir_node *gen_trap(ir_node *node) {
+static ir_node *gen_trap(ir_node *node)
+{
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *mem = be_transform_node(get_Builtin_mem(node));
/**
* Transform Builtin debugbreak
*/
-static ir_node *gen_debugbreak(ir_node *node) {
+static ir_node *gen_debugbreak(ir_node *node)
+{
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *mem = be_transform_node(get_Builtin_mem(node));
/**
* Transform Builtin return_address
*/
-static ir_node *gen_return_address(ir_node *node) {
+static ir_node *gen_return_address(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
ir_node *frame = get_Builtin_param(node, 1);
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform Builtin frame_address
*/
-static ir_node *gen_frame_address(ir_node *node) {
+static ir_node *gen_frame_address(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
ir_node *frame = get_Builtin_param(node, 1);
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform Builtin frame_address
*/
-static ir_node *gen_prefetch(ir_node *node) {
+static ir_node *gen_prefetch(ir_node *node)
+{
dbg_info *dbgi;
ir_node *ptr, *block, *mem, *base, *index;
ir_node *param, *new_node;
/**
* Transform builtin popcount
*/
-static ir_node *gen_popcount(ir_node *node) {
+static ir_node *gen_popcount(ir_node *node)
+{
ir_node *param = get_Builtin_param(node, 0);
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform builtin byte swap.
*/
-static ir_node *gen_bswap(ir_node *node) {
+static ir_node *gen_bswap(ir_node *node)
+{
ir_node *param = be_transform_node(get_Builtin_param(node, 0));
dbg_info *dbgi = get_irn_dbg_info(node);
/**
* Transform builtin outport.
*/
-static ir_node *gen_outport(ir_node *node) {
+static ir_node *gen_outport(ir_node *node)
+{
ir_node *port = create_immediate_or_transform(get_Builtin_param(node, 0), 0);
ir_node *oldv = get_Builtin_param(node, 1);
ir_mode *mode = get_irn_mode(oldv);
/**
* Transform builtin inport.
*/
-static ir_node *gen_inport(ir_node *node) {
+static ir_node *gen_inport(ir_node *node)
+{
ir_type *tp = get_Builtin_type(node);
ir_type *rstp = get_method_res_type(tp, 0);
ir_mode *mode = get_type_mode(rstp);
/**
* Transform a builtin inner trampoline
*/
-static ir_node *gen_inner_trampoline(ir_node *node) {
+static ir_node *gen_inner_trampoline(ir_node *node)
+{
ir_node *ptr = get_Builtin_param(node, 0);
ir_node *callee = get_Builtin_param(node, 1);
ir_node *env = be_transform_node(get_Builtin_param(node, 2));
/**
* Transform Builtin node.
*/
-static ir_node *gen_Builtin(ir_node *node) {
+static ir_node *gen_Builtin(ir_node *node)
+{
ir_builtin_kind kind = get_Builtin_kind(node);
switch (kind) {
/**
* Transform Proj(Builtin) node.
*/
-static ir_node *gen_Proj_Builtin(ir_node *proj) {
+static ir_node *gen_Proj_Builtin(ir_node *proj)
+{
ir_node *node = get_Proj_pred(proj);
ir_node *new_node = be_transform_node(node);
ir_builtin_kind kind = get_Builtin_kind(node);
* The ABI requires that the results are in st0, copy them
* to a xmm register.
*/
-static void postprocess_fp_call_results(void) {
+static void postprocess_fp_call_results(void)
+{
int i;
for (i = ARR_LEN(call_list) - 1; i >= 0; --i) {