return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
}
+/**
+ * Return the stack entity that contains the frame address, or NULL if
+ * the frame pointer is omitted for the current function.
+ */
+ir_entity *ia32_get_frame_address_entity(void) {
+	ia32_build_between_type();
+	if (ia32_curr_fp_ommitted)
+		return NULL;
+	return old_bp_ent;
+}
+
/**
* Get the estimated cycle count for @p irn.
*
*/
ir_entity *ia32_get_return_address_entity(void);
+/**
+ * Return the stack entity that contains the frame address.
+ */
+ir_entity *ia32_get_frame_address_entity(void);
+
#endif
return get_irn_link(block);
}
+/** Checks if the current block is a fall-through target. */
static int is_fallthrough(const ir_node *cfgpred)
{
ir_node *pred;
return 1;
}
+/**
+ * returns non-zero if the given block needs a label
+ * because of being a jump-target (and not a fall-through)
+ */
static int block_needs_label(const ir_node *block)
{
int need_label = 1;
typedef enum ia32_emit_mod_t {
EMIT_RESPECT_LS = 1U << 0,
- EMIT_ALTERNATE_AM = 1U << 1
+ EMIT_ALTERNATE_AM = 1U << 1,
+ EMIT_LONG = 1U << 2
} ia32_emit_mod_t;
/**
* %Sx <node> source register x
* %s const char* string
* %u unsigned int unsigned int
+ * %d signed int signed int
*
* x starts at 0
* # modifier for %ASx, %D and %S uses ls mode of node to alter register width
* * modifier does not prefix immediates with $, but AM with *
+ * l modifier for %lu and %ld
*/
static void ia32_emitf(const ir_node *node, const char *fmt, ...)
{
++fmt;
}
+ if (*fmt == 'l') {
+ mod |= EMIT_LONG;
+ ++fmt;
+ }
+
switch (*fmt++) {
case '%':
be_emit_char('%');
break;
}
- case 'u': {
- unsigned num = va_arg(ap, unsigned);
- be_emit_irprintf("%u", num);
+ case 'u':
+ if (mod & EMIT_LONG) {
+ unsigned long num = va_arg(ap, unsigned long);
+ be_emit_irprintf("%lu", num);
+ } else {
+ unsigned num = va_arg(ap, unsigned);
+ be_emit_irprintf("%u", num);
+ }
+ break;
+
+ case 'd':
+ if (mod & EMIT_LONG) {
+ long num = va_arg(ap, long);
+ be_emit_irprintf("%ld", num);
+ } else {
+ int num = va_arg(ap, int);
+ be_emit_irprintf("%d", num);
+ }
break;
- }
default:
unknown:
- panic("unknown conversion");
+ panic("unknown format conversion in ia32_emitf()");
}
}
ia32_emitf(node, "\tpopl %D0\n");
}
+/**
+ * Emit code for a ClimbFrame node: copy the frame pointer (%S0) into the
+ * result register (%D0), then follow the chain of saved frame pointers
+ * attr->count times in a dec/jnz loop using %S1 as the counter.
+ */
+static void emit_ia32_ClimbFrame(const ir_node *node)
+{
+	const ia32_climbframe_attr_t *attr = get_ia32_climbframe_attr_const(node);
+
+	ia32_emitf(node, "\tmovl %S0, %D0\n");
+	/* AT&T syntax: the immediate loop count is the SOURCE operand,
+	 * the counter register %S1 the destination */
+	ia32_emitf(node, "\tmovl $%u, %S1\n", attr->count);
+	ia32_emitf(NULL, BLOCK_PREFIX "%ld:\n", get_irn_node_nr(node));
+	ia32_emitf(node, "\tmovl (%D0), %D0\n");
+	ia32_emitf(node, "\tdec %S1\n");
+	ia32_emitf(node, "\tjnz " BLOCK_PREFIX "%ld\n", get_irn_node_nr(node));
+}
+
static void emit_be_Return(const ir_node *node)
{
unsigned pop = be_Return_get_pop(node);
IA32_EMIT(LdTls);
IA32_EMIT(Minus64Bit);
IA32_EMIT(SwitchJmp);
+ IA32_EMIT(ClimbFrame);
/* benode emitter */
BE_EMIT(Copy);
return copyb_attr;
}
+/** Return the (mutable) ClimbFrame attributes of @p node. */
+ia32_climbframe_attr_t *get_ia32_climbframe_attr(ir_node *node) {
+	ia32_attr_t *attr = get_ia32_attr(node);
+	return CAST_IA32_ATTR(ia32_climbframe_attr_t, attr);
+}
+
+/** Return the (read-only) ClimbFrame attributes of @p node. */
+const ia32_climbframe_attr_t *get_ia32_climbframe_attr_const(const ir_node *node) {
+	const ia32_attr_t *attr = get_ia32_attr_const(node);
+	return CONST_CAST_IA32_ATTR(ia32_climbframe_attr_t, attr);
+}
+
/**
* Gets the type of an ia32 node.
*/
attr->pn_code = pnc;
}
+/** Initialize the attributes of a ClimbFrame node with the frame count. */
+void init_ia32_climbframe_attributes(ir_node *res, unsigned count) {
+	ia32_climbframe_attr_t *attr = get_irn_generic_attr(res);
+
+#ifndef NDEBUG
+	/* tag the attribute type so the debug-mode CAST macros can verify it */
+	attr->attr.attr_type |= IA32_ATTR_ia32_climbframe_attr_t;
+#endif
+	attr->count = count;
+}
+
/***************************************************************************************
* _ _ _
* | | | | | |
attr_a = get_ia32_condcode_attr_const(a);
attr_b = get_ia32_condcode_attr_const(b);
- if(attr_a->pn_code != attr_b->pn_code)
+ if (attr_a->pn_code != attr_b->pn_code)
return 1;
return 0;
}
+/** Compare node attributes for call nodes. */
static int ia32_compare_call_attr(ir_node *a, ir_node *b)
{
const ia32_call_attr_t *attr_a;
attr_a = get_ia32_copyb_attr_const(a);
attr_b = get_ia32_copyb_attr_const(b);
- if(attr_a->size != attr_b->size)
+ if (attr_a->size != attr_b->size)
return 1;
return 0;
const ia32_asm_attr_t *attr_a;
const ia32_asm_attr_t *attr_b;
- if(ia32_compare_nodes_attr(a, b))
+ if (ia32_compare_nodes_attr(a, b))
return 1;
attr_a = get_ia32_asm_attr_const(a);
const ia32_immediate_attr_t *attr_a = get_ia32_immediate_attr_const(a);
const ia32_immediate_attr_t *attr_b = get_ia32_immediate_attr_const(b);
- if(attr_a->symconst != attr_b->symconst ||
- attr_a->sc_sign != attr_b->sc_sign ||
- attr_a->offset != attr_b->offset)
+ if (attr_a->symconst != attr_b->symconst ||
+ attr_a->sc_sign != attr_b->sc_sign ||
+ attr_a->offset != attr_b->offset)
return 1;
return 0;
return ia32_compare_nodes_attr(a, b);
}
+/**
+ * Compare the attributes of two ClimbFrame nodes.
+ * Returns 0 if they are equal, non-zero otherwise.
+ */
+static int ia32_compare_climbframe_attr(ir_node *a, ir_node *b)
+{
+	const ia32_climbframe_attr_t *attr_a;
+	const ia32_climbframe_attr_t *attr_b;
+
+	if (ia32_compare_nodes_attr(a, b))
+		return 1;
+
+	attr_a = get_ia32_climbframe_attr_const(a);
+	attr_b = get_ia32_climbframe_attr_const(b);
+
+	return attr_a->count != attr_b->count;
+}
/* copies the ia32 attributes */
static void ia32_copy_attr(const ir_node *old_node, ir_node *new_node)
ia32_copyb_attr_t *get_ia32_copyb_attr(ir_node *node);
const ia32_copyb_attr_t *get_ia32_copyb_attr_const(const ir_node *node);
+/**
+ * Gets the ClimbFrame node attributes.
+ */
+ia32_climbframe_attr_t *get_ia32_climbframe_attr(ir_node *node);
+const ia32_climbframe_attr_t *get_ia32_climbframe_attr_const(const ir_node *node);
+
/**
* Gets the type of an ia32 node.
*/
void init_ia32_call_attributes(ir_node *res, unsigned pop, ir_type *call_tp);
void init_ia32_copyb_attributes(ir_node *res, unsigned size);
void init_ia32_condcode_attributes(ir_node *res, long pnc);
+void init_ia32_climbframe_attributes(ir_node *res, unsigned count);
/* Include the generated headers */
#include "gen_ia32_new_nodes.h"
#ifndef NDEBUG
typedef enum {
- IA32_ATTR_INVALID = 0,
- IA32_ATTR_ia32_attr_t = 1 << 0,
- IA32_ATTR_ia32_x87_attr_t = 1 << 1,
- IA32_ATTR_ia32_asm_attr_t = 1 << 2,
- IA32_ATTR_ia32_immediate_attr_t = 1 << 3,
- IA32_ATTR_ia32_condcode_attr_t = 1 << 4,
- IA32_ATTR_ia32_copyb_attr_t = 1 << 5,
- IA32_ATTR_ia32_call_attr_t = 1 << 6
+ IA32_ATTR_INVALID = 0,
+ IA32_ATTR_ia32_attr_t = 1 << 0,
+ IA32_ATTR_ia32_x87_attr_t = 1 << 1,
+ IA32_ATTR_ia32_asm_attr_t = 1 << 2,
+ IA32_ATTR_ia32_immediate_attr_t = 1 << 3,
+ IA32_ATTR_ia32_condcode_attr_t = 1 << 4,
+ IA32_ATTR_ia32_copyb_attr_t = 1 << 5,
+ IA32_ATTR_ia32_call_attr_t = 1 << 6,
+ IA32_ATTR_ia32_climbframe_attr_t = 1 << 7,
} ia32_attr_type_t;
#endif
const ia32_asm_reg_t *register_map;
};
+/**
+ * The attributes for the ClimbFrame node, which walks up the chain of
+ * saved frame pointers a fixed number of times (used to implement
+ * __builtin_frame_address/__builtin_return_address with argument > 0).
+ */
+typedef struct ia32_climbframe_attr_t ia32_climbframe_attr_t;
+struct ia32_climbframe_attr_t {
+	ia32_attr_t attr; /**< generic attribute */
+	unsigned count; /**< number of frames to climb up */
+};
+
/* the following union is necessary to indicate to the compiler that we might want to cast
* the structs (we use them to simulate OO-inheritance) */
union allow_casts_attr_t_ {
ia32_x87_attr_t x87_attr;
ia32_asm_attr_t asm_attr;
ia32_immediate_attr_t immediate_attr;
+ ia32_climbframe_attr_t climbframe_attr;
};
#ifndef NDEBUG
ia32_x87_attr_t =>
"\tinit_ia32_attributes(res, flags, in_reqs, out_reqs, exec_units, n_res);\n".
"\tinit_ia32_x87_attributes(res);",
+ ia32_climbframe_attr_t =>
+ "\tinit_ia32_attributes(res, flags, in_reqs, out_reqs, exec_units, n_res);\n".
+ "\tinit_ia32_climbframe_attributes(res, count);",
);
%compare_attr = (
ia32_asm_attr_t => "ia32_compare_asm_attr",
- ia32_attr_t => "ia32_compare_nodes_attr",
- ia32_call_attr_t => "ia32_compare_call_attr",
- ia32_condcode_attr_t => "ia32_compare_condcode_attr",
- ia32_copyb_attr_t => "ia32_compare_copyb_attr",
- ia32_immediate_attr_t => "ia32_compare_immediate_attr",
- ia32_x87_attr_t => "ia32_compare_x87_attr",
+ ia32_attr_t => "ia32_compare_nodes_attr",
+ ia32_call_attr_t => "ia32_compare_call_attr",
+ ia32_condcode_attr_t => "ia32_compare_condcode_attr",
+ ia32_copyb_attr_t => "ia32_compare_copyb_attr",
+ ia32_immediate_attr_t => "ia32_compare_immediate_attr",
+ ia32_x87_attr_t => "ia32_compare_x87_attr",
+ ia32_climbframe_attr_t => "ia32_compare_climbframe_attr",
);
%operands = (
modified_flags => $status_flags
},
+#
+# Helper node for frame-climbing: follows the chain of saved frame
+# pointers "cnt" times. Needed for __builtin_(frame|return)_address
+# with a non-zero argument.
+#
+# PS: try gcc __builtin_frame_address(100000) :-)
+#
+ClimbFrame => {
+	reg_req => { in => [ "gp", "gp", "gp"], out => [ "in_r3" ] },
+	ins => [ "frame", "cnt", "tmp" ],
+	outs => [ "res" ],
+	latency => 4, # random number
+	attr_type => "ia32_climbframe_attr_t",
+	attr => "unsigned count",
+	units => [ "GP" ],
+	mode => $mode_gp
+},
+
#-----------------------------------------------------------------------------#
# _____ _____ ______ __ _ _ _ #
# / ____/ ____| ____| / _| | | | | | #
* Transform Builtin return_address
*/
static ir_node *gen_return_address(ir_node *node) {
- ir_node *param = get_Builtin_param(node, 0);
- ir_node *frame = get_Builtin_param(node, 1);
- dbg_info *dbgi = get_irn_dbg_info(node);
- tarval *tv = get_Const_tarval(param);
- long value = get_tarval_long(tv);
+ ir_node *param = get_Builtin_param(node, 0);
+ ir_node *frame = get_Builtin_param(node, 1);
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ tarval *tv = get_Const_tarval(param);
+ unsigned long value = get_tarval_long(tv);
- ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *block = be_transform_node(get_nodes_block(node));
+ ir_node *ptr = be_transform_node(frame);
+ ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+ ir_node *load;
- if (value == 0) {
- /* the return address of the current function */
- ir_node *new_op = be_transform_node(frame);
- ir_node *noreg = ia32_new_NoReg_gp(env_cg);
- ir_node *new_node;
+ if (value > 0) {
+ ir_node *cnt = new_bd_ia32_ProduceVal(dbgi, block);
+ ir_node *res = new_bd_ia32_ProduceVal(dbgi, block);
+ ptr = new_bd_ia32_ClimbFrame(dbgi, block, ptr, cnt, res, value);
+ }
- new_node = new_bd_ia32_Load(dbgi, block, new_op, noreg, get_irg_no_mem(current_ir_graph));
+ /* load the return address from this frame */
+ load = new_bd_ia32_Load(dbgi, block, ptr, noreg, get_irg_no_mem(current_ir_graph));
- set_irn_pinned(new_node, get_irn_pinned(node));
- set_ia32_op_type(new_node, ia32_AddrModeS);
- set_ia32_ls_mode(new_node, mode_Iu);
+ set_irn_pinned(load, get_irn_pinned(node));
+ set_ia32_op_type(load, ia32_AddrModeS);
+ set_ia32_ls_mode(load, mode_Iu);
- set_ia32_am_offs_int(new_node, 0);
- set_ia32_use_frame(new_node);
- set_ia32_frame_ent(new_node, ia32_get_return_address_entity());
+ set_ia32_am_offs_int(load, 0);
+ set_ia32_use_frame(load);
+ set_ia32_frame_ent(load, ia32_get_return_address_entity());
- if (get_irn_pinned(node) == op_pin_state_floats) {
- assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
- && pn_ia32_vfld_res == pn_ia32_Load_res
- && pn_ia32_Load_res == pn_ia32_res);
- arch_irn_add_flags(new_node, arch_irn_flags_rematerializable);
- }
+ if (get_irn_pinned(node) == op_pin_state_floats) {
+ assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
+ && pn_ia32_vfld_res == pn_ia32_Load_res
+ && pn_ia32_Load_res == pn_ia32_res);
+ arch_irn_add_flags(load, arch_irn_flags_rematerializable);
+ }
- SET_IA32_ORIG_NODE(new_node, node);
- return new_rd_Proj(dbgi, current_ir_graph, block, new_node, mode_Iu, pn_ia32_Load_res);
+ SET_IA32_ORIG_NODE(load, node);
+ return new_rd_Proj(dbgi, current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
+}
+
+/**
+ * Transform Builtin frame_address
+ */
+static ir_node *gen_frame_address(ir_node *node) {
+	ir_node *param = get_Builtin_param(node, 0);
+	ir_node *frame = get_Builtin_param(node, 1);
+	dbg_info *dbgi = get_irn_dbg_info(node);
+	tarval *tv = get_Const_tarval(param);
+	unsigned long value = get_tarval_long(tv);
+
+	ir_node *block = be_transform_node(get_nodes_block(node));
+	ir_node *ptr = be_transform_node(frame);
+	ir_node *noreg = ia32_new_NoReg_gp(env_cg);
+	ir_node *load;
+	ir_entity *ent;
+
+	if (value > 0) {
+		/* climb up 'value' stack frames before loading */
+		ir_node *cnt = new_bd_ia32_ProduceVal(dbgi, block);
+		ir_node *res = new_bd_ia32_ProduceVal(dbgi, block);
+		ptr = new_bd_ia32_ClimbFrame(dbgi, block, ptr, cnt, res, value);
+	}
+
+	/* load the frame address from this frame */
+	load = new_bd_ia32_Load(dbgi, block, ptr, noreg, get_irg_no_mem(current_ir_graph));
+
+	set_irn_pinned(load, get_irn_pinned(node));
+	set_ia32_op_type(load, ia32_AddrModeS);
+	set_ia32_ls_mode(load, mode_Iu);
+
+	ent = ia32_get_frame_address_entity();
+	if (ent != NULL) {
+		set_ia32_am_offs_int(load, 0);
+		set_ia32_use_frame(load);
+		set_ia32_frame_ent(load, ent);
+	} else {
+		/* will fail anyway, but gcc does this: */
+		set_ia32_am_offs_int(load, 0);
+	}
+
+	if (get_irn_pinned(node) == op_pin_state_floats) {
+		assert(pn_ia32_xLoad_res == pn_ia32_vfld_res
+				&& pn_ia32_vfld_res == pn_ia32_Load_res
+				&& pn_ia32_Load_res == pn_ia32_res);
+		arch_irn_add_flags(load, arch_irn_flags_rematerializable);
	}
-	panic("builtin_return_address(%ld) not supported in IA32", value);
+
+	SET_IA32_ORIG_NODE(load, node);
+	return new_rd_Proj(dbgi, current_ir_graph, block, load, mode_Iu, pn_ia32_Load_res);
}
/**
case ir_bk_return_address:
return gen_return_address(node);
case ir_bk_frame_addess:
+ return gen_frame_address(node);
case ir_bk_prefetch:
break;
}