X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbenode.c;h=3916616130dbbdd21811e905ce1f75b90f76dddb;hb=ab11945ab757f3f22a717a35d639be264b0f649c;hp=b87dcfd6ca50a9b56e06958bae345148c3bc960f;hpb=93587aaef8b19becb0c79f6c332238bcae3db1da;p=libfirm diff --git a/ir/be/benode.c b/ir/be/benode.c index b87dcfd6c..391661613 100644 --- a/ir/be/benode.c +++ b/ir/be/benode.c @@ -1,863 +1,1286 @@ +/* + * This file is part of libFirm. + * Copyright (C) 2012 University of Karlsruhe. + */ + /** - * @file benode.c - * @date 17.05.2005 - * @author Sebastian Hack - * - * Backend node support. + * @file + * @brief Backend node support for generic backend nodes. + * @author Sebastian Hack + * @date 17.05.2005 * - * This file provdies Perm, Copy, Spill and Reload nodes. - * - * Copyright (C) 2005 Universitaet Karlsruhe - * Released under the GPL + * Backend node support for generic backend nodes. + * This file provides Perm, Copy, Spill and Reload nodes. */ - -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include +#include "beirg.h" #include "obst.h" #include "set.h" #include "pmap.h" #include "util.h" #include "debug.h" #include "fourcc.h" +#include "bitfiddle.h" +#include "raw_bitset.h" +#include "error.h" +#include "array_t.h" #include "irop_t.h" #include "irmode_t.h" #include "irnode_t.h" #include "ircons_t.h" #include "irprintf.h" +#include "irgwalk.h" +#include "iropt_t.h" +#include "irbackedge_t.h" +#include "irverify_t.h" #include "be_t.h" #include "belive_t.h" -#include "besched_t.h" -#include "benode_t.h" +#include "besched.h" +#include "benode.h" +#include "bearch.h" #include "beirgmod.h" -/* Sometimes we want to put const nodes into get_irn_generic_attr ... */ -#define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn)) - -static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O'); - -typedef enum _node_kind_t { - node_kind_spill, - node_kind_reload, - node_kind_perm, - node_kind_copy, - node_kind_kill, - node_kind_last -} node_kind_t; +typedef struct be_node_attr_t { + except_attr exc; +} be_node_attr_t; +/** The be_Return nodes attribute type. */ typedef struct { - node_kind_t kind; - const arch_register_class_t *cls; - ir_op *op; - int n_pos; - int *pos; -} be_op_t; - -typedef enum { - be_req_kind_old_limited, - be_req_kind_negate_old_limited, - be_req_kind_single_reg -} be_req_kind_t; + be_node_attr_t base; + int num_ret_vals; /**< number of return values */ + unsigned pop; /**< number of bytes that should be popped */ + int emit_pop; /**< if set, emit pop bytes, even if pop = 0 */ +} be_return_attr_t; +/** The be_IncSP attribute type. */ typedef struct { - arch_register_req_t req; - be_req_kind_t kind; - union { - struct { - void (*old_limited)(void *ptr, bitset_t *bs); - void *old_limited_env; - } old_limited; - - const arch_register_t *single_reg; - } x; -} be_req_t; - + be_node_attr_t base; + int offset; /**< The offset by which the stack shall be + expanded/shrinked. */ + int align; /**< whether stack should be aligned after the + IncSP */ +} be_incsp_attr_t; + +/** The be_Frame attribute type. */ typedef struct { - const arch_register_t *reg; - be_req_t req; - be_req_t in_req; -} be_reg_data_t; + be_node_attr_t base; + ir_entity *ent; + int offset; +} be_frame_attr_t; +/** The be_Call attribute type. */ typedef struct { - int max_reg_data; - arch_irn_flags_t flags; - const arch_register_class_t *cls; - be_reg_data_t *reg_data; -} be_node_attr_t; + be_node_attr_t base; + ir_entity *ent; /**< called entity if this is a static call. 
*/ + unsigned pop; + ir_type *call_tp; /**< call type, copied from the original Call */ +} be_call_attr_t; typedef struct { - be_node_attr_t node_attr; - ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */ - entity *ent; /**< The entity in the stack frame the spill writes to. */ -} be_spill_attr_t; + be_node_attr_t base; + ir_entity **in_entities; + ir_entity **out_entities; +} be_memperm_attr_t; + +ir_op *op_be_Spill; +ir_op *op_be_Reload; +ir_op *op_be_Perm; +ir_op *op_be_MemPerm; +ir_op *op_be_Copy; +ir_op *op_be_Keep; +ir_op *op_be_CopyKeep; +ir_op *op_be_Call; +ir_op *op_be_Return; +ir_op *op_be_IncSP; +ir_op *op_be_AddSP; +ir_op *op_be_SubSP; +ir_op *op_be_Start; +ir_op *op_be_FrameAddr; -typedef struct { - be_node_attr_t node_attr; - int offset; /**< The offset by which the stack shall be increased/decreased. */ - be_stack_dir_t dir; /**< The direction in which the stack shall be modified (along or in the other direction). */ -} be_stack_attr_t; - -static ir_op *op_Spill; -static ir_op *op_Reload; -static ir_op *op_Perm; -static ir_op *op_Copy; -static ir_op *op_Keep; -static ir_op *op_Call; -static ir_op *op_IncSP; -static ir_op *op_AddSP; -static ir_op *op_RegParams; -static ir_op *op_StackParam; -static ir_op *op_NoReg; - -static int beo_base = -1; - -static const ir_op_ops be_node_op_ops; - -#define N irop_flag_none -#define L irop_flag_labeled -#define C irop_flag_commutative -#define X irop_flag_cfopcode -#define I irop_flag_ip_cfopcode -#define F irop_flag_fragile -#define Y irop_flag_forking -#define H irop_flag_highlevel -#define c irop_flag_constlike -#define K irop_flag_keep - -void be_node_init(void) { - static int inited = 0; - - if(inited) - return; +/** + * Compare the attributes of two be_FrameAddr nodes. + * + * @return zero if both nodes have identically attributes + */ +static int FrameAddr_cmp_attr(const ir_node *a, const ir_node *b) +{ + const be_frame_attr_t *a_attr = (const be_frame_attr_t*)get_irn_generic_attr_const(a); + const be_frame_attr_t *b_attr = (const be_frame_attr_t*)get_irn_generic_attr_const(b); - inited = 1; + if (a_attr->ent != b_attr->ent || a_attr->offset != b_attr->offset) + return 1; - /* Acquire all needed opcodes. 
*/ - beo_base = get_next_ir_opcodes(beo_Last - 1); - - op_Spill = new_ir_op(beo_base + beo_Spill, "Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops); - op_Reload = new_ir_op(beo_base + beo_Reload, "Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_Perm = new_ir_op(beo_base + beo_Perm, "Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_Copy = new_ir_op(beo_base + beo_Copy, "Copy", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_Keep = new_ir_op(beo_base + beo_Keep, "Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_NoReg = new_ir_op(beo_base + beo_Keep, "NoReg", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_Call = new_ir_op(beo_base + beo_Keep, "Call", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_AddSP = new_ir_op(beo_base + beo_Keep, "AddSP", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops); - op_IncSP = new_ir_op(beo_base + beo_Keep, "IncSP", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops); - op_RegParams = new_ir_op(beo_base + beo_Keep, "RegParams", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_StackParam = new_ir_op(beo_base + beo_Keep, "StackParam", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_stack_attr_t), &be_node_op_ops); - - set_op_tag(op_Spill, &be_node_tag); - set_op_tag(op_Reload, &be_node_tag); - set_op_tag(op_Perm, &be_node_tag); - set_op_tag(op_Copy, &be_node_tag); - set_op_tag(op_Keep, &be_node_tag); - set_op_tag(op_NoReg, &be_node_tag); - set_op_tag(op_Call, &be_node_tag); - set_op_tag(op_AddSP, &be_node_tag); - set_op_tag(op_IncSP, &be_node_tag); - set_op_tag(op_RegParams, &be_node_tag); - set_op_tag(op_StackParam, &be_node_tag); -} - -static void *init_node_attr(ir_node* irn, const arch_register_class_t *cls, ir_graph *irg, int max_reg_data) -{ - be_node_attr_t *a = get_irn_attr(irn); - - a->max_reg_data = max_reg_data; - a->flags = arch_irn_flags_none; - a->cls = cls; - a->reg_data = NULL; - - if(max_reg_data > 0) { - int i; + return be_nodes_equal(a, b); +} - a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data); - memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0])); - for(i = 0; i < max_reg_data; ++i) { - a->reg_data[i].req.req.cls = cls; - a->reg_data[i].req.req.type = arch_register_req_type_normal; - } - } +/** + * Compare the attributes of two be_Return nodes. + * + * @return zero if both nodes have identically attributes + */ +static int Return_cmp_attr(const ir_node *a, const ir_node *b) +{ + const be_return_attr_t *a_attr = (const be_return_attr_t*)get_irn_generic_attr_const(a); + const be_return_attr_t *b_attr = (const be_return_attr_t*)get_irn_generic_attr_const(b); - return a; + if (a_attr->num_ret_vals != b_attr->num_ret_vals) + return 1; + if (a_attr->pop != b_attr->pop) + return 1; + if (a_attr->emit_pop != b_attr->emit_pop) + return 1; + + return be_nodes_equal(a, b); } -static INLINE int is_be_node(const ir_node *irn) +/** + * Compare the attributes of two be_IncSP nodes. 
+ * + * @return zero if both nodes have identically attributes + */ +static int IncSP_cmp_attr(const ir_node *a, const ir_node *b) { - return get_op_tag(get_irn_op(irn)) == &be_node_tag; + const be_incsp_attr_t *a_attr = (const be_incsp_attr_t*)get_irn_generic_attr_const(a); + const be_incsp_attr_t *b_attr = (const be_incsp_attr_t*)get_irn_generic_attr_const(b); + + if (a_attr->offset != b_attr->offset) + return 1; + + return be_nodes_equal(a, b); } -be_opcode_t get_irn_be_opcode(const ir_node *irn) +/** + * Compare the attributes of two be_Call nodes. + * + * @return zero if both nodes have identically attributes + */ +static int Call_cmp_attr(const ir_node *a, const ir_node *b) { - return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp; + const be_call_attr_t *a_attr = (const be_call_attr_t*)get_irn_generic_attr_const(a); + const be_call_attr_t *b_attr = (const be_call_attr_t*)get_irn_generic_attr_const(b); + + if (a_attr->ent != b_attr->ent || + a_attr->call_tp != b_attr->call_tp) + return 1; + + return be_nodes_equal(a, b); } -static int redir_proj(const ir_node **node, int pos) +static arch_register_req_t *allocate_reg_req(ir_graph *const irg) { - const ir_node *n = *node; + struct obstack *obst = be_get_be_obst(irg); - if(is_Proj(n)) { - assert(pos == -1 && "Illegal pos for a Proj"); - *node = get_Proj_pred(n); - return get_Proj_proj(n); - } + arch_register_req_t *req = OALLOCZ(obst, arch_register_req_t); + return req; +} - return 0; +void be_set_constr_in(ir_node *node, int pos, const arch_register_req_t *req) +{ + backend_info_t *info = be_get_info(node); + assert(pos < get_irn_arity(node)); + info->in_reqs[pos] = req; +} + +void be_set_constr_out(ir_node *node, int pos, const arch_register_req_t *req) +{ + backend_info_t *info = be_get_info(node); + info->out_infos[pos].req = req; } -static void -be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg) +/** + * Initializes the generic attribute of all be nodes and return it. 
+ */ +static void init_node_attr(ir_node *node, int n_inputs, int n_outputs) { - int out_pos; - be_node_attr_t *a; + assert(n_outputs >= 0); - out_pos = redir_proj((const ir_node **) &irn, -1); - a = get_irn_attr(irn); + ir_graph *irg = get_irn_irg(node); + struct obstack *obst = be_get_be_obst(irg); + backend_info_t *info = be_get_info(node); + const arch_register_req_t **in_reqs; - assert(is_be_node(irn)); - assert(out_pos < a->max_reg_data && "position too high"); - a->reg_data[out_pos].reg = reg; + if (n_inputs >= 0) { + int i; + assert(n_inputs == get_irn_arity(node)); + in_reqs = OALLOCN(obst, const arch_register_req_t*, n_inputs); + for (i = 0; i < n_inputs; ++i) { + in_reqs[i] = arch_no_register_req; + } + } else { + in_reqs = NEW_ARR_F(const arch_register_req_t*, 0); + } + info->in_reqs = in_reqs; + + info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_outputs); + for (int i = 0; i < n_outputs; ++i) { + info->out_infos[i].req = arch_no_register_req; + } } +static void add_register_req_in(ir_node *node, const arch_register_req_t *req) +{ + backend_info_t *info = be_get_info(node); + ARR_APP1(const arch_register_req_t*, info->in_reqs, req); +} -ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill, ir_node *ctx) +ir_node *be_new_Spill(const arch_register_class_t *cls, + const arch_register_class_t *cls_frame, ir_node *bl, + ir_node *frame, ir_node *to_spill) { - be_spill_attr_t *a; - ir_node *in[1]; - ir_node *res; + be_frame_attr_t *a; + ir_node *in[2]; + ir_node *res; + ir_graph *irg = get_Block_irg(bl); + + in[0] = frame; + in[1] = to_spill; + res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in); + init_node_attr(res, 2, 1); + a = (be_frame_attr_t*) get_irn_generic_attr(res); + a->ent = NULL; + a->offset = 0; + a->base.exc.pin_state = op_pin_state_pinned; + + be_node_set_reg_class_in(res, n_be_Spill_frame, cls_frame); + be_node_set_reg_class_in(res, n_be_Spill_val, cls); + arch_set_irn_register_req_out(res, 0, arch_no_register_req); + arch_add_irn_flags(res, arch_irn_flags_spill); - in[0] = to_spill; - res = new_ir_node(NULL, irg, bl, op_Spill, mode_M, 1, in); - a = init_node_attr(res, cls, irg, 0); - a->ent = NULL; - a->spill_ctx = ctx; return res; } -ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *mem) +ir_node *be_new_Reload(const arch_register_class_t *cls, + const arch_register_class_t *cls_frame, ir_node *block, + ir_node *frame, ir_node *mem, ir_mode *mode) { - ir_node *in[1]; - ir_node *res; + ir_node *in[2]; + ir_node *res; + ir_graph *irg = get_Block_irg(block); + be_frame_attr_t *a; + + in[0] = frame; + in[1] = mem; + res = new_ir_node(NULL, irg, block, op_be_Reload, mode, 2, in); + + init_node_attr(res, 2, 1); + be_node_set_reg_class_out(res, 0, cls); + + be_node_set_reg_class_in(res, n_be_Reload_frame, cls_frame); + arch_set_irn_flags(res, arch_irn_flags_rematerializable); + + a = (be_frame_attr_t*) get_irn_generic_attr(res); + a->ent = NULL; + a->offset = 0; + a->base.exc.pin_state = op_pin_state_pinned; - in[0] = mem; - res = new_ir_node(NULL, irg, bl, op_Reload, mode, 1, in); - init_node_attr(res, cls, irg, 1); return res; } -ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[]) +ir_node *be_get_Reload_mem(const ir_node *irn) { - ir_node *irn = new_ir_node(NULL, irg, bl, op_Perm, mode_T, n, in); - init_node_attr(irn, cls, irg, n); + assert(be_is_Reload(irn)); + return get_irn_n(irn, 
n_be_Reload_mem); +} + +ir_node *be_get_Reload_frame(const ir_node *irn) +{ + assert(be_is_Reload(irn)); + return get_irn_n(irn, n_be_Reload_frame); +} + +ir_node *be_get_Spill_val(const ir_node *irn) +{ + assert(be_is_Spill(irn)); + return get_irn_n(irn, n_be_Spill_val); +} + +ir_node *be_get_Spill_frame(const ir_node *irn) +{ + assert(be_is_Spill(irn)); + return get_irn_n(irn, n_be_Spill_frame); +} + +ir_node *be_new_Perm(const arch_register_class_t *cls, ir_node *block, + int n, ir_node *in[]) +{ + int i; + ir_graph *irg = get_Block_irg(block); + be_node_attr_t *attr; + + ir_node *irn = new_ir_node(NULL, irg, block, op_be_Perm, mode_T, n, in); + init_node_attr(irn, n, n); + attr = (be_node_attr_t*) get_irn_generic_attr(irn); + attr->exc.pin_state = op_pin_state_pinned; + for (i = 0; i < n; ++i) { + const ir_node *input = in[i]; + const arch_register_req_t *req = arch_get_irn_register_req(input); + if (req->width == 1) { + be_set_constr_in(irn, i, cls->class_req); + be_set_constr_out(irn, i, cls->class_req); + } else { + arch_register_req_t *const new_req = allocate_reg_req(irg); + new_req->cls = cls; + new_req->type = (req->type & arch_register_req_type_aligned); + new_req->width = req->width; + be_set_constr_in(irn, i, new_req); + be_set_constr_out(irn, i, new_req); + } + } + return irn; } -ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op) +void be_Perm_reduce(ir_node *perm, int new_size, int *map) +{ + int arity = get_irn_arity(perm); + const arch_register_req_t **old_in_reqs + = ALLOCAN(const arch_register_req_t*, arity); + reg_out_info_t *old_infos = ALLOCAN(reg_out_info_t, arity); + backend_info_t *info = be_get_info(perm); + ir_node **new_in; + int i; + + assert(be_is_Perm(perm)); + assert(new_size <= arity); + + new_in = ALLOCAN(ir_node*, new_size); + + /* save the old register data */ + memcpy(old_in_reqs, info->in_reqs, arity * sizeof(old_in_reqs[0])); + memcpy(old_infos, info->out_infos, arity * sizeof(old_infos[0])); + + /* compose the new in array and set the new register data directly */ + for (i = 0; i < new_size; ++i) { + int idx = map[i]; + new_in[i] = get_irn_n(perm, idx); + info->in_reqs[i] = old_in_reqs[idx]; + info->out_infos[i] = old_infos[idx]; + } + + set_irn_in(perm, new_size, new_in); +} + +ir_node *be_new_MemPerm(ir_node *block, int n, ir_node *in[]) +{ + ir_graph *irg = get_Block_irg(block); + const arch_env_t *arch_env = be_get_irg_arch_env(irg); + ir_node *frame = get_irg_frame(irg); + const arch_register_t *sp = arch_env->sp; + ir_node *irn; + be_memperm_attr_t *attr; + ir_node **real_in; + + real_in = ALLOCAN(ir_node*, n + 1); + real_in[0] = frame; + memcpy(&real_in[1], in, n * sizeof(real_in[0])); + + irn = new_ir_node(NULL, irg, block, op_be_MemPerm, mode_T, n+1, real_in); + + init_node_attr(irn, n + 1, n); + be_node_set_reg_class_in(irn, 0, sp->reg_class); + + attr = (be_memperm_attr_t*)get_irn_generic_attr(irn); + attr->in_entities = OALLOCNZ(get_irg_obstack(irg), ir_entity*, n); + attr->out_entities = OALLOCNZ(get_irg_obstack(irg), ir_entity*, n); + + return irn; +} + +ir_node *be_new_Copy(ir_node *bl, ir_node *op) { ir_node *in[1]; ir_node *res; + be_node_attr_t *attr; + ir_graph *irg = get_Block_irg(bl); + const arch_register_req_t *in_req = arch_get_irn_register_req(op); + const arch_register_class_t *cls = in_req->cls; in[0] = op; - res = new_ir_node(NULL, irg, bl, op_Copy, get_irn_mode(op), 1, in); - init_node_attr(res, cls, irg, 1); + res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, 
in); + init_node_attr(res, 1, 1); + attr = (be_node_attr_t*) get_irn_generic_attr(res); + attr->exc.pin_state = op_pin_state_floats; + be_node_set_reg_class_in(res, 0, cls); + be_node_set_reg_class_out(res, 0, cls); + + arch_register_req_t *const req = allocate_reg_req(irg); + req->cls = cls; + req->type = arch_register_req_type_should_be_same + | (in_req->type & arch_register_req_type_aligned); + req->other_same = 1U << 0; + req->width = in_req->width; + be_set_constr_out(res, 0, req); + return res; } -ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[]) +ir_node *be_get_Copy_op(const ir_node *cpy) { - ir_node *irn; + return get_irn_n(cpy, n_be_Copy_op); +} - irn = new_ir_node(NULL, irg, bl, op_Keep, mode_ANY, n, in); - init_node_attr(irn, cls, irg, 0); - keep_alive(irn); - return irn; +ir_node *be_new_Keep(ir_node *block, int n, ir_node *in[]) +{ + int i; + ir_node *res; + ir_graph *irg = get_Block_irg(block); + be_node_attr_t *attr; + + res = new_ir_node(NULL, irg, block, op_be_Keep, mode_ANY, -1, NULL); + init_node_attr(res, -1, 1); + attr = (be_node_attr_t*) get_irn_generic_attr(res); + attr->exc.pin_state = op_pin_state_pinned; + + for (i = 0; i < n; ++i) { + ir_node *pred = in[i]; + add_irn_n(res, pred); + const arch_register_req_t *req = arch_get_irn_register_req(pred); + req = req->cls != NULL ? req->cls->class_req : arch_no_register_req; + add_register_req_in(res, req); + } + keep_alive(res); + + return res; } -ir_node *be_new_Call(ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[]) +void be_Keep_add_node(ir_node *keep, const arch_register_class_t *cls, ir_node *node) { - int real_n = 3 + n; - ir_node *irn; + assert(be_is_Keep(keep)); + add_irn_n(keep, node); + add_register_req_in(keep, cls->class_req); +} + +ir_node *be_new_Call(dbg_info *dbg, ir_node *bl, ir_node *mem, + const arch_register_req_t *sp_req, ir_node *sp, + const arch_register_req_t *ptr_req, ir_node *ptr, + int n_outs, int n, ir_node *in[], ir_type *call_tp) +{ + be_call_attr_t *a; + int real_n = n_be_Call_first_arg + n; ir_node **real_in; - real_in = malloc(sizeof(real_in[0]) * (real_n)); + NEW_ARR_A(ir_node *, real_in, real_n); + real_in[n_be_Call_mem] = mem; + real_in[n_be_Call_sp] = sp; + real_in[n_be_Call_ptr] = ptr; + memcpy(&real_in[n_be_Call_first_arg], in, n * sizeof(in[0])); + + ir_graph *const irg = get_Block_irg(bl); + ir_node *const irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in); + init_node_attr(irn, real_n, n_outs); + a = (be_call_attr_t*)get_irn_generic_attr(irn); + a->ent = NULL; + a->call_tp = call_tp; + a->pop = 0; + a->base.exc.pin_state = op_pin_state_pinned; + be_set_constr_in(irn, n_be_Call_sp, sp_req); + be_set_constr_in(irn, n_be_Call_ptr, ptr_req); + return irn; +} - real_in[0] = mem; - real_in[1] = sp; - real_in[2] = ptr; - memcpy(&real_in[3], in, n * sizeof(in[0])); +ir_entity *be_Call_get_entity(const ir_node *call) +{ + const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(call); + assert(be_is_Call(call)); + return a->ent; +} - irn = new_ir_node(NULL, irg, bl, op_Call, mode_T, real_n, real_in); - init_node_attr(irn, NULL, irg, (n_outs > real_n ? 
n_outs : real_n)); - return irn; +void be_Call_set_entity(ir_node *call, ir_entity *ent) +{ + be_call_attr_t *a = (be_call_attr_t*)get_irn_generic_attr(call); + assert(be_is_Call(call)); + a->ent = ent; +} + +ir_type *be_Call_get_type(ir_node *call) +{ + const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(call); + assert(be_is_Call(call)); + return a->call_tp; } -ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, unsigned offset, be_stack_dir_t dir) +void be_Call_set_type(ir_node *call, ir_type *call_tp) { - be_stack_attr_t *a; + be_call_attr_t *a = (be_call_attr_t*)get_irn_generic_attr(call); + assert(be_is_Call(call)); + a->call_tp = call_tp; +} + +void be_Call_set_pop(ir_node *call, unsigned pop) +{ + be_call_attr_t *a = (be_call_attr_t*)get_irn_generic_attr(call); + a->pop = pop; +} + +unsigned be_Call_get_pop(const ir_node *call) +{ + const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(call); + return a->pop; +} + +ir_node *be_new_Return(dbg_info *const dbg, ir_node *const block, int const n_res, unsigned const pop, int const n, ir_node **const in) +{ + ir_graph *const irg = get_Block_irg(block); + ir_node *const res = new_ir_node(dbg, irg, block, op_be_Return, mode_X, n, in); + init_node_attr(res, n, 1); + be_set_constr_out(res, 0, arch_no_register_req); + + be_return_attr_t *const a = (be_return_attr_t*)get_irn_generic_attr(res); + a->num_ret_vals = n_res; + a->pop = pop; + a->emit_pop = 0; + a->base.exc.pin_state = op_pin_state_pinned; + + return res; +} + +int be_Return_get_n_rets(const ir_node *ret) +{ + const be_return_attr_t *a = (const be_return_attr_t*)get_irn_generic_attr_const(ret); + return a->num_ret_vals; +} + +unsigned be_Return_get_pop(const ir_node *ret) +{ + const be_return_attr_t *a = (const be_return_attr_t*)get_irn_generic_attr_const(ret); + return a->pop; +} + +int be_Return_get_emit_pop(const ir_node *ret) +{ + const be_return_attr_t *a = (const be_return_attr_t*)get_irn_generic_attr_const(ret); + return a->emit_pop; +} + +void be_Return_set_emit_pop(ir_node *ret, int emit_pop) +{ + be_return_attr_t *a = (be_return_attr_t*)get_irn_generic_attr(ret); + a->emit_pop = emit_pop; +} + +ir_node *be_new_IncSP(const arch_register_t *sp, ir_node *bl, + ir_node *old_sp, int offset, int align) +{ + be_incsp_attr_t *a; ir_node *irn; ir_node *in[1]; + ir_graph *irg = get_Block_irg(bl); in[0] = old_sp; - irn = new_ir_node(NULL, irg, bl, op_IncSP, sp->reg_class->mode, 1, in); - a = init_node_attr(irn, sp->reg_class, irg, 1); - a->dir = dir; - a->offset = offset; - - a->node_attr.flags |= arch_irn_flags_ignore; + irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, + ARRAY_SIZE(in), in); + init_node_attr(irn, 1, 1); + a = (be_incsp_attr_t*)get_irn_generic_attr(irn); + a->offset = offset; + a->align = align; + a->base.exc.pin_state = op_pin_state_pinned; /* Set output constraint to stack register. 
*/ - be_set_constr_single_reg(irn, -1, sp); - be_node_set_irn_reg(NULL, irn, sp); + be_node_set_reg_class_in(irn, 0, sp->reg_class); + be_set_constr_single_reg_out(irn, 0, sp, arch_register_req_type_produces_sp); return irn; } -ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op) +ir_node *be_new_AddSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp, + ir_node *sz) { - be_node_attr_t *a; ir_node *irn; - ir_node *in[2]; + ir_node *in[n_be_AddSP_last]; + ir_graph *irg; + be_node_attr_t *attr; - in[0] = old_sp; - in[1] = op; - irn = new_ir_node(NULL, irg, bl, op_AddSP, sp->reg_class->mode, 2, in); - a = init_node_attr(irn, sp->reg_class, irg, 1); - a->flags |= arch_irn_flags_ignore; + in[n_be_AddSP_old_sp] = old_sp; + in[n_be_AddSP_size] = sz; + + irg = get_Block_irg(bl); + irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, n_be_AddSP_last, in); + init_node_attr(irn, n_be_AddSP_last, pn_be_AddSP_last); + attr = (be_node_attr_t*) get_irn_generic_attr(irn); + attr->exc.pin_state = op_pin_state_pinned; /* Set output constraint to stack register. */ - be_set_constr_single_reg(irn, -1, sp); - be_node_set_irn_reg(NULL, irn, sp); + be_set_constr_single_reg_in(irn, n_be_AddSP_old_sp, sp, + arch_register_req_type_none); + be_node_set_reg_class_in(irn, n_be_AddSP_size, sp->reg_class); + be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp, + arch_register_req_type_produces_sp); return irn; } -ir_node *be_new_NoReg(const arch_register_t *reg, ir_graph *irg, ir_node *bl) +ir_node *be_new_SubSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp, ir_node *sz) { - be_node_attr_t *a; ir_node *irn; - ir_node *in[1]; + ir_node *in[n_be_SubSP_last]; + ir_graph *irg; + be_node_attr_t *attr; + + in[n_be_SubSP_old_sp] = old_sp; + in[n_be_SubSP_size] = sz; + + irg = get_Block_irg(bl); + irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, n_be_SubSP_last, in); + init_node_attr(irn, n_be_SubSP_last, pn_be_SubSP_last); + attr = (be_node_attr_t*) get_irn_generic_attr(irn); + attr->exc.pin_state = op_pin_state_pinned; + + /* Set output constraint to stack register. 
*/ + be_set_constr_single_reg_in(irn, n_be_SubSP_old_sp, sp, + arch_register_req_type_none); + be_node_set_reg_class_in(irn, n_be_SubSP_size, sp->reg_class); + be_set_constr_single_reg_out(irn, pn_be_SubSP_sp, sp, arch_register_req_type_produces_sp); - irn = new_ir_node(NULL, irg, bl, op_NoReg, reg->reg_class->mode, 0, in); - a = init_node_attr(irn, reg->reg_class, irg, 1); - a->flags |= arch_irn_flags_ignore; - be_set_constr_single_reg(irn, -1, reg); - be_node_set_irn_reg(NULL, irn, reg); return irn; } -ir_node *be_new_StackParam(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, unsigned offset) +ir_node *be_new_Start(dbg_info *dbgi, ir_node *bl, int n_outs) +{ + ir_node *res; + ir_graph *irg = get_Block_irg(bl); + be_node_attr_t *attr; + + res = new_ir_node(dbgi, irg, bl, op_be_Start, mode_T, 0, NULL); + init_node_attr(res, 0, n_outs); + attr = (be_node_attr_t*) get_irn_generic_attr(res); + attr->exc.pin_state = op_pin_state_pinned; + + return res; +} + +ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_node *bl, ir_node *frame, ir_entity *ent) { - be_stack_attr_t *a; + be_frame_attr_t *a; ir_node *irn; ir_node *in[1]; - - in[0] = frame_pointer; - irn = new_ir_node(NULL, irg, bl, op_StackParam, mode, 1, in); - a = init_node_attr(irn, cls, irg, 1); - a->offset = offset; - return irn; + ir_graph *irg = get_Block_irg(bl); + + in[0] = frame; + irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in); + init_node_attr(irn, 1, 1); + a = (be_frame_attr_t*)get_irn_generic_attr(irn); + a->ent = ent; + a->offset = 0; + a->base.exc.pin_state = op_pin_state_floats; + be_node_set_reg_class_in(irn, 0, cls_frame); + be_node_set_reg_class_out(irn, 0, cls_frame); + + return optimize_node(irn); } -int be_is_Spill (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Spill ; } -int be_is_Reload (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Reload ; } -int be_is_Copy (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Copy ; } -int be_is_Perm (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Perm ; } -int be_is_Keep (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Keep ; } -int be_is_Call (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_Call ; } -int be_is_IncSP (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_IncSP ; } -int be_is_AddSP (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_AddSP ; } -int be_is_RegParams (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_RegParams ; } -int be_is_StackParam (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_StackParam ; } -int be_is_NoReg (const ir_node *irn) { return get_irn_be_opcode(irn) == beo_NoReg ; } - -static void be_limited(void *data, bitset_t *bs) -{ - be_req_t *req = data; - - switch(req->kind) { - case be_req_kind_negate_old_limited: - case be_req_kind_old_limited: - req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs); - if(req->kind == be_req_kind_negate_old_limited) - bitset_flip_all(bs); - break; - case be_req_kind_single_reg: - bitset_clear_all(bs); - bitset_set(bs, req->x.single_reg->index); - break; - } +ir_node *be_get_FrameAddr_frame(const ir_node *node) +{ + assert(be_is_FrameAddr(node)); + return get_irn_n(node, n_be_FrameAddr_ptr); } -void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg) +ir_entity *be_get_FrameAddr_entity(const ir_node *node) { - int idx = pos < 0 ? 
-(pos - 1) : pos; - be_node_attr_t *a = get_irn_attr(irn); - be_reg_data_t *rd = &a->reg_data[idx]; - be_req_t *r = pos < 0 ? &rd->req : &rd->in_req; + const be_frame_attr_t *attr = (const be_frame_attr_t*)get_irn_generic_attr_const(node); + return attr->ent; +} - assert(is_be_node(irn)); - assert(!(pos >= 0) || pos < get_irn_arity(irn)); - assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data); +ir_node *be_new_CopyKeep(ir_node *bl, ir_node *src, int n, ir_node *in_keep[]) +{ + ir_node *irn; + ir_node **in = ALLOCAN(ir_node*, n + 1); + ir_graph *irg = get_Block_irg(bl); + const arch_register_req_t *req = arch_get_irn_register_req(src); + const arch_register_class_t *cls = req->cls; + ir_mode *mode = get_irn_mode(src); + be_node_attr_t *attr; + + in[0] = src; + memcpy(&in[1], in_keep, n * sizeof(in[0])); + irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in); + init_node_attr(irn, n + 1, 1); + attr = (be_node_attr_t*) get_irn_generic_attr(irn); + attr->exc.pin_state = op_pin_state_floats; + be_node_set_reg_class_in(irn, 0, cls); + be_node_set_reg_class_out(irn, 0, cls); + for (int i = 0; i < n; ++i) { + ir_node *pred = in_keep[i]; + const arch_register_req_t *req = arch_get_irn_register_req(pred); + req = req->cls != NULL ? req->cls->class_req : arch_no_register_req; + be_set_constr_in(irn, i+1, req); + } - r->kind = be_req_kind_single_reg; - r->x.single_reg = reg; - r->req.limited = be_limited; - r->req.limited_env = r; - r->req.type = arch_register_req_type_limited; - r->req.cls = reg->reg_class; + return irn; } -void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req) +ir_node *be_new_CopyKeep_single(ir_node *bl, ir_node *src, ir_node *keep) { - int idx = pos < 0 ? -(pos - 1) : pos; - be_node_attr_t *a = get_irn_attr(irn); - be_reg_data_t *rd = &a->reg_data[idx]; - be_req_t *r = pos < 0 ? 
&rd->req : &rd->in_req; - - assert(is_be_node(irn)); - assert(!(pos >= 0) || pos < get_irn_arity(irn)); - assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data); - assert(arch_register_req_is(req, limited)); + return be_new_CopyKeep(bl, src, 1, &keep); +} - r->kind = be_req_kind_old_limited; - r->req.limited = be_limited; - r->req.limited_env = r; - r->req.type = arch_register_req_type_limited; - r->req.cls = req->cls; +ir_node *be_get_CopyKeep_op(const ir_node *cpy) +{ + return get_irn_n(cpy, n_be_CopyKeep_op); +} - r->x.old_limited.old_limited = req->limited; - r->x.old_limited.old_limited_env = req->limited_env; +void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) +{ + set_irn_n(cpy, n_be_CopyKeep_op, op); } -void be_set_IncSP_offset(ir_node *irn, unsigned offset) +static bool be_has_frame_entity(const ir_node *irn) { - be_stack_attr_t *a = get_irn_attr(irn); - assert(be_is_IncSP(irn)); - a->offset = offset; + switch (get_irn_opcode(irn)) { + case beo_Spill: + case beo_Reload: + case beo_FrameAddr: + return true; + default: + return false; + } } -unsigned be_get_IncSP_offset(ir_node *irn) +ir_entity *be_get_frame_entity(const ir_node *irn) { - be_stack_attr_t *a = get_irn_attr(irn); - assert(be_is_IncSP(irn)); - return a->offset; + if (be_has_frame_entity(irn)) { + const be_frame_attr_t *a = (const be_frame_attr_t*)get_irn_generic_attr_const(irn); + return a->ent; + } + return NULL; } -void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir) +int be_get_frame_offset(const ir_node *irn) { - be_stack_attr_t *a = get_irn_attr(irn); - assert(be_is_IncSP(irn)); - a->dir = dir; + assert(is_be_node(irn)); + if (be_has_frame_entity(irn)) { + const be_frame_attr_t *a = (const be_frame_attr_t*)get_irn_generic_attr_const(irn); + return a->offset; + } + return 0; } -be_stack_dir_t be_get_IncSP_direction(ir_node *irn) +void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent) { - be_stack_attr_t *a = get_irn_attr(irn); - assert(be_is_IncSP(irn)); - return a->dir; + const be_memperm_attr_t *attr = (const be_memperm_attr_t*)get_irn_generic_attr_const(irn); + + assert(be_is_MemPerm(irn)); + assert(n < be_get_MemPerm_entity_arity(irn)); + + attr->in_entities[n] = ent; } -void be_set_Spill_entity(ir_node *irn, entity *ent) +ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n) { - be_spill_attr_t *a = get_irn_attr(irn); - assert(be_is_Spill(irn)); - a->ent = ent; + const be_memperm_attr_t *attr = (const be_memperm_attr_t*)get_irn_generic_attr_const(irn); + + assert(be_is_MemPerm(irn)); + assert(n < be_get_MemPerm_entity_arity(irn)); + + return attr->in_entities[n]; } -static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr) +void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent) { - if(get_irn_visited(irn) < visited_nr) { - set_irn_visited(irn, visited_nr); + const be_memperm_attr_t *attr = (const be_memperm_attr_t*)get_irn_generic_attr_const(irn); - if(is_Phi(irn)) { - int i, n; - for(i = 0, n = get_irn_arity(irn); i < n; ++i) { - ir_node *n = find_a_spill_walker(get_irn_n(irn, i), visited_nr); - if(n != NULL) - return n; - } - } + assert(be_is_MemPerm(irn)); + assert(n < be_get_MemPerm_entity_arity(irn)); - else if(get_irn_be_opcode(irn) == beo_Spill) - return irn; - } + attr->out_entities[n] = ent; +} - return NULL; +ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n) +{ + const be_memperm_attr_t *attr = (const be_memperm_attr_t*)get_irn_generic_attr_const(irn); + + assert(be_is_MemPerm(irn)); + assert(n < 
be_get_MemPerm_entity_arity(irn)); + + return attr->out_entities[n]; } -ir_node *be_get_Spill_context(const ir_node *irn) { - const be_spill_attr_t *a = get_irn_attr(irn); - assert(be_is_Spill(irn)); - return a->spill_ctx; +int be_get_MemPerm_entity_arity(const ir_node *irn) +{ + return get_irn_arity(irn) - 1; } -/** - * Finds a spill for a reload. - * If the reload is directly using the spill, this is simple, - * else we perform DFS from the reload (over all PhiMs) and return - * the first spill node we find. - */ -static INLINE ir_node *find_a_spill(ir_node *irn) +const arch_register_req_t *be_create_reg_req(struct obstack *obst, + const arch_register_t *reg, arch_register_req_type_t additional_types) { - ir_graph *irg = get_irn_irg(irn); - unsigned visited_nr = get_irg_visited(irg) + 1; + arch_register_req_t *req = OALLOC(obst, arch_register_req_t); + const arch_register_class_t *cls = reg->reg_class; + unsigned *limited_bitset; - assert(be_is_Reload(irn)); - set_irg_visited(irg, visited_nr); - return find_a_spill_walker(irn, visited_nr); + limited_bitset = rbitset_obstack_alloc(obst, arch_register_class_n_regs(cls)); + rbitset_set(limited_bitset, reg->index); + + req->type = arch_register_req_type_limited | additional_types; + req->cls = cls; + req->limited = limited_bitset; + req->width = 1; + return req; } -entity *be_get_spill_entity(ir_node *irn) +void be_set_constr_single_reg_in(ir_node *node, int pos, + const arch_register_t *reg, arch_register_req_type_t additional_types) { - int opc = get_irn_opcode(irn); + const arch_register_req_t *req; + + if (additional_types == 0) { + req = reg->single_req; + } else { + ir_graph *irg = get_irn_irg(node); + struct obstack *obst = be_get_be_obst(irg); + req = be_create_reg_req(obst, reg, additional_types); + } + be_set_constr_in(node, pos, req); +} - switch(get_irn_be_opcode(irn)) { - case beo_Reload: - return be_get_spill_entity(find_a_spill(irn)); - case beo_Spill: - { - be_spill_attr_t *a = get_irn_attr(irn); - return a->ent; - } - default: - assert(0 && "Must give spill/reload node"); +void be_set_constr_single_reg_out(ir_node *node, int pos, + const arch_register_t *reg, arch_register_req_type_t additional_types) +{ + ir_graph *irg = get_irn_irg(node); + be_irg_t *birg = be_birg_from_irg(irg); + const arch_register_req_t *req; + + /* if we have an ignore register, add ignore flag and just assign it */ + if (!rbitset_is_set(birg->allocatable_regs, reg->global_index)) { + additional_types |= arch_register_req_type_ignore; } - return NULL; + if (additional_types == 0) { + req = reg->single_req; + } else { + struct obstack *obst = be_get_be_obst(irg); + req = be_create_reg_req(obst, reg, additional_types); + } + + arch_set_irn_register_out(node, pos, reg); + be_set_constr_out(node, pos, req); } -ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx) +void be_node_set_reg_class_in(ir_node *irn, int pos, + const arch_register_class_t *cls) { - const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1); - - ir_node *bl = get_nodes_block(irn); - ir_graph *irg = get_irn_irg(bl); - ir_node *spill = be_new_Spill(cls, irg, bl, irn, ctx); - ir_node *insert; + be_set_constr_in(irn, pos, cls->class_req); +} - /* - * search the right insertion point. a spill of a phi cannot be put - * directly after the phi, if there are some phis behind the one which - * is spilled. 
- */ - insert = sched_next(irn); - while(is_Phi(insert) && !sched_is_end(insert)) - insert = sched_next(insert); +void be_node_set_reg_class_out(ir_node *irn, int pos, + const arch_register_class_t *cls) +{ + be_set_constr_out(irn, pos, cls->class_req); +} - sched_add_before(insert, spill); - return spill; +ir_node *be_get_IncSP_pred(ir_node *irn) +{ + assert(be_is_IncSP(irn)); + return get_irn_n(irn, 0); } -ir_node *be_reload(const arch_env_t *arch_env, - const arch_register_class_t *cls, - ir_node *irn, int pos, ir_mode *mode, ir_node *spill) +void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) { - ir_node *reload; + assert(be_is_IncSP(incsp)); + set_irn_n(incsp, 0, pred); +} - ir_node *bl = get_nodes_block(irn); - ir_graph *irg = get_irn_irg(bl); +void be_set_IncSP_offset(ir_node *irn, int offset) +{ + be_incsp_attr_t *a = (be_incsp_attr_t*)get_irn_generic_attr(irn); + assert(be_is_IncSP(irn)); + a->offset = offset; +} - assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M)); +int be_get_IncSP_offset(const ir_node *irn) +{ + const be_incsp_attr_t *a = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn); + assert(be_is_IncSP(irn)); + return a->offset; +} - reload = be_new_Reload(cls, irg, bl, mode, spill); +int be_get_IncSP_align(const ir_node *irn) +{ + const be_incsp_attr_t *a = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn); + assert(be_is_IncSP(irn)); + return a->align; +} - set_irn_n(irn, pos, reload); - sched_add_before(irn, reload); - return reload; +static ir_entity *be_node_get_frame_entity(const ir_node *irn) +{ + return be_get_frame_entity(irn); } -static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos) +void be_node_set_frame_entity(ir_node *irn, ir_entity *ent) { - const be_node_attr_t *a = get_irn_attr(irn); + be_frame_attr_t *a; - if(out_pos < a->max_reg_data) - memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0])); - else { - req->type = arch_register_req_type_none; - req->cls = NULL; - } + assert(be_has_frame_entity(irn)); - return req; + a = (be_frame_attr_t*)get_irn_generic_attr(irn); + a->ent = ent; } -static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos) +static void be_node_set_frame_offset(ir_node *irn, int offset) { - const be_node_attr_t *a = get_irn_attr(irn); - int n = get_irn_arity(irn); + be_frame_attr_t *a; - if(pos < get_irn_arity(irn)) - memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0])); - else { - req->type = arch_register_req_type_none; - req->cls = NULL; - } + if (!be_has_frame_entity(irn)) + return; - return req; + a = (be_frame_attr_t*)get_irn_generic_attr(irn); + a->offset = offset; } -static const arch_register_req_t * -be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) +static int be_node_get_sp_bias(const ir_node *irn) { - int out_pos = pos; + if (be_is_IncSP(irn)) + return be_get_IncSP_offset(irn); + if (be_is_Call(irn)) + return -(int)be_Call_get_pop(irn); + + return 0; +} + - if(pos < 0) { - if(get_irn_mode(irn) == mode_T) - return NULL; - out_pos = redir_proj((const ir_node **) &irn, pos); - assert(is_be_node(irn)); - return put_out_reg_req(req, irn, out_pos); +/* for be nodes */ +static const arch_irn_ops_t be_node_irn_ops = { + be_node_get_frame_entity, + be_node_set_frame_offset, + be_node_get_sp_bias, + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ +}; + +static int get_start_reg_index(ir_graph *irg, const arch_register_t *reg) 
+{ + ir_node *start = get_irg_start(irg); + + /* do a naive linear search... */ + be_foreach_out(start, i) { + arch_register_req_t const *const out_req = arch_get_irn_register_req_out(start, i); + if (!arch_register_req_is(out_req, limited)) + continue; + if (out_req->cls != reg->reg_class) + continue; + if (!rbitset_is_set(out_req->limited, reg->index)) + continue; + return i; } + panic("Tried querying undefined register '%s' at Start", reg->name); +} - else { - return is_be_node(irn) ? put_in_reg_req(req, irn, pos) : NULL; +ir_node *be_get_initial_reg_value(ir_graph *irg, const arch_register_t *reg) +{ + int i = get_start_reg_index(irg, reg); + ir_node *start = get_irg_start(irg); + ir_mode *mode = arch_register_class_mode(reg->reg_class); + + foreach_out_edge(start, edge) { + ir_node *proj = get_edge_src_irn(edge); + if (!is_Proj(proj)) // maybe End/Anchor + continue; + if (get_Proj_proj(proj) == i) { + return proj; + } } + return new_r_Proj(start, mode, i); +} - return req; +static ir_entity* dummy_get_frame_entity(const ir_node *node) +{ + (void) node; + return NULL; } -const arch_register_t * -be_node_get_irn_reg(const void *_self, const ir_node *irn) +static void dummy_set_frame_offset(ir_node *node, int bias) { - int out_pos; - be_node_attr_t *a; + (void) node; + (void) bias; + panic("should not be called"); +} - out_pos = redir_proj((const ir_node **) &irn, -1); - a = get_irn_attr(irn); +static int dummy_get_sp_bias(const ir_node *node) +{ + (void) node; + return 0; +} - assert(is_be_node(irn)); - assert(out_pos < a->max_reg_data && "position too high"); +/* for "middleend" nodes */ +static const arch_irn_ops_t dummy_be_irn_ops = { + dummy_get_frame_entity, + dummy_set_frame_offset, + dummy_get_sp_bias, + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ +}; - return a->reg_data[out_pos].reg; + + +ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode, + const arch_register_req_t *req) +{ + ir_graph *irg = get_irn_irg(block); + struct obstack *obst = be_get_be_obst(irg); + backend_info_t *info; + int i; + + ir_node *phi = new_ir_node(NULL, irg, block, op_Phi, mode, n_ins, ins); + phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), n_ins); + info = be_get_info(phi); + info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, 1); + info->in_reqs = OALLOCN(obst, const arch_register_req_t*, n_ins); + + info->out_infos[0].req = req; + for (i = 0; i < n_ins; ++i) { + info->in_reqs[i] = req; + } + irn_verify_irg(phi, irg); + phi = optimize_node(phi); + return phi; } -arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn) +void be_set_phi_reg_req(ir_node *node, const arch_register_req_t *req) { - redir_proj((const ir_node **) &irn, -1); + int arity = get_irn_arity(node); + int i; - switch(get_irn_be_opcode(irn)) { -#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b; - XXX(Spill, spill) - XXX(Reload, reload) - XXX(Perm, perm) - XXX(Copy, copy) -#undef XXX - default: - return 0; + backend_info_t *info = be_get_info(node); + info->out_infos[0].req = req; + for (i = 0; i < arity; ++i) { + info->in_reqs[i] = req; } - return 0; + assert(mode_is_datab(get_irn_mode(node))); } -arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn) +void be_dump_phi_reg_reqs(FILE *F, const ir_node *node, dump_reason_t reason) { - be_node_attr_t *a = get_irn_attr(irn); - return a->flags; -} - -static const arch_irn_ops_if_t be_node_irn_ops_if = { - be_node_get_irn_reg_req, - 
be_node_set_irn_reg, - be_node_get_irn_reg, - be_node_classify, - be_node_get_flags, -}; + ir_graph *irg = get_irn_irg(node); + if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_BACKEND)) + return; -static const arch_irn_ops_t be_node_irn_ops = { - &be_node_irn_ops_if -}; + switch (reason) { + case dump_node_opcode_txt: + fputs(get_op_name(get_irn_op(node)), F); + break; + case dump_node_mode_txt: + fprintf(F, "%s", get_mode_name(get_irn_mode(node))); + break; + case dump_node_nodeattr_txt: + break; + case dump_node_info_txt: + arch_dump_reqs_and_registers(F, node); + break; -const void *be_node_get_arch_ops(const arch_irn_handler_t *self, const ir_node *irn) -{ - redir_proj((const ir_node **) &irn, -1); - return is_be_node(irn) ? &be_node_irn_ops : NULL; + default: + break; + } } -const arch_irn_handler_t be_node_irn_handler = { - be_node_get_arch_ops +static const arch_irn_ops_t phi_irn_ops = { + dummy_get_frame_entity, + dummy_set_frame_offset, + dummy_get_sp_bias, + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ }; -static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason) -{ - be_node_attr_t *at = get_irn_attr(irn); - int i; + +/** + * ir_op-Operation: dump a be node to file + */ +static void dump_node(FILE *f, const ir_node *irn, dump_reason_t reason) +{ assert(is_be_node(irn)); - switch(reason) { + switch (reason) { case dump_node_opcode_txt: - fprintf(f, get_op_name(get_irn_op(irn))); + fputs(get_op_name(get_irn_op(irn)), f); break; case dump_node_mode_txt: - fprintf(f, get_mode_name(get_irn_mode(irn))); + if (be_is_Copy(irn) || be_is_CopyKeep(irn)) { + fprintf(f, "%s", get_mode_name(get_irn_mode(irn))); + } break; case dump_node_nodeattr_txt: + if (be_is_Call(irn)) { + const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(irn); + if (a->ent) + fprintf(f, " [%s] ", get_entity_name(a->ent)); + } + if (be_is_IncSP(irn)) { + const be_incsp_attr_t *attr = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn); + fprintf(f, " [%d] ", attr->offset); + } break; case dump_node_info_txt: - fprintf(f, "reg class: %s\n", at->cls->name); - for(i = 0; i < at->max_reg_data; ++i) { - const arch_register_t *reg = at->reg_data[i].reg; - fprintf(f, "reg #%d: %s\n", i, reg ? 
reg->name : "n/a"); + arch_dump_reqs_and_registers(f, irn); + + if (be_has_frame_entity(irn)) { + const be_frame_attr_t *a = (const be_frame_attr_t*)get_irn_generic_attr_const(irn); + if (a->ent) { + unsigned size = get_type_size_bytes(get_entity_type(a->ent)); + ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bytes\n", + a->ent, a->offset, a->offset, size, size); + } + } - switch(get_irn_be_opcode(irn)) { - case beo_Spill: - { - be_spill_attr_t *a = (be_spill_attr_t *) at; + switch (get_irn_opcode(irn)) { + case beo_IncSP: { + const be_incsp_attr_t *a = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn); + fprintf(f, "align: %d\n", a->align); + fprintf(f, "offset: %d\n", a->offset); + break; + } + case beo_Call: { + const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(irn); - ir_fprintf(f, "spill context: %+F\n", a->spill_ctx); - if (a->ent) { - unsigned ofs = get_entity_offset_bytes(a->ent); - ir_fprintf(f, "spill entity: %+F offset %x (%d)\n", a->ent, ofs, ofs); + if (a->ent) + fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent)); + break; + } + case beo_MemPerm: { + int i; + for (i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) { + ir_entity *in, *out; + in = be_get_MemPerm_in_entity(irn, i); + out = be_get_MemPerm_out_entity(irn, i); + if (in) { + fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in)); } - else { - ir_fprintf(f, "spill entity: n/a\n"); + if (out) { + fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out)); } } break; + } - case beo_IncSP: - { - be_stack_attr_t *a = (be_stack_attr_t *) at; - fprintf(f, "offset: %u\n", a->offset); - fprintf(f, "direction: %s\n", a->dir == be_stack_dir_along ? "along" : "against"); - } + default: break; } - } - - return 0; } -void copy_attr(const ir_node *old_node, ir_node *new_node) +/** + * ir_op-Operation: + * Copies the backend specific attributes from old node to new node. + */ +static void copy_attr(ir_graph *irg, const ir_node *old_node, ir_node *new_node) { - be_node_attr_t *old_attr = get_irn_attr(old_attr); - be_node_attr_t *new_attr = get_irn_attr(new_node); + const void *old_attr = get_irn_generic_attr_const(old_node); + void *new_attr = get_irn_generic_attr(new_node); + struct obstack *obst = be_get_be_obst(irg); + backend_info_t *old_info = be_get_info(old_node); + backend_info_t *new_info = be_get_info(new_node); assert(is_be_node(old_node)); assert(is_be_node(new_node)); - memcpy(new_attr, old_attr, old_node->op->attr_size); - - new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data); - memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t)); -} - -static const ir_op_ops be_node_op_ops = { - NULL, - NULL, - NULL, - NULL, - NULL, - copy_attr, - NULL, - NULL, - NULL, - NULL, - NULL, - dump_node, - NULL -}; - -pset *nodes_live_at(const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live) -{ - firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node"); - const ir_node *bl = is_Block(pos) ? 
pos : get_nodes_block(pos); - ir_node *irn; - irn_live_t *li; - - live_foreach(bl, li) { - ir_node *irn = (ir_node *) li->irn; - if(live_is_end(li) && arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) - pset_insert_ptr(live, irn); - } - - sched_foreach_reverse(bl, irn) { - int i, n; - ir_node *x; - - /* - * If we encounter the node we want to insert the Perm after, - * exit immediately, so that this node is still live - */ - if(irn == pos) - return live; + memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node))); - DBG((dbg, LEVEL_1, "%+F\n", irn)); - for(x = pset_first(live); x; x = pset_next(live)) - DBG((dbg, LEVEL_1, "\tlive: %+F\n", x)); + new_info->flags = old_info->flags; + new_info->out_infos = old_info->out_infos ? DUP_ARR_D(reg_out_info_t, obst, old_info->out_infos) : NULL; - if(arch_irn_has_reg_class(arch_env, irn, -1, cls)) - pset_remove_ptr(live, irn); - - for(i = 0, n = get_irn_arity(irn); i < n; ++i) { - ir_node *op = get_irn_n(irn, i); - - if(arch_irn_consider_in_reg_alloc(arch_env, cls, op)) - pset_insert_ptr(live, op); + /* input infos */ + if (old_info->in_reqs != NULL) { + unsigned n_ins = get_irn_arity(old_node); + /* need dynamic in infos? */ + if (get_irn_op(old_node)->opar == oparity_dynamic) { + new_info->in_reqs = NEW_ARR_F(const arch_register_req_t*, n_ins); + } else { + new_info->in_reqs = OALLOCN(obst,const arch_register_req_t*, n_ins); } + memcpy(new_info->in_reqs, old_info->in_reqs, + n_ins * sizeof(new_info->in_reqs[0])); + } else { + new_info->in_reqs = NULL; } - - return live; } -ir_node *insert_Perm_after(const arch_env_t *arch_env, - const arch_register_class_t *cls, - dom_front_info_t *dom_front, - ir_node *pos) +int is_be_node(const ir_node *irn) { - ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos); - ir_graph *irg = get_irn_irg(bl); - pset *live = pset_new_ptr_default(); - firm_dbg_module_t *dbg = firm_dbg_register("be.node"); - - ir_node *curr, *irn, *perm, **nodes; - int i, n; - - DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos)); - - if(!nodes_live_at(arch_env, cls, pos, live)); + return get_op_ops(get_irn_op(irn))->be_ops == &be_node_irn_ops; +} - n = pset_count(live); +static ir_op *new_be_op(unsigned code, const char *name, op_pin_state p, + irop_flags flags, op_arity opar, size_t attr_size) +{ + ir_op *res = new_ir_op(code, name, p, flags, opar, 0, attr_size); + res->ops.dump_node = dump_node; + res->ops.copy_attr = copy_attr; + res->ops.be_ops = &be_node_irn_ops; + return res; +} - if(n == 0) - return NULL; +void be_init_op(void) +{ + unsigned opc; - nodes = malloc(n * sizeof(nodes[0])); + assert(op_be_Spill == NULL); - DBG((dbg, LEVEL_1, "live:\n")); - for(irn = pset_first(live), i = 0; irn; irn = pset_next(live), i++) { - DBG((dbg, LEVEL_1, "\t%+F\n", irn)); - nodes[i] = irn; + /* Acquire all needed opcodes. 
*/ + op_be_Spill = new_be_op(beo_Spill, "be_Spill", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_frame_attr_t)); + op_be_Reload = new_be_op(beo_Reload, "be_Reload", op_pin_state_exc_pinned, irop_flag_none, oparity_zero, sizeof(be_frame_attr_t)); + op_be_Perm = new_be_op(beo_Perm, "be_Perm", op_pin_state_exc_pinned, irop_flag_none, oparity_variable, sizeof(be_node_attr_t)); + op_be_MemPerm = new_be_op(beo_MemPerm, "be_MemPerm", op_pin_state_exc_pinned, irop_flag_none, oparity_variable, sizeof(be_memperm_attr_t)); + op_be_Copy = new_be_op(beo_Copy, "be_Copy", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_node_attr_t)); + op_be_Keep = new_be_op(beo_Keep, "be_Keep", op_pin_state_exc_pinned, irop_flag_keep, oparity_dynamic, sizeof(be_node_attr_t)); + op_be_CopyKeep = new_be_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_exc_pinned, irop_flag_keep, oparity_variable, sizeof(be_node_attr_t)); + op_be_Call = new_be_op(beo_Call, "be_Call", op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, sizeof(be_call_attr_t)); + ir_op_set_memory_index(op_be_Call, n_be_Call_mem); + ir_op_set_fragile_indices(op_be_Call, pn_be_Call_X_regular, pn_be_Call_X_except); + op_be_Return = new_be_op(beo_Return, "be_Return", op_pin_state_exc_pinned, irop_flag_cfopcode, oparity_variable, sizeof(be_return_attr_t)); + op_be_AddSP = new_be_op(beo_AddSP, "be_AddSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_node_attr_t)); + op_be_SubSP = new_be_op(beo_SubSP, "be_SubSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_node_attr_t)); + op_be_IncSP = new_be_op(beo_IncSP, "be_IncSP", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_incsp_attr_t)); + op_be_Start = new_be_op(beo_Start, "be_Start", op_pin_state_exc_pinned, irop_flag_none, oparity_zero, sizeof(be_node_attr_t)); + op_be_FrameAddr = new_be_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_exc_pinned, irop_flag_none, oparity_unary, sizeof(be_frame_attr_t)); + + op_be_Spill->ops.node_cmp_attr = FrameAddr_cmp_attr; + op_be_Reload->ops.node_cmp_attr = FrameAddr_cmp_attr; + op_be_Perm->ops.node_cmp_attr = be_nodes_equal; + op_be_MemPerm->ops.node_cmp_attr = be_nodes_equal; + op_be_Copy->ops.node_cmp_attr = be_nodes_equal; + op_be_Keep->ops.node_cmp_attr = be_nodes_equal; + op_be_CopyKeep->ops.node_cmp_attr = be_nodes_equal; + op_be_Call->ops.node_cmp_attr = Call_cmp_attr; + op_be_Return->ops.node_cmp_attr = Return_cmp_attr; + op_be_AddSP->ops.node_cmp_attr = be_nodes_equal; + op_be_SubSP->ops.node_cmp_attr = be_nodes_equal; + op_be_IncSP->ops.node_cmp_attr = IncSP_cmp_attr; + op_be_Start->ops.node_cmp_attr = be_nodes_equal; + op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr; + + /* attach out dummy_ops to middle end nodes */ + for (opc = iro_First; opc <= iro_Last; ++opc) { + ir_op *op = ir_get_opcode(opc); + assert(op->ops.be_ops == NULL); + op->ops.be_ops = &dummy_be_irn_ops; } - perm = be_new_Perm(cls, irg, bl, n, nodes); - sched_add_after(pos, perm); - free(nodes); - - curr = perm; - for(i = 0; i < n; ++i) { - ir_node *copies[1]; - ir_node *perm_op = get_irn_n(perm, i); - const arch_register_t *reg = arch_get_irn_register(arch_env, perm_op); - - ir_mode *mode = get_irn_mode(perm_op); - ir_node *proj = new_r_Proj(irg, bl, perm, mode, i); - arch_set_irn_register(arch_env, proj, reg); - - sched_add_after(curr, proj); - curr = proj; + op_Phi->ops.be_ops = &phi_irn_ops; +} - copies[0] = proj; - be_ssa_constr_single(dom_front, perm_op, 1, copies); 
- } - return perm; +void be_finish_op(void) +{ + free_ir_op(op_be_Spill); op_be_Spill = NULL; + free_ir_op(op_be_Reload); op_be_Reload = NULL; + free_ir_op(op_be_Perm); op_be_Perm = NULL; + free_ir_op(op_be_MemPerm); op_be_MemPerm = NULL; + free_ir_op(op_be_Copy); op_be_Copy = NULL; + free_ir_op(op_be_Keep); op_be_Keep = NULL; + free_ir_op(op_be_CopyKeep); op_be_CopyKeep = NULL; + free_ir_op(op_be_Call); op_be_Call = NULL; + free_ir_op(op_be_Return); op_be_Return = NULL; + free_ir_op(op_be_IncSP); op_be_IncSP = NULL; + free_ir_op(op_be_AddSP); op_be_AddSP = NULL; + free_ir_op(op_be_SubSP); op_be_SubSP = NULL; + free_ir_op(op_be_Start); op_be_Start = NULL; + free_ir_op(op_be_FrameAddr); op_be_FrameAddr = NULL; }
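
A minimal usage sketch for the reworked constructors above: be_new_Spill and be_new_Reload now take the frame pointer explicitly and leave the spill slot entity unset, so a spiller creates the pair, schedules it, and assigns the entity later via be_node_set_frame_entity(). The register classes, the insertion points and the scheduling policy below are assumptions for illustration, not part of this change:

#include "benode.h"
#include "besched.h"

/* Spill `value` right after its definition and reload it before `before`.
 * gp_cls/frame_cls stand for whatever register classes the target backend
 * uses for the value and for the frame pointer (assumed names). */
static ir_node *spill_and_reload(ir_node *value, ir_node *before,
                                 const arch_register_class_t *gp_cls,
                                 const arch_register_class_t *frame_cls)
{
	ir_graph *irg   = get_irn_irg(value);
	ir_node  *frame = get_irg_frame(irg);
	ir_node  *block = get_nodes_block(value);

	/* store the value into a spill slot (slot entity is assigned later) */
	ir_node *spill  = be_new_Spill(gp_cls, frame_cls, block, frame, value);
	/* load it again in the block of the use */
	ir_node *reload = be_new_Reload(gp_cls, frame_cls, get_nodes_block(before),
	                                frame, spill, get_irn_mode(value));

	sched_add_after(value, spill);
	sched_add_before(before, reload);
	/* the actual frame entity is picked later, e.g.
	   be_node_set_frame_entity(spill, ent); */
	return reload;
}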