X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fbenode.c;h=a837c194a7b853f355ee3fa95bbafdcb60284c95;hb=2cb4a701f143339e07259cf7331c670f5584de63;hp=31b337c9d51219c3d85d8acea2beffb15d8f167a;hpb=2f8c9ceec78590ae8e19118a3bb7286286bfcd86;p=libfirm

diff --git a/ir/be/benode.c b/ir/be/benode.c
index 31b337c9d..a837c194a 100644
--- a/ir/be/benode.c
+++ b/ir/be/benode.c
@@ -1,19 +1,33 @@
-/**
- * @file benode.c
- * @date 17.05.2005
- * @author Sebastian Hack
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
 *
- * Backend node support.
+ * This file is part of libFirm.
 *
- * This file provides Perm, Copy, Spill and Reload nodes.
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
 *
- * Copyright (C) 2005-2006 Universitaet Karlsruhe
- * Released under the GPL
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
 */
-#ifdef HAVE_CONFIG_H
+/**
+ * @file
+ * @brief   Backend node support for generic backend nodes.
+ * @author  Sebastian Hack
+ * @date    17.05.2005
+ * @version $Id$
+ *
+ * Backend node support for generic backend nodes.
+ * This file provides Perm, Copy, Spill and Reload nodes.
+ */
 #include "config.h"
-#endif

 #include <stdlib.h>

@@ -25,6 +39,9 @@
 #include "fourcc.h"
 #include "offset.h"
 #include "bitfiddle.h"
+#include "raw_bitset.h"
+#include "error.h"
+#include "array_t.h"

 #include "irop_t.h"
 #include "irmode_t.h"
@@ -36,80 +53,60 @@
 #include "be_t.h"
 #include "belive_t.h"
-#include "besched_t.h"
+#include "besched.h"
 #include "benode_t.h"
+#include "bearch.h"
 #include "beirgmod.h"

-#define OUT_POS(x) (-((x) + 1))
-
-/* Sometimes we want to put const nodes into get_irn_generic_attr ... */
-#define get_irn_attr(irn) get_irn_generic_attr((ir_node *) (irn))
-
-static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
-
-typedef enum {
-	be_req_kind_old_limited,
-	be_req_kind_negate_old_limited,
-	be_req_kind_single_reg
-} be_req_kind_t;
+#define get_irn_attr(irn) get_irn_generic_attr(irn)
+#define get_irn_attr_const(irn) get_irn_generic_attr_const(irn)

 typedef struct {
 	arch_register_req_t req;
-	be_req_kind_t kind;
-	arch_irn_flags_t flags;
-	union {
-		struct {
-			void (*old_limited)(void *ptr, bitset_t *bs);
-			void *old_limited_env;
-		} old_limited;
-
-		const arch_register_t *single_reg;
-	} x;
-} be_req_t;
-
-typedef struct {
-	const arch_register_t *reg;
-	be_req_t req;
-	be_req_t in_req;
+	arch_register_req_t in_req;
 } be_reg_data_t;

 /** The generic be nodes attribute type. */
 typedef struct {
-	int max_reg_data;
-	be_reg_data_t *reg_data;
+	be_reg_data_t *reg_data;
 } be_node_attr_t;

 /** The be_Return nodes attribute type. */
 typedef struct {
-	be_node_attr_t node_attr;
+	be_node_attr_t node_attr;     /**< base attributes of every be node. */
 	int            num_ret_vals;  /**< number of return values */
+	unsigned       pop;           /**< number of bytes that should be popped */
+	int            emit_pop;      /**< if set, emit pop bytes, even if pop = 0 */
 } be_return_attr_t;

-/** The be_Stack attribute type. */
+/** The be_IncSP attribute type.
*/ typedef struct { - be_node_attr_t node_attr; - int offset; /**< The offset by which the stack shall be expanded/shrinked. */ -} be_stack_attr_t; + be_node_attr_t node_attr; /**< base attributes of every be node. */ + int offset; /**< The offset by which the stack shall be expanded/shrinked. */ + int align; /**< whether stack should be aligned after the + IncSP */ +} be_incsp_attr_t; /** The be_Frame attribute type. */ typedef struct { - be_node_attr_t node_attr; - ir_entity *ent; - int offset; + be_node_attr_t node_attr; /**< base attributes of every be node. */ + ir_entity *ent; + int offset; } be_frame_attr_t; /** The be_Call attribute type. */ typedef struct { - be_node_attr_t node_attr; - ir_entity *ent; /**< The called entity if this is a static call. */ - ir_type *call_tp; /**< The call type, copied from the original Call node. */ + be_node_attr_t node_attr; /**< base attributes of every be node. */ + ir_entity *ent; /**< The called entity if this is a static call. */ + unsigned pop; + ir_type *call_tp; /**< The call type, copied from the original Call node. */ } be_call_attr_t; typedef struct { - be_node_attr_t node_attr; - ir_entity **in_entities; - ir_entity **out_entities; + be_node_attr_t node_attr; /**< base attributes of every be node. */ + ir_entity **in_entities; + ir_entity **out_entities; } be_memperm_attr_t; ir_op *op_be_Spill; @@ -124,15 +121,10 @@ ir_op *op_be_Return; ir_op *op_be_IncSP; ir_op *op_be_AddSP; ir_op *op_be_SubSP; -ir_op *op_be_SetSP; ir_op *op_be_RegParams; -ir_op *op_be_StackParam; ir_op *op_be_FrameAddr; -ir_op *op_be_FrameLoad; -ir_op *op_be_FrameStore; ir_op *op_be_Barrier; - -static int beo_base = -1; +ir_op *op_be_Unwind; static const ir_op_ops be_node_op_ops; @@ -146,218 +138,223 @@ static const ir_op_ops be_node_op_ops; #define H irop_flag_highlevel #define c irop_flag_constlike #define K irop_flag_keep -#define M irop_flag_machine - +#define M irop_flag_uses_memory /** - * Compare two node attributes. + * Compare two be node attributes. * * @return zero if both attributes are identically */ -static int cmp_node_attr(be_node_attr_t *a, be_node_attr_t *b) { - if (a->max_reg_data == b->max_reg_data) { - int i; - - for (i = 0; i < a->max_reg_data; ++i) { - if (a->reg_data[i].reg != b->reg_data[i].reg || - memcmp(&a->reg_data[i].in_req, &b->reg_data[i].in_req, sizeof(b->reg_data[i].in_req)) || - memcmp(&a->reg_data[i].req, &b->reg_data[i].req, sizeof(a->reg_data[i].req))) - return 1; - } - return 0; +static int _node_cmp_attr(const be_node_attr_t *a, const be_node_attr_t *b) { + int i, len = ARR_LEN(a->reg_data); + + if (len != ARR_LEN(b->reg_data)) + return 1; + + for (i = len - 1; i >= 0; --i) { + if (!reg_reqs_equal(&a->reg_data[i].in_req, &b->reg_data[i].in_req) || + !reg_reqs_equal(&a->reg_data[i].req, &b->reg_data[i].req)) + return 1; } - return 1; + + return 0; } /** - * Compare the attributes of two FrameAddr nodes. + * Compare the node attributes of two be_node's. 
* - * @return zero if both attributes are identically + * @return zero if both nodes have identically attributes */ -static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) { - be_frame_attr_t *a_attr = get_irn_attr(a); - be_frame_attr_t *b_attr = get_irn_attr(b); - - if (a_attr->ent == b_attr->ent && a_attr->offset == b_attr->offset) - return cmp_node_attr(&a_attr->node_attr, &b_attr->node_attr); - return 1; -} +static int node_cmp_attr(ir_node *a, ir_node *b) { + const be_node_attr_t *a_attr = get_irn_attr_const(a); + const be_node_attr_t *b_attr = get_irn_attr_const(b); -void be_node_init(void) { - static int inited = 0; + if (_node_cmp_attr(a_attr, b_attr) != 0) + return 1; - if(inited) - return; + return !be_info_equal(a, b); +} - inited = 1; +/** + * Compare the attributes of two be_FrameAddr nodes. + * + * @return zero if both nodes have identically attributes + */ +static int FrameAddr_cmp_attr(ir_node *a, ir_node *b) { + const be_frame_attr_t *a_attr = get_irn_attr_const(a); + const be_frame_attr_t *b_attr = get_irn_attr_const(b); - /* Acquire all needed opcodes. */ - beo_base = get_next_ir_opcodes(beo_Last - 1); - - op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops); - op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, F, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops); - op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops); - op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_SubSP = new_ir_op(beo_base + beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops); - op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops); - op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops); - op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_FrameLoad = 
new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops); - op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops); - - set_op_tag(op_be_Spill, &be_node_tag); - set_op_tag(op_be_Reload, &be_node_tag); - set_op_tag(op_be_Perm, &be_node_tag); - set_op_tag(op_be_MemPerm, &be_node_tag); - set_op_tag(op_be_Copy, &be_node_tag); - set_op_tag(op_be_Keep, &be_node_tag); - set_op_tag(op_be_CopyKeep, &be_node_tag); - set_op_tag(op_be_Call, &be_node_tag); - set_op_tag(op_be_Return, &be_node_tag); - set_op_tag(op_be_AddSP, &be_node_tag); - set_op_tag(op_be_SubSP, &be_node_tag); - set_op_tag(op_be_SetSP, &be_node_tag); - set_op_tag(op_be_IncSP, &be_node_tag); - set_op_tag(op_be_RegParams, &be_node_tag); - set_op_tag(op_be_StackParam, &be_node_tag); - set_op_tag(op_be_FrameLoad, &be_node_tag); - set_op_tag(op_be_FrameStore, &be_node_tag); - set_op_tag(op_be_FrameAddr, &be_node_tag); - set_op_tag(op_be_Barrier, &be_node_tag); + if (a_attr->ent != b_attr->ent || a_attr->offset != b_attr->offset) + return 1; - op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr; + return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr); } /** - * Initializes the generic attribute of all be nodes and return ir. + * Compare the attributes of two be_Return nodes. + * + * @return zero if both nodes have identically attributes */ -static void *init_node_attr(ir_node* irn, int max_reg_data) -{ - ir_graph *irg = get_irn_irg(irn); - be_node_attr_t *a = get_irn_attr(irn); +static int Return_cmp_attr(ir_node *a, ir_node *b) { + const be_return_attr_t *a_attr = get_irn_attr_const(a); + const be_return_attr_t *b_attr = get_irn_attr_const(b); - memset(a, 0, sizeof(get_op_attr_size(get_irn_op(irn)))); - a->max_reg_data = max_reg_data; - a->reg_data = NULL; + if (a_attr->num_ret_vals != b_attr->num_ret_vals) + return 1; + if (a_attr->pop != b_attr->pop) + return 1; + if (a_attr->emit_pop != b_attr->emit_pop) + return 1; - if(max_reg_data > 0) { - int i; + return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr); +} - a->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(irg), max_reg_data); - memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0])); - for(i = 0; i < max_reg_data; ++i) { - a->reg_data[i].req.req.cls = NULL; - a->reg_data[i].req.req.type = arch_register_req_type_none; - } - } +/** + * Compare the attributes of two be_IncSP nodes. + * + * @return zero if both nodes have identically attributes + */ +static int IncSP_cmp_attr(ir_node *a, ir_node *b) { + const be_incsp_attr_t *a_attr = get_irn_attr_const(a); + const be_incsp_attr_t *b_attr = get_irn_attr_const(b); - return a; -} + if (a_attr->offset != b_attr->offset) + return 1; -int is_be_node(const ir_node *irn) -{ - return get_op_tag(get_irn_op(irn)) == &be_node_tag; + return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr); } -be_opcode_t be_get_irn_opcode(const ir_node *irn) -{ - return is_be_node(irn) ? get_irn_opcode(irn) - beo_base : beo_NoBeOp; +/** + * Compare the attributes of two be_Call nodes. 
+ * + * @return zero if both nodes have identically attributes + */ +static int Call_cmp_attr(ir_node *a, ir_node *b) { + const be_call_attr_t *a_attr = get_irn_attr_const(a); + const be_call_attr_t *b_attr = get_irn_attr_const(b); + + if (a_attr->ent != b_attr->ent || + a_attr->call_tp != b_attr->call_tp) + return 1; + + return _node_cmp_attr(&a_attr->node_attr, &b_attr->node_attr); } -static int redir_proj(const ir_node **node, int pos) +static inline arch_register_req_t *get_be_req(const ir_node *node, int pos) { - const ir_node *n = *node; + int idx; + const be_node_attr_t *attr; + be_reg_data_t *rd; - if(is_Proj(n)) { - ir_node *irn; + assert(is_be_node(node)); + attr = get_irn_attr_const(node); - *node = irn = get_Proj_pred(n); - if(is_Proj(irn)) { - assert(get_irn_mode(irn) == mode_T); - *node = get_Proj_pred(irn); - } - return get_Proj_proj(n); + if (pos < 0) { + idx = -(pos + 1); + } else { + idx = pos; + assert(idx < get_irn_arity(node)); } + assert(idx < ARR_LEN(attr->reg_data)); + rd = &attr->reg_data[idx]; - return 0; + return pos < 0 ? &rd->req : &rd->in_req; } -static be_node_attr_t *retrieve_irn_attr(const ir_node *irn, int *the_pos) +/** + * Initializes the generic attribute of all be nodes and return it. + */ +static void *init_node_attr(ir_node *node, int max_reg_data) { - int dummy; - be_node_attr_t *res = NULL; - int *pos = the_pos ? the_pos : &dummy; + ir_graph *irg = get_irn_irg(node); + struct obstack *obst = get_irg_obstack(irg); + be_node_attr_t *a = get_irn_attr(node); - *pos = -1; - if(is_Proj(irn)) { - ir_node *pred = get_Proj_pred(irn); - int p = get_Proj_proj(irn); + memset(a, 0, sizeof(get_op_attr_size(get_irn_op(node)))); - if(is_be_node(pred)) { - assert(get_irn_mode(pred) == mode_T); - *pos = p; - res = get_irn_attr(pred); - assert(p >= 0 && p < res->max_reg_data && "illegal proj number"); - } - } + if(max_reg_data >= 0) { + backend_info_t *info = be_get_info(node); + info->out_infos = NEW_ARR_D(reg_out_info_t, obst, max_reg_data); + memset(info->out_infos, 0, max_reg_data * sizeof(info->out_infos[0])); - else if(is_be_node(irn) && get_irn_mode(irn) != mode_T) { - be_node_attr_t *a = get_irn_attr(irn); - if(a->max_reg_data > 0) { - res = a; - *pos = 0; - } - } + a->reg_data = NEW_ARR_D(be_reg_data_t, obst, max_reg_data); + memset(a->reg_data, 0, max_reg_data * sizeof(a->reg_data[0])); + } else { + backend_info_t *info = be_get_info(node); + info->out_infos = NEW_ARR_F(reg_out_info_t, 0); - return res; -} + a->reg_data = NEW_ARR_F(be_reg_data_t, 0); + } -static be_reg_data_t *retrieve_reg_data(const ir_node *irn) -{ - int pos; - be_node_attr_t *a = retrieve_irn_attr(irn, &pos); - return a ? 
&a->reg_data[pos] : NULL; + return a; } -static void -be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg) +static void add_register_req(ir_node *node) { - be_reg_data_t *r = retrieve_reg_data(irn); - - if(r) - r->reg = reg; + backend_info_t *info = be_get_info(node); + be_node_attr_t *a = get_irn_attr(node); + be_reg_data_t regreq; + reg_out_info_t out_info; + memset(®req, 0, sizeof(regreq)); + memset(&out_info, 0, sizeof(out_info)); + ARR_APP1(be_reg_data_t, a->reg_data, regreq); + ARR_APP1(reg_out_info_t, info->out_infos, out_info); } - -ir_node *be_new_Spill(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *to_spill) +ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, + ir_node *bl, ir_node *frame, ir_node *to_spill) { be_frame_attr_t *a; + ir_node *in[2]; ir_node *res; + ir_graph *irg = get_Block_irg(bl); - res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 1, &to_spill); + in[0] = frame; + in[1] = to_spill; + res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in); a = init_node_attr(res, 2); a->ent = NULL; a->offset = 0; - be_node_set_reg_class(res, be_pos_Spill_val, cls); + be_node_set_reg_class_in(res, be_pos_Spill_frame, cls_frame); + be_node_set_reg_class_in(res, be_pos_Spill_val, cls); + + /* + * For spills and reloads, we return "none" as requirement for frame + * pointer, so every input is ok. Some backends need this (e.g. STA). + * Matze: we should investigate if this is really needed, this solution + * looks very hacky to me + */ + be_node_set_reg_class_in(res, be_pos_Spill_frame, NULL); + return res; } -ir_node *be_new_Reload(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *mem, ir_mode *mode) +ir_node *be_new_Reload(const arch_register_class_t *cls, + const arch_register_class_t *cls_frame, ir_node *block, + ir_node *frame, ir_node *mem, ir_mode *mode) { - ir_node *res = new_ir_node(NULL, irg, bl, op_be_Reload, mode, 1, &mem); + ir_node *in[2]; + ir_node *res; + ir_graph *irg = get_Block_irg(block); + + in[0] = frame; + in[1] = mem; + res = new_ir_node(NULL, irg, block, op_be_Reload, mode, 2, in); + init_node_attr(res, 2); - be_node_set_reg_class(res, -1, cls); - be_node_set_flags(res, -1, arch_irn_flags_rematerializable); + be_node_set_reg_class_out(res, 0, cls); + be_node_set_reg_class_in(res, be_pos_Reload_frame, cls_frame); + arch_irn_set_flags(res, arch_irn_flags_rematerializable); + + /* + * For spills and reloads, we return "none" as requirement for frame + * pointer, so every input is ok. Some backends need this (e.g. STA). 
+ * Matze: we should investigate if this is really needed, this solution + * looks very hacky to me + */ + be_node_set_reg_class_in(res, be_pos_Reload_frame, NULL); + return res; } @@ -367,46 +364,92 @@ ir_node *be_get_Reload_mem(const ir_node *irn) return get_irn_n(irn, be_pos_Reload_mem); } +ir_node *be_get_Reload_frame(const ir_node *irn) +{ + assert(be_is_Reload(irn)); + return get_irn_n(irn, be_pos_Reload_frame); +} + ir_node *be_get_Spill_val(const ir_node *irn) { assert(be_is_Spill(irn)); return get_irn_n(irn, be_pos_Spill_val); } -ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[]) +ir_node *be_get_Spill_frame(const ir_node *irn) { - int i; - ir_node *irn = new_ir_node(NULL, irg, bl, op_be_Perm, mode_T, n, in); + assert(be_is_Spill(irn)); + return get_irn_n(irn, be_pos_Spill_frame); +} + +ir_node *be_new_Perm(const arch_register_class_t *cls, ir_node *block, int n, ir_node *in[]) +{ + int i; + ir_graph *irg = get_Block_irg(block); + + ir_node *irn = new_ir_node(NULL, irg, block, op_be_Perm, mode_T, n, in); init_node_attr(irn, n); - for(i = 0; i < n; ++i) { - be_node_set_reg_class(irn, i, cls); - be_node_set_reg_class(irn, OUT_POS(i), cls); + for (i = 0; i < n; ++i) { + be_node_set_reg_class_in(irn, i, cls); + be_node_set_reg_class_out(irn, i, cls); } return irn; } -ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[]) +void be_Perm_reduce(ir_node *perm, int new_size, int *map) { + int arity = get_irn_arity(perm); + be_reg_data_t *old_data = ALLOCAN(be_reg_data_t, arity); + reg_out_info_t *old_infos = ALLOCAN(reg_out_info_t, arity); + be_node_attr_t *attr = get_irn_attr(perm); + backend_info_t *info = be_get_info(perm); + ir_node **new_in; + int i; - ir_node *frame = get_irg_frame(irg); - const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1); - ir_node *irn; - const arch_register_t *sp = arch_env->isa->sp; - be_memperm_attr_t *attr; - ir_node **real_in; - real_in = alloca((n+1) * sizeof(real_in[0])); + assert(be_is_Perm(perm)); + assert(new_size <= arity); + + new_in = alloca(new_size * sizeof(*new_in)); + + /* save the old register data */ + memcpy(old_data, attr->reg_data, arity * sizeof(old_data[0])); + memcpy(old_infos, info->out_infos, arity * sizeof(old_infos[0])); + + /* compose the new in array and set the new register data directly in place */ + for (i = 0; i < new_size; ++i) { + int idx = map[i]; + new_in[i] = get_irn_n(perm, idx); + attr->reg_data[i] = old_data[idx]; + info->out_infos[i] = old_infos[idx]; + } + + set_irn_in(perm, new_size, new_in); +} + +ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_node *bl, int n, ir_node *in[]) +{ + ir_graph *irg = get_Block_irg(bl); + ir_node *frame = get_irg_frame(irg); + const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame); + const arch_register_t *sp = arch_env->sp; + ir_node *irn; + be_memperm_attr_t *attr; + ir_node **real_in; + int i; + + real_in = ALLOCAN(ir_node*, n + 1); real_in[0] = frame; memcpy(&real_in[1], in, n * sizeof(real_in[0])); irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n+1, real_in); init_node_attr(irn, n + 1); - be_node_set_reg_class(irn, 0, sp->reg_class); - for(i = 0; i < n; ++i) { - be_node_set_reg_class(irn, i + 1, cls_frame); - be_node_set_reg_class(irn, OUT_POS(i), cls_frame); + be_node_set_reg_class_in(irn, 0, sp->reg_class); + for (i = 0; i < n; ++i) { + be_node_set_reg_class_in(irn, i + 1, cls_frame); + 
be_node_set_reg_class_out(irn, i, cls_frame); } attr = get_irn_attr(irn); @@ -420,16 +463,24 @@ ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, } -ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op) +ir_node *be_new_Copy(const arch_register_class_t *cls, ir_node *bl, ir_node *op) { ir_node *in[1]; ir_node *res; + arch_register_req_t *req; + ir_graph *irg = get_Block_irg(bl); in[0] = op; res = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in); init_node_attr(res, 1); - be_node_set_reg_class(res, 0, cls); - be_node_set_reg_class(res, OUT_POS(0), cls); + be_node_set_reg_class_in(res, 0, cls); + be_node_set_reg_class_out(res, 0, cls); + + req = get_be_req(res, BE_OUT_POS(0)); + req->cls = cls; + req->type = arch_register_req_type_should_be_same; + req->other_same = 1U << 0; + return res; } @@ -441,20 +492,36 @@ void be_set_Copy_op(ir_node *cpy, ir_node *op) { set_irn_n(cpy, be_pos_Copy_op, op); } -ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int n, ir_node *in[]) +ir_node *be_new_Keep(const arch_register_class_t *cls, ir_node *bl, int n, ir_node *in[]) { int i; - ir_node *irn; + ir_node *res; + ir_graph *irg = get_Block_irg(bl); + + res = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, -1, NULL); + init_node_attr(res, -1); - irn = new_ir_node(NULL, irg, bl, op_be_Keep, mode_ANY, n, in); - init_node_attr(irn, n); for(i = 0; i < n; ++i) { - be_node_set_reg_class(irn, i, cls); + add_irn_n(res, in[i]); + add_register_req(res); + be_node_set_reg_class_in(res, i, cls); } - keep_alive(irn); - return irn; + keep_alive(res); + + return res; } +void be_Keep_add_node(ir_node *keep, const arch_register_class_t *cls, ir_node *node) +{ + int n; + + assert(be_is_Keep(keep)); + n = add_irn_n(keep, node); + add_register_req(keep); + be_node_set_reg_class_in(keep, n, cls); +} + +/* creates a be_Call */ ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *sp, ir_node *ptr, int n_outs, int n, ir_node *in[], ir_type *call_tp) { @@ -473,12 +540,13 @@ ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem, ir a = init_node_attr(irn, (n_outs > real_n ? n_outs : real_n)); a->ent = NULL; a->call_tp = call_tp; + a->pop = 0; return irn; } /* Gets the call entity or NULL if this is no static call. */ ir_entity *be_Call_get_entity(const ir_node *call) { - be_call_attr_t *a = get_irn_attr(call); + const be_call_attr_t *a = get_irn_attr_const(call); assert(be_is_Call(call)); return a->ent; } @@ -492,7 +560,7 @@ void be_Call_set_entity(ir_node *call, ir_entity *ent) { /* Gets the call type. */ ir_type *be_Call_get_type(ir_node *call) { - be_call_attr_t *a = get_irn_attr(call); + const be_call_attr_t *a = get_irn_attr_const(call); assert(be_is_Call(call)); return a->call_tp; } @@ -504,215 +572,203 @@ void be_Call_set_type(ir_node *call, ir_type *call_tp) { a->call_tp = call_tp; } +void be_Call_set_pop(ir_node *call, unsigned pop) { + be_call_attr_t *a = get_irn_attr(call); + a->pop = pop; +} + +unsigned be_Call_get_pop(const ir_node *call) { + const be_call_attr_t *a = get_irn_attr_const(call); + return a->pop; +} + /* Construct a new be_Return. 
*/ -ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *bl, int n_res, int n, ir_node *in[]) +ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *block, int n_res, + unsigned pop, int n, ir_node *in[]) { be_return_attr_t *a; - ir_node *irn = new_ir_node(dbg, irg, bl, op_be_Return, mode_X, n, in); - init_node_attr(irn, n); - a = get_irn_attr(irn); + ir_node *res; + int i; + + res = new_ir_node(dbg, irg, block, op_be_Return, mode_X, -1, NULL); + init_node_attr(res, -1); + for(i = 0; i < n; ++i) { + add_irn_n(res, in[i]); + add_register_req(res); + } + + a = get_irn_attr(res); a->num_ret_vals = n_res; + a->pop = pop; + a->emit_pop = 0; - return irn; + return res; } /* Returns the number of real returns values */ -int be_Return_get_n_rets(ir_node *ret) -{ - be_return_attr_t *a = get_irn_attr(ret); +int be_Return_get_n_rets(const ir_node *ret) { + const be_return_attr_t *a = get_irn_generic_attr_const(ret); return a->num_ret_vals; } -ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset) +/* return the number of bytes that should be popped from stack when executing the Return. */ +unsigned be_Return_get_pop(const ir_node *ret) { + const be_return_attr_t *a = get_irn_generic_attr_const(ret); + return a->pop; +} + +/* return non-zero, if number of popped bytes must be always emitted */ +int be_Return_get_emit_pop(const ir_node *ret) { + const be_return_attr_t *a = get_irn_generic_attr_const(ret); + return a->emit_pop; +} + +/* return non-zero, if number of popped bytes must be always emitted */ +void be_Return_set_emit_pop(ir_node *ret, int emit_pop) { + be_return_attr_t *a = get_irn_generic_attr(ret); + a->emit_pop = emit_pop; +} + +int be_Return_append_node(ir_node *ret, ir_node *node) { + int pos; + + pos = add_irn_n(ret, node); + add_register_req(ret); + + return pos; +} + +ir_node *be_new_IncSP(const arch_register_t *sp, ir_node *bl, + ir_node *old_sp, int offset, int align) { - be_stack_attr_t *a; + be_incsp_attr_t *a; ir_node *irn; ir_node *in[1]; + ir_graph *irg = get_Block_irg(bl); in[0] = old_sp; - irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in); + irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, + sizeof(in) / sizeof(in[0]), in); a = init_node_attr(irn, 1); a->offset = offset; - - be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp); + a->align = align; /* Set output constraint to stack register. */ - be_node_set_reg_class(irn, 0, sp->reg_class); - be_set_constr_single_reg(irn, BE_OUT_POS(0), sp); - be_node_set_irn_reg(NULL, irn, sp); + be_node_set_reg_class_in(irn, 0, sp->reg_class); + be_set_constr_single_reg_out(irn, 0, sp, arch_register_req_type_produces_sp); return irn; } -ir_node *be_new_AddSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz) +ir_node *be_new_AddSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp, ir_node *sz) { be_node_attr_t *a; ir_node *irn; ir_node *in[be_pos_AddSP_last]; + const arch_register_class_t *cls; + ir_graph *irg; in[be_pos_AddSP_old_sp] = old_sp; in[be_pos_AddSP_size] = sz; + irg = get_Block_irg(bl); irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, be_pos_AddSP_last, in); a = init_node_attr(irn, be_pos_AddSP_last); - be_node_set_flags(irn, OUT_POS(pn_be_AddSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp); - /* Set output constraint to stack register. 
*/ - be_set_constr_single_reg(irn, be_pos_AddSP_old_sp, sp); - be_node_set_reg_class(irn, be_pos_AddSP_size, arch_register_get_class(sp)); - be_set_constr_single_reg(irn, OUT_POS(pn_be_AddSP_res), sp); - a->reg_data[pn_be_AddSP_res].reg = sp; + be_set_constr_single_reg_in(irn, be_pos_AddSP_old_sp, sp, 0); + be_node_set_reg_class_in(irn, be_pos_AddSP_size, arch_register_get_class(sp)); + be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp, arch_register_req_type_produces_sp); + + cls = arch_register_get_class(sp); return irn; } -ir_node *be_new_SubSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *sz) +ir_node *be_new_SubSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp, ir_node *sz) { be_node_attr_t *a; ir_node *irn; ir_node *in[be_pos_SubSP_last]; + ir_graph *irg; in[be_pos_SubSP_old_sp] = old_sp; in[be_pos_SubSP_size] = sz; + irg = get_Block_irg(bl); irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, be_pos_SubSP_last, in); a = init_node_attr(irn, be_pos_SubSP_last); - be_node_set_flags(irn, OUT_POS(pn_be_SubSP_res), arch_irn_flags_ignore | arch_irn_flags_modify_sp); - /* Set output constraint to stack register. */ - be_set_constr_single_reg(irn, be_pos_SubSP_old_sp, sp); - be_node_set_reg_class(irn, be_pos_SubSP_size, arch_register_get_class(sp)); - be_set_constr_single_reg(irn, OUT_POS(pn_be_SubSP_res), sp); - a->reg_data[pn_be_SubSP_res].reg = sp; + be_set_constr_single_reg_in(irn, be_pos_SubSP_old_sp, sp, 0); + be_node_set_reg_class_in(irn, be_pos_SubSP_size, arch_register_get_class(sp)); + be_set_constr_single_reg_out(irn, pn_be_SubSP_sp, sp, arch_register_req_type_produces_sp); return irn; } -ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *op, ir_node *mem) +ir_node *be_new_RegParams(ir_node *bl, int n_outs) { - be_node_attr_t *a; - ir_node *irn; - ir_node *in[3]; - - in[0] = mem; - in[1] = old_sp; - in[2] = op; - irn = new_ir_node(NULL, irg, bl, op_be_SetSP, get_irn_mode(old_sp), 3, in); - a = init_node_attr(irn, 3); - - be_node_set_flags(irn, OUT_POS(0), arch_irn_flags_ignore | arch_irn_flags_modify_sp); - - /* Set output constraint to stack register. 
*/ - be_set_constr_single_reg(irn, OUT_POS(0), sp); - be_node_set_reg_class(irn, be_pos_AddSP_size, sp->reg_class); - be_node_set_reg_class(irn, be_pos_AddSP_old_sp, sp->reg_class); - - return irn; -} - -ir_node *be_new_StackParam(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_mode *mode, ir_node *frame_pointer, ir_entity *ent) -{ - be_frame_attr_t *a; - ir_node *irn; - ir_node *in[1]; - - in[0] = frame_pointer; - irn = new_ir_node(NULL, irg, bl, op_be_StackParam, mode, 1, in); - a = init_node_attr(irn, 1); - a->ent = ent; - - be_node_set_reg_class(irn, 0, cls_frame); - be_node_set_reg_class(irn, OUT_POS(0), cls); - return irn; -} - -ir_node *be_new_RegParams(ir_graph *irg, ir_node *bl, int n_outs) -{ - ir_node *irn; - ir_node *in[1]; - - irn = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, in); - init_node_attr(irn, n_outs); - return irn; -} - -ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data, - ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_entity *ent) -{ - be_frame_attr_t *a; - ir_node *irn; - ir_node *in[2]; + ir_node *res; + int i; + ir_graph *irg = get_Block_irg(bl); - in[0] = mem; - in[1] = frame; - irn = new_ir_node(NULL, irg, bl, op_be_FrameLoad, mode_T, 2, in); - a = init_node_attr(irn, 3); - a->ent = ent; - a->offset = 0; - be_node_set_reg_class(irn, 1, cls_frame); - be_node_set_reg_class(irn, OUT_POS(pn_Load_res), cls_data); - return irn; -} + res = new_ir_node(NULL, irg, bl, op_be_RegParams, mode_T, 0, NULL); + init_node_attr(res, -1); + for(i = 0; i < n_outs; ++i) + add_register_req(res); -ir_node *be_new_FrameStore(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data, - ir_graph *irg, ir_node *bl, ir_node *mem, ir_node *frame, ir_node *data, ir_entity *ent) -{ - be_frame_attr_t *a; - ir_node *irn; - ir_node *in[3]; - - in[0] = mem; - in[1] = frame; - in[2] = data; - irn = new_ir_node(NULL, irg, bl, op_be_FrameStore, mode_T, 3, in); - a = init_node_attr(irn, 3); - a->ent = ent; - a->offset = 0; - be_node_set_reg_class(irn, 1, cls_frame); - be_node_set_reg_class(irn, 2, cls_data); - return irn; + return res; } -ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_entity *ent) +ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_node *bl, ir_node *frame, ir_entity *ent) { be_frame_attr_t *a; ir_node *irn; ir_node *in[1]; + ir_graph *irg = get_Block_irg(bl); in[0] = frame; irn = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in); a = init_node_attr(irn, 1); a->ent = ent; a->offset = 0; - be_node_set_reg_class(irn, 0, cls_frame); - be_node_set_reg_class(irn, OUT_POS(0), cls_frame); + be_node_set_reg_class_in(irn, 0, cls_frame); + be_node_set_reg_class_out(irn, 0, cls_frame); return optimize_node(irn); } -ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], ir_mode *mode) +ir_node *be_get_FrameAddr_frame(const ir_node *node) { + assert(be_is_FrameAddr(node)); + return get_irn_n(node, be_pos_FrameAddr_ptr); +} + +ir_entity *be_get_FrameAddr_entity(const ir_node *node) { - ir_node *irn; - ir_node **in = (ir_node **) alloca((n + 1) * sizeof(in[0])); + const be_frame_attr_t *attr = get_irn_generic_attr_const(node); + return attr->ent; +} + +ir_node *be_new_CopyKeep(const arch_register_class_t *cls, ir_node *bl, ir_node *src, int n, ir_node *in_keep[], 
ir_mode *mode) +{ + ir_node *irn; + ir_node **in = ALLOCAN(ir_node*, n + 1); + ir_graph *irg = get_Block_irg(bl); in[0] = src; memcpy(&in[1], in_keep, n * sizeof(in[0])); irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in); init_node_attr(irn, n + 1); - be_node_set_reg_class(irn, OUT_POS(0), cls); - be_node_set_reg_class(irn, 0, cls); + be_node_set_reg_class_in(irn, 0, cls); + be_node_set_reg_class_out(irn, 0, cls); return irn; } -ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode) +ir_node *be_new_CopyKeep_single(const arch_register_class_t *cls, ir_node *bl, ir_node *src, ir_node *keep, ir_mode *mode) { - ir_node *in[1]; - - in[0] = keep; - return be_new_CopyKeep(cls, irg, bl, src, 1, in, mode); + return be_new_CopyKeep(cls, bl, src, 1, &keep, mode); } ir_node *be_get_CopyKeep_op(const ir_node *cpy) { @@ -723,42 +779,55 @@ void be_set_CopyKeep_op(ir_node *cpy, ir_node *op) { set_irn_n(cpy, be_pos_CopyKeep_op, op); } -ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[]) +ir_node *be_new_Barrier(ir_node *bl, int n, ir_node *in[]) { - ir_node *irn; + ir_node *res; + int i; + ir_graph *irg = get_Block_irg(bl); - irn = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, n, in); - init_node_attr(irn, n); - return irn; + res = new_ir_node(NULL, irg, bl, op_be_Barrier, mode_T, -1, NULL); + init_node_attr(res, -1); + for(i = 0; i < n; ++i) { + add_irn_n(res, in[i]); + add_register_req(res); + } + + return res; +} + +ir_node *be_Barrier_append_node(ir_node *barrier, ir_node *node) +{ + ir_node *block = get_nodes_block(barrier); + ir_mode *mode = get_irn_mode(node); + int n = add_irn_n(barrier, node); + + ir_node *proj = new_r_Proj(block, barrier, mode, n); + add_register_req(barrier); + + return proj; } -int be_is_Spill (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Spill ; } -int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Reload ; } -int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; } -int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; } -int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; } -int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; } -int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; } -int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; } -int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; } -int be_is_IncSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_IncSP ; } -int be_is_SetSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_SetSP ; } -int be_is_AddSP (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_AddSP ; } -int be_is_RegParams (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_RegParams ; } -int be_is_StackParam (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_StackParam ; } -int be_is_FrameAddr (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameAddr ; } -int be_is_FrameLoad (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameLoad ; } -int be_is_FrameStore (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_FrameStore ; } -int be_is_Barrier (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Barrier ; } +/* Construct a new be_Unwind. 
*/ +ir_node *be_new_Unwind(dbg_info *dbg, ir_node *block, + ir_node *mem, ir_node *sp) +{ + ir_node *res; + ir_node *in[2]; + ir_graph *irg = get_Block_irg(block); + + in[be_pos_Unwind_mem] = mem; + in[be_pos_Unwind_sp] = sp; + res = new_ir_node(dbg, irg, block, op_be_Unwind, mode_X, 2, in); + init_node_attr(res, -1); + + return res; +} int be_has_frame_entity(const ir_node *irn) { - switch(be_get_irn_opcode(irn)) { - case beo_StackParam: + switch (get_irn_opcode(irn)) { case beo_Spill: case beo_Reload: - case beo_FrameStore: - case beo_FrameLoad: case beo_FrameAddr: return 1; default: @@ -766,18 +835,28 @@ int be_has_frame_entity(const ir_node *irn) } } -ir_entity* be_get_frame_entity(const ir_node *irn) +ir_entity *be_get_frame_entity(const ir_node *irn) { - if(be_has_frame_entity(irn)) { - be_frame_attr_t *a = get_irn_attr(irn); + if (be_has_frame_entity(irn)) { + const be_frame_attr_t *a = get_irn_attr_const(irn); return a->ent; } return NULL; } +int be_get_frame_offset(const ir_node *irn) +{ + assert(is_be_node(irn)); + if (be_has_frame_entity(irn)) { + const be_frame_attr_t *a = get_irn_attr_const(irn); + return a->offset; + } + return 0; +} + void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent) { - be_memperm_attr_t *attr = get_irn_attr(irn); + const be_memperm_attr_t *attr = get_irn_attr_const(irn); assert(be_is_MemPerm(irn)); assert(n < be_get_MemPerm_entity_arity(irn)); @@ -787,7 +866,7 @@ void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent) ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n) { - be_memperm_attr_t *attr = get_irn_attr(irn); + const be_memperm_attr_t *attr = get_irn_attr_const(irn); assert(be_is_MemPerm(irn)); assert(n < be_get_MemPerm_entity_arity(irn)); @@ -797,7 +876,7 @@ ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n) void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent) { - be_memperm_attr_t *attr = get_irn_attr(irn); + const be_memperm_attr_t *attr = get_irn_attr_const(irn); assert(be_is_MemPerm(irn)); assert(n < be_get_MemPerm_entity_arity(irn)); @@ -807,7 +886,7 @@ void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent) ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n) { - be_memperm_attr_t *attr = get_irn_attr(irn); + const be_memperm_attr_t *attr = get_irn_attr_const(irn); assert(be_is_MemPerm(irn)); assert(n < be_get_MemPerm_entity_arity(irn)); @@ -820,84 +899,89 @@ int be_get_MemPerm_entity_arity(const ir_node *irn) return get_irn_arity(irn) - 1; } -static void be_limited(void *data, bitset_t *bs) +static void set_req_single(struct obstack *obst, arch_register_req_t *req, + const arch_register_t *reg, arch_register_req_type_t additional_types) { - be_req_t *req = data; + const arch_register_class_t *cls = arch_register_get_class(reg); + unsigned *limited_bitset; + + limited_bitset = rbitset_obstack_alloc(obst, arch_register_class_n_regs(cls)); + rbitset_set(limited_bitset, arch_register_get_index(reg)); + + req->cls = cls; + req->type |= arch_register_req_type_limited | additional_types; + req->limited = limited_bitset; - switch(req->kind) { - case be_req_kind_negate_old_limited: - case be_req_kind_old_limited: - req->x.old_limited.old_limited(req->x.old_limited.old_limited_env, bs); - if(req->kind == be_req_kind_negate_old_limited) - bitset_flip_all(bs); - break; - case be_req_kind_single_reg: - bitset_clear_all(bs); - bitset_set(bs, req->x.single_reg->index); - break; - } } -static INLINE be_req_t *get_req(ir_node *irn, int pos) +void 
be_set_constr_single_reg_in(ir_node *node, int pos, + const arch_register_t *reg, arch_register_req_type_t additional_types) { - int idx = pos < 0 ? -(pos + 1) : pos; - be_node_attr_t *a = get_irn_attr(irn); - be_reg_data_t *rd = &a->reg_data[idx]; - be_req_t *r = pos < 0 ? &rd->req : &rd->in_req; + arch_register_req_t *req = get_be_req(node, pos); + ir_graph *irg = get_irn_irg(node); + struct obstack *obst = get_irg_obstack(irg); - assert(is_be_node(irn)); - assert(!(pos >= 0) || pos < get_irn_arity(irn)); - assert(!(pos < 0) || -(pos + 1) <= a->max_reg_data); - - return r; + set_req_single(obst, req, reg, additional_types); } -void be_set_constr_single_reg(ir_node *irn, int pos, const arch_register_t *reg) +void be_set_constr_single_reg_out(ir_node *node, int pos, + const arch_register_t *reg, arch_register_req_type_t additional_types) { - be_req_t *r = get_req(irn, pos); + arch_register_req_t *req = get_be_req(node, BE_OUT_POS(pos)); + ir_graph *irg = get_irn_irg(node); + struct obstack *obst = get_irg_obstack(irg); + + /* if we have an ignore register, add ignore flag and just assign it */ + if (reg->type & arch_register_type_ignore) { + additional_types |= arch_register_req_type_ignore; + } - r->kind = be_req_kind_single_reg; - r->x.single_reg = reg; - r->req.limited = be_limited; - r->req.limited_env = r; - r->req.type = arch_register_req_type_limited; - r->req.cls = reg->reg_class; + arch_irn_set_register(node, pos, reg); + set_req_single(obst, req, reg, additional_types); } -void be_set_constr_limited(ir_node *irn, int pos, const arch_register_req_t *req) +void be_set_constr_limited(ir_node *node, int pos, const arch_register_req_t *req) { - be_req_t *r = get_req(irn, pos); + ir_graph *irg = get_irn_irg(node); + struct obstack *obst = get_irg_obstack(irg); + arch_register_req_t *r = get_be_req(node, pos); assert(arch_register_req_is(req, limited)); - - r->kind = be_req_kind_old_limited; - r->req.limited = be_limited; - r->req.limited_env = r; - r->req.type = arch_register_req_type_limited; - r->req.cls = req->cls; - - r->x.old_limited.old_limited = req->limited; - r->x.old_limited.old_limited_env = req->limited_env; + assert(!(req->type & (arch_register_req_type_should_be_same | arch_register_req_type_must_be_different))); + memcpy(r, req, sizeof(r[0])); + r->limited = rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs); } -void be_node_set_flags(ir_node *irn, int pos, arch_irn_flags_t flags) +void be_node_set_reg_class_in(ir_node *irn, int pos, const arch_register_class_t *cls) { - be_req_t *r = get_req(irn, pos); - r->flags = flags; + arch_register_req_t *req = get_be_req(irn, pos); + + req->cls = cls; + + if (cls == NULL) { + req->type = arch_register_req_type_none; + } else if (req->type == arch_register_req_type_none) { + req->type = arch_register_req_type_normal; + } } -void be_node_set_reg_class(ir_node *irn, int pos, const arch_register_class_t *cls) +void be_node_set_reg_class_out(ir_node *irn, int pos, const arch_register_class_t *cls) { - be_req_t *r = get_req(irn, pos); - r->req.cls = cls; - if(r->req.type == arch_register_req_type_none) - r->req.type = arch_register_req_type_normal; + arch_register_req_t *req = get_be_req(irn, BE_OUT_POS(pos)); + + req->cls = cls; + + if (cls == NULL) { + req->type = arch_register_req_type_none; + } else if (req->type == arch_register_req_type_none) { + req->type = arch_register_req_type_normal; + } } void be_node_set_req_type(ir_node *irn, int pos, arch_register_req_type_t type) { - be_req_t *r = get_req(irn, pos); - 
r->req.type = type; + arch_register_req_t *req = get_be_req(irn, pos); + req->type = type; } ir_node *be_get_IncSP_pred(ir_node *irn) { @@ -910,54 +994,57 @@ void be_set_IncSP_pred(ir_node *incsp, ir_node *pred) { set_irn_n(incsp, 0, pred); } -ir_node *be_get_IncSP_mem(ir_node *irn) { - assert(be_is_IncSP(irn)); - return get_irn_n(irn, 1); -} - void be_set_IncSP_offset(ir_node *irn, int offset) { - be_stack_attr_t *a = get_irn_attr(irn); + be_incsp_attr_t *a = get_irn_attr(irn); assert(be_is_IncSP(irn)); a->offset = offset; } int be_get_IncSP_offset(const ir_node *irn) { - be_stack_attr_t *a = get_irn_attr(irn); + const be_incsp_attr_t *a = get_irn_attr_const(irn); assert(be_is_IncSP(irn)); return a->offset; } -ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn) +int be_get_IncSP_align(const ir_node *irn) { - ir_node *bl = get_nodes_block(irn); - ir_graph *irg = get_irn_irg(bl); - const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1); - ir_node *spill; + const be_incsp_attr_t *a = get_irn_attr_const(irn); + assert(be_is_IncSP(irn)); + return a->align; +} +ir_node *be_spill(ir_node *block, ir_node *irn) +{ + ir_graph *irg = get_Block_irg(block); + ir_node *frame = get_irg_frame(irg); + const arch_register_class_t *cls = arch_get_irn_reg_class_out(irn); + const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame); + ir_node *spill; - spill = be_new_Spill(cls, irg, bl, irn); + spill = be_new_Spill(cls, cls_frame, block, frame, irn); return spill; } -ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill) +ir_node *be_reload(const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill) { ir_node *reload; - ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert); - ir_graph *irg = get_irn_irg(bl); + ir_node *bl = is_Block(insert) ? 
insert : get_nodes_block(insert); + ir_graph *irg = get_Block_irg(bl); + ir_node *frame = get_irg_frame(irg); + const arch_register_class_t *cls_frame = arch_get_irn_reg_class_out(frame); assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M)); - reload = be_new_Reload(cls, irg, bl, spill, mode); + reload = be_new_Reload(cls, cls_frame, bl, frame, spill, mode); - if(is_Block(insert)) { - insert = sched_skip(insert, 0, sched_skip_cf_predicator, (void *) arch_env); + if (is_Block(insert)) { + insert = sched_skip(insert, 0, sched_skip_cf_predicator, NULL); sched_add_after(insert, reload); - } - - else + } else { sched_add_before(insert, reload); + } return reload; } @@ -973,100 +1060,48 @@ ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, */ -static void *put_out_reg_req(arch_register_req_t *req, const ir_node *irn, int out_pos) -{ - const be_node_attr_t *a = get_irn_attr(irn); - - if(out_pos < a->max_reg_data) { - memcpy(req, &a->reg_data[out_pos].req, sizeof(req[0])); - - if(be_is_Copy(irn)) { - req->type |= arch_register_req_type_should_be_same; - req->other_same = be_get_Copy_op(irn); - } - } - else { - req->type = arch_register_req_type_none; - req->cls = NULL; - } - - return req; -} - -static void *put_in_reg_req(arch_register_req_t *req, const ir_node *irn, int pos) +static const arch_register_req_t *be_node_get_out_reg_req( + const ir_node *irn, int pos) { - const be_node_attr_t *a = get_irn_attr(irn); + const be_node_attr_t *a = get_irn_attr_const(irn); - if(pos < get_irn_arity(irn) && pos < a->max_reg_data) - memcpy(req, &a->reg_data[pos].in_req, sizeof(req[0])); - else { - req->type = arch_register_req_type_none; - req->cls = NULL; + assert(pos >= 0); + if (pos >= ARR_LEN(a->reg_data)) { + return arch_no_register_req; } - return req; + return &a->reg_data[pos].req; } -static const arch_register_req_t * -be_node_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) +static const arch_register_req_t *be_node_get_in_reg_req( + const ir_node *irn, int pos) { - int out_pos = pos; + const be_node_attr_t *a = get_irn_attr_const(irn); - if(pos < 0) { - if(get_irn_mode(irn) == mode_T) - return NULL; - - out_pos = redir_proj((const ir_node **) &irn, pos); - assert(is_be_node(irn)); - return put_out_reg_req(req, irn, out_pos); - } - - else { - return is_be_node(irn) ? put_in_reg_req(req, irn, pos) : NULL; - } - - return req; -} + assert(pos >= 0); + if (pos >= get_irn_arity(irn) || pos >= ARR_LEN(a->reg_data)) + return arch_no_register_req; -const arch_register_t * -be_node_get_irn_reg(const void *_self, const ir_node *irn) -{ - be_reg_data_t *r = retrieve_reg_data(irn); - return r ? 
r->reg : NULL; + return &a->reg_data[pos].in_req; } -static arch_irn_class_t be_node_classify(const void *_self, const ir_node *irn) +static arch_irn_class_t be_node_classify(const ir_node *irn) { - redir_proj((const ir_node **) &irn, -1); - - switch(be_get_irn_opcode(irn)) { -#define XXX(a,b) case beo_ ## a: return arch_irn_class_ ## b - XXX(Spill, spill); - XXX(Reload, reload); - XXX(Perm, perm); - XXX(Copy, copy); - XXX(Return, branch); - XXX(StackParam, stackparam); -#undef XXX - default: - return arch_irn_class_normal; + switch (get_irn_opcode(irn)) { + case beo_Spill: return arch_irn_class_spill; + case beo_Reload: return arch_irn_class_reload; + case beo_Perm: return arch_irn_class_perm; + case beo_Copy: return arch_irn_class_copy; + default: return 0; } - - return 0; } -static arch_irn_flags_t be_node_get_flags(const void *_self, const ir_node *irn) -{ - be_reg_data_t *r = retrieve_reg_data(irn); - return r ? r->req.flags : 0; -} - -static ir_entity *be_node_get_frame_entity(const void *self, const ir_node *irn) +static ir_entity *be_node_get_frame_entity(const ir_node *irn) { return be_get_frame_entity(irn); } -static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity *ent) +static void be_node_set_frame_entity(ir_node *irn, ir_entity *ent) { be_frame_attr_t *a; @@ -1076,17 +1111,25 @@ static void be_node_set_frame_entity(const void *self, ir_node *irn, ir_entity * a->ent = ent; } -static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset) +static void be_node_set_frame_offset(ir_node *irn, int offset) { - if(be_has_frame_entity(irn)) { - be_frame_attr_t *a = get_irn_attr(irn); - a->offset = offset; - } + be_frame_attr_t *a; + + if(!be_has_frame_entity(irn)) + return; + + a = get_irn_attr(irn); + a->offset = offset; } -static int be_node_get_sp_bias(const void *self, const ir_node *irn) +static int be_node_get_sp_bias(const ir_node *irn) { - return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0; + if(be_is_IncSP(irn)) + return be_get_IncSP_offset(irn); + if(be_is_Call(irn)) + return -(int)be_Call_get_pop(irn); + + return 0; } /* @@ -1098,12 +1141,11 @@ static int be_node_get_sp_bias(const void *self, const ir_node *irn) */ -static const arch_irn_ops_if_t be_node_irn_ops_if = { - be_node_get_irn_reg_req, - be_node_set_irn_reg, - be_node_get_irn_reg, +/* for be nodes */ +static const arch_irn_ops_t be_node_irn_ops = { + be_node_get_in_reg_req, + be_node_get_out_reg_req, be_node_classify, - be_node_get_flags, be_node_get_frame_entity, be_node_set_frame_entity, be_node_set_frame_offset, @@ -1114,18 +1156,59 @@ static const arch_irn_ops_if_t be_node_irn_ops_if = { NULL, /* perform_memory_operand */ }; -static const arch_irn_ops_t be_node_irn_ops = { - &be_node_irn_ops_if -}; +static const arch_register_req_t *dummy_reg_req( + const ir_node *node, int pos) +{ + (void) node; + (void) pos; + return arch_no_register_req; +} -const void *be_node_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) +static arch_irn_class_t dummy_classify(const ir_node *node) { - redir_proj((const ir_node **) &irn, -1); - return is_be_node(irn) ? 
&be_node_irn_ops : NULL; + (void) node; + return 0; } -const arch_irn_handler_t be_node_irn_handler = { - be_node_get_irn_ops +static ir_entity* dummy_get_frame_entity(const ir_node *node) +{ + (void) node; + return NULL; +} + +static void dummy_set_frame_entity(ir_node *node, ir_entity *entity) +{ + (void) node; + (void) entity; + panic("dummy_set_frame_entity() should not be called"); +} + +static void dummy_set_frame_offset(ir_node *node, int bias) +{ + (void) node; + (void) bias; + panic("dummy_set_frame_offset() should not be called"); +} + +static int dummy_get_sp_bias(const ir_node *node) +{ + (void) node; + return 0; +} + +/* for "middleend" nodes */ +static const arch_irn_ops_t dummy_be_irn_ops = { + dummy_reg_req, + dummy_reg_req, + dummy_classify, + dummy_get_frame_entity, + dummy_set_frame_entity, + dummy_set_frame_offset, + dummy_get_sp_bias, + NULL, /* get_inverse */ + NULL, /* get_op_estimated_cost */ + NULL, /* possible_memory_operand */ + NULL, /* perform_memory_operand */ }; /* @@ -1137,27 +1220,11 @@ const arch_irn_handler_t be_node_irn_handler = { */ -typedef struct { - arch_irn_handler_t irn_handler; - arch_irn_ops_t irn_ops; - const arch_env_t *arch_env; - pmap *regs; -} phi_handler_t; - -#define get_phi_handler_from_handler(h) container_of(h, phi_handler_t, irn_handler) -#define get_phi_handler_from_ops(h) container_of(h, phi_handler_t, irn_ops) - -static const void *phi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn) -{ - const phi_handler_t *h = get_phi_handler_from_handler(handler); - return is_Phi(irn) && mode_is_datab(get_irn_mode(irn)) ? &h->irn_ops : NULL; -} - /** - * Get register class of a Phi. - * + * Guess correct register class of a phi node by looking at its arguments */ -static const arch_register_req_t *get_Phi_reg_req_recursive(const phi_handler_t *h, arch_register_req_t *req, const ir_node *phi, pset **visited) +static const arch_register_req_t *get_Phi_reg_req_recursive(const ir_node *phi, + pset **visited) { int n = get_irn_arity(phi); ir_node *op; @@ -1168,133 +1235,137 @@ static const arch_register_req_t *get_Phi_reg_req_recursive(const phi_handler_t for(i = 0; i < n; ++i) { op = get_irn_n(phi, i); + /* Matze: don't we unnecessary constraint our phis with this? + * we only need to take the regclass IMO*/ if(!is_Phi(op)) - return arch_get_register_req(h->arch_env, req, op, BE_OUT_POS(0)); + return arch_get_register_req_out(op); } /* - The operands of that Phi were all Phis themselves. - We have to start a DFS for a non-Phi argument now. - */ + * The operands of that Phi were all Phis themselves. + * We have to start a DFS for a non-Phi argument now. 
+ */ if(!*visited) *visited = pset_new_ptr(16); pset_insert_ptr(*visited, phi); for(i = 0; i < n; ++i) { + const arch_register_req_t *req; op = get_irn_n(phi, i); - if(get_Phi_reg_req_recursive(h, req, op, visited)) + req = get_Phi_reg_req_recursive(op, visited); + if(req != NULL) return req; } return NULL; } -static const arch_register_req_t *phi_get_irn_reg_req(const void *self, arch_register_req_t *req, const ir_node *irn, int pos) +static const arch_register_req_t *phi_get_irn_reg_req(const ir_node *node, + int pos) { - phi_handler_t *phi_handler = get_phi_handler_from_ops(self); - pset *visited = NULL; + backend_info_t *info = be_get_info(node); + const arch_register_req_t *req = info->out_infos[0].req; + (void) pos; + + if (req == NULL) { + if (!mode_is_datab(get_irn_mode(node))) { + req = arch_no_register_req; + } else { + pset *visited = NULL; + ir_graph *irg = get_irn_irg(node); + struct obstack *obst = get_irg_obstack(irg); - get_Phi_reg_req_recursive(phi_handler, req, irn, &visited); - /* Set the requirements type to normal, since an operand of the Phi could have had constraints. */ - req->type = arch_register_req_type_normal; - if(visited) - del_pset(visited); + req = get_Phi_reg_req_recursive(node, &visited); + assert(req->cls != NULL); + + if (req->type != arch_register_req_type_normal) { + arch_register_req_t *nreq = obstack_alloc(obst, sizeof(*nreq)); + *nreq = *req; + nreq->type = arch_register_req_type_normal; + req = nreq; + } + + if (visited != NULL) + del_pset(visited); + } + info->out_infos[0].req = req; + } return req; } -static void phi_set_irn_reg(const void *self, ir_node *irn, const arch_register_t *reg) +void be_set_phi_reg_req(ir_node *node, const arch_register_req_t *req) { - phi_handler_t *h = get_phi_handler_from_ops(self); - pmap_insert(h->regs, irn, (void *) reg); -} + backend_info_t *info = be_get_info(node); + info->out_infos[0].req = req; -static const arch_register_t *phi_get_irn_reg(const void *self, const ir_node *irn) -{ - phi_handler_t *h = get_phi_handler_from_ops(self); - return pmap_get(h->regs, (void *) irn); + assert(mode_is_datab(get_irn_mode(node))); } -static arch_irn_class_t phi_classify(const void *_self, const ir_node *irn) +int be_dump_phi_reg_reqs(ir_node *node, FILE *F, dump_reason_t reason) { - return arch_irn_class_normal; -} + backend_info_t *info; + int i; + int arity; -static arch_irn_flags_t phi_get_flags(const void *_self, const ir_node *irn) -{ - return arch_irn_flags_none; -} + switch(reason) { + case dump_node_opcode_txt: + fputs(get_op_name(get_irn_op(node)), F); + break; + case dump_node_mode_txt: + fprintf(F, "%s", get_mode_name(get_irn_mode(node))); + break; + case dump_node_nodeattr_txt: + break; + case dump_node_info_txt: + info = be_get_info(node); + + /* we still have a little problem with the initialisation order. This + dump function is attached to the Phi ops before we can be sure + that all backend infos have been constructed... 
*/ + if (info != NULL) { + const arch_register_req_t *req = info->out_infos[0].req; + const arch_register_t *reg = arch_irn_get_register(node, 0); + + arity = get_irn_arity(node); + for (i = 0; i < arity; ++i) { + fprintf(F, "inreq #%d = ", i); + arch_dump_register_req(F, req, node); + fputs("\n", F); + } + fprintf(F, "outreq #0 = "); + arch_dump_register_req(F, req, node); + fputs("\n", F); -static ir_entity *phi_get_frame_entity(const void *_self, const ir_node *irn) -{ - return NULL; -} + fputs("\n", F); -static void phi_set_frame_entity(const void *_self, ir_node *irn, ir_entity *ent) -{ -} + fprintf(F, "reg #0 = %s\n", reg != NULL ? reg->name : "n/a"); + } -static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias) -{ -} + break; + + default: + break; + } -static int phi_get_sp_bias(const void* self, const ir_node *irn) -{ return 0; } -static const arch_irn_ops_if_t phi_irn_ops = { +static const arch_irn_ops_t phi_irn_ops = { phi_get_irn_reg_req, - phi_set_irn_reg, - phi_get_irn_reg, - phi_classify, - phi_get_flags, - phi_get_frame_entity, - phi_set_frame_entity, - phi_set_frame_offset, - phi_get_sp_bias, + phi_get_irn_reg_req, + dummy_classify, + dummy_get_frame_entity, + dummy_set_frame_entity, + dummy_set_frame_offset, + dummy_get_sp_bias, NULL, /* get_inverse */ NULL, /* get_op_estimated_cost */ NULL, /* possible_memory_operand */ NULL, /* perform_memory_operand */ }; -static const arch_irn_handler_t phi_irn_handler = { - phi_get_irn_ops -}; - -arch_irn_handler_t *be_phi_handler_new(const arch_env_t *arch_env) -{ - phi_handler_t *h = xmalloc(sizeof(h[0])); - h->irn_handler.get_irn_ops = phi_get_irn_ops; - h->irn_ops.impl = &phi_irn_ops; - h->arch_env = arch_env; - h->regs = pmap_create(); - return (arch_irn_handler_t *) h; -} - -void be_phi_handler_free(arch_irn_handler_t *handler) -{ - phi_handler_t *h = (void *) handler; - pmap_destroy(h->regs); - free(handler); -} - -const void *be_phi_get_irn_ops(const arch_irn_handler_t *self, const ir_node *irn) -{ - phi_handler_t *phi_handler = get_phi_handler_from_handler(self); - return is_Phi(irn) ? &phi_handler->irn_ops : NULL; -} - -void be_phi_handler_reset(arch_irn_handler_t *handler) -{ - phi_handler_t *h = get_phi_handler_from_handler(handler); - if(h->regs) - pmap_destroy(h->regs); - h->regs = pmap_create(); -} - /* _ _ _ ____ _ | \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _ @@ -1304,67 +1375,38 @@ void be_phi_handler_reset(arch_irn_handler_t *handler) |_| |___/ */ -/** - * Dumps a register requirement to a file. - */ -static void dump_node_req(FILE *f, int idx, be_req_t *req) -{ - unsigned i; - int did_something = 0; - char buf[16]; - const char *prefix = buf; - - snprintf(buf, sizeof(buf), "#%d ", idx); - buf[sizeof(buf) - 1] = '\0'; - - if(req->flags != arch_irn_flags_none) { - fprintf(f, "%sflags: ", prefix); - prefix = ""; - for(i = arch_irn_flags_none; i <= log2_ceil(arch_irn_flags_last); ++i) { - if(req->flags & (1 << i)) { - fprintf(f, "%s%s", prefix, arch_irn_flag_str(1 << i)); - prefix = "|"; - } - } - prefix = ", "; - did_something = 1; - } - - if(req->req.cls != 0) { - char tmp[256]; - fprintf(f, prefix); - arch_register_req_format(tmp, sizeof(tmp), &req->req); - fprintf(f, "%s", tmp); - did_something = 1; - } - - if(did_something) - fprintf(f, "\n"); -} - /** * Dumps node register requirements to a file. 
*/ -static void dump_node_reqs(FILE *f, ir_node *irn) +static void dump_node_reqs(FILE *F, ir_node *node) { int i; - be_node_attr_t *a = get_irn_attr(irn); - - fprintf(f, "registers: \n"); - for(i = 0; i < a->max_reg_data; ++i) { - be_reg_data_t *rd = &a->reg_data[i]; - if(rd->reg) - fprintf(f, "#%d: %s\n", i, rd->reg->name); + be_node_attr_t *a = get_irn_attr(node); + int len = ARR_LEN(a->reg_data); + + for (i = 0; i < len; ++i) { + const arch_register_req_t *req = &a->reg_data[i].in_req; + if (req->cls == NULL) + continue; + fprintf(F, "inreq #%d = ", i); + arch_dump_register_req(F, req, node); + fputs("\n", F); } - fprintf(f, "in requirements\n"); - for(i = 0; i < a->max_reg_data; ++i) { - dump_node_req(f, i, &a->reg_data[i].in_req); + for (i = 0; i < len; ++i) { + const arch_register_req_t *req = &a->reg_data[i].req; + if (req->cls == NULL) + continue; + fprintf(F, "outreq #%d = ", i); + arch_dump_register_req(F, req, node); + fputs("\n", F); } - fprintf(f, "\nout requirements\n"); - for(i = 0; i < a->max_reg_data; ++i) { - dump_node_req(f, i, &a->reg_data[i].req); + fputs("\n", F); + + for (i = 0; i < len; ++i) { + const arch_register_t *reg = arch_irn_get_register(node, i); + fprintf(F, "reg #%d = %s\n", i, reg != NULL ? reg->name : "n/a"); } } @@ -1379,12 +1421,29 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason) switch(reason) { case dump_node_opcode_txt: - fprintf(f, get_op_name(get_irn_op(irn))); + fputs(get_op_name(get_irn_op(irn)), f); break; case dump_node_mode_txt: - fprintf(f, get_mode_name(get_irn_mode(irn))); + if(be_is_Perm(irn) || be_is_Copy(irn) || be_is_CopyKeep(irn)) { + fprintf(f, " %s", get_mode_name(get_irn_mode(irn))); + } break; case dump_node_nodeattr_txt: + if(be_is_Call(irn)) { + be_call_attr_t *a = (be_call_attr_t *) at; + if (a->ent) + fprintf(f, " [%s] ", get_entity_name(a->ent)); + } + if(be_is_IncSP(irn)) { + const be_incsp_attr_t *attr = get_irn_generic_attr_const(irn); + if(attr->offset == BE_STACK_FRAME_SIZE_EXPAND) { + fprintf(f, " [Setup Stackframe] "); + } else if(attr->offset == BE_STACK_FRAME_SIZE_SHRINK) { + fprintf(f, " [Destroy Stackframe] "); + } else { + fprintf(f, " [%d] ", attr->offset); + } + } break; case dump_node_info_txt: dump_node_reqs(f, irn); @@ -1392,17 +1451,17 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason) if(be_has_frame_entity(irn)) { be_frame_attr_t *a = (be_frame_attr_t *) at; if (a->ent) { - int bits = get_type_size_bits(get_entity_type(a->ent)); - ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bits\n", - a->ent, a->offset, a->offset, bits, bits); + unsigned size = get_type_size_bytes(get_entity_type(a->ent)); + ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bytes\n", + a->ent, a->offset, a->offset, size, size); } } - switch(be_get_irn_opcode(irn)) { + switch (get_irn_opcode(irn)) { case beo_IncSP: { - be_stack_attr_t *a = (be_stack_attr_t *) at; + be_incsp_attr_t *a = (be_incsp_attr_t *) at; if (a->offset == BE_STACK_FRAME_SIZE_EXPAND) fprintf(f, "offset: FRAME_SIZE\n"); else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK) @@ -1450,9 +1509,12 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason) */ static void copy_attr(const ir_node *old_node, ir_node *new_node) { - be_node_attr_t *old_attr = get_irn_attr(old_node); + const be_node_attr_t *old_attr = get_irn_attr_const(old_node); be_node_attr_t *new_attr = get_irn_attr(new_node); - int i; + struct obstack *obst = get_irg_obstack(get_irn_irg(new_node)); + backend_info_t *old_info = 
be_get_info(old_node); + backend_info_t *new_info = be_get_info(new_node); + unsigned i, len; assert(is_be_node(old_node)); assert(is_be_node(new_node)); @@ -1460,23 +1522,47 @@ static void copy_attr(const ir_node *old_node, ir_node *new_node) memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node))); new_attr->reg_data = NULL; - if(new_attr->max_reg_data > 0) { - new_attr->reg_data = NEW_ARR_D(be_reg_data_t, get_irg_obstack(get_irn_irg(new_node)), new_attr->max_reg_data); - memcpy(new_attr->reg_data, old_attr->reg_data, new_attr->max_reg_data * sizeof(be_reg_data_t)); - - for(i = 0; i < old_attr->max_reg_data; ++i) { - be_req_t *r; - - r = &new_attr->reg_data[i].req; - r->req.limited_env = r; + if(old_attr->reg_data != NULL) + len = ARR_LEN(old_attr->reg_data); + else + len = 0; + + if(get_irn_op(old_node)->opar == oparity_dynamic + || be_is_RegParams(old_node)) { + new_attr->reg_data = NEW_ARR_F(be_reg_data_t, len); + new_info->out_infos = NEW_ARR_F(reg_out_info_t, len); + } else { + new_attr->reg_data = NEW_ARR_D(be_reg_data_t, obst, len); + new_info->out_infos = NEW_ARR_D(reg_out_info_t, obst, len); + } - r = &new_attr->reg_data[i].in_req; - r->req.limited_env = r; + if(len > 0) { + memcpy(new_attr->reg_data, old_attr->reg_data, len * sizeof(be_reg_data_t)); + memcpy(new_info->out_infos, old_info->out_infos, len * sizeof(new_info->out_infos[0])); + for(i = 0; i < len; ++i) { + const be_reg_data_t *rd = &old_attr->reg_data[i]; + be_reg_data_t *newrd = &new_attr->reg_data[i]; + if (arch_register_req_is(&rd->req, limited)) { + const arch_register_req_t *req = &rd->req; + arch_register_req_t *new_req = &newrd->req; + new_req->limited + = rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs); + } + if(arch_register_req_is(&rd->in_req, limited)) { + const arch_register_req_t *req = &rd->in_req; + arch_register_req_t *new_req = &newrd->in_req; + new_req->limited + = rbitset_duplicate_obstack_alloc(obst, req->limited, req->cls->n_regs); + } } } } static const ir_op_ops be_node_op_ops = { + firm_default_hash, + NULL, + NULL, + NULL, NULL, NULL, NULL, @@ -1489,5 +1575,60 @@ static const ir_op_ops be_node_op_ops = { NULL, NULL, dump_node, - NULL + NULL, + &be_node_irn_ops }; + +int is_be_node(const ir_node *irn) +{ + return get_op_ops(get_irn_op(irn))->be_ops == &be_node_irn_ops; +} + +void be_init_op(void) +{ + ir_opcode opc; + + /* Acquire all needed opcodes. 
*/ + op_be_Spill = new_ir_op(beo_Spill, "be_Spill", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_Reload = new_ir_op(beo_Reload, "be_Reload", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_Perm = new_ir_op(beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_MemPerm = new_ir_op(beo_MemPerm, "be_MemPerm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops); + op_be_Copy = new_ir_op(beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_Keep = new_ir_op(beo_Keep, "be_Keep", op_pin_state_floats, K, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_CopyKeep = new_ir_op(beo_CopyKeep, "be_CopyKeep", op_pin_state_floats, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_Call = new_ir_op(beo_Call, "be_Call", op_pin_state_pinned, F|M, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops); + op_be_Return = new_ir_op(beo_Return, "be_Return", op_pin_state_pinned, X, oparity_dynamic, 0, sizeof(be_return_attr_t), &be_node_op_ops); + op_be_AddSP = new_ir_op(beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_SubSP = new_ir_op(beo_SubSP, "be_SubSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_IncSP = new_ir_op(beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_incsp_attr_t), &be_node_op_ops); + op_be_RegParams = new_ir_op(beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_FrameAddr = new_ir_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops); + op_be_Barrier = new_ir_op(beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_dynamic, 0, sizeof(be_node_attr_t), &be_node_op_ops); + op_be_Unwind = new_ir_op(beo_Unwind, "be_Unwind", op_pin_state_pinned, X, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops); + + op_be_Spill->ops.node_cmp_attr = FrameAddr_cmp_attr; + op_be_Reload->ops.node_cmp_attr = FrameAddr_cmp_attr; + op_be_Perm->ops.node_cmp_attr = node_cmp_attr; + op_be_MemPerm->ops.node_cmp_attr = node_cmp_attr; + op_be_Copy->ops.node_cmp_attr = node_cmp_attr; + op_be_Keep->ops.node_cmp_attr = node_cmp_attr; + op_be_CopyKeep->ops.node_cmp_attr = node_cmp_attr; + op_be_Call->ops.node_cmp_attr = Call_cmp_attr; + op_be_Return->ops.node_cmp_attr = Return_cmp_attr; + op_be_AddSP->ops.node_cmp_attr = node_cmp_attr; + op_be_SubSP->ops.node_cmp_attr = node_cmp_attr; + op_be_IncSP->ops.node_cmp_attr = IncSP_cmp_attr; + op_be_RegParams->ops.node_cmp_attr = node_cmp_attr; + op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr; + op_be_Barrier->ops.node_cmp_attr = node_cmp_attr; + op_be_Unwind->ops.node_cmp_attr = node_cmp_attr; + + /* attach out dummy_ops to middle end nodes */ + for (opc = iro_First; opc <= iro_Last; ++opc) { + ir_op *op = get_irp_opcode(opc); + assert(op->ops.be_ops == NULL); + op->ops.be_ops = &dummy_be_irn_ops; + } + + op_Phi->ops.be_ops = &phi_irn_ops; +}
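For context, the reworked Phi handling in this patch drops the old pmap-based phi_handler and instead caches each Phi's output requirement per node (in backend_info_t), either computed lazily by the DFS in get_Phi_reg_req_recursive() or set explicitly through the new be_set_phi_reg_req(). The following is a minimal usage sketch of that new interface; the walker and driver names (pin_phi_walker, pin_all_phis), the exact set of includes, and the surrounding code are illustrative assumptions and not part of this patch — only be_set_phi_reg_req(), irg_walk_graph(), get_irg_obstack() and the arch_register_req_t fields already used above are relied on:

#include <string.h>

#include "obst.h"
#include "irgraph_t.h"
#include "irgwalk.h"
#include "irnode.h"
#include "irmode.h"
#include "bearch.h"
#include "benode_t.h"

/* Hypothetical walker: give every data-mode Phi a plain requirement for
 * the register class carried in env.  "Plain" means
 * arch_register_req_type_normal with no limited bitset, mirroring what
 * phi_get_irn_reg_req() would otherwise compute lazily via the DFS in
 * get_Phi_reg_req_recursive(). */
static void pin_phi_walker(ir_node *node, void *env)
{
	const arch_register_req_t *req = env;

	if (!is_Phi(node) || !mode_is_datab(get_irn_mode(node)))
		return;

	/* stores req in the node's backend_info_t (out_infos[0].req) */
	be_set_phi_reg_req(node, req);
}

/* Hypothetical driver: constrain all data Phis of irg to cls. */
static void pin_all_phis(ir_graph *irg, const arch_register_class_t *cls)
{
	struct obstack       *obst = get_irg_obstack(irg);
	arch_register_req_t  *req  = obstack_alloc(obst, sizeof(*req));

	memset(req, 0, sizeof(*req));
	req->type = arch_register_req_type_normal;
	req->cls  = cls;

	irg_walk_graph(irg, NULL, pin_phi_walker, req);
}

Because be_set_phi_reg_req() stores the requirement pointer itself rather than copying the struct, the sketch allocates the requirement on the irg obstack so it outlives the walk; this matches what phi_get_irn_reg_req() does in the patch when it normalises a constrained requirement with obstack_alloc().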