X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_common_transform.c;h=1aa217d77efa7e62546e86d94097e24a674a004a;hb=9e8485b091a07e5dffaa494982bf107c452569ac;hp=616615e01f3a3fd914777e9fcab288cbc5f8d70f;hpb=c4d387cb4389654e978ea808027ea2be724c513c;p=libfirm diff --git a/ir/be/ia32/ia32_common_transform.c b/ir/be/ia32/ia32_common_transform.c index 616615e01..1aa217d77 100644 --- a/ir/be/ia32/ia32_common_transform.c +++ b/ir/be/ia32/ia32_common_transform.c @@ -21,17 +21,20 @@ * @file * @brief This file implements the common parts of IR transformation from * firm into ia32-Firm. - * @author Sebastian Buchwald + * @author Matthias Braun, Sebastian Buchwald * @version $Id: ia32_common_transform.c 21012 2008-08-06 13:35:17Z beck $ */ +#include "config.h" #include "error.h" -#include "irargs_t.h" #include "ircons.h" #include "irprintf.h" #include "typerep.h" +#include "bitset.h" #include "../betranshlp.h" +#include "../beirg.h" +#include "../beabi.h" #include "ia32_architecture.h" #include "ia32_common_transform.h" @@ -71,13 +74,8 @@ static int check_immediate_constraint(long val, char immediate_constraint_type) } } -/** - * creates a unique ident by adding a number to a tag - * - * @param tag the tag string, must contain a %d if a number - * should be added - */ -static ident *unique_id(const char *tag) +/* creates a unique ident by adding a number to a tag */ +ident *ia32_unique_id(const char *tag) { static unsigned id = 0; char str[256]; @@ -87,7 +85,7 @@ static ident *unique_id(const char *tag) } /** - * Get a primitive type for a mode. + * Get a primitive type for a mode with alignment 16. */ static ir_type *ia32_get_prim_type(pmap *types, ir_mode *mode) { @@ -95,10 +93,10 @@ static ir_type *ia32_get_prim_type(pmap *types, ir_mode *mode) ir_type *res; if (! 
e) { - char buf[64]; - snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode)); - res = new_type_primitive(new_id_from_str(buf), mode); - set_type_alignment_bytes(res, 16); + res = new_type_primitive(mode); + if (get_mode_size_bits(mode) >= 80) { + set_type_alignment_bytes(res, 16); + } pmap_insert(types, mode, res); } else @@ -142,12 +140,11 @@ ir_entity *create_float_const_entity(ir_node *cnst) } else tp = ia32_get_prim_type(isa->types, mode); - res = new_entity(get_glob_type(), unique_id(".LC%u"), tp); + res = new_entity(get_glob_type(), ia32_unique_id(".LC%u"), tp); set_entity_ld_ident(res, get_entity_ident(res)); - set_entity_visibility(res, visibility_local); - set_entity_variability(res, variability_constant); - set_entity_allocation(res, allocation_static); + set_entity_visibility(res, ir_visibility_local); + add_entity_linkage(res, IR_LINKAGE_CONSTANT); /* we create a new entity here: It's initialization must resist on the const code irg */ @@ -164,13 +161,13 @@ ir_entity *create_float_const_entity(ir_node *cnst) return res; } -ir_node *create_Immediate(ir_entity *symconst, int symconst_sign, long val) +ir_node *ia32_create_Immediate(ir_entity *symconst, int symconst_sign, long val) { ir_graph *irg = current_ir_graph; ir_node *start_block = get_irg_start_block(irg); - ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block, - symconst, symconst_sign, val); - arch_set_irn_register(env_cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]); + ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst, + symconst_sign, no_pic_adjust, val); + arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]); return immediate; } @@ -184,37 +181,28 @@ const arch_register_t *ia32_get_clobber_register(const char *clobber) /* TODO: construct a hashmap instead of doing linear search for clobber * register */ - for(c = 0; c < N_CLASSES; ++c) { + for (c = 0; c < N_CLASSES; ++c) { cls = & ia32_reg_classes[c]; - for(r = 0; r < cls->n_regs; ++r) { + for (r = 0; r < cls->n_regs; ++r) { const arch_register_t *temp_reg = arch_register_for_index(cls, r); - if(strcmp(temp_reg->name, clobber) == 0 + if (strcmp(temp_reg->name, clobber) == 0 || (c == CLASS_ia32_gp && strcmp(temp_reg->name+1, clobber) == 0)) { reg = temp_reg; break; } } - if(reg != NULL) + if (reg != NULL) break; } return reg; } -#ifndef NDEBUG -const char *ia32_get_old_node_name(ia32_code_gen_t *cg, ir_node *irn) { - ia32_isa_t *isa = (ia32_isa_t*) cg->arch_env; - - lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", irn); - obstack_1grow(isa->name_obst, 0); - return obstack_finish(isa->name_obst); -} -#endif /* NDEBUG */ - -int ia32_mode_needs_gp_reg(ir_mode *mode) { - if(mode == mode_fpcw) +int ia32_mode_needs_gp_reg(ir_mode *mode) +{ + if (mode == mode_fpcw) return 0; - if(get_mode_size_bits(mode) > 32) + if (get_mode_size_bits(mode) > 32) return 0; return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b; } @@ -222,7 +210,6 @@ int ia32_mode_needs_gp_reg(ir_mode *mode) { static void parse_asm_constraints(constraint_t *constraint, const char *c, int is_output) { - asm_constraint_flags_t flags = 0; char immediate_type = '\0'; unsigned limited = 0; const arch_register_class_t *cls = NULL; @@ -234,7 +221,7 @@ static void parse_asm_constraints(constraint_t *constraint, const char *c, memset(constraint, 0, sizeof(constraint[0])); constraint->same_as = -1; - if(*c == 0) { + if (*c == 0) { /* a memory constraint: no need to do anything in backend about it * (the dependencies are already respected by 
the memory edge of * the node) */ @@ -243,28 +230,24 @@ static void parse_asm_constraints(constraint_t *constraint, const char *c, /* TODO: improve error messages with node and source info. (As users can * easily hit these) */ - while(*c != 0) { - switch(*c) { + while (*c != 0) { + switch (*c) { case ' ': case '\t': case '\n': break; - case '=': - flags |= ASM_CONSTRAINT_FLAG_MODIFIER_WRITE - | ASM_CONSTRAINT_FLAG_MODIFIER_NO_READ; - break; + /* Skip out/in-out marker */ + case '=': break; + case '+': break; - case '+': - flags |= ASM_CONSTRAINT_FLAG_MODIFIER_WRITE - | ASM_CONSTRAINT_FLAG_MODIFIER_READ; - break; + case '&': break; case '*': ++c; break; case '#': - while(*c != 0 && *c != ',') + while (*c != 0 && *c != ',') ++c; break; @@ -397,7 +380,7 @@ static void parse_asm_constraints(constraint_t *constraint, const char *c, panic("can only specify same constraint on input"); sscanf(c, "%d%n", &same_as, &p); - if(same_as >= 0) { + if (same_as >= 0) { c += p; continue; } @@ -433,7 +416,7 @@ static void parse_asm_constraints(constraint_t *constraint, const char *c, ++c; } - if(same_as >= 0) { + if (same_as >= 0) { if (cls != NULL) panic("same as and register constraint not supported"); if (immediate_type != '\0') @@ -453,13 +436,38 @@ static void parse_asm_constraints(constraint_t *constraint, const char *c, constraint->immediate_type = immediate_type; } +static bool can_match(const arch_register_req_t *in, + const arch_register_req_t *out) +{ + if (in->cls != out->cls) + return false; + if ( (in->type & arch_register_req_type_limited) == 0 + || (out->type & arch_register_req_type_limited) == 0 ) + return true; + + return (*in->limited & *out->limited) != 0; +} + +static inline ir_node *get_new_node(ir_node *node) +{ +#ifdef FIRM_GRGEN_BE + if (be_transformer == TRANSFORMER_DEFAULT) { + return be_transform_node(node); + } else { + return node; + } +#else + return be_transform_node(node); +#endif +} + ir_node *gen_ASM(ir_node *node) { - ir_graph *irg = current_ir_graph; ir_node *block = get_nodes_block(node); - ir_node *new_block = be_transform_node(block); + ir_node *new_block = get_new_node(block); dbg_info *dbgi = get_irn_dbg_info(node); int i, arity; + int value_arity; int out_idx; ir_node **in; ir_node *new_node; @@ -475,6 +483,11 @@ ir_node *gen_ASM(ir_node *node) const ir_asm_constraint *out_constraints; ident **clobbers; int clobbers_flags = 0; + unsigned clobber_bits[N_CLASSES]; + int out_size; + backend_info_t *info; + + memset(&clobber_bits, 0, sizeof(clobber_bits)); /* workaround for lots of buggy code out there as most people think volatile * asm is enough for everything and forget the flags (linux kernel, etc.) 
@@ -484,19 +497,24 @@ ir_node *gen_ASM(ir_node *node) } arity = get_irn_arity(node); - in = alloca(arity * sizeof(in[0])); - memset(in, 0, arity * sizeof(in[0])); + in = ALLOCANZ(ir_node*, arity); clobbers = get_ASM_clobbers(node); n_clobbers = 0; - for(i = 0; i < get_ASM_n_clobbers(node); ++i) { - const char *c = get_id_str(clobbers[i]); + for (i = 0; i < get_ASM_n_clobbers(node); ++i) { + const arch_register_req_t *req; + const char *c = get_id_str(clobbers[i]); + if (strcmp(c, "memory") == 0) continue; if (strcmp(c, "cc") == 0) { clobbers_flags = 1; continue; } + + req = parse_clobber(c); + clobber_bits[req->cls->index] |= *req->limited; + n_clobbers++; } n_out_constraints = get_ASM_n_output_constraints(node); @@ -506,26 +524,27 @@ ir_node *gen_ASM(ir_node *node) out_constraints = get_ASM_output_constraints(node); /* determine size of register_map */ - for(out_idx = 0; out_idx < n_out_constraints; ++out_idx) { + for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) { const ir_asm_constraint *constraint = &out_constraints[out_idx]; if (constraint->pos > reg_map_size) reg_map_size = constraint->pos; } - for(i = 0; i < arity; ++i) { - const ir_asm_constraint *constraint = &in_constraints[i]; - if(constraint->pos > reg_map_size) + for (i = 0; i < arity; ++i) { + const ir_asm_constraint *constraint = &in_constraints[i]; + if (constraint->pos > reg_map_size) reg_map_size = constraint->pos; } ++reg_map_size; - obst = get_irg_obstack(irg); + obst = get_irg_obstack(current_ir_graph); register_map = NEW_ARR_D(ia32_asm_reg_t, obst, reg_map_size); memset(register_map, 0, reg_map_size * sizeof(register_map[0])); /* construct output constraints */ - out_reg_reqs = obstack_alloc(obst, out_arity * sizeof(out_reg_reqs[0])); + out_size = out_arity + 1; + out_reg_reqs = obstack_alloc(obst, out_size * sizeof(out_reg_reqs[0])); - for(out_idx = 0; out_idx < n_out_constraints; ++out_idx) { + for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) { const ir_asm_constraint *constraint = &out_constraints[out_idx]; const char *c = get_id_str(constraint->constraint); unsigned pos = constraint->pos; @@ -546,7 +565,7 @@ ir_node *gen_ASM(ir_node *node) /* inputs + input constraints */ in_reg_reqs = obstack_alloc(obst, arity * sizeof(in_reg_reqs[0])); - for(i = 0; i < arity; ++i) { + for (i = 0; i < arity; ++i) { ir_node *pred = get_irn_n(node, i); const ir_asm_constraint *constraint = &in_constraints[i]; ident *constr_id = constraint->constraint; @@ -554,10 +573,24 @@ ir_node *gen_ASM(ir_node *node) unsigned pos = constraint->pos; int is_memory_op = 0; ir_node *input = NULL; + unsigned r_clobber_bits; constraint_t parsed_constraint; const arch_register_req_t *req; parse_asm_constraints(&parsed_constraint, c, 0); + if (parsed_constraint.cls != NULL) { + r_clobber_bits = clobber_bits[parsed_constraint.cls->index]; + if (r_clobber_bits != 0) { + if (parsed_constraint.all_registers_allowed) { + parsed_constraint.all_registers_allowed = 0; + be_abi_set_non_ignore_regs(env_cg->birg->abi, + parsed_constraint.cls, + &parsed_constraint.allowed_registers); + } + parsed_constraint.allowed_registers &= ~r_clobber_bits; + } + } + req = make_register_req(&parsed_constraint, n_out_constraints, out_reg_reqs, i); in_reg_reqs[i] = req; @@ -569,12 +602,12 @@ ir_node *gen_ASM(ir_node *node) if (input == NULL) { ir_node *pred = get_irn_n(node, i); - input = be_transform_node(pred); + input = get_new_node(pred); if (parsed_constraint.cls == NULL && parsed_constraint.same_as < 0) { is_memory_op = 1; - } else 
if(parsed_constraint.memory_possible) { + } else if (parsed_constraint.memory_possible) { /* TODO: match Load or Load/Store if memory possible is set */ } } @@ -588,7 +621,7 @@ ir_node *gen_ASM(ir_node *node) } /* parse clobbers */ - for(i = 0; i < get_ASM_n_clobbers(node); ++i) { + for (i = 0; i < get_ASM_n_clobbers(node); ++i) { const char *c = get_id_str(clobbers[i]); const arch_register_req_t *req; @@ -600,45 +633,224 @@ ir_node *gen_ASM(ir_node *node) ++out_idx; } - new_node = new_rd_ia32_Asm(dbgi, irg, new_block, arity, in, out_arity, + /* count inputs which are real values (and not memory) */ + value_arity = 0; + for (i = 0; i < arity; ++i) { + ir_node *in = get_irn_n(node, i); + if (get_irn_mode(in) == mode_M) + continue; + ++value_arity; + } + + /* Attempt to make ASM node register pressure faithful. + * (This does not work for complicated cases yet!) + * + * Algorithm: Check if there are fewer inputs or outputs (I will call this + * the smaller list). Then try to match each constraint of the smaller list + * to 1 of the other list. If we can't match it, then we have to add a dummy + * input/output to the other list + * + * FIXME: This is still broken in lots of cases. But at least better than + * before... + * FIXME: need to do this per register class... + */ + if (out_arity <= value_arity) { + int orig_arity = arity; + int in_size = arity; + int o; + bitset_t *used_ins = bitset_alloca(arity); + for (o = 0; o < out_arity; ++o) { + int i; + const arch_register_req_t *outreq = out_reg_reqs[o]; + + if (outreq->cls == NULL) { + continue; + } + + for (i = 0; i < orig_arity; ++i) { + const arch_register_req_t *inreq; + if (bitset_is_set(used_ins, i)) + continue; + inreq = in_reg_reqs[i]; + if (!can_match(outreq, inreq)) + continue; + bitset_set(used_ins, i); + break; + } + /* did we find any match? */ + if (i < orig_arity) + continue; + + /* we might need more space in the input arrays */ + if (arity >= in_size) { + const arch_register_req_t **new_in_reg_reqs; + ir_node **new_in; + + in_size *= 2; + new_in_reg_reqs + = obstack_alloc(obst, in_size*sizeof(in_reg_reqs[0])); + memcpy(new_in_reg_reqs, in_reg_reqs, arity * sizeof(new_in_reg_reqs[0])); + new_in = ALLOCANZ(ir_node*, in_size); + memcpy(new_in, in, arity*sizeof(new_in[0])); + + in_reg_reqs = new_in_reg_reqs; + in = new_in; + } + + /* add a new (dummy) input which occupies the register */ + assert(outreq->type & arch_register_req_type_limited); + in_reg_reqs[arity] = outreq; + in[arity] = new_bd_ia32_ProduceVal(NULL, block); + be_dep_on_frame(in[arity]); + ++arity; + } + } else { + int i; + bitset_t *used_outs = bitset_alloca(out_arity); + int orig_out_arity = out_arity; + for (i = 0; i < arity; ++i) { + int o; + const arch_register_req_t *inreq = in_reg_reqs[i]; + + if (inreq->cls == NULL) { + continue; + } + + for (o = 0; o < orig_out_arity; ++o) { + const arch_register_req_t *outreq; + if (bitset_is_set(used_outs, o)) + continue; + outreq = out_reg_reqs[o]; + if (!can_match(outreq, inreq)) + continue; + bitset_set(used_outs, i); + break; + } + /* did we find any match? 
*/ + if (o < orig_out_arity) + continue; + + /* we might need more space in the output arrays */ + if (out_arity >= out_size) { + const arch_register_req_t **new_out_reg_reqs; + + out_size *= 2; + new_out_reg_reqs + = obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0])); + memcpy(new_out_reg_reqs, out_reg_reqs, + out_arity * sizeof(new_out_reg_reqs[0])); + out_reg_reqs = new_out_reg_reqs; + } + + /* add a new (dummy) output which occupies the register */ + assert(inreq->type & arch_register_req_type_limited); + out_reg_reqs[out_arity] = inreq; + ++out_arity; + } + } + + /* append none register requirement for the memory output */ + if (out_arity + 1 >= out_size) { + const arch_register_req_t **new_out_reg_reqs; + + out_size = out_arity + 1; + new_out_reg_reqs + = obstack_alloc(obst, out_size*sizeof(out_reg_reqs[0])); + memcpy(new_out_reg_reqs, out_reg_reqs, + out_arity * sizeof(new_out_reg_reqs[0])); + out_reg_reqs = new_out_reg_reqs; + } + + /* add a new (dummy) output which occupies the register */ + out_reg_reqs[out_arity] = arch_no_register_req; + ++out_arity; + + new_node = new_bd_ia32_Asm(dbgi, new_block, arity, in, out_arity, get_ASM_text(node), register_map); - set_ia32_out_req_all(new_node, out_reg_reqs); + if (arity == 0) + be_dep_on_frame(new_node); + + info = be_get_info(new_node); + for (i = 0; i < out_arity; ++i) { + info->out_infos[i].req = out_reg_reqs[i]; + } set_ia32_in_req_all(new_node, in_reg_reqs); - SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + SET_IA32_ORIG_NODE(new_node, node); return new_node; } +ir_node *gen_CopyB(ir_node *node) +{ + ir_node *block = get_new_node(get_nodes_block(node)); + ir_node *src = get_CopyB_src(node); + ir_node *new_src = get_new_node(src); + ir_node *dst = get_CopyB_dst(node); + ir_node *new_dst = get_new_node(dst); + ir_node *mem = get_CopyB_mem(node); + ir_node *new_mem = get_new_node(mem); + ir_node *res = NULL; + dbg_info *dbgi = get_irn_dbg_info(node); + int size = get_type_size_bytes(get_CopyB_type(node)); + int rem; + + /* If we have to copy more than 32 bytes, we use REP MOVSx and */ + /* then we need the size explicitly in ECX. */ + if (size >= 32 * 4) { + rem = size & 0x3; /* size % 4 */ + size >>= 2; + + res = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, size); + be_dep_on_frame(res); + + res = new_bd_ia32_CopyB(dbgi, block, new_dst, new_src, res, new_mem, rem); + } else { + if (size == 0) { + ir_fprintf(stderr, "Optimization warning copyb %+F with size <4\n", + node); + } + res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size); + } + + SET_IA32_ORIG_NODE(res, node); + + return res; +} + +ir_node *gen_Proj_tls(ir_node *node) +{ + ir_node *block = get_new_node(get_nodes_block(node)); + ir_node *res = NULL; + + res = new_bd_ia32_LdTls(NULL, block, mode_Iu); + + return res; +} + ir_node *gen_Unknown(ir_node *node) { - ir_mode *mode = get_irn_mode(node); + ir_mode *mode = get_irn_mode(node); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_irg_start_block(irg); + ir_node *res = NULL; if (mode_is_float(mode)) { if (ia32_cg_config.use_sse2) { - return ia32_new_Unknown_xmm(env_cg); + res = new_bd_ia32_xUnknown(dbgi, block); } else { - /* Unknown nodes are buggy in x87 simulator, use zero for now... 
*/ - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = get_irg_start_block(irg); - ir_node *ret = new_rd_ia32_vfldz(dbgi, irg, block); - - /* Const Nodes before the initial IncSP are a bad idea, because - * they could be spilled and we have no SP ready at that point yet. - * So add a dependency to the initial frame pointer calculation to - * avoid that situation. - */ - add_irn_dep(ret, get_irg_frame(irg)); - return ret; + res = new_bd_ia32_vfldz(dbgi, block); } } else if (ia32_mode_needs_gp_reg(mode)) { - return ia32_new_Unknown_gp(env_cg); + res = new_bd_ia32_Unknown(dbgi, block); } else { panic("unsupported Unknown-Mode"); } - return NULL; + + be_dep_on_frame(res); + return res; } const arch_register_req_t *make_register_req(const constraint_t *constraint, @@ -654,14 +866,12 @@ const arch_register_req_t *make_register_req(const constraint_t *constraint, if (same_as >= n_outs) panic("invalid output number in same_as constraint"); - other_constr = out_reqs[same_as]; + other_constr = out_reqs[same_as]; - req = obstack_alloc(obst, sizeof(req[0])); - req->cls = other_constr->cls; - req->type = arch_register_req_type_should_be_same; - req->limited = NULL; - req->other_same = 1U << pos; - req->other_different = 0; + req = obstack_alloc(obst, sizeof(req[0])); + *req = *other_constr; + req->type |= arch_register_req_type_should_be_same; + req->other_same = 1U << pos; /* switch constraints. This is because in firm we have same_as * constraints on the output constraints while in the gcc asm syntax @@ -703,8 +913,8 @@ const arch_register_req_t *parse_clobber(const char *clobber) arch_register_req_t *req; unsigned *limited; - if(reg == NULL) { - panic("Register '%s' mentioned in asm clobber is unknown\n", clobber); + if (reg == NULL) { + panic("Register '%s' mentioned in asm clobber is unknown", clobber); } assert(reg->index < 32); @@ -721,104 +931,101 @@ const arch_register_req_t *parse_clobber(const char *clobber) return req; } + +int prevents_AM(ir_node *const block, ir_node *const am_candidate, + ir_node *const other) +{ + if (get_nodes_block(other) != block) + return 0; + + if (is_Sync(other)) { + int i; + + for (i = get_Sync_n_preds(other) - 1; i >= 0; --i) { + ir_node *const pred = get_Sync_pred(other, i); + + if (get_nodes_block(pred) != block) + continue; + + /* Do not block ourselves from getting eaten */ + if (is_Proj(pred) && get_Proj_pred(pred) == am_candidate) + continue; + + if (!heights_reachable_in_block(heights, pred, am_candidate)) + continue; + + return 1; + } + + return 0; + } else { + /* Do not block ourselves from getting eaten */ + if (is_Proj(other) && get_Proj_pred(other) == am_candidate) + return 0; + + if (!heights_reachable_in_block(heights, other, am_candidate)) + return 0; + + return 1; + } +} + ir_node *try_create_Immediate(ir_node *node, char immediate_constraint_type) { - int minus = 0; - tarval *offset = NULL; - int offset_sign = 0; long val = 0; ir_entity *symconst_ent = NULL; - int symconst_sign = 0; ir_mode *mode; ir_node *cnst = NULL; ir_node *symconst = NULL; ir_node *new_node; mode = get_irn_mode(node); - if(!mode_is_int(mode) && !mode_is_reference(mode)) { + if (!mode_is_int(mode) && !mode_is_reference(mode)) { return NULL; } - if(is_Minus(node)) { - minus = 1; - node = get_Minus_op(node); - } - - if(is_Const(node)) { - cnst = node; - symconst = NULL; - offset_sign = minus; - } else if(is_SymConst(node)) { - cnst = NULL; - symconst = node; - symconst_sign = minus; - } else if(is_Add(node)) { + if (is_Const(node)) 
{ + cnst = node; + symconst = NULL; + } else if (is_Global(node)) { + cnst = NULL; + symconst = node; + } else if (is_Add(node)) { ir_node *left = get_Add_left(node); ir_node *right = get_Add_right(node); - if(is_Const(left) && is_SymConst(right)) { - cnst = left; - symconst = right; - symconst_sign = minus; - offset_sign = minus; - } else if(is_SymConst(left) && is_Const(right)) { - cnst = right; - symconst = left; - symconst_sign = minus; - offset_sign = minus; - } - } else if(is_Sub(node)) { - ir_node *left = get_Sub_left(node); - ir_node *right = get_Sub_right(node); - if(is_Const(left) && is_SymConst(right)) { - cnst = left; - symconst = right; - symconst_sign = !minus; - offset_sign = minus; - } else if(is_SymConst(left) && is_Const(right)) { - cnst = right; - symconst = left; - symconst_sign = minus; - offset_sign = !minus; + if (is_Const(left) && is_Global(right)) { + cnst = left; + symconst = right; + } else if (is_Global(left) && is_Const(right)) { + cnst = right; + symconst = left; } } else { return NULL; } - if(cnst != NULL) { - offset = get_Const_tarval(cnst); - if(tarval_is_long(offset)) { - val = get_tarval_long(offset); - } else { - ir_fprintf(stderr, "Optimisation Warning: tarval from %+F is not a " - "long?\n", cnst); + if (cnst != NULL) { + tarval *offset = get_Const_tarval(cnst); + if (!tarval_is_long(offset)) { + ir_fprintf(stderr, "Optimisation Warning: tarval of %+F is not a long?\n", cnst); return NULL; } - if(!check_immediate_constraint(val, immediate_constraint_type)) + val = get_tarval_long(offset); + if (!check_immediate_constraint(val, immediate_constraint_type)) return NULL; } - if(symconst != NULL) { - if(immediate_constraint_type != 0) { + if (symconst != NULL) { + if (immediate_constraint_type != 0) { /* we need full 32bits for symconsts */ return NULL; } - /* unfortunately the assembler/linker doesn't support -symconst */ - if(symconst_sign) - return NULL; - - if(get_SymConst_kind(symconst) != symconst_addr_ent) - return NULL; - symconst_ent = get_SymConst_entity(symconst); + symconst_ent = get_Global_entity(symconst); } - if(cnst == NULL && symconst == NULL) + if (cnst == NULL && symconst == NULL) return NULL; - if(offset_sign && offset != NULL) { - offset = tarval_neg(offset); - } - - new_node = create_Immediate(symconst_ent, symconst_sign, val); - + new_node = ia32_create_Immediate(symconst_ent, 0, val); return new_node; }
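
The register-pressure matching that the patch adds to gen_ASM rests on the can_match() test: an output constraint may share a register with an input constraint only if both use the same register class and, whenever both are "limited", their allowed-register bitmasks intersect; every limited output that finds no matching input gets a dummy ProduceVal input appended so the register allocator sees that register as occupied (and vice versa for unmatched inputs). The following standalone sketch mirrors that test with toy stand-in types; toy_req_t and its fields are illustrative only, not the real libfirm arch_register_req_t layout.

/* Toy model of the can_match() check from gen_ASM above.
 * The types are simplified stand-ins, not the libfirm API. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    int      cls;      /* register class index (e.g. gp, xmm) */
    bool     limited;  /* constrained to a subset of the class? */
    unsigned allowed;  /* bitmask of allowed registers if limited */
} toy_req_t;

/* Same idea as can_match(): identical class, and if both sides are
 * limited their allowed sets must overlap. */
static bool toy_can_match(const toy_req_t *in, const toy_req_t *out)
{
    if (in->cls != out->cls)
        return false;
    if (!in->limited || !out->limited)
        return true;
    return (in->allowed & out->allowed) != 0;
}

int main(void)
{
    toy_req_t out_eax = { 0, true,  1u << 0 }; /* "=a": only EAX   */
    toy_req_t in_any  = { 0, false, 0       }; /* "r":  any gp reg */
    toy_req_t in_ecx  = { 0, true,  1u << 2 }; /* "c":  only ECX   */

    /* An "=a" output can be paired with a generic "r" input ...   */
    printf("%d\n", toy_can_match(&in_any, &out_eax)); /* prints 1 */
    /* ... but not with an input pinned to ECX; in that case gen_ASM
     * appends a dummy ProduceVal input that occupies EAX instead.  */
    printf("%d\n", toy_can_match(&in_ecx, &out_eax)); /* prints 0 */
    return 0;
}

In the patch itself the same intersection test runs over *in->limited and *out->limited, and the dummy operands are created with new_bd_ia32_ProduceVal plus be_dep_on_frame so they are materialized before the asm node.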