* @author Matthias Braun, Sebastian Buchwald
* @version $Id: ia32_common_transform.c 21012 2008-08-06 13:35:17Z beck $
*/
+#include "config.h"
#include "error.h"
-#include "irargs_t.h"
#include "ircons.h"
#include "irprintf.h"
#include "typerep.h"
+#include "bitset.h"
+#include "heights.h"
#include "../betranshlp.h"
+#include "../beirg.h"
+#include "../beabi.h"
#include "ia32_architecture.h"
#include "ia32_common_transform.h"
#include "gen_ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
-/** hold the current code generator during transformation */
-ia32_code_gen_t *env_cg = NULL;
-
-heights_t *heights = NULL;
-
-static const arch_register_req_t no_register_req = {
- arch_register_req_type_none,
- NULL, /* regclass */
- NULL, /* limit bitset */
- 0, /* same pos */
- 0 /* different pos */
-};
+ir_heights_t *ia32_heights = NULL;
static int check_immediate_constraint(long val, char immediate_constraint_type)
{
}
/**
- * creates a unique ident by adding a number to a tag
- *
- * @param tag the tag string, must contain a %d if a number
- * should be added
- */
-static ident *unique_id(const char *tag)
-{
- static unsigned id = 0;
- char str[256];
-
- snprintf(str, sizeof(str), tag, ++id);
- return new_id_from_str(str);
-}
-
-/**
- * Get a primitive type for a mode.
+ * Get (and cache) a primitive type for a mode; modes of at least 80 bits
+ * get their alignment forced to 16 bytes.
*/
static ir_type *ia32_get_prim_type(pmap *types, ir_mode *mode)
{
- pmap_entry *e = pmap_find(types, mode);
- ir_type *res;
+ ir_type *res = (ir_type*)pmap_get(types, mode);
+ if (res != NULL)
+ return res;
- if (! e) {
- char buf[64];
- snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
- res = new_type_primitive(new_id_from_str(buf), mode);
+ res = new_type_primitive(mode);
+ if (get_mode_size_bits(mode) >= 80) {
set_type_alignment_bytes(res, 16);
- pmap_insert(types, mode, res);
}
- else
- res = e->value;
+ pmap_insert(types, mode, res);
return res;
}
-ir_entity *create_float_const_entity(ir_node *cnst)
+ir_entity *ia32_create_float_const_entity(ir_node *cnst)
{
- ia32_isa_t *isa = env_cg->isa;
- tarval *key = get_Const_tarval(cnst);
- pmap_entry *e = pmap_find(isa->tv_ent, key);
- ir_entity *res;
- ir_graph *rem;
-
- if (e == NULL) {
- tarval *tv = key;
- ir_mode *mode = get_tarval_mode(tv);
- ir_type *tp;
-
- if (! ia32_cg_config.use_sse2) {
- /* try to reduce the mode to produce smaller sized entities */
- if (mode != mode_F) {
- if (tarval_ieee754_can_conv_lossless(tv, mode_F)) {
- mode = mode_F;
+ ir_graph *irg = get_irn_irg(cnst);
+ const arch_env_t *arch_env = be_get_irg_arch_env(irg);
+ ia32_isa_t *isa = (ia32_isa_t*) arch_env;
+ ir_tarval *tv = get_Const_tarval(cnst);
+ ir_entity *res = (ir_entity*)pmap_get(isa->tv_ent, tv);
+ ir_initializer_t *initializer;
+ ir_mode *mode;
+ ir_type *tp;
+
+ if (res != NULL)
+ return res;
+
+ mode = get_tarval_mode(tv);
+
+ if (! ia32_cg_config.use_sse2) {
+ /* try to reduce the mode to produce smaller sized entities */
+ if (mode != mode_F) {
+ if (tarval_ieee754_can_conv_lossless(tv, mode_F)) {
+ mode = mode_F;
+ tv = tarval_convert_to(tv, mode);
+ } else if (mode != mode_D) {
+ if (tarval_ieee754_can_conv_lossless(tv, mode_D)) {
+ mode = mode_D;
tv = tarval_convert_to(tv, mode);
- } else if (mode != mode_D) {
- if (tarval_ieee754_can_conv_lossless(tv, mode_D)) {
- mode = mode_D;
- tv = tarval_convert_to(tv, mode);
- }
}
}
}
-
- if (mode == get_irn_mode(cnst)) {
- /* mode was not changed */
- tp = get_Const_type(cnst);
- if (tp == firm_unknown_type)
- tp = ia32_get_prim_type(isa->types, mode);
- } else
- tp = ia32_get_prim_type(isa->types, mode);
-
- res = new_entity(get_glob_type(), unique_id(".LC%u"), tp);
-
- set_entity_ld_ident(res, get_entity_ident(res));
- set_entity_visibility(res, visibility_local);
- set_entity_variability(res, variability_constant);
- set_entity_allocation(res, allocation_static);
-
- /* we create a new entity here: It's initialization must resist on the
- const code irg */
- rem = current_ir_graph;
- current_ir_graph = get_const_code_irg();
- set_atomic_ent_value(res, new_Const_type(tv, tp));
- current_ir_graph = rem;
-
- pmap_insert(isa->tv_ent, key, res);
- } else {
- res = e->value;
}
+ tp = ia32_get_prim_type(isa->types, mode);
+ res = new_entity(get_glob_type(), id_unique("C%u"), tp);
+ set_entity_ld_ident(res, get_entity_ident(res));
+ set_entity_visibility(res, ir_visibility_private);
+ add_entity_linkage(res, IR_LINKAGE_CONSTANT);
+
+ initializer = create_initializer_tarval(tv);
+ set_entity_initializer(res, initializer);
+
+ pmap_insert(isa->tv_ent, tv, res);
return res;
}
+/**
+ * Create an ia32 Immediate node in the start block from an (optional)
+ * entity reference plus sign flag and a constant offset. The result is
+ * pinned to the virtual NOREG gp register.
+ */
-ir_node *create_Immediate(ir_entity *symconst, int symconst_sign, long val)
+ir_node *ia32_create_Immediate(ir_entity *symconst, int symconst_sign, long val)
{
	ir_graph *irg = current_ir_graph;
	ir_node *start_block = get_irg_start_block(irg);
-	ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block,
-	                                           symconst, symconst_sign, val);
-	arch_set_irn_register(env_cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);
+	ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, symconst,
+	                                           symconst_sign, ia32_no_pic_adjust, val);
+	arch_set_irn_register(immediate, &ia32_registers[REG_GP_NOREG]);
	return immediate;
}
/* TODO: construct a hashmap instead of doing linear search for clobber
* register */
- for(c = 0; c < N_CLASSES; ++c) {
+ for (c = 0; c < N_IA32_CLASSES; ++c) {
cls = & ia32_reg_classes[c];
- for(r = 0; r < cls->n_regs; ++r) {
+ for (r = 0; r < cls->n_regs; ++r) {
const arch_register_t *temp_reg = arch_register_for_index(cls, r);
- if(strcmp(temp_reg->name, clobber) == 0
+ if (strcmp(temp_reg->name, clobber) == 0
|| (c == CLASS_ia32_gp && strcmp(temp_reg->name+1, clobber) == 0)) {
reg = temp_reg;
break;
}
}
- if(reg != NULL)
+ if (reg != NULL)
break;
}
return reg;
}
-#ifndef NDEBUG
-const char *ia32_get_old_node_name(ia32_code_gen_t *cg, ir_node *irn) {
- ia32_isa_t *isa = (ia32_isa_t*) cg->arch_env;
-
- lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", irn);
- obstack_1grow(isa->name_obst, 0);
- return obstack_finish(isa->name_obst);
-}
-#endif /* NDEBUG */
-
-int ia32_mode_needs_gp_reg(ir_mode *mode) {
- if(mode == mode_fpcw)
+int ia32_mode_needs_gp_reg(ir_mode *mode)
+{
+ if (mode == ia32_mode_fpcw)
return 0;
- if(get_mode_size_bits(mode) > 32)
+ if (get_mode_size_bits(mode) > 32)
return 0;
return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
}
static void parse_asm_constraints(constraint_t *constraint, const char *c,
int is_output)
{
- asm_constraint_flags_t flags = 0;
char immediate_type = '\0';
unsigned limited = 0;
const arch_register_class_t *cls = NULL;
memset(constraint, 0, sizeof(constraint[0]));
constraint->same_as = -1;
- if(*c == 0) {
+ if (*c == 0) {
/* a memory constraint: no need to do anything in backend about it
* (the dependencies are already respected by the memory edge of
* the node) */
/* TODO: improve error messages with node and source info. (As users can
* easily hit these) */
- while(*c != 0) {
- switch(*c) {
+ while (*c != 0) {
+ switch (*c) {
case ' ':
case '\t':
case '\n':
break;
- case '=':
- flags |= ASM_CONSTRAINT_FLAG_MODIFIER_WRITE
- | ASM_CONSTRAINT_FLAG_MODIFIER_NO_READ;
- break;
+ /* Skip out/in-out marker */
+ case '=': break;
+ case '+': break;
- case '+':
- flags |= ASM_CONSTRAINT_FLAG_MODIFIER_WRITE
- | ASM_CONSTRAINT_FLAG_MODIFIER_READ;
- break;
+ case '&': break;
case '*':
++c;
break;
case '#':
- while(*c != 0 && *c != ',')
+ while (*c != 0 && *c != ',')
++c;
break;
case 'a':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EAX;
+ limited |= 1 << REG_GP_EAX;
break;
case 'b':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EBX;
+ limited |= 1 << REG_GP_EBX;
break;
case 'c':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_ECX;
+ limited |= 1 << REG_GP_ECX;
break;
case 'd':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EDX;
+ limited |= 1 << REG_GP_EDX;
break;
case 'D':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EDI;
+ limited |= 1 << REG_GP_EDI;
break;
case 'S':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_ESI;
+ limited |= 1 << REG_GP_ESI;
break;
case 'Q':
case 'q':
* difference to Q for us (we only assign whole registers) */
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
- 1 << REG_EDX;
+ limited |= 1 << REG_GP_EAX | 1 << REG_GP_EBX | 1 << REG_GP_ECX |
+ 1 << REG_GP_EDX;
break;
case 'A':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EAX | 1 << REG_EDX;
+ limited |= 1 << REG_GP_EAX | 1 << REG_GP_EDX;
break;
case 'l':
assert(cls == NULL || cls == &ia32_reg_classes[CLASS_ia32_gp]);
cls = &ia32_reg_classes[CLASS_ia32_gp];
- limited |= 1 << REG_EAX | 1 << REG_EBX | 1 << REG_ECX |
- 1 << REG_EDX | 1 << REG_ESI | 1 << REG_EDI |
- 1 << REG_EBP;
+ limited |= 1 << REG_GP_EAX | 1 << REG_GP_EBX | 1 << REG_GP_ECX |
+ 1 << REG_GP_EDX | 1 << REG_GP_ESI | 1 << REG_GP_EDI |
+ 1 << REG_GP_EBP;
break;
case 'R':
panic("can only specify same constraint on input");
sscanf(c, "%d%n", &same_as, &p);
- if(same_as >= 0) {
+ if (same_as >= 0) {
c += p;
continue;
}
case 'e': /* not available in 32 bit mode */
panic("unsupported asm constraint '%c' found in (%+F)",
*c, current_ir_graph);
- break;
default:
panic("unknown asm constraint '%c' found in (%+F)", *c,
current_ir_graph);
- break;
}
++c;
}
- if(same_as >= 0) {
+ if (same_as >= 0) {
if (cls != NULL)
panic("same as and register constraint not supported");
if (immediate_type != '\0')
constraint->immediate_type = immediate_type;
}
-ir_node *gen_ASM(ir_node *node)
+/**
+ * Check whether the register requirements @p in and @p out could be
+ * satisfied by the same register: the classes must match and, if both
+ * sides carry a 'limited' set, those sets must overlap.
+ */
+static bool can_match(const arch_register_req_t *in,
+                      const arch_register_req_t *out)
+{
+	if (in->cls != out->cls)
+		return false;
+	if ( (in->type & arch_register_req_type_limited) == 0
+		|| (out->type & arch_register_req_type_limited) == 0 )
+		return true;
+
+	return (*in->limited & *out->limited) != 0;
+}
+
+/**
+ * Fetch the transformed copy of @p node via be_transform_node(). When the
+ * grgen-based backend is compiled in (FIRM_GRGEN_BE) and a non-default
+ * transformer is active, nodes are already in the target graph and are
+ * returned unchanged.
+ */
+static inline ir_node *get_new_node(ir_node *node)
+{
+#ifdef FIRM_GRGEN_BE
+	if (be_transformer == TRANSFORMER_DEFAULT) {
+		return be_transform_node(node);
+	} else {
+		return node;
+	}
+#else
+	return be_transform_node(node);
+#endif
+}
+
+ir_node *ia32_gen_ASM(ir_node *node)
{
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(node);
- ir_node *new_block = be_transform_node(block);
+ ir_node *new_block = get_new_node(block);
dbg_info *dbgi = get_irn_dbg_info(node);
int i, arity;
+ int value_arity;
int out_idx;
ir_node **in;
ir_node *new_node;
const ir_asm_constraint *in_constraints;
const ir_asm_constraint *out_constraints;
ident **clobbers;
- int clobbers_flags = 0;
- unsigned clobber_bits_gp = 0;
+ unsigned clobber_bits[N_IA32_CLASSES];
+ int out_size;
+ backend_info_t *info;
- /* workaround for lots of buggy code out there as most people think volatile
- * asm is enough for everything and forget the flags (linux kernel, etc.)
- */
- if (get_irn_pinned(node) == op_pin_state_pinned) {
- clobbers_flags = 1;
- }
+ memset(&clobber_bits, 0, sizeof(clobber_bits));
arity = get_irn_arity(node);
- in = alloca(arity * sizeof(in[0]));
- memset(in, 0, arity * sizeof(in[0]));
+ in = ALLOCANZ(ir_node*, arity);
clobbers = get_ASM_clobbers(node);
n_clobbers = 0;
- for(i = 0; i < get_ASM_n_clobbers(node); ++i) {
+ for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
const arch_register_req_t *req;
const char *c = get_id_str(clobbers[i]);
if (strcmp(c, "memory") == 0)
continue;
if (strcmp(c, "cc") == 0) {
- clobbers_flags = 1;
continue;
}
- req = parse_clobber(c);
- if (req->cls == &ia32_reg_classes[CLASS_ia32_gp]) {
- clobber_bits_gp |= *req->limited;
- }
+ req = ia32_parse_clobber(c);
+ clobber_bits[req->cls->index] |= *req->limited;
n_clobbers++;
}
out_constraints = get_ASM_output_constraints(node);
/* determine size of register_map */
- for(out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
+ for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
const ir_asm_constraint *constraint = &out_constraints[out_idx];
if (constraint->pos > reg_map_size)
reg_map_size = constraint->pos;
}
- for(i = 0; i < arity; ++i) {
- const ir_asm_constraint *constraint = &in_constraints[i];
- if(constraint->pos > reg_map_size)
+ for (i = 0; i < arity; ++i) {
+ const ir_asm_constraint *constraint = &in_constraints[i];
+ if (constraint->pos > reg_map_size)
reg_map_size = constraint->pos;
}
++reg_map_size;
- obst = get_irg_obstack(irg);
+ obst = get_irg_obstack(current_ir_graph);
register_map = NEW_ARR_D(ia32_asm_reg_t, obst, reg_map_size);
memset(register_map, 0, reg_map_size * sizeof(register_map[0]));
/* construct output constraints */
- out_reg_reqs = obstack_alloc(obst, out_arity * sizeof(out_reg_reqs[0]));
+ out_size = out_arity + 1;
+ out_reg_reqs = OALLOCN(obst, const arch_register_req_t*, out_size);
- for(out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
+ for (out_idx = 0; out_idx < n_out_constraints; ++out_idx) {
const ir_asm_constraint *constraint = &out_constraints[out_idx];
const char *c = get_id_str(constraint->constraint);
- unsigned pos = constraint->pos;
+ unsigned pos = constraint->pos;
constraint_t parsed_constraint;
const arch_register_req_t *req;
parse_asm_constraints(&parsed_constraint, c, 1);
- req = make_register_req(&parsed_constraint, n_out_constraints,
+ req = ia32_make_register_req(&parsed_constraint, n_out_constraints,
out_reg_reqs, out_idx);
out_reg_reqs[out_idx] = req;
+ /* multiple constraints for same pos. This can happen for example when
+ * a =A constraint gets lowered to two constraints: =a and =d for the
+ * same pos */
+ if (register_map[pos].valid)
+ continue;
+
register_map[pos].use_input = 0;
register_map[pos].valid = 1;
register_map[pos].memory = 0;
}
/* inputs + input constraints */
- in_reg_reqs = obstack_alloc(obst, arity * sizeof(in_reg_reqs[0]));
- for(i = 0; i < arity; ++i) {
+ in_reg_reqs = OALLOCN(obst, const arch_register_req_t*, arity);
+ for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
const ir_asm_constraint *constraint = &in_constraints[i];
ident *constr_id = constraint->constraint;
unsigned pos = constraint->pos;
int is_memory_op = 0;
ir_node *input = NULL;
+ unsigned r_clobber_bits;
constraint_t parsed_constraint;
const arch_register_req_t *req;
parse_asm_constraints(&parsed_constraint, c, 0);
- if (clobber_bits_gp != 0 &&
- parsed_constraint.cls == &ia32_reg_classes[CLASS_ia32_gp]) {
- if (parsed_constraint.all_registers_allowed) {
- parsed_constraint.all_registers_allowed = 0;
- parsed_constraint.allowed_registers =
- 1 << REG_EAX |
- 1 << REG_EBX |
- 1 << REG_ECX |
- 1 << REG_EDX |
- 1 << REG_ESI |
- 1 << REG_EDI |
- 1 << REG_EBP;
+ if (parsed_constraint.cls != NULL) {
+ r_clobber_bits = clobber_bits[parsed_constraint.cls->index];
+ if (r_clobber_bits != 0) {
+ if (parsed_constraint.all_registers_allowed) {
+ parsed_constraint.all_registers_allowed = 0;
+ be_set_allocatable_regs(current_ir_graph,
+ parsed_constraint.cls,
+ &parsed_constraint.allowed_registers);
+ }
+ parsed_constraint.allowed_registers &= ~r_clobber_bits;
}
- parsed_constraint.allowed_registers &= ~clobber_bits_gp;
}
- req = make_register_req(&parsed_constraint, n_out_constraints,
+ req = ia32_make_register_req(&parsed_constraint, n_out_constraints,
out_reg_reqs, i);
in_reg_reqs[i] = req;
if (parsed_constraint.immediate_type != '\0') {
char imm_type = parsed_constraint.immediate_type;
- input = try_create_Immediate(pred, imm_type);
+ input = ia32_try_create_Immediate(pred, imm_type);
}
if (input == NULL) {
- ir_node *pred = get_irn_n(node, i);
- input = be_transform_node(pred);
+ input = get_new_node(pred);
if (parsed_constraint.cls == NULL
&& parsed_constraint.same_as < 0) {
is_memory_op = 1;
- } else if(parsed_constraint.memory_possible) {
+ } else if (parsed_constraint.memory_possible) {
/* TODO: match Load or Load/Store if memory possible is set */
}
}
}
/* parse clobbers */
- for(i = 0; i < get_ASM_n_clobbers(node); ++i) {
+ for (i = 0; i < get_ASM_n_clobbers(node); ++i) {
const char *c = get_id_str(clobbers[i]);
const arch_register_req_t *req;
if (strcmp(c, "memory") == 0 || strcmp(c, "cc") == 0)
continue;
- req = parse_clobber(c);
+ req = ia32_parse_clobber(c);
out_reg_reqs[out_idx] = req;
++out_idx;
}
- new_node = new_rd_ia32_Asm(dbgi, irg, new_block, arity, in, out_arity,
+ /* count inputs which are real values (and not memory) */
+ value_arity = 0;
+ for (i = 0; i < arity; ++i) {
+ ir_node *node_in = get_irn_n(node, i);
+ if (get_irn_mode(node_in) == mode_M)
+ continue;
+ ++value_arity;
+ }
+
+ /* Attempt to make ASM node register pressure faithful.
+ * (This does not work for complicated cases yet!)
+ *
+ * Algorithm: Check if there are fewer inputs or outputs (I will call this
+ * the smaller list). Then try to match each constraint of the smaller list
+ * to 1 of the other list. If we can't match it, then we have to add a dummy
+ * input/output to the other list
+ *
+ * FIXME: This is still broken in lots of cases. But at least better than
+ * before...
+ * FIXME: need to do this per register class...
+ */
+ if (out_arity <= value_arity) {
+ int orig_arity = arity;
+ int in_size = arity;
+ int o;
+ bitset_t *used_ins = bitset_alloca(arity);
+ for (o = 0; o < out_arity; ++o) {
+ const arch_register_req_t *outreq = out_reg_reqs[o];
+
+ if (outreq->cls == NULL) {
+ continue;
+ }
+
+ for (i = 0; i < orig_arity; ++i) {
+ const arch_register_req_t *inreq;
+ if (bitset_is_set(used_ins, i))
+ continue;
+ inreq = in_reg_reqs[i];
+ if (!can_match(outreq, inreq))
+ continue;
+ bitset_set(used_ins, i);
+ break;
+ }
+ /* did we find any match? */
+ if (i < orig_arity)
+ continue;
+
+ /* we might need more space in the input arrays */
+ if (arity >= in_size) {
+ const arch_register_req_t **new_in_reg_reqs;
+ ir_node **new_in;
+
+ in_size *= 2;
+ new_in_reg_reqs = OALLOCN(obst, const arch_register_req_t*,
+ in_size);
+ memcpy(new_in_reg_reqs, in_reg_reqs, arity * sizeof(new_in_reg_reqs[0]));
+ new_in = ALLOCANZ(ir_node*, in_size);
+ memcpy(new_in, in, arity*sizeof(new_in[0]));
+
+ in_reg_reqs = new_in_reg_reqs;
+ in = new_in;
+ }
+
+ /* add a new (dummy) input which occupies the register */
+ assert(outreq->type & arch_register_req_type_limited);
+ in_reg_reqs[arity] = outreq;
+ in[arity] = new_bd_ia32_ProduceVal(NULL, block);
+ ++arity;
+ }
+	} else {
+		bitset_t *used_outs = bitset_alloca(out_arity);
+		int orig_out_arity = out_arity;
+		for (i = 0; i < arity; ++i) {
+			int o;
+			const arch_register_req_t *inreq = in_reg_reqs[i];
+
+			if (inreq->cls == NULL) {
+				continue;
+			}
+
+			for (o = 0; o < orig_out_arity; ++o) {
+				const arch_register_req_t *outreq;
+				if (bitset_is_set(used_outs, o))
+					continue;
+				outreq = out_reg_reqs[o];
+				if (!can_match(outreq, inreq))
+					continue;
+				/* mark the matched OUTPUT (index o, not the input index i)
+				 * as used; using i here would mark the wrong output and can
+				 * write past the end of used_outs when i >= out_arity */
+				bitset_set(used_outs, o);
+				break;
+			}
+			/* did we find any match? */
+			if (o < orig_out_arity)
+				continue;
+
+			/* we might need more space in the output arrays */
+			if (out_arity >= out_size) {
+				const arch_register_req_t **new_out_reg_reqs;
+
+				out_size *= 2;
+				new_out_reg_reqs
+					= OALLOCN(obst, const arch_register_req_t*, out_size);
+				memcpy(new_out_reg_reqs, out_reg_reqs,
+				       out_arity * sizeof(new_out_reg_reqs[0]));
+				out_reg_reqs = new_out_reg_reqs;
+			}
+
+			/* add a new (dummy) output which occupies the register */
+			assert(inreq->type & arch_register_req_type_limited);
+			out_reg_reqs[out_arity] = inreq;
+			++out_arity;
+		}
+	}
+
+ /* append none register requirement for the memory output */
+ if (out_arity + 1 >= out_size) {
+ const arch_register_req_t **new_out_reg_reqs;
+
+ out_size = out_arity + 1;
+ new_out_reg_reqs
+ = OALLOCN(obst, const arch_register_req_t*, out_size);
+ memcpy(new_out_reg_reqs, out_reg_reqs,
+ out_arity * sizeof(new_out_reg_reqs[0]));
+ out_reg_reqs = new_out_reg_reqs;
+ }
+
+ /* add a new (dummy) output which occupies the register */
+ out_reg_reqs[out_arity] = arch_no_register_req;
+ ++out_arity;
+
+ new_node = new_bd_ia32_Asm(dbgi, new_block, arity, in, out_arity,
get_ASM_text(node), register_map);
- set_ia32_out_req_all(new_node, out_reg_reqs);
- set_ia32_in_req_all(new_node, in_reg_reqs);
+ info = be_get_info(new_node);
+ for (i = 0; i < out_arity; ++i) {
+ info->out_infos[i].req = out_reg_reqs[i];
+ }
+ arch_set_in_register_reqs(new_node, in_reg_reqs);
- SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
+ SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
-ir_node *gen_Unknown(ir_node *node)
+ir_node *ia32_gen_CopyB(ir_node *node)
+{
+ ir_node *block = get_new_node(get_nodes_block(node));
+ ir_node *src = get_CopyB_src(node);
+ ir_node *new_src = get_new_node(src);
+ ir_node *dst = get_CopyB_dst(node);
+ ir_node *new_dst = get_new_node(dst);
+ ir_node *mem = get_CopyB_mem(node);
+ ir_node *new_mem = get_new_node(mem);
+ ir_node *res = NULL;
+ dbg_info *dbgi = get_irn_dbg_info(node);
+ int size = get_type_size_bytes(get_CopyB_type(node));
+ int throws_exception = ir_throws_exception(node);
+ int rem;
+
+	/* If we have to copy 128 bytes (32 dwords) or more, we use REP MOVSD
+	 * and then need the (dword) count explicitly in ECX. */
+ if (size >= 32 * 4) {
+ rem = size & 0x3; /* size % 4 */
+ size >>= 2;
+
+ res = new_bd_ia32_Const(dbgi, block, NULL, 0, 0, size);
+
+ res = new_bd_ia32_CopyB(dbgi, block, new_dst, new_src, res, new_mem, rem);
+ } else {
+ if (size == 0) {
+ ir_fprintf(stderr, "Optimization warning copyb %+F with size <4\n",
+ node);
+ }
+ res = new_bd_ia32_CopyB_i(dbgi, block, new_dst, new_src, new_mem, size);
+ }
+ ir_set_throws_exception(res, throws_exception);
+
+ SET_IA32_ORIG_NODE(res, node);
+
+ return res;
+}
+
+/**
+ * Transform a Proj of the TLS pointer: materialize an ia32 LdTls node in
+ * the transformed block.
+ */
+ir_node *ia32_gen_Proj_tls(ir_node *node)
{
-	ir_mode *mode = get_irn_mode(node);
+	ir_node *block = get_new_node(get_nodes_block(node));
+	ir_node *res = new_bd_ia32_LdTls(NULL, block);
+	return res;
+}
+
+/**
+ * Transform an Unknown node, placing the replacement in the start block:
+ * xUnknown for SSE2 floats, vfldz (load zero) for x87 floats, and an ia32
+ * Unknown for gp-register modes. Panics for any other mode.
+ */
+ir_node *ia32_gen_Unknown(ir_node *node)
+{
+	ir_mode  *mode  = get_irn_mode(node);
+	ir_graph *irg   = current_ir_graph;
+	dbg_info *dbgi  = get_irn_dbg_info(node);
+	ir_node  *block = get_irg_start_block(irg);
+	ir_node  *res   = NULL;
 	if (mode_is_float(mode)) {
 		if (ia32_cg_config.use_sse2) {
-			return ia32_new_Unknown_xmm(env_cg);
+			res = new_bd_ia32_xUnknown(dbgi, block);
 		} else {
-			/* Unknown nodes are buggy in x87 simulator, use zero for now... */
-			ir_graph *irg = current_ir_graph;
-			dbg_info *dbgi = get_irn_dbg_info(node);
-			ir_node *block = get_irg_start_block(irg);
-			ir_node *ret = new_rd_ia32_vfldz(dbgi, irg, block);
-
-			/* Const Nodes before the initial IncSP are a bad idea, because
-			 * they could be spilled and we have no SP ready at that point yet.
-			 * So add a dependency to the initial frame pointer calculation to
-			 * avoid that situation.
-			 */
-			add_irn_dep(ret, get_irg_frame(irg));
-			return ret;
+			/* NOTE(review): the removed code added a dependency on the frame
+			 * node (add_irn_dep) so the vfldz could not be scheduled before
+			 * the initial IncSP — confirm this workaround is no longer
+			 * needed with the new scheduling/spilling code. */
+			res = new_bd_ia32_vfldz(dbgi, block);
 		}
 	} else if (ia32_mode_needs_gp_reg(mode)) {
-		return ia32_new_Unknown_gp(env_cg);
+		res = new_bd_ia32_Unknown(dbgi, block);
 	} else {
 		panic("unsupported Unknown-Mode");
 	}
-	return NULL;
+
+	return res;
 }
-const arch_register_req_t *make_register_req(const constraint_t *constraint,
+const arch_register_req_t *ia32_make_register_req(const constraint_t *constraint,
int n_outs, const arch_register_req_t **out_reqs, int pos)
{
struct obstack *obst = get_irg_obstack(current_ir_graph);
if (same_as >= n_outs)
panic("invalid output number in same_as constraint");
- other_constr = out_reqs[same_as];
+ other_constr = out_reqs[same_as];
- req = obstack_alloc(obst, sizeof(req[0]));
- req->cls = other_constr->cls;
- req->type = arch_register_req_type_should_be_same;
- req->limited = NULL;
- req->other_same = 1U << pos;
- req->other_different = 0;
+ req = OALLOC(obst, arch_register_req_t);
+ *req = *other_constr;
+ req->type |= arch_register_req_type_should_be_same;
+ req->other_same = 1U << pos;
+ req->width = 1;
/* switch constraints. This is because in firm we have same_as
* constraints on the output constraints while in the gcc asm syntax
/* pure memory ops */
if (constraint->cls == NULL) {
- return &no_register_req;
+ return arch_no_register_req;
}
if (constraint->allowed_registers != 0
&& !constraint->all_registers_allowed) {
unsigned *limited_ptr;
- req = obstack_alloc(obst, sizeof(req[0]) + sizeof(unsigned));
+ req = (arch_register_req_t*)obstack_alloc(obst, sizeof(req[0]) + sizeof(unsigned));
memset(req, 0, sizeof(req[0]));
limited_ptr = (unsigned*) (req+1);
*limited_ptr = constraint->allowed_registers;
req->limited = limited_ptr;
} else {
- req = obstack_alloc(obst, sizeof(req[0]));
- memset(req, 0, sizeof(req[0]));
+ req = OALLOCZ(obst, arch_register_req_t);
req->type = arch_register_req_type_normal;
}
- req->cls = constraint->cls;
+ req->cls = constraint->cls;
+ req->width = 1;
return req;
}
-const arch_register_req_t *parse_clobber(const char *clobber)
+const arch_register_req_t *ia32_parse_clobber(const char *clobber)
{
struct obstack *obst = get_irg_obstack(current_ir_graph);
const arch_register_t *reg = ia32_get_clobber_register(clobber);
arch_register_req_t *req;
unsigned *limited;
- if(reg == NULL) {
- panic("Register '%s' mentioned in asm clobber is unknown\n", clobber);
+ if (reg == NULL) {
+ panic("Register '%s' mentioned in asm clobber is unknown", clobber);
}
assert(reg->index < 32);
- limited = obstack_alloc(obst, sizeof(limited[0]));
+ limited = OALLOC(obst, unsigned);
*limited = 1 << reg->index;
- req = obstack_alloc(obst, sizeof(req[0]));
- memset(req, 0, sizeof(req[0]));
+ req = OALLOCZ(obst, arch_register_req_t);
req->type = arch_register_req_type_limited;
req->cls = arch_register_get_class(reg);
req->limited = limited;
+ req->width = 1;
return req;
}
-ir_node *try_create_Immediate(ir_node *node, char immediate_constraint_type)
+
+/**
+ * Check whether @p other prevents folding @p am_candidate into an address
+ * mode: returns 1 iff @p other lives in @p block and can reach
+ * @p am_candidate inside that block (heights query). Projs of the
+ * candidate itself are ignored, and Sync nodes are checked per
+ * predecessor.
+ */
+int ia32_prevents_AM(ir_node *const block, ir_node *const am_candidate,
+                     ir_node *const other)
+{
+	if (get_nodes_block(other) != block)
+		return 0;
+
+	if (is_Sync(other)) {
+		int i;
+
+		for (i = get_Sync_n_preds(other) - 1; i >= 0; --i) {
+			ir_node *const pred = get_Sync_pred(other, i);
+
+			if (get_nodes_block(pred) != block)
+				continue;
+
+			/* Do not block ourselves from getting eaten */
+			if (is_Proj(pred) && get_Proj_pred(pred) == am_candidate)
+				continue;
+
+			if (!heights_reachable_in_block(ia32_heights, pred, am_candidate))
+				continue;
+
+			return 1;
+		}
+
+		return 0;
+	} else {
+		/* Do not block ourselves from getting eaten */
+		if (is_Proj(other) && get_Proj_pred(other) == am_candidate)
+			return 0;
+
+		if (!heights_reachable_in_block(ia32_heights, other, am_candidate))
+			return 0;
+
+		return 1;
+	}
+}
+
+ir_node *ia32_try_create_Immediate(ir_node *node, char immediate_constraint_type)
{
- int minus = 0;
- tarval *offset = NULL;
- int offset_sign = 0;
- long val = 0;
- ir_entity *symconst_ent = NULL;
- int symconst_sign = 0;
- ir_mode *mode;
- ir_node *cnst = NULL;
- ir_node *symconst = NULL;
- ir_node *new_node;
+ long val = 0;
+ ir_entity *symconst_ent = NULL;
+ ir_mode *mode;
+ ir_node *cnst = NULL;
+ ir_node *symconst = NULL;
+ ir_node *new_node;
mode = get_irn_mode(node);
- if(!mode_is_int(mode) && !mode_is_reference(mode)) {
+ if (!mode_is_int(mode) && !mode_is_reference(mode)) {
return NULL;
}
- if(is_Minus(node)) {
- minus = 1;
- node = get_Minus_op(node);
- }
-
- if(is_Const(node)) {
- cnst = node;
- symconst = NULL;
- offset_sign = minus;
- } else if(is_SymConst(node)) {
- cnst = NULL;
- symconst = node;
- symconst_sign = minus;
- } else if(is_Add(node)) {
+ if (is_Const(node)) {
+ cnst = node;
+ symconst = NULL;
+ } else if (is_SymConst_addr_ent(node)
+ && get_entity_owner(get_SymConst_entity(node)) != get_tls_type()) {
+ cnst = NULL;
+ symconst = node;
+ } else if (is_Add(node)) {
ir_node *left = get_Add_left(node);
ir_node *right = get_Add_right(node);
- if(is_Const(left) && is_SymConst(right)) {
- cnst = left;
- symconst = right;
- symconst_sign = minus;
- offset_sign = minus;
- } else if(is_SymConst(left) && is_Const(right)) {
- cnst = right;
- symconst = left;
- symconst_sign = minus;
- offset_sign = minus;
- }
- } else if(is_Sub(node)) {
- ir_node *left = get_Sub_left(node);
- ir_node *right = get_Sub_right(node);
- if(is_Const(left) && is_SymConst(right)) {
- cnst = left;
- symconst = right;
- symconst_sign = !minus;
- offset_sign = minus;
- } else if(is_SymConst(left) && is_Const(right)) {
- cnst = right;
- symconst = left;
- symconst_sign = minus;
- offset_sign = !minus;
+ if (is_Const(left) && is_SymConst_addr_ent(right)) {
+ cnst = left;
+ symconst = right;
+ } else if (is_SymConst_addr_ent(left) && is_Const(right)) {
+ cnst = right;
+ symconst = left;
}
} else {
return NULL;
}
- if(cnst != NULL) {
- offset = get_Const_tarval(cnst);
- if(tarval_is_long(offset)) {
- val = get_tarval_long(offset);
- } else {
- ir_fprintf(stderr, "Optimisation Warning: tarval from %+F is not a "
- "long?\n", cnst);
+ if (cnst != NULL) {
+ ir_tarval *offset = get_Const_tarval(cnst);
+ if (!tarval_is_long(offset)) {
+ ir_fprintf(stderr, "Optimisation Warning: tarval of %+F is not a long?\n", cnst);
return NULL;
}
- if(!check_immediate_constraint(val, immediate_constraint_type))
+ val = get_tarval_long(offset);
+ if (!check_immediate_constraint(val, immediate_constraint_type))
return NULL;
}
- if(symconst != NULL) {
- if(immediate_constraint_type != 0) {
+ if (symconst != NULL) {
+ if (immediate_constraint_type != 0) {
/* we need full 32bits for symconsts */
return NULL;
}
- /* unfortunately the assembler/linker doesn't support -symconst */
- if(symconst_sign)
- return NULL;
-
- if(get_SymConst_kind(symconst) != symconst_addr_ent)
- return NULL;
- symconst_ent = get_SymConst_entity(symconst);
+ symconst_ent = get_Global_entity(symconst);
}
- if(cnst == NULL && symconst == NULL)
+ if (cnst == NULL && symconst == NULL)
return NULL;
- if(offset_sign && offset != NULL) {
- offset = tarval_neg(offset);
- }
-
- new_node = create_Immediate(symconst_ent, symconst_sign, val);
-
+ new_node = ia32_create_Immediate(symconst_ent, 0, val);
return new_node;
}