#include "../beutil.h"
#include "../beirg_t.h"
#include "../betranshlp.h"
+#include "../be_t.h"
#include "bearch_ia32_t.h"
#include "ia32_nodes_attr.h"
ir_node *new_node;
ir_mode *mode;
ir_node *sign_extension;
- int has_exc;
ia32_address_mode_t am;
ia32_address_t *addr = &am.addr;
/* the upper bits have random contents for smaller modes */
- has_exc = 0;
switch (get_irn_opcode(node)) {
case iro_Div:
op1 = get_Div_left(node);
op2 = get_Div_right(node);
mem = get_Div_mem(node);
mode = get_Div_resmode(node);
- has_exc = be_get_Proj_for_pn(node, pn_Div_X_except) != NULL;
break;
case iro_Mod:
op1 = get_Mod_left(node);
op2 = get_Mod_right(node);
mem = get_Mod_mem(node);
mode = get_Mod_resmode(node);
- has_exc = be_get_Proj_for_pn(node, pn_Mod_X_except) != NULL;
break;
case iro_DivMod:
op1 = get_DivMod_left(node);
op2 = get_DivMod_right(node);
mem = get_DivMod_mem(node);
mode = get_DivMod_resmode(node);
- has_exc = be_get_Proj_for_pn(node, pn_DivMod_X_except) != NULL;
break;
default:
panic("invalid divmod node %+F", node);
match_arguments(&am, block, op1, op2, NULL, match_am);
- if(!is_NoMem(mem)) {
+ /* Beware: we do not need a Sync if the memory predecessor of the Div node
+ is the memory of the consumed address. Only the second operand can be
+ used as an address in Div nodes, so it suffices to check op2. */
+ if(!is_NoMem(mem) && skip_Proj(mem) != skip_Proj(op2)) {
new_mem = be_transform_node(mem);
if(!is_NoMem(addr->mem)) {
ir_node *in[2];
sign_extension, am.new_op2);
}
- set_ia32_exc_label(new_node, has_exc);
set_irn_pinned(new_node, get_irn_pinned(node));
set_am_attributes(new_node, &am);
add_irn_dep(new_node, get_irg_frame(irg));
}
- set_ia32_exc_label(new_node,
- be_get_Proj_for_pn(node, pn_Load_X_except) != NULL);
SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
return new_node;
set_ia32_op_type(new_node, ia32_AddrModeD);
set_ia32_ls_mode(new_node, mode);
- set_ia32_exc_label(new_node,
- be_get_Proj_for_pn(node, pn_Store_X_except) != NULL);
set_address(new_node, &addr);
SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node));
/* a memory constraint: no need to do anything in backend about it
* (the dependencies are already respected by the memory edge of
* the node) */
- constraint->req = &no_register_req;
+ constraint->req = &no_register_req;
return;
}
}
static void parse_clobber(ir_node *node, int pos, constraint_t *constraint,
- const char *c)
+ const char *clobber)
{
- (void) node;
+ ir_graph *irg = get_irn_irg(node);
+ struct obstack *obst = get_irg_obstack(irg);
+ const arch_register_t *reg = NULL;
+ int c;
+ size_t r;
+ arch_register_req_t *req;
+ const arch_register_class_t *cls;
+ unsigned *limited;
+
 (void) pos;
+
+ /* Resolve the clobber name to a concrete register. For the gp class we
+ * also try the name without its first character, so e.g. "ax" matches
+ * "eax" (GCC accepts 16-bit register names in clobber lists).
+ * TODO: construct a hashmap instead of doing linear search for clobber
+ * register */
+ for(c = 0; c < N_CLASSES; ++c) {
+ cls = & ia32_reg_classes[c];
+ for(r = 0; r < cls->n_regs; ++r) {
+ const arch_register_t *temp_reg = arch_register_for_index(cls, r);
+ if(strcmp(temp_reg->name, clobber) == 0
+ || (c == CLASS_ia32_gp && strcmp(temp_reg->name+1, clobber) == 0)) {
+ reg = temp_reg;
+ break;
+ }
+ }
+ if(reg != NULL)
+ break;
+ }
+ if(reg == NULL) {
+ /* panic() does not return, so no return statement is needed here;
+ * it also appends its own newline, matching the other panic calls. */
+ panic("Register '%s' mentioned in asm clobber is unknown", clobber);
+ }
+
+ /* the limited bitset below is a single 32-bit word */
+ assert(reg->index < 32);
+
+ /* build a "limited to exactly this register" requirement on the irg
+ * obstack, so its lifetime matches the graph */
+ limited = obstack_alloc(obst, sizeof(limited[0]));
+ *limited = 1 << reg->index;
+
+ req = obstack_alloc(obst, sizeof(req[0]));
+ memset(req, 0, sizeof(req[0]));
+ req->type = arch_register_req_type_limited;
+ req->cls = cls;
+ req->limited = limited;
+
+ /* a clobbered register can never be satisfied by an immediate */
+ constraint->req = req;
+ constraint->immediate_possible = 0;
+ constraint->immediate_type = 0;
}
static int is_memory_op(const ir_asm_constraint *constraint)
n_out_constraints = get_ASM_n_output_constraints(node);
n_clobbers = get_ASM_n_clobbers(node);
out_arity = n_out_constraints + n_clobbers;
+ /* hack to keep space for mem proj */
+ if(n_clobbers > 0)
+ out_arity += 1;
in_constraints = get_ASM_input_constraints(node);
out_constraints = get_ASM_output_constraints(node);
if(constraint->pos > reg_map_size)
reg_map_size = constraint->pos;
- } else {
+
+ out_reg_reqs[i] = parsed_constraint.req;
+ } else if(i < out_arity - 1) {
ident *glob_id = clobbers [i - n_out_constraints];
+ assert(glob_id != NULL);
c = get_id_str(glob_id);
parse_clobber(node, i, &parsed_constraint, c);
- }
- out_reg_reqs[i] = parsed_constraint.req;
+ out_reg_reqs[i+1] = parsed_constraint.req;
+ }
}
+ if(n_clobbers > 1)
+ out_reg_reqs[n_out_constraints] = &no_register_req;
/* construct input constraints */
in_reg_reqs = obstack_alloc(obst, arity * sizeof(in_reg_reqs[0]));
/* renumber the proj */
new_pred = be_transform_node(pred);
if (is_ia32_Load(new_pred)) {
- if (proj == pn_Load_res) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu,
- pn_ia32_Load_res);
- } else if (proj == pn_Load_M) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M,
- pn_ia32_Load_M);
+ switch (proj) {
+ case pn_Load_res:
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Load_res);
+ case pn_Load_M:
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Load_M);
+ case pn_Load_X_regular:
+ return new_rd_Jmp(dbgi, irg, block);
+ case pn_Load_X_except:
+ /* This Load might raise an exception. Mark it. */
+ set_ia32_exc_label(new_pred, 1);
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Load_X_exc);
+ default:
+ break;
}
- } else if(is_ia32_Conv_I2I(new_pred)
- || is_ia32_Conv_I2I8Bit(new_pred)) {
+ } else if (is_ia32_Conv_I2I(new_pred) ||
+ is_ia32_Conv_I2I8Bit(new_pred)) {
set_irn_mode(new_pred, mode_T);
if (proj == pn_Load_res) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_res);
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_mem);
}
} else if (is_ia32_xLoad(new_pred)) {
- if (proj == pn_Load_res) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm,
- pn_ia32_xLoad_res);
- } else if (proj == pn_Load_M) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M,
- pn_ia32_xLoad_M);
+ switch (proj) {
+ case pn_Load_res:
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xLoad_res);
+ case pn_Load_M:
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xLoad_M);
+ case pn_Load_X_regular:
+ return new_rd_Jmp(dbgi, irg, block);
+ case pn_Load_X_except:
+ /* This Load might raise an exception. Mark it. */
+ set_ia32_exc_label(new_pred, 1);
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_xLoad_X_exc);
+ default:
+ break;
}
} else if (is_ia32_vfld(new_pred)) {
- if (proj == pn_Load_res) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp,
- pn_ia32_vfld_res);
- } else if (proj == pn_Load_M) {
- return new_rd_Proj(dbgi, irg, block, new_pred, mode_M,
- pn_ia32_vfld_M);
+ switch (proj) {
+ case pn_Load_res:
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfld_res);
+ case pn_Load_M:
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfld_M);
+ case pn_Load_X_regular:
+ return new_rd_Jmp(dbgi, irg, block);
+ case pn_Load_X_except:
+ /* This Load might raise an exception. Mark it. */
+ set_ia32_exc_label(new_pred, 1);
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_xLoad_X_exc);
+ default:
+ break;
}
} else {
/* can happen for ProJMs when source address mode happened for the
/* however it should not be the result proj, as that would mean the
load had multiple users and should not have been used for
SourceAM */
- if(proj != pn_Load_M) {
+ if (proj != pn_Load_M) {
panic("internal error: transformed node not a Load");
}
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, 1);
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M);
case pn_Div_res:
return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
+ case pn_Div_X_regular:
+ return new_rd_Jmp(dbgi, irg, block);
+ case pn_Div_X_except:
+ set_ia32_exc_label(new_pred, 1);
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M);
case pn_Mod_res:
return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
+ case pn_Mod_X_except:
+ set_ia32_exc_label(new_pred, 1);
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
case pn_DivMod_res_mod:
return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
+ case pn_DivMod_X_regular:
+ return new_rd_Jmp(dbgi, irg, block);
+ case pn_DivMod_X_except:
+ set_ia32_exc_label(new_pred, 1);
+ return new_rd_Proj(dbgi, irg, block, new_pred, mode_X, pn_ia32_Div_X_exc);
default:
break;
}
env_cg = cg;
initial_fpcw = NULL;
+BE_TIMER_PUSH(t_heights);
heights = heights_new(irg);
+BE_TIMER_POP(t_heights);
ia32_calculate_non_address_mode_nodes(cg->birg);
/* the transform phase is not safe for CSE (yet) because several nodes get