/* Shift/rotate and helper-arithmetic constructors (block + dbg_info
 * variants); the bodies are generated by the NEW_BD_* macros. */
NEW_BD_BINOP(Shl)
NEW_BD_BINOP(Shr)
NEW_BD_BINOP(Shrs)
NEW_BD_BINOP(Rotl)
NEW_BD_UNOP(Abs)
NEW_BD_BINOP(Carry)
NEW_BD_BINOP(Borrow)
ir_graph *irg = current_ir_graph;
ir_node *res = new_ir_node(db, irg, block, op_SymConst, mode, 0, NULL);
- res->attr.symc.num = symkind;
- res->attr.symc.sym = value;
- res->attr.symc.tp = tp;
+ res->attr.symc.kind = symkind;
+ res->attr.symc.sym = value;
+ res->attr.symc.tp = tp;
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
res->attr.assem.pin_state = op_pin_state_pinned;
res->attr.assem.inputs = NEW_ARR_D(ir_asm_constraint, irg->obst, arity);
res->attr.assem.outputs = NEW_ARR_D(ir_asm_constraint, irg->obst, n_outs);
- res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
+ res->attr.assem.clobber = NEW_ARR_D(ident *, irg->obst, n_clobber);
res->attr.assem.asm_text = asm_text;
- memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
- memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
- memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
+ memcpy(res->attr.assem.inputs, inputs, sizeof(inputs[0]) * arity);
+ memcpy(res->attr.assem.outputs, outputs, sizeof(outputs[0]) * n_outs);
+ memcpy(res->attr.assem.clobber, clobber, sizeof(clobber[0]) * n_clobber);
res = optimize_node(res);
IRN_VRFY_IRG(res, irg);
/* Shift/rotate and helper-arithmetic constructors (raw, dbg_info
 * variants); the bodies are generated by the NEW_RD_* macros. */
NEW_RD_BINOP(Shl)
NEW_RD_BINOP(Shr)
NEW_RD_BINOP(Shrs)
NEW_RD_BINOP(Rotl)
NEW_RD_UNOP(Abs)
NEW_RD_BINOP(Carry)
NEW_RD_BINOP(Borrow)
ir_node *op, ir_node *k, ir_mode *mode) {
return new_rd_Shrs(NULL, irg, block, op, k, mode);
}
-ir_node *new_r_Rot(ir_graph *irg, ir_node *block,
+ir_node *new_r_Rotl(ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
- return new_rd_Rot(NULL, irg, block, op, k, mode);
+ return new_rd_Rotl(NULL, irg, block, op, k, mode);
}
ir_node *new_r_Carry(ir_graph *irg, ir_node *block,
ir_node *op, ir_node *k, ir_mode *mode) {
/* new_d_Mulh constructor, generated by the NEW_D_BINOP macro (defined elsewhere). */
NEW_D_BINOP(Mulh)
/**
- * Allocate the frag array.
+ * Allocate a frag array for a node if the current graph state is phase_building.
+ *
+ * @param irn the node for which the frag array should be allocated
+ * @param op the opcode of the (original) node, if does not match opcode of irn,
+ * nothing is done
+ * @param frag_store the address of the frag store in irn attributes, if this
+ * address contains a value != NULL, does nothing
*/
-static void allocate_frag_arr(ir_node *res, ir_op *op, ir_node ***frag_store) {
+void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store) {
if (get_opt_precise_exc_context()) {
if ((current_ir_graph->phase_state == phase_building) &&
- (get_irn_op(res) == op) && /* Could be optimized away. */
+ (get_irn_op(irn) == op) && /* Could be optimized away. */
!*frag_store) /* Could be a cse where the arr is already set. */ {
- *frag_store = new_frag_arr(res);
+ *frag_store = new_frag_arr(irn);
}
}
-} /* allocate_frag_arr */
+} /* firm_alloc_frag_arr */
ir_node *
new_d_Quot(dbg_info *db, ir_node *memop, ir_node *op1, ir_node *op2, ir_mode *mode, op_pin_state state) {
ir_node *res;
res = new_bd_Quot(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Quot, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Quot, &res->attr.except.frag_arr);
#endif
return res;
ir_node *res;
res = new_bd_DivMod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_DivMod, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_DivMod, &res->attr.except.frag_arr);
#endif
return res;
ir_node *res;
res = new_bd_Div(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
#endif
return res;
ir_node *res;
res = new_bd_DivRL(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Div, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Div, &res->attr.except.frag_arr);
#endif
return res;
ir_node *res;
res = new_bd_Mod(db, current_ir_graph->current_block, memop, op1, op2, mode, state);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Mod, &res->attr.except.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Mod, &res->attr.except.frag_arr);
#endif
return res;
/* Shift/rotate and helper-arithmetic constructors (current-block,
 * dbg_info variants); the bodies are generated by the NEW_D_* macros. */
NEW_D_BINOP(Shl)
NEW_D_BINOP(Shr)
NEW_D_BINOP(Shrs)
NEW_D_BINOP(Rotl)
NEW_D_UNOP(Abs)
NEW_D_BINOP(Carry)
NEW_D_BINOP(Borrow)
res = new_bd_Call(db, current_ir_graph->current_block,
store, callee, arity, in, tp);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Call, &res->attr.call.exc.frag_arr);
#endif
return res;
res = new_bd_Load(db, current_ir_graph->current_block,
store, addr, mode);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
#endif
return res;
res = new_bd_Store(db, current_ir_graph->current_block,
store, addr, val);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Store, &res->attr.store.exc.frag_arr);
#endif
return res;
res = new_bd_Alloc(db, current_ir_graph->current_block,
store, size, alloc_type, where);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr); /* Could be optimized away. */
+ firm_alloc_frag_arr(res, op_Alloc, &res->attr.alloc.exc.frag_arr);
#endif
return res;
res = new_bd_CopyB(db, current_ir_graph->current_block,
store, dst, src, data_type);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
+ firm_alloc_frag_arr(res, op_CopyB, &res->attr.copyb.exc.frag_arr);
#endif
return res;
} /* new_d_CopyB */
res = new_bd_Bound(db, current_ir_graph->current_block,
store, idx, lower, upper);
#if PRECISE_EXC_CONTEXT
- allocate_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
+ firm_alloc_frag_arr(res, op_Bound, &res->attr.bound.exc.frag_arr);
#endif
return res;
} /* new_d_Bound */
/* call once for each run of the library */
void
-init_cons(uninitialized_local_variable_func_t *func) {
+firm_init_cons(uninitialized_local_variable_func_t *func) {
default_initialize_local_variable = func;
-} /* init_cons */
+} /* firm_init_cons */
void
irp_finalize_cons(void) {
/* Shrs constructor without debug information; delegates to the
 * dbg_info variant new_d_Shrs with a NULL dbg_info. */
ir_node *new_Shrs(ir_node *op, ir_node *k, ir_mode *mode) {
	ir_node *res = new_d_Shrs(NULL, op, k, mode);
	return res;
}
-ir_node *new_Rot(ir_node *op, ir_node *k, ir_mode *mode) {
- return new_d_Rot(NULL, op, k, mode);
+ir_node *new_Rotl(ir_node *op, ir_node *k, ir_mode *mode) {
+ return new_d_Rotl(NULL, op, k, mode);
}
ir_node *new_Carry(ir_node *op1, ir_node *op2, ir_mode *mode) {
return new_d_Carry(NULL, op1, op2, mode);