wenv->nr_of_intrinsics += r->i_mapper(node, r->ctx) ? 1 : 0;
}
} else {
- if (op->code < (unsigned)ARR_LEN(wenv->i_map)) {
+ if (op->code < (unsigned) ARR_LEN(wenv->i_map)) {
const i_instr_record *r = wenv->i_map[op->code];
			/* run all possible mappers */
while (r) {
}
/* Go through all graphs and map calls to intrinsic functions. */
-unsigned lower_intrinsics(i_record *list, int length) {
+unsigned lower_intrinsics(i_record *list, int length, int part_block_used) {
int i, n_ops = get_irp_n_opcodes();
ir_graph *irg;
pmap *c_map = pmap_create_ex(length);
pmap_insert(c_map, list[i].i_call.i_ent, (void *)&list[i].i_call);
} else {
ir_op *op = list[i].i_instr.op;
- assert(op->code < (unsigned)ARR_LEN(i_map));
+ assert(op->code < (unsigned) ARR_LEN(i_map));
list[i].i_instr.link = i_map[op->code];
i_map[op->code] = &list[i].i_instr;
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
irg = get_irp_irg(i);
+ if (part_block_used)
+ collect_phiprojs(irg);
+
wenv.nr_of_intrinsics = 0;
irg_walk_graph(irg, NULL, call_mapper, &wenv);
ir_node *op = get_Call_param(call, 0);
ir_node *irn;
dbg_info *dbg = get_irn_dbg_info(call);
+ (void) ctx;
irn = new_rd_Abs(dbg, current_ir_graph, block, op, get_irn_mode(op));
irn = new_Tuple(1, &irn);
ir_node *op = get_Call_param(call, 0);
ir_node *irn, *exc, *no_exc;
dbg_info *dbg = get_irn_dbg_info(call);
+ (void) ctx;
irn = new_rd_Alloc(dbg, current_ir_graph, block, mem, op, firm_unknown_type, stack_alloc);
mem = new_Proj(irn, mode_M, pn_Alloc_M);