+/**
+ * Transform a middle-end Return node into the backend return.
+ *
+ * Feeds the epilog helper (abihelper) with: the transformed memory value,
+ * the stack pointer (marked as producing/ignoring sp so the later
+ * fix_stack phase can serialize stack adjustments), one register per
+ * return value, and the function-entry values of all callee-save
+ * registers. Finally builds the be_epilog return node in the transformed
+ * block.
+ *
+ * NOTE(review): asserts slot->reg1 == NULL, i.e. every result must fit a
+ * single register here — 64bit/compound results are presumably lowered
+ * earlier; confirm against the lowering phases.
+ */
+static ir_node *gen_Return(ir_node *node)
+{
+	ir_node   *block          = get_nodes_block(node);
+	ir_node   *new_block      = be_transform_node(block);
+	dbg_info  *dbgi           = get_irn_dbg_info(node);
+	ir_node   *mem            = get_Return_mem(node);
+	ir_node   *new_mem        = be_transform_node(mem);
+	size_t     n_callee_saves = ARRAY_SIZE(callee_saves);
+	ir_node   *sp_proj        = get_stack_pointer_for(node);
+	size_t     n_res          = get_Return_n_ress(node);
+	ir_node   *bereturn;
+	size_t     i;
+
+	be_epilog_begin(abihelper);
+	be_epilog_set_memory(abihelper, new_mem);
+	/* connect stack pointer with initial stack pointer. fix_stack phase
+	   will later serialize all stack pointer adjusting nodes */
+	be_epilog_add_reg(abihelper, sp_reg,
+			arch_register_req_type_produces_sp | arch_register_req_type_ignore,
+			sp_proj);
+
+	/* result values: one register per result (see assert below) */
+	for (i = 0; i < n_res; ++i) {
+		ir_node                  *res_value     = get_Return_res(node, i);
+		ir_node                  *new_res_value = be_transform_node(res_value);
+		const reg_or_stackslot_t *slot          = &cconv->results[i];
+		const arch_register_t    *reg           = slot->reg0;
+		assert(slot->reg1 == NULL);
+		be_epilog_add_reg(abihelper, reg, arch_register_req_type_none, new_res_value);
+	}
+
+	/* connect callee saves with their values at the function begin */
+	for (i = 0; i < n_callee_saves; ++i) {
+		const arch_register_t *reg   = callee_saves[i];
+		ir_node               *value = be_prolog_get_reg_value(abihelper, reg);
+		be_epilog_add_reg(abihelper, reg, arch_register_req_type_none, value);
+	}
+
+	/* epilog code: an incsp */
+	bereturn = be_epilog_create_return(abihelper, dbgi, new_block);
+	return bereturn;
+}
+
+
+/**
+ * Transform a Call node into an arm_Bl (direct call, callee is a
+ * SymConst entity) or an arm_LinkMovPC (indirect call through a
+ * register).
+ *
+ * Inputs of the backend call node: one memory input, the register-passed
+ * parameters, and (for indirect calls) the callee address. Parameters
+ * that do not fit in registers are stored below a freshly created IncSP
+ * stack frame; all such stores are merged into the memory input through
+ * a Sync node. Outputs: one memory output plus one output per
+ * caller-save register.
+ *
+ * NOTE(review): each parameter store takes new_mem as its memory input
+ * (they are independent and ordered only via the Sync) — confirm this is
+ * the intended memory discipline for argument stores.
+ */
+static ir_node *gen_Call(ir_node *node)
+{
+	ir_graph             *irg          = get_irn_irg(node);
+	ir_node              *callee       = get_Call_ptr(node);
+	ir_node              *block        = get_nodes_block(node);
+	ir_node              *new_block    = be_transform_node(block);
+	ir_node              *mem          = get_Call_mem(node);
+	ir_node              *new_mem      = be_transform_node(mem);
+	dbg_info             *dbgi         = get_irn_dbg_info(node);
+	ir_type              *type         = get_Call_type(node);
+	calling_convention_t *cconv        = arm_decide_calling_convention(NULL, type);
+	size_t                n_params     = get_Call_n_params(node);
+	size_t const          n_param_regs = cconv->n_reg_params;
+	/* max inputs: memory, callee, register arguments */
+	size_t const          max_inputs   = 2 + n_param_regs;
+	ir_node             **in           = ALLOCAN(ir_node*, max_inputs);
+	ir_node             **sync_ins     = ALLOCAN(ir_node*, max_inputs);
+	struct obstack       *obst         = be_get_be_obst(irg);
+	const arch_register_req_t **in_req
+		= OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
+	size_t                in_arity     = 0;
+	size_t                sync_arity   = 0;
+	size_t const          n_caller_saves = ARRAY_SIZE(caller_saves);
+	ir_entity            *entity       = NULL;
+	ir_node              *incsp        = NULL;
+	int                   mem_pos;
+	ir_node              *res;
+	size_t                p;
+	size_t                o;
+	size_t                out_arity;
+
+	assert(n_params == get_method_n_params(type));
+
+	/* construct arguments */
+
+	/* memory input */
+	in_req[in_arity] = arch_no_register_req;
+	mem_pos          = in_arity;
+	++in_arity;
+	/* parameters */
+	for (p = 0; p < n_params; ++p) {
+		ir_node                  *value      = get_Call_param(node, p);
+		ir_node                  *new_value  = be_transform_node(value);
+		ir_node                  *new_value1 = NULL;
+		const reg_or_stackslot_t *param      = &cconv->parameters[p];
+		ir_type                  *param_type = get_method_param_type(type, p);
+		ir_mode                  *mode       = get_type_mode(param_type);
+		ir_node                  *str;
+
+		/* float values passed in gp registers are converted to their
+		 * integer representation first (64bit doubles become two ints) */
+		if (mode_is_float(mode) && param->reg0 != NULL) {
+			unsigned size_bits = get_mode_size_bits(mode);
+			if (size_bits == 64) {
+				double_to_ints(dbgi, new_block, new_value, &new_value,
+				               &new_value1);
+			} else {
+				assert(size_bits == 32);
+				new_value = float_to_int(dbgi, new_block, new_value);
+			}
+		}
+
+		/* put value into registers */
+		if (param->reg0 != NULL) {
+			in[in_arity]     = new_value;
+			in_req[in_arity] = param->reg0->single_req;
+			++in_arity;
+			if (new_value1 == NULL)
+				continue;
+		}
+		if (param->reg1 != NULL) {
+			assert(new_value1 != NULL);
+			in[in_arity]     = new_value1;
+			in_req[in_arity] = param->reg1->single_req;
+			++in_arity;
+			continue;
+		}
+
+		/* we need a store if we're here */
+		/* (a 64bit value split across register + stack stores only the
+		 * second half here; the first half was consumed above) */
+		if (new_value1 != NULL) {
+			new_value = new_value1;
+			mode      = mode_gp;
+		}
+
+		/* create a parameter frame if necessary */
+		if (incsp == NULL) {
+			ir_node *new_frame = get_stack_pointer_for(node);
+			incsp = be_new_IncSP(sp_reg, new_block, new_frame,
+								 cconv->param_stack_size, 1);
+		}
+		if (mode_is_float(mode)) {
+			str = new_bd_arm_Stf(dbgi, new_block, incsp, new_value, new_mem,
+			                     mode, NULL, 0, param->offset, true);
+		} else {
+			str = new_bd_arm_Str(dbgi, new_block, incsp, new_value, new_mem,
+								 mode, NULL, 0, param->offset, true);
+		}
+		sync_ins[sync_arity++] = str;
+	}
+	assert(in_arity <= max_inputs);
+
+	/* construct memory input: plain memory if no stack stores happened,
+	 * the single store, or a Sync over all stores */
+	if (sync_arity == 0) {
+		in[mem_pos] = new_mem;
+	} else if (sync_arity == 1) {
+		in[mem_pos] = sync_ins[0];
+	} else {
+		in[mem_pos] = new_rd_Sync(NULL, new_block, sync_arity, sync_ins);
+	}
+
+	/* TODO: use a generic symconst matcher here */
+	if (is_SymConst(callee)) {
+		entity = get_SymConst_entity(callee);
+	} else {
+		/* TODO: finish load matcher here */
+#if 0
+		/* callee */
+		if (is_Proj(callee) && is_Load(get_Proj_pred(callee))) {
+			ir_node *load    = get_Proj_pred(callee);
+			ir_node *ptr     = get_Load_ptr(load);
+			ir_node *new_ptr = be_transform_node(ptr);
+			ir_node *mem     = get_Load_mem(load);
+			ir_node *new_mem = be_transform_node(mem);
+			ir_mode *mode    = get_Load_mode(node);
+
+		} else {
+#endif
+			/* indirect call: the callee address occupies one input */
+			in[in_arity]     = be_transform_node(callee);
+			in_req[in_arity] = arm_reg_classes[CLASS_arm_gp].class_req;
+			++in_arity;
+		//}
+	}
+
+	/* outputs:
+	 * - memory
+	 * - caller saves
+	 */
+	out_arity = 1 + n_caller_saves;
+
+	if (entity != NULL) {
+		/* TODO: use a generic symconst matcher here
+		 * so we can also handle entity+offset, etc. */
+		res = new_bd_arm_Bl(dbgi, new_block, in_arity, in, out_arity,entity, 0);
+	} else {
+		/* TODO:
+		 * - use a proper shifter_operand matcher
+		 * - we could also use LinkLdrPC
+		 */
+		res = new_bd_arm_LinkMovPC(dbgi, new_block, in_arity, in, out_arity,
+		                           ARM_SHF_REG, 0, 0);
+	}
+
+	if (incsp != NULL) {
+		/* IncSP to destroy the call stackframe */
+		incsp = be_new_IncSP(sp_reg, new_block, incsp, -cconv->param_stack_size,
+		                     0);
+		/* if we are the last IncSP producer in a block then we have to keep
+		 * the stack value.
+		 * Note: This here keeps all producers which is more than necessary */
+		add_irn_dep(incsp, res);
+		keep_alive(incsp);
+
+		pmap_insert(node_to_stack, node, incsp);
+	}
+
+	arch_set_irn_register_reqs_in(res, in_req);
+
+	/* create output register reqs */
+	arch_set_irn_register_req_out(res, 0, arch_no_register_req);
+	for (o = 0; o < n_caller_saves; ++o) {
+		const arch_register_t *reg = caller_saves[o];
+		arch_set_irn_register_req_out(res, o+1, reg->single_req);
+	}
+
+	/* copy pinned attribute */
+	set_irn_pinned(res, get_irn_pinned(node));
+
+	arm_free_calling_convention(cconv);
+	return res;
+}
+
+/**
+ * Transform a Sel node into an arm_FrameAddr (address of a frame entity).
+ *
+ * By this point all Sels except frame-entity accesses must have been
+ * lowered, so the pointer must be the frame pointer coming from the
+ * Start node (see assert).
+ */
+static ir_node *gen_Sel(ir_node *node)
+{
+	dbg_info  *dbgi      = get_irn_dbg_info(node);
+	ir_node   *block     = get_nodes_block(node);
+	ir_node   *new_block = be_transform_node(block);
+	ir_node   *ptr       = get_Sel_ptr(node);
+	ir_node   *new_ptr   = be_transform_node(ptr);
+	ir_entity *entity    = get_Sel_entity(node);
+
+	/* must be the frame pointer all other sels must have been lowered
+	 * already */
+	assert(is_Proj(ptr) && is_Start(get_Proj_pred(ptr)));
+
+	return new_bd_arm_FrameAddr(dbgi, new_block, new_ptr, entity, 0);
+}
+
+static ir_node *gen_Phi(ir_node *node)
+{
+ ir_mode *mode = get_irn_mode(node);
+ const arch_register_req_t *req;
+ if (mode_needs_gp_reg(mode)) {
+ /* we shouldn't have any 64bit stuff around anymore */
+ assert(get_mode_size_bits(mode) <= 32);
+ /* all integer operations are on 32bit registers now */
+ mode = mode_Iu;
+ req = arm_reg_classes[CLASS_arm_gp].class_req;
+ } else {
+ req = arch_no_register_req;
+ }
+
+ return be_transform_phi(node, req);