if (env->flags.try_omit_fp) {
/* simply remove the stack frame here */
curr_sp = be_new_IncSP(arch_env->sp, irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
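/* BE_STACK_FRAME_SIZE_SHRINK is a placeholder offset; it is replaced by the
 * actual frame size later, once the final stack layout is known. */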
- if(is_ia32_use_frame(irn) ||
- (is_ia32_NoReg_GP(get_irn_n(irn, 0)) &&
- is_ia32_NoReg_GP(get_irn_n(irn, 1)))) {
+ if (is_ia32_use_frame(irn) || (
+ is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
+ is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
+ )) {
ia32_code_gen_t *cg = ia32_current_cg;
assert(ia32_possible_memory_operand(irn, i) && "Cannot perform memory operand change");
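/* fold input i of irn into a source address mode (memory) operand, so the
 * spilled value is read straight from its stack slot instead of being
 * reloaded into a register first */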
- set_ia32_ls_mode(irn, get_irn_mode(get_irn_n(irn, i)));
+
+ load_mode = get_irn_mode(get_irn_n(irn, i));
+ dest_op_mode = get_ia32_ls_mode(irn);
+ if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode)) {
+ set_ia32_ls_mode(irn, load_mode);
+ }
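/* mark the node as frame based so a stack entity (the spill slot) gets
 * assigned, then rewire it: the base becomes the frame, the replaced register
 * operand becomes NoReg and the spill provides the memory input */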
set_ia32_use_frame(irn);
set_ia32_need_stackent(irn);
set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
set_irn_n(irn, n_ia32_binary_right, ia32_get_admissible_noreg(cg, irn, n_ia32_binary_right));
set_irn_n(irn, n_ia32_mem, spill);
/* immediates are only allowed on the right side */
if (i == n_ia32_binary_left && is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_left))) {
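/* frame entity collection: Loads and nodes flagged with need_stackent report
 * their load mode (and alignment), so that a suitably sized frame entity can
 * be assigned to them later */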
if (is_ia32_need_stackent(node) || is_ia32_Load(node)) {
const ir_mode *mode = get_ia32_ls_mode(node);
const ia32_attr_t *attr = get_ia32_attr_const(node);
be_node_needs_frame_entity(env, node, mode, align);
} else if (is_ia32_vfild(node) || is_ia32_xLoad(node)
|| is_ia32_vfld(node)) {
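/* the x87 (vfild, vfld) and SSE (xLoad) load variants need a frame entity
 * as well */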
call_flags.bits.store_args_sequential = 0;
/* call_flags.bits.try_omit_fp not changed: can handle both settings */
call_flags.bits.fp_free = 0; /* the frame pointer is fixed in IA32 */
/* set parameter passing style */
be_abi_call_set_flags(abi, call_flags, &ia32_abi_callbacks);
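/* this registers the flags together with the ia32_abi_callbacks (frame type,
 * prologue and epilogue hooks) with the generic ABI handling */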
ia32_cg_config.optimize_cc) {
/* set the calling convention to register parameter passing */
cc = (cc & ~cc_bits) | cc_reg_param;
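/* the existing calling-convention bits are cleared via ~cc_bits before the
 * register parameter variant is or-ed in */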
} else {
/* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
* movl has a shorter opcode than mov[sz][bw]l */
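/* a minimal sketch (not the original code) of the widening the comment above
 * refers to: parameter modes smaller than 32 bit would be replaced by a
 * 32-bit mode before the be_abi_call_param_stack() call below, e.g.: */
if (load_mode != NULL && get_mode_size_bits(load_mode) < 32)
	load_mode = mode_Iu; /* libFirm's 32-bit unsigned integer mode */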
be_abi_call_param_stack(abi, i, load_mode, 4, 0, 0);
}