#include "adt/error.h"
#include "adt/array.h"
+#include "adt/strutil.h"
#include "adt/util.h"
#include "symbol_t.h"
#include "token_t.h"
static int next_value_number_function;
static ir_node *continue_label;
static ir_node *break_label;
-static ir_node *current_switch_cond;
+static ir_node *current_switch;
static bool saw_default_label;
static label_t **all_labels;
static entity_t **inner_functions;
static const entity_t *current_function_entity;
static ir_node *current_function_name;
static ir_node *current_funcsig;
-static switch_statement_t *current_switch;
static ir_graph *current_function;
static translation_unit_t *current_translation_unit;
static trampoline_region *current_trampolines;
static ir_type *current_outer_frame;
static ir_node *current_static_link;
+static ir_entity *current_vararg_entity;
static entitymap_t entitymap;
DECLARATION_KIND_INNER_FUNCTION
} declaration_kind_t;
-static ir_mode *get_ir_mode_storage(type_t *type);
-
static ir_type *get_ir_type_incomplete(type_t *type);
static void enqueue_inner_function(entity_t *entity)
set_cur_block(NULL);
}
-static ir_mode *atomic_modes[ATOMIC_TYPE_LAST+1];
-
-static ir_mode *mode_int, *mode_uint;
+ir_mode *atomic_modes[ATOMIC_TYPE_LAST+1];
static ir_node *_expression_to_firm(const expression_t *expression);
static ir_node *expression_to_firm(const expression_t *expression);
{
unsigned flags = get_atomic_type_flags(kind);
unsigned size = get_atomic_type_size(kind);
- if ( (flags & (ATOMIC_TYPE_FLAG_INTEGER | ATOMIC_TYPE_FLAG_FLOAT))
- && !(flags & ATOMIC_TYPE_FLAG_COMPLEX)) {
+ if ((flags & ATOMIC_TYPE_FLAG_FLOAT)
+ && !(flags & ATOMIC_TYPE_FLAG_COMPLEX)) {
+ if (size == 4) {
+ return get_modeF();
+ } else if (size == 8) {
+ return get_modeD();
+ } else {
+ panic("unexpected kind");
+ }
+ } else if (flags & ATOMIC_TYPE_FLAG_INTEGER) {
char name[64];
- ir_mode_sort sort;
unsigned bit_size = size * 8;
bool is_signed = (flags & ATOMIC_TYPE_FLAG_SIGNED) != 0;
- unsigned modulo_shift = 0;
- ir_mode_arithmetic arithmetic;
-
- if (flags & ATOMIC_TYPE_FLAG_INTEGER) {
- assert(! (flags & ATOMIC_TYPE_FLAG_FLOAT));
- snprintf(name, sizeof(name), "%s%u", is_signed ? "I" : "U",
- bit_size);
- sort = irms_int_number;
- arithmetic = irma_twos_complement;
- modulo_shift = decide_modulo_shift(bit_size);
- } else {
- assert(flags & ATOMIC_TYPE_FLAG_FLOAT);
- snprintf(name, sizeof(name), "F%u", bit_size);
- sort = irms_float_number;
- arithmetic = irma_ieee754;
- }
- return new_ir_mode(name, sort, bit_size, is_signed, arithmetic,
- modulo_shift);
+ unsigned modulo_shift = decide_modulo_shift(bit_size);
+
+ snprintf(name, sizeof(name), "%s%u", is_signed ? "I" : "U", bit_size);
+ return new_int_mode(name, irma_twos_complement, bit_size, is_signed,
+ modulo_shift);
}
return NULL;
*/
static void init_atomic_modes(void)
{
+ atomic_modes[ATOMIC_TYPE_VOID] = mode_ANY;
for (int i = 0; i <= ATOMIC_TYPE_LAST; ++i) {
+ if (atomic_modes[i] != NULL)
+ continue;
atomic_modes[i] = init_atomic_ir_mode((atomic_type_kind_t) i);
}
- mode_int = atomic_modes[ATOMIC_TYPE_INT];
- mode_uint = atomic_modes[ATOMIC_TYPE_UINT];
-
- /* there's no real void type in firm */
- atomic_modes[ATOMIC_TYPE_VOID] = atomic_modes[ATOMIC_TYPE_CHAR];
}
ir_mode *get_atomic_mode(atomic_type_kind_t kind)
return size_node;
}
-/**
- * Return a node representing the size of a type.
- */
-static ir_node *get_type_size_node(type_t *type)
-{
- type = skip_typeref(type);
-
- if (is_type_array(type) && type->array.is_vla) {
- ir_node *size_node = get_vla_size(&type->array);
- ir_node *elem_size = get_type_size_node(type->array.element_type);
- ir_mode *mode = get_irn_mode(size_node);
- ir_node *real_size = new_d_Mul(NULL, size_node, elem_size, mode);
- return real_size;
- }
-
- ir_mode *mode = get_ir_mode_storage(type_size_t);
- symconst_symbol sym;
- sym.type_p = get_ir_type(type);
- return new_SymConst(mode, sym, symconst_type_size);
-}
-
static unsigned count_parameters(const function_type_t *function_type)
{
unsigned count = 0;
ir_type *irtype = new_d_type_primitive(mode, dbgi);
il_alignment_t alignment = get_atomic_type_alignment(akind);
+ set_type_size_bytes(irtype, get_atomic_type_size(akind));
set_type_alignment_bytes(irtype, alignment);
return irtype;
/**
* Creates a Firm type for a complex type
*/
-static ir_type *create_complex_type(const complex_type_t *type)
+static ir_type *create_complex_type(const atomic_type_t *type)
{
atomic_type_kind_t kind = type->akind;
ir_mode *mode = atomic_modes[kind];
/**
* Creates a Firm type for an imaginary type
*/
-static ir_type *create_imaginary_type(imaginary_type_t *type)
+static ir_type *create_imaginary_type(const atomic_type_t *type)
{
- return create_atomic_type(type->akind, (const type_t*) type);
+ return create_atomic_type(type->akind, (const type_t*)type);
}
/**
if (for_closure)
set_method_calling_convention(irtype, get_method_calling_convention(irtype) | cc_this_call);
+ const decl_modifiers_t modifiers = function_type->modifiers;
+ if (modifiers & DM_CONST)
+ add_method_additional_properties(irtype, mtp_property_const);
+ if (modifiers & DM_PURE)
+ add_method_additional_properties(irtype, mtp_property_pure);
+ if (modifiers & DM_RETURNS_TWICE)
+ add_method_additional_properties(irtype, mtp_property_returns_twice);
+ if (modifiers & DM_NORETURN)
+ add_method_additional_properties(irtype, mtp_property_noreturn);
+ if (modifiers & DM_NOTHROW)
+ add_method_additional_properties(irtype, mtp_property_nothrow);
+ if (modifiers & DM_MALLOC)
+ add_method_additional_properties(irtype, mtp_property_malloc);
+
return irtype;
}
char name[32];
snprintf(name, sizeof(name), "bf_I%u", size);
- mode = new_ir_mode(name, irms_int_number, size, 1, irma_twos_complement,
- size <= 32 ? 32 : size );
+ mode = new_int_mode(name, irma_twos_complement, size, 1, 0);
s_modes[size] = mode;
}
char name[32];
snprintf(name, sizeof(name), "bf_U%u", size);
- mode = new_ir_mode(name, irms_int_number, size, 0, irma_twos_complement,
- size <= 32 ? 32 : size );
+ mode = new_int_mode(name, irma_twos_complement, size, 0, 0);
u_modes[size] = mode;
}
}
}
-#define INVALID_TYPE ((ir_type_ptr)-1)
+#define INVALID_TYPE ((ir_type*)-1)
enum {
COMPOUND_IS_STRUCT = false,
return irtype;
}
-static ir_type *create_enum_type(enum_type_t *const type)
-{
- type->base.firm_type = ir_type_int;
+static ir_tarval *fold_constant_to_tarval(expression_t const *);
- ir_mode *const mode = mode_int;
+static void determine_enum_values(enum_type_t *const type)
+{
+ ir_mode *const mode = atomic_modes[type->base.akind];
ir_tarval *const one = get_mode_one(mode);
ir_tarval * tv_next = get_mode_null(mode);
- bool constant_folding_old = constant_folding;
- constant_folding = true;
-
enum_t *enume = type->enume;
entity_t *entry = enume->base.next;
for (; entry != NULL; entry = entry->base.next) {
expression_t *const init = entry->enum_value.value;
if (init != NULL) {
- ir_node *const cnst = expression_to_firm(init);
- if (!is_Const(cnst)) {
- panic("couldn't fold constant");
- }
- tv_next = get_Const_tarval(cnst);
+ tv_next = fold_constant_to_tarval(init);
}
+ assert(entry->enum_value.tv == NULL || entry->enum_value.tv == tv_next);
entry->enum_value.tv = tv_next;
tv_next = tarval_add(tv_next, one);
}
+}
- constant_folding = constant_folding_old;
-
- return create_atomic_type(type->akind, (const type_t*) type);
+static ir_type *create_enum_type(enum_type_t *const type)
+{
+ return create_atomic_type(type->base.akind, (const type_t*) type);
}
static ir_type *get_ir_type_incomplete(type_t *type)
ir_type *firm_type = NULL;
switch (type->kind) {
- case TYPE_ERROR:
- /* Happens while constant folding, when there was an error */
- return create_atomic_type(ATOMIC_TYPE_VOID, NULL);
-
case TYPE_ATOMIC:
firm_type = create_atomic_type(type->atomic.akind, type);
break;
case TYPE_COMPLEX:
- firm_type = create_complex_type(&type->complex);
+ firm_type = create_complex_type(&type->atomic);
break;
case TYPE_IMAGINARY:
- firm_type = create_imaginary_type(&type->imaginary);
+ firm_type = create_imaginary_type(&type->atomic);
break;
case TYPE_FUNCTION:
firm_type = create_method_type(&type->function, false);
firm_type = create_enum_type(&type->enumt);
break;
+ case TYPE_ERROR:
case TYPE_TYPEOF:
case TYPE_TYPEDEF:
- case TYPE_INVALID:
break;
}
if (firm_type == NULL)
return mode;
}
+/**
+ * Return a node representing the size of a type.
+ */
+static ir_node *get_type_size_node(type_t *type)
+{
+ unsigned size;
+ ir_mode *mode = get_ir_mode_arithmetic(type_size_t);
+ type = skip_typeref(type);
+
+ if (is_type_array(type) && type->array.is_vla) {
+ ir_node *size_node = get_vla_size(&type->array);
+ ir_node *elem_size = get_type_size_node(type->array.element_type);
+ ir_node *real_size = new_d_Mul(NULL, size_node, elem_size, mode);
+ return real_size;
+ }
+
+ size = get_type_size(type);
+ return new_Const_long(mode, size);
+}
+
/** Names of the runtime functions. */
static const struct {
int id; /**< the rts id */
static ir_entity *get_function_entity(entity_t *entity, ir_type *owner_type)
{
assert(entity->kind == ENTITY_FUNCTION);
- if (entity->function.irentity != NULL) {
+ if (entity->function.irentity != NULL)
return entity->function.irentity;
- }
- entity_t *original_entity = entity;
- if (entity->function.btk != bk_none) {
- entity = get_builtin_replacement(entity);
- if (entity == NULL)
- return NULL;
+ switch (entity->function.btk) {
+ case BUILTIN_NONE:
+ case BUILTIN_LIBC:
+ case BUILTIN_LIBC_CHECK:
+ break;
+ default:
+ return NULL;
}
if (is_main(entity)) {
nested_function = true;
dbg_info *const dbgi = get_dbg_info(&entity->base.source_position);
- irentity = new_d_entity(owner_type, id, ir_type_method, dbgi);
+ irentity = new_d_entity(owner_type, id, ir_type_method, dbgi);
ident *ld_id;
if (nested_function)
entitymap_insert(&entitymap, symbol, irentity);
entity_created:
- original_entity->declaration.kind = DECLARATION_KIND_FUNCTION;
- original_entity->function.irentity = irentity;
+ entity->declaration.kind = DECLARATION_KIND_FUNCTION;
+ entity->function.irentity = irentity;
return irentity;
}
return new_d_SymConst(dbgi, mode_P, sym, symconst_addr_ent);
}
+static ir_node *create_Const_from_bool(ir_mode *const mode, bool const v)
+{
+ return new_Const((v ? get_mode_one : get_mode_null)(mode));
+}
+
+static ir_node *create_conv_from_b(dbg_info *dbgi, ir_node *value,
+ ir_mode *dest_mode)
+{
+ if (is_Const(value)) {
+ return create_Const_from_bool(dest_mode, !is_Const_null(value));
+ }
+
+ ir_node *cond = new_d_Cond(dbgi, value);
+ ir_node *proj_true = new_Proj(cond, mode_X, pn_Cond_true);
+ ir_node *proj_false = new_Proj(cond, mode_X, pn_Cond_false);
+ ir_node *tblock = new_Block(1, &proj_true);
+ ir_node *fblock = new_Block(1, &proj_false);
+ set_cur_block(tblock);
+ ir_node *const1 = new_Const(get_mode_one(dest_mode));
+ ir_node *tjump = new_Jmp();
+ set_cur_block(fblock);
+ ir_node *const0 = new_Const(get_mode_null(dest_mode));
+ ir_node *fjump = new_Jmp();
+
+ ir_node *in[2] = { tjump, fjump };
+ ir_node *mergeblock = new_Block(2, in);
+ set_cur_block(mergeblock);
+ ir_node *phi_in[2] = { const1, const0 };
+ ir_node *phi = new_Phi(2, phi_in, dest_mode);
+ return phi;
+}
+
static ir_node *create_conv(dbg_info *dbgi, ir_node *value, ir_mode *dest_mode)
{
ir_mode *value_mode = get_irn_mode(value);
if (dest_mode == mode_b) {
ir_node *zero = new_Const(get_mode_null(value_mode));
- ir_node *cmp = new_d_Cmp(dbgi, value, zero, ir_relation_less_greater);
+ ir_node *cmp = new_d_Cmp(dbgi, value, zero, ir_relation_unordered_less_greater);
return cmp;
+ } else if (value_mode == mode_b) {
+ return create_conv_from_b(dbgi, value, dest_mode);
}
return new_d_Conv(dbgi, value, dest_mode);
}
-static ir_node *create_Const_from_bool(ir_mode *const mode, bool const v)
-{
- return new_Const((v ? get_mode_one : get_mode_null)(mode));
-}
-
/**
* Creates a SymConst node representing a wide string literal.
*
ir_node *irn = new_d_Builtin(dbgi, get_store(), 3, in, ir_bk_inner_trampoline, get_unknown_type());
set_store(new_Proj(irn, mode_M, pn_Builtin_M));
- return new_Proj(irn, mode, pn_Builtin_1_result);
+ return new_Proj(irn, mode, pn_Builtin_max+1);
}
/**
static ir_node *deref_address(dbg_info *const dbgi, type_t *const type,
ir_node *const addr)
{
- ir_type *irtype = get_ir_type(type);
+ type_t *skipped = skip_typeref(type);
+ if (is_type_incomplete(skipped))
+ return addr;
+
+ ir_type *irtype = get_ir_type(skipped);
if (is_compound_type(irtype)
- || is_Method_type(irtype)
- || is_Array_type(irtype)) {
+ || is_Method_type(irtype)
+ || is_Array_type(irtype)) {
return addr;
}
- ir_cons_flags flags = type->base.qualifiers & TYPE_QUALIFIER_VOLATILE
+ ir_cons_flags flags = skipped->base.qualifiers & TYPE_QUALIFIER_VOLATILE
? cons_volatile : cons_none;
ir_mode *const mode = get_type_mode(irtype);
ir_node *const memory = get_store();
set_store(load_mem);
- ir_mode *const mode_arithmetic = get_ir_mode_arithmetic(type);
+ ir_mode *const mode_arithmetic = get_ir_mode_arithmetic(skipped);
return create_conv(dbgi, load_res, mode_arithmetic);
}
set_cur_block(old);
}
-static ir_node *reference_expression_enum_value_to_firm(
- const reference_expression_t *ref)
+static ir_node *enum_constant_to_firm(reference_expression_t const *const ref)
{
entity_t *entity = ref->entity;
- type_t *type = skip_typeref(entity->enum_value.enum_type);
- /* make sure the type is constructed */
- (void) get_ir_type(type);
+ if (entity->enum_value.tv == NULL) {
+ type_t *type = skip_typeref(entity->enum_value.enum_type);
+ assert(type->kind == TYPE_ENUM);
+ determine_enum_values(&type->enumt);
+ }
return new_Const(entity->enum_value.tv);
}
/* make sure the type is constructed */
(void) get_ir_type(type);
- if (entity->kind == ENTITY_FUNCTION && entity->function.btk != bk_none) {
+ if (entity->kind == ENTITY_FUNCTION
+ && entity->function.btk != BUILTIN_NONE) {
ir_entity *irentity = get_function_entity(entity, NULL);
/* for gcc compatibility we have to produce (dummy) addresses for some
* builtins which don't have entities */
panic("reference to declaration with unknown type found");
}
-/**
- * Generate an unary builtin.
- *
- * @param kind the builtin kind to generate
- * @param op the operand
- * @param function_type the function type for the GNU builtin routine
- * @param db debug info
- */
-static ir_node *gen_unary_builtin(ir_builtin_kind kind, expression_t *op, type_t *function_type, dbg_info *db)
-{
- ir_node *in[1];
- in[0] = expression_to_firm(op);
-
- ir_type *tp = get_ir_type(function_type);
- ir_type *res = get_method_res_type(tp, 0);
- ir_node *irn = new_d_Builtin(db, get_irg_no_mem(current_ir_graph), 1, in, kind, tp);
- set_irn_pinned(irn, op_pin_state_floats);
- return new_Proj(irn, get_type_mode(res), pn_Builtin_1_result);
-}
-
-/**
- * Generate a pinned unary builtin.
- *
- * @param kind the builtin kind to generate
- * @param op the operand
- * @param function_type the function type for the GNU builtin routine
- * @param db debug info
- */
-static ir_node *gen_unary_builtin_pinned(ir_builtin_kind kind, expression_t *op,
- type_t *function_type, dbg_info *db)
-{
- ir_node *in[1];
- in[0] = expression_to_firm(op);
-
- ir_type *tp = get_ir_type(function_type);
- ir_type *res = get_method_res_type(tp, 0);
- ir_node *mem = get_store();
- ir_node *irn = new_d_Builtin(db, mem, 1, in, kind, tp);
- set_store(new_Proj(irn, mode_M, pn_Builtin_M));
- return new_Proj(irn, get_type_mode(res), pn_Builtin_1_result);
-}
-
-/**
- * Generate an binary-void-return builtin.
- *
- * @param kind the builtin kind to generate
- * @param op1 the first operand
- * @param op2 the second operand
- * @param function_type the function type for the GNU builtin routine
- * @param db debug info
- */
-static ir_node *gen_binary_builtin_mem(ir_builtin_kind kind, expression_t *op1,
- expression_t *op2, type_t *function_type,
- dbg_info *db)
-{
- ir_node *in[2];
- in[0] = expression_to_firm(op1);
- in[1] = expression_to_firm(op2);
-
- ir_type *tp = get_ir_type(function_type);
- ir_node *mem = get_store();
- ir_node *irn = new_d_Builtin(db, mem, 2, in, kind, tp);
- set_store(new_Proj(irn, mode_M, pn_Builtin_M));
- return NULL;
-}
-
/**
* Transform calls to builtin functions.
*/
type_t *function_type = skip_typeref(expr_type->pointer.points_to);
switch (builtin->entity->function.btk) {
- case bk_gnu_builtin_alloca: {
- if (call->arguments == NULL || call->arguments->next != NULL) {
- panic("invalid number of parameters on __builtin_alloca");
- }
+ case BUILTIN_NONE:
+ break;
+ case BUILTIN_ALLOCA: {
expression_t *argument = call->arguments->expression;
ir_node *size = expression_to_firm(argument);
ir_node *store = get_store();
- ir_node *alloca = new_d_Alloc(dbgi, store, size, firm_unknown_type,
+ ir_node *alloca = new_d_Alloc(dbgi, store, size, get_unknown_type(),
stack_alloc);
ir_node *proj_m = new_Proj(alloca, mode_M, pn_Alloc_M);
set_store(proj_m);
return res;
}
-
- case bk_gnu_builtin_huge_val:
- case bk_gnu_builtin_huge_valf:
- case bk_gnu_builtin_huge_vall:
- case bk_gnu_builtin_inf:
- case bk_gnu_builtin_inff:
- case bk_gnu_builtin_infl: {
+ case BUILTIN_INF: {
type_t *type = function_type->function.return_type;
ir_mode *mode = get_ir_mode_arithmetic(type);
ir_tarval *tv = get_mode_infinite(mode);
ir_node *res = new_d_Const(dbgi, tv);
return res;
}
- case bk_gnu_builtin_nan:
- case bk_gnu_builtin_nanf:
- case bk_gnu_builtin_nanl: {
+ case BUILTIN_NAN: {
/* Ignore string for now... */
assert(is_type_function(function_type));
type_t *type = function_type->function.return_type;
ir_node *res = new_d_Const(dbgi, tv);
return res;
}
- case bk_gnu_builtin_expect: {
+ case BUILTIN_EXPECT: {
expression_t *argument = call->arguments->expression;
return _expression_to_firm(argument);
}
- case bk_gnu_builtin_va_end:
+ case BUILTIN_VA_END:
/* evaluate the argument of va_end for its side effects */
_expression_to_firm(call->arguments->expression);
return NULL;
- case bk_gnu_builtin_frame_address: {
- expression_t *const expression = call->arguments->expression;
- bool val = fold_constant_to_bool(expression);
- if (!val) {
- /* the nice case */
- return get_irg_frame(current_ir_graph);
- } else {
- /* get the argument */
- ir_node *in[2];
-
- in[0] = expression_to_firm(expression);
- in[1] = get_irg_frame(current_ir_graph);
- ir_type *tp = get_ir_type(function_type);
- ir_node *irn = new_d_Builtin(dbgi, get_irg_no_mem(current_ir_graph), 2, in, ir_bk_frame_address, tp);
- return new_Proj(irn, mode_P_data, pn_Builtin_1_result);
- }
- }
- case bk_gnu_builtin_return_address: {
- expression_t *const expression = call->arguments->expression;
- ir_node *in[2];
-
- in[0] = expression_to_firm(expression);
- in[1] = get_irg_frame(current_ir_graph);
- ir_type *tp = get_ir_type(function_type);
- ir_node *irn = new_d_Builtin(dbgi, get_irg_no_mem(current_ir_graph), 2, in, ir_bk_return_address, tp);
- return new_Proj(irn, mode_P_data, pn_Builtin_1_result);
- }
- case bk_gnu_builtin_ffs:
- return gen_unary_builtin(ir_bk_ffs, call->arguments->expression, function_type, dbgi);
- case bk_gnu_builtin_clz:
- case bk_gnu_builtin_clzl:
- case bk_gnu_builtin_clzll:
- return gen_unary_builtin(ir_bk_clz, call->arguments->expression, function_type, dbgi);
- case bk_gnu_builtin_ctz:
- case bk_gnu_builtin_ctzl:
- case bk_gnu_builtin_ctzll:
- return gen_unary_builtin(ir_bk_ctz, call->arguments->expression, function_type, dbgi);
- case bk_gnu_builtin_popcount:
- case bk_gnu_builtin_popcountl:
- case bk_gnu_builtin_popcountll:
- case bk_ms__popcount:
- return gen_unary_builtin(ir_bk_popcount, call->arguments->expression, function_type, dbgi);
- case bk_gnu_builtin_parity:
- return gen_unary_builtin(ir_bk_parity, call->arguments->expression, function_type, dbgi);
- case bk_gnu_builtin_prefetch: {
- call_argument_t *const args = call->arguments;
- expression_t *const addr = args->expression;
- ir_node *in[3];
-
- in[0] = _expression_to_firm(addr);
- if (args->next != NULL) {
- expression_t *const rw = args->next->expression;
-
- in[1] = _expression_to_firm(rw);
-
- if (args->next->next != NULL) {
- expression_t *const locality = args->next->next->expression;
-
- in[2] = expression_to_firm(locality);
- } else {
- in[2] = new_Const_long(mode_int, 3);
- }
- } else {
- in[1] = new_Const_long(mode_int, 0);
- in[2] = new_Const_long(mode_int, 3);
- }
- ir_type *tp = get_ir_type(function_type);
- ir_node *irn = new_d_Builtin(dbgi, get_store(), 3, in, ir_bk_prefetch, tp);
- set_store(new_Proj(irn, mode_M, pn_Builtin_M));
- return NULL;
- }
- case bk_gnu_builtin_object_size: {
+ case BUILTIN_OBJECT_SIZE: {
/* determine value of "type" */
expression_t *type_expression = call->arguments->next->expression;
long type_val = fold_constant_to_int(type_expression);
return new_d_Const(dbgi, result);
}
- case bk_gnu_builtin_trap:
- case bk_ms__ud2:
- {
- ir_type *tp = get_ir_type(function_type);
- ir_node *irn = new_d_Builtin(dbgi, get_store(), 0, NULL, ir_bk_trap, tp);
- set_store(new_Proj(irn, mode_M, pn_Builtin_M));
- return NULL;
- }
- case bk_ms__debugbreak: {
- ir_type *tp = get_ir_type(function_type);
- ir_node *irn = new_d_Builtin(dbgi, get_store(), 0, NULL, ir_bk_debugbreak, tp);
- set_store(new_Proj(irn, mode_M, pn_Builtin_M));
- return NULL;
- }
- case bk_ms_ReturnAddress: {
- ir_node *in[2];
-
- in[0] = new_Const(get_mode_null(mode_int));
- in[1] = get_irg_frame(current_ir_graph);
- ir_type *tp = get_ir_type(function_type);
- ir_node *irn = new_d_Builtin(dbgi, get_irg_no_mem(current_ir_graph), 2, in, ir_bk_return_address, tp);
- return new_Proj(irn, mode_P_data, pn_Builtin_1_result);
- }
- case bk_ms_rotl:
- case bk_ms_rotl64: {
+ case BUILTIN_ROTL: {
ir_node *val = expression_to_firm(call->arguments->expression);
ir_node *shf = expression_to_firm(call->arguments->next->expression);
ir_mode *mode = get_irn_mode(val);
+ ir_mode *mode_uint = atomic_modes[ATOMIC_TYPE_UINT];
return new_d_Rotl(dbgi, val, create_conv(dbgi, shf, mode_uint), mode);
}
- case bk_ms_rotr:
- case bk_ms_rotr64: {
+ case BUILTIN_ROTR: {
ir_node *val = expression_to_firm(call->arguments->expression);
ir_node *shf = expression_to_firm(call->arguments->next->expression);
ir_mode *mode = get_irn_mode(val);
+ ir_mode *mode_uint = atomic_modes[ATOMIC_TYPE_UINT];
ir_node *c = new_Const_long(mode_uint, get_mode_size_bits(mode));
ir_node *sub = new_d_Sub(dbgi, c, create_conv(dbgi, shf, mode_uint), mode_uint);
return new_d_Rotl(dbgi, val, sub, mode);
}
- case bk_ms_byteswap_ushort:
- case bk_ms_byteswap_ulong:
- case bk_ms_byteswap_uint64:
- return gen_unary_builtin(ir_bk_bswap, call->arguments->expression, function_type, dbgi);
- case bk_ms__inbyte:
- case bk_ms__inword:
- case bk_ms__indword:
- return gen_unary_builtin_pinned(ir_bk_inport, call->arguments->expression, function_type, dbgi);
- case bk_ms__outbyte:
- case bk_ms__outword:
- case bk_ms__outdword:
- return gen_binary_builtin_mem(ir_bk_outport, call->arguments->expression,
- call->arguments->next->expression, function_type, dbgi);
- default:
- panic("unsupported builtin found");
+ case BUILTIN_FIRM:
+ break;
+ case BUILTIN_LIBC:
+ case BUILTIN_LIBC_CHECK:
+ panic("builtin did not produce an entity");
}
+ panic("invalid builtin found");
}
/**
dbg_info *const dbgi = get_dbg_info(&call->base.source_position);
assert(currently_reachable());
- expression_t *function = call->function;
+ expression_t *function = call->function;
+ ir_node *callee = NULL;
+ bool firm_builtin = false;
+ ir_builtin_kind firm_builtin_kind = ir_bk_trap;
if (function->kind == EXPR_REFERENCE) {
const reference_expression_t *ref = &function->reference;
entity_t *entity = ref->entity;
if (entity->kind == ENTITY_FUNCTION) {
- ir_entity *irentity = entity->function.irentity;
- if (irentity == NULL)
- irentity = get_function_entity(entity, NULL);
-
- if (irentity == NULL && entity->function.btk != bk_none) {
+ builtin_kind_t builtin = entity->function.btk;
+ if (builtin == BUILTIN_FIRM) {
+ firm_builtin = true;
+ firm_builtin_kind = entity->function.b.firm_builtin_kind;
+ } else if (builtin != BUILTIN_NONE && builtin != BUILTIN_LIBC
+ && builtin != BUILTIN_LIBC_CHECK) {
return process_builtin_call(call);
}
-
-#if 0
- if (irentity == rts_entities[rts_alloca]) {
- /* handle alloca() call */
- expression_t *argument = call->arguments->expression;
- ir_node *size = expression_to_firm(argument);
- ir_mode *mode = get_ir_mode_arithmetic(type_size_t);
-
- size = create_conv(dbgi, size, mode);
-
- ir_node *store = get_store();
- ir_node *alloca = new_d_Alloc(dbgi, store, size,
- firm_unknown_type, stack_alloc);
- ir_node *proj_m = new_Proj(alloca, mode_M, pn_Alloc_M);
- set_store(proj_m);
- ir_node *res = new_Proj(alloca, mode_P_data, pn_Alloc_res);
-
- return res;
- }
-#endif
}
}
- ir_node *callee = expression_to_firm(function);
+ if (!firm_builtin)
+ callee = expression_to_firm(function);
type_t *type = skip_typeref(function->base.type);
assert(is_type_pointer(type));
assert(is_type_function(points_to));
function_type_t *function_type = &points_to->function;
- int n_parameters = 0;
+ int n_parameters = 0;
ir_type *ir_method_type = get_ir_type((type_t*) function_type);
ir_type *new_method_type = NULL;
if (function_type->variadic || function_type->unspecified_parameters) {
argument = argument->next;
}
- ir_node *store = get_store();
- ir_node *node = new_d_Call(dbgi, store, callee, n_parameters, in,
- ir_method_type);
- ir_node *mem = new_d_Proj(dbgi, node, mode_M, pn_Call_M);
- set_store(mem);
+ ir_node *store;
+ if (function_type->modifiers & DM_CONST) {
+ store = get_irg_no_mem(current_ir_graph);
+ } else {
+ store = get_store();
+ }
+ ir_node *node;
type_t *return_type = skip_typeref(function_type->return_type);
ir_node *result = NULL;
+ if (firm_builtin) {
+ node = new_d_Builtin(dbgi, store, n_parameters, in, firm_builtin_kind,
+ ir_method_type);
+ if (! (function_type->modifiers & DM_CONST)) {
+ ir_node *mem = new_Proj(node, mode_M, pn_Builtin_M);
+ set_store(mem);
+ }
- if (!is_type_atomic(return_type, ATOMIC_TYPE_VOID)) {
- ir_node *resproj = new_d_Proj(dbgi, node, mode_T, pn_Call_T_result);
-
- if (is_type_scalar(return_type)) {
- ir_mode *mode = get_ir_mode_storage(return_type);
- result = new_d_Proj(dbgi, resproj, mode, 0);
+ if (!is_type_atomic(return_type, ATOMIC_TYPE_VOID)) {
+ assert(is_type_scalar(return_type));
+ ir_mode *mode = get_ir_mode_storage(return_type);
+ result = new_Proj(node, mode, pn_Builtin_max+1);
ir_mode *mode_arith = get_ir_mode_arithmetic(return_type);
result = create_conv(NULL, result, mode_arith);
- } else {
- ir_mode *mode = mode_P_data;
- result = new_d_Proj(dbgi, resproj, mode, 0);
+ }
+ } else {
+ node = new_d_Call(dbgi, store, callee, n_parameters, in, ir_method_type);
+ if (! (function_type->modifiers & DM_CONST)) {
+ ir_node *mem = new_Proj(node, mode_M, pn_Call_M);
+ set_store(mem);
+ }
+
+ if (!is_type_atomic(return_type, ATOMIC_TYPE_VOID)) {
+ ir_node *resproj = new_Proj(node, mode_T, pn_Call_T_result);
+
+ if (is_type_scalar(return_type)) {
+ ir_mode *mode = get_ir_mode_storage(return_type);
+ result = new_Proj(resproj, mode, 0);
+ ir_mode *mode_arith = get_ir_mode_arithmetic(return_type);
+ result = create_conv(NULL, result, mode_arith);
+ } else {
+ ir_mode *mode = mode_P_data;
+ result = new_Proj(resproj, mode, 0);
+ }
}
}
- if (function->kind == EXPR_REFERENCE &&
- function->reference.entity->declaration.modifiers & DM_NORETURN) {
+ if (function_type->modifiers & DM_NORETURN) {
/* A dead end: Keep the Call and the Block. Also place all further
* nodes into a new and unreachable block. */
keep_alive(node);
{
ir_tarval *all_one = get_mode_all_one(mode);
int mode_size = get_mode_size_bits(mode);
+ ir_mode *mode_uint = atomic_modes[ATOMIC_TYPE_UINT];
assert(offset >= 0);
assert(size >= 0);
}
static ir_node *bitfield_store_to_firm(dbg_info *dbgi,
- ir_entity *entity, ir_node *addr, ir_node *value, bool set_volatile)
+ ir_entity *entity, ir_node *addr, ir_node *value, bool set_volatile,
+ bool need_return)
{
ir_type *entity_type = get_entity_type(entity);
ir_type *base_type = get_primitive_base_type(entity_type);
- assert(base_type != NULL);
ir_mode *mode = get_type_mode(base_type);
+ ir_mode *mode_uint = atomic_modes[ATOMIC_TYPE_UINT];
value = create_conv(dbgi, value, mode);
/* kill upper bits of value and shift to right position */
- int bitoffset = get_entity_offset_bits_remainder(entity);
- int bitsize = get_mode_size_bits(get_type_mode(entity_type));
- ir_tarval *mask = create_bitfield_mask(mode, 0, bitsize);
- ir_node *mask_node = new_d_Const(dbgi, mask);
- ir_node *value_masked = new_d_And(dbgi, value, mask_node, mode);
- ir_tarval *shiftl = new_tarval_from_long(bitoffset, mode_uint);
- ir_node *shiftcount = new_d_Const(dbgi, shiftl);
- ir_node *value_maskshift = new_d_Shl(dbgi, value_masked, shiftcount, mode);
+ unsigned bitoffset = get_entity_offset_bits_remainder(entity);
+ unsigned bitsize = get_mode_size_bits(get_type_mode(entity_type));
+ unsigned base_bits = get_mode_size_bits(mode);
+ unsigned shiftwidth = base_bits - bitsize;
+
+ ir_node *shiftcount = new_Const_long(mode_uint, shiftwidth);
+ ir_node *shiftl = new_d_Shl(dbgi, value, shiftcount, mode);
+
+ unsigned shrwidth = base_bits - bitsize - bitoffset;
+ ir_node *shrconst = new_Const_long(mode_uint, shrwidth);
+ ir_node *shiftr = new_d_Shr(dbgi, shiftl, shrconst, mode);
/* load current value */
ir_node *mem = get_store();
ir_node *load_res_masked = new_d_And(dbgi, load_res, inv_mask_node, mode);
/* construct new value and store */
- ir_node *new_val = new_d_Or(dbgi, load_res_masked, value_maskshift, mode);
+ ir_node *new_val = new_d_Or(dbgi, load_res_masked, shiftr, mode);
ir_node *store = new_d_Store(dbgi, load_mem, addr, new_val,
set_volatile ? cons_volatile : cons_none);
ir_node *store_mem = new_d_Proj(dbgi, store, mode_M, pn_Store_M);
set_store(store_mem);
- return value_masked;
+ if (!need_return)
+ return NULL;
+
+ ir_node *res_shr;
+ ir_node *count_res_shr = new_Const_long(mode_uint, base_bits - bitsize);
+ if (mode_is_signed(mode)) {
+ res_shr = new_d_Shrs(dbgi, shiftl, count_res_shr, mode);
+ } else {
+ res_shr = new_d_Shr(dbgi, shiftl, count_res_shr, mode);
+ }
+ return res_shr;
}
static ir_node *bitfield_extract_to_firm(const select_expression_t *expression,
ir_node *load = new_d_Load(dbgi, mem, addr, mode, cons_none);
ir_node *load_mem = new_d_Proj(dbgi, load, mode_M, pn_Load_M);
ir_node *load_res = new_d_Proj(dbgi, load, mode, pn_Load_res);
+ ir_mode *mode_uint = atomic_modes[ATOMIC_TYPE_UINT];
ir_mode *amode = mode;
/* optimisation, since shifting in modes < machine_size is usually
/* kill upper bits */
assert(expression->compound_entry->kind == ENTITY_COMPOUND_MEMBER);
- int bitoffset = entity->compound_member.bit_offset;
- int bitsize = entity->compound_member.bit_size;
+ unsigned bitoffset = entity->compound_member.bit_offset;
+ unsigned bitsize = entity->compound_member.bit_size;
unsigned shift_bitsl = amode_size - bitoffset - bitsize;
ir_tarval *tvl = new_tarval_from_long((long)shift_bitsl, mode_uint);
ir_node *countl = new_d_Const(dbgi, tvl);
bool set_volatile
= select->base.type->base.qualifiers & TYPE_QUALIFIER_VOLATILE;
value = bitfield_store_to_firm(dbgi, irentity, addr, value,
- set_volatile);
+ set_volatile, true);
return value;
}
}
dbg_info *dbgi = get_dbg_info(&expression->base.source_position);
type_t *type = skip_typeref(expression->base.type);
- if (expression->base.kind == EXPR_UNARY_TAKE_ADDRESS)
- return expression_to_addr(expression->value);
-
const expression_t *value = expression->value;
switch(expression->base.kind) {
+ case EXPR_UNARY_TAKE_ADDRESS:
+ return expression_to_addr(value);
+
case EXPR_UNARY_NEGATE: {
ir_node *value_node = expression_to_firm(value);
ir_mode *mode = get_ir_mode_arithmetic(type);
case EXPR_BINARY_SHIFTLEFT_ASSIGN:
case EXPR_BINARY_SHIFTRIGHT_ASSIGN:
mode = get_ir_mode_arithmetic(expression->base.type);
- right = create_conv(dbgi, right, mode_uint);
+ right = create_conv(dbgi, right, atomic_modes[ATOMIC_TYPE_UINT]);
break;
case EXPR_BINARY_SUB:
static void create_local_initializer(initializer_t *initializer, dbg_info *dbgi,
                                     ir_entity *entity, type_t *type);
+static ir_initializer_t *create_ir_initializer(
+		const initializer_t *initializer, type_t *type);
-static ir_node *compound_literal_to_firm(
-		const compound_literal_expression_t *expression)
+/**
+ * Create a private, read-only global entity holding the given constant
+ * initializer.  The ir_initializer is built on the const-code irg so that
+ * no code is emitted into the current function graph.
+ */
+static ir_entity *create_initializer_entity(dbg_info *dbgi,
+                                            initializer_t *initializer,
+                                            type_t *type)
{
-	type_t *type = expression->type;
+	/* create the ir_initializer */
+	ir_graph *const old_current_ir_graph = current_ir_graph;
+	current_ir_graph = get_const_code_irg();
+
+	ir_initializer_t *irinitializer = create_ir_initializer(initializer, type);
-	/* create an entity on the stack */
-	ir_type *frame_type = get_irg_frame_type(current_ir_graph);
+	assert(current_ir_graph == get_const_code_irg());
+	current_ir_graph = old_current_ir_graph;
-	ident *const id = id_unique("CompLit.%u");
-	ir_type *const irtype = get_ir_type(type);
-	dbg_info *const dbgi = get_dbg_info(&expression->base.source_position);
-	ir_entity *const entity = new_d_entity(frame_type, id, irtype, dbgi);
+	ident *const id = id_unique("initializer.%u");
+	ir_type *const irtype = get_ir_type(type);
+	ir_type *const global_type = get_glob_type();
+	ir_entity *const entity = new_d_entity(global_type, id, irtype, dbgi);
	set_entity_ld_ident(entity, id);
+	/* not visible outside this translation unit and never written to */
+	set_entity_visibility(entity, ir_visibility_private);
+	add_entity_linkage(entity, IR_LINKAGE_CONSTANT);
+	set_entity_initializer(entity, irinitializer);
+	return entity;
+}
-	/* create initialisation code */
+/**
+ * Compute the address of a compound literal.
+ *
+ * A constant initializer is materialised once as a read-only global entity
+ * and its address returned as a SymConst; otherwise an entity is created on
+ * the current stack frame and initialised with runtime code.
+ */
+static ir_node *compound_literal_addr(compound_literal_expression_t const *const expression)
+{
+	dbg_info *dbgi = get_dbg_info(&expression->base.source_position);
+	type_t *type = expression->type;
	initializer_t *initializer = expression->initializer;
-	create_local_initializer(initializer, dbgi, entity, type);
-	/* create a sel for the compound literal address */
-	ir_node *frame = get_irg_frame(current_ir_graph);
-	ir_node *sel = new_d_simpleSel(dbgi, new_NoMem(), frame, entity);
-	return sel;
+	if (is_constant_initializer(initializer) == EXPR_CLASS_CONSTANT) {
+		ir_entity *entity = create_initializer_entity(dbgi, initializer, type);
+		return create_symconst(dbgi, entity);
+	} else {
+		/* create an entity on the stack */
+		ident *const id = id_unique("CompLit.%u");
+		ir_type *const irtype = get_ir_type(type);
+		ir_type *frame_type = get_irg_frame_type(current_ir_graph);
+
+		ir_entity *const entity = new_d_entity(frame_type, id, irtype, dbgi);
+		set_entity_ld_ident(entity, id);
+
+		/* create initialisation code */
+		create_local_initializer(initializer, dbgi, entity, type);
+
+		/* create a sel for the compound literal address */
+		ir_node *frame = get_irg_frame(current_ir_graph);
+		ir_node *sel = new_d_simpleSel(dbgi, new_NoMem(), frame, entity);
+		return sel;
+	}
+}
+
+/* Evaluate a compound literal as an rvalue: take its address and load the
+ * value from there. */
+static ir_node *compound_literal_to_firm(compound_literal_expression_t const* const expr)
+{
+	dbg_info *const dbgi = get_dbg_info(&expr->base.source_position);
+	type_t *const type = expr->type;
+	ir_node *const addr = compound_literal_addr(expr);
+	return deref_address(dbgi, type, addr);
}
/**
static unsigned get_cparser_entity_alignment(const entity_t *entity)
{
switch(entity->kind) {
- DECLARATION_KIND_CASES
+ case DECLARATION_KIND_CASES:
return entity->declaration.alignment;
case ENTITY_STRUCT:
case ENTITY_UNION:
static ir_tarval *fold_constant_to_tarval(const expression_t *expression)
{
- assert(is_type_valid(skip_typeref(expression->base.type)));
+ assert(is_constant_expression(expression) == EXPR_CLASS_CONSTANT);
bool constant_folding_old = constant_folding;
constant_folding = true;
+ int old_optimize = get_optimize();
+ int old_constant_folding = get_opt_constant_folding();
+ set_optimize(1);
+ set_opt_constant_folding(1);
init_ir_types();
- assert(is_constant_expression(expression) == EXPR_CLASS_CONSTANT);
-
ir_graph *old_current_ir_graph = current_ir_graph;
current_ir_graph = get_const_code_irg();
ir_node *cnst = expression_to_firm(expression);
current_ir_graph = old_current_ir_graph;
+ set_optimize(old_optimize);
+ set_opt_constant_folding(old_constant_folding);
if (!is_Const(cnst)) {
panic("couldn't fold constant");
/* this function is only used in parser.c, but it relies on libfirm functionality */
bool constant_is_negative(const expression_t *expression)
{
-	assert(is_constant_expression(expression) == EXPR_CLASS_CONSTANT);
+	/* constant-ness is asserted inside fold_constant_to_tarval now */
	ir_tarval *tv = fold_constant_to_tarval(expression);
	return tarval_is_negative(tv);
}
long fold_constant_to_int(const expression_t *expression)
{
- if (expression->kind == EXPR_ERROR)
- return 0;
-
ir_tarval *tv = fold_constant_to_tarval(expression);
if (!tarval_is_long(tv)) {
panic("result of constant folding is not integer");
bool fold_constant_to_bool(const expression_t *expression)
{
-	if (expression->kind == EXPR_ERROR)
-		return false;
+	/* error expressions may no longer reach this point;
+	 * fold_constant_to_tarval asserts the expression is constant */
	ir_tarval *tv = fold_constant_to_tarval(expression);
	return !tarval_is_null(tv);
}
assert(entry->declaration.kind == DECLARATION_KIND_COMPOUND_MEMBER);
if (constant_folding) {
- ir_mode *mode = get_irn_mode(compound_addr);
- /* FIXME: here, we need an integer mode with the same number of bits as mode */
- ir_node *ofs = new_Const_long(mode_uint, entry->compound_member.offset);
+ ir_mode *mode = get_irn_mode(compound_addr);
+ ir_mode *mode_uint = get_reference_mode_unsigned_eq(mode);
+ ir_node *ofs = new_Const_long(mode_uint, entry->compound_member.offset);
return new_d_Add(dbgi, compound_addr, ofs, mode);
} else {
ir_entity *irentity = entry->compound_member.entity;
/* typedef/typeof should be skipped already */
case TYPE_TYPEDEF:
case TYPE_TYPEOF:
- case TYPE_INVALID:
case TYPE_ERROR:
break;
}
make_const:;
dbg_info *const dbgi = get_dbg_info(&expr->base.source_position);
- ir_tarval *const tv = new_tarval_from_long(tc, mode_int);
+ ir_mode *const mode = atomic_modes[ATOMIC_TYPE_INT];
+ ir_tarval *const tv = new_tarval_from_long(tc, mode);
return new_d_Const(dbgi, tv);
}
static ir_node *va_start_expression_to_firm(
	const va_start_expression_t *const expr)
{
-	ir_graph *const irg = current_ir_graph;
-	type_t *const type = current_function_entity->declaration.type;
-	ir_type *const method_type = get_ir_type(type);
-	size_t const n = get_method_n_params(method_type) - 1;
-	ir_type *frame_type = get_irg_frame_type(irg);
-	ir_type *param_irtype = get_method_param_type(method_type, n);
-	ir_entity *const param_ent =
-		new_parameter_entity(frame_type, n, param_irtype);
-	ir_node *const frame = get_irg_frame(irg);
-	dbg_info *const dbgi = get_dbg_info(&expr->base.source_position);
-	ir_node *const no_mem = new_NoMem();
-	ir_node *const arg_sel =
-		new_d_simpleSel(dbgi, no_mem, frame, param_ent);
-
-	type_t *const param_type = expr->parameter->base.type;
-	ir_node *const cnst = get_type_size_node(param_type);
-	ir_mode *const mode = get_irn_mode(cnst);
-	ir_node *const c1 = new_Const_long(mode, stack_param_align - 1);
-	ir_node *const c2 = new_d_Add(dbgi, cnst, c1, mode);
-	ir_node *const c3 = new_Const_long(mode, -(long)stack_param_align);
-	ir_node *const c4 = new_d_And(dbgi, c2, c3, mode);
-	ir_node *const add = new_d_Add(dbgi, arg_sel, c4, mode_P_data);
-	set_value_for_expression(expr->ap, add);
+	/* Lazily create a single frame entity standing for the variadic
+	 * parameters; it is cached in current_vararg_entity so every va_start
+	 * in the same function shares it. */
+	ir_entity *param_ent = current_vararg_entity;
+	if (param_ent == NULL) {
+		size_t const n = IR_VA_START_PARAMETER_NUMBER;
+		ir_type *const frame_type = get_irg_frame_type(current_ir_graph);
+		/* concrete type resolved by the backend — TODO confirm */
+		ir_type *const param_type = get_unknown_type();
+		param_ent = new_parameter_entity(frame_type, n, param_type);
+		current_vararg_entity = param_ent;
+	}
+
+	ir_node *const frame = get_irg_frame(current_ir_graph);
+	dbg_info *const dbgi = get_dbg_info(&expr->base.source_position);
+	ir_node *const no_mem = new_NoMem();
+	ir_node *const arg_sel = new_d_simpleSel(dbgi, no_mem, frame, param_ent);
+
+	/* initialise the va_list with the address of that entity */
+	set_value_for_expression(expr->ap, arg_sel);
	return NULL;
}
case EXPR_CALL:
return call_expression_to_firm(&expression->call);
case EXPR_COMPOUND_LITERAL:
- return compound_literal_to_firm(&expression->compound_literal);
+ return compound_literal_addr(&expression->compound_literal);
case EXPR_REFERENCE:
return reference_addr(&expression->reference);
case EXPR_SELECT:
return new_d_SymConst(dbgi, mode_P_code, value, symconst_addr_ent);
}
-static ir_node *error_to_firm(const expression_t *expression)
-{
- ir_mode *mode = get_ir_mode_arithmetic(expression->base.type);
- return new_Bad(mode);
-}
-
/**
* creates firm nodes for an expression. The difference between this function
* and expression_to_firm is, that this version might produce mode_b nodes
#endif
switch (expression->kind) {
- EXPR_LITERAL_CASES
+ case EXPR_LITERAL_CASES:
return literal_to_firm(&expression->literal);
case EXPR_STRING_LITERAL:
return string_to_firm(&expression->base.source_position, "str.%u",
return wide_string_literal_to_firm(&expression->string_literal);
case EXPR_REFERENCE:
return reference_expression_to_firm(&expression->reference);
- case EXPR_REFERENCE_ENUM_VALUE:
- return reference_expression_enum_value_to_firm(&expression->reference);
+ case EXPR_ENUM_CONSTANT:
+ return enum_constant_to_firm(&expression->reference);
case EXPR_CALL:
return call_expression_to_firm(&expression->call);
- EXPR_UNARY_CASES
+ case EXPR_UNARY_CASES:
return unary_expression_to_firm(&expression->unary);
- EXPR_BINARY_CASES
+ case EXPR_BINARY_CASES:
return binary_expression_to_firm(&expression->binary);
case EXPR_ARRAY_ACCESS:
return array_access_to_firm(&expression->array_access);
return label_address_to_firm(&expression->label_address);
case EXPR_ERROR:
- return error_to_firm(expression);
- case EXPR_INVALID:
break;
}
panic("invalid expression found");
return false;
reference_expression_t *ref = &function->reference;
if (ref->entity->kind != ENTITY_FUNCTION ||
- ref->entity->function.btk != bk_gnu_builtin_expect)
+ ref->entity->function.btk != BUILTIN_EXPECT)
return false;
return true;
size_t len;
if (is_type_compound(top_type)) {
- compound_t *compound = top_type->compound.compound;
- entity_t *entry = compound->members.entities;
+ compound_t *const compound = top_type->compound.compound;
+ entity_t *const entry = skip_unnamed_bitfields(compound->members.entities);
top->compound_entry = entry;
top->index = 0;
entity_t *entry = top->compound_entry;
top->index++;
- entry = entry->base.next;
+ entry = skip_unnamed_bitfields(entry->base.next);
top->compound_entry = entry;
if (entry != NULL) {
assert(entry->kind == ENTITY_COMPOUND_MEMBER);
}
-static ir_initializer_t *create_ir_initializer(
- const initializer_t *initializer, type_t *type);
-
static ir_initializer_t *create_ir_initializer_value(
const initializer_value_t *initializer)
{
assert(has_array_upper_bound(ent_type, 0));
long n = get_array_upper_bound_int(ent_type, 0);
for (long i = 0; i < n; ++i) {
+ ir_mode *mode_uint = atomic_modes[ATOMIC_TYPE_UINT];
ir_tarval *index_tv = new_tarval_from_long(i, mode_uint);
ir_node *cnst = new_d_Const(dbgi, index_tv);
ir_node *in[1] = { cnst };
/* is it a bitfield type? */
if (is_Primitive_type(ent_type) &&
get_primitive_base_type(ent_type) != NULL) {
- bitfield_store_to_firm(dbgi, entity, base_addr, node, false);
+ bitfield_store_to_firm(dbgi, entity, base_addr, node, false, false);
return;
}
/* is it a bitfield type? */
if (is_Primitive_type(ent_type) &&
get_primitive_base_type(ent_type) != NULL) {
- bitfield_store_to_firm(dbgi, entity, base_addr, node, false);
+ bitfield_store_to_firm(dbgi, entity, base_addr, node, false, false);
return;
}
/* is it a bitfield type? */
if (is_Primitive_type(ent_type) &&
get_primitive_base_type(ent_type) != NULL) {
- bitfield_store_to_firm(dbgi, entity, base_addr, cnst, false);
+ bitfield_store_to_firm(dbgi, entity, base_addr, cnst, false, false);
return;
}
ir_type *irtype;
ir_entity *sub_entity;
if (is_Array_type(type)) {
+ ir_mode *mode_uint = atomic_modes[ATOMIC_TYPE_UINT];
ir_tarval *index_tv = new_tarval_from_long(i, mode_uint);
ir_node *cnst = new_d_Const(dbgi, index_tv);
ir_node *in[1] = { cnst };
return;
}
- /* create the ir_initializer */
- ir_graph *const old_current_ir_graph = current_ir_graph;
- current_ir_graph = get_const_code_irg();
-
- ir_initializer_t *irinitializer = create_ir_initializer(initializer, type);
-
- assert(current_ir_graph == get_const_code_irg());
- current_ir_graph = old_current_ir_graph;
-
/* create a "template" entity which is copied to the entity on the stack */
- ident *const id = id_unique("initializer.%u");
- ir_type *const irtype = get_ir_type(type);
- ir_type *const global_type = get_glob_type();
- ir_entity *const init_entity = new_d_entity(global_type, id, irtype, dbgi);
- set_entity_ld_ident(init_entity, id);
-
- set_entity_visibility(init_entity, ir_visibility_private);
- add_entity_linkage(init_entity, IR_LINKAGE_CONSTANT);
-
- set_entity_initializer(init_entity, irinitializer);
-
+ ir_entity *const init_entity
+ = create_initializer_entity(dbgi, initializer, type);
ir_node *const src_addr = create_symconst(dbgi, init_entity);
+ ir_type *const irtype = get_ir_type(type);
ir_node *const copyb = new_d_CopyB(dbgi, memory, addr, src_addr, irtype);
ir_node *const copyb_mem = new_Proj(copyb, mode_M, pn_CopyB_M);
if (initializer->kind == INITIALIZER_VALUE) {
initializer_value_t *initializer_value = &initializer->value;
dbg_info *dbgi = get_dbg_info(&entity->base.source_position);
+ expression_t *value = initializer_value->value;
+ type_t *init_type = value->base.type;
+ type_t *skipped = skip_typeref(init_type);
+
+ if (!is_type_scalar(skipped)) {
+ /* skip convs */
+ while (value->kind == EXPR_UNARY_CAST)
+ value = value->unary.value;
+
+ if (value->kind != EXPR_COMPOUND_LITERAL)
+ panic("expected non-scalar initializer to be a compound literal");
+ initializer = value->compound_literal.initializer;
+ goto have_initializer;
+ }
- ir_node *value = expression_to_firm(initializer_value->value);
+ ir_node *node = expression_to_firm(initializer_value->value);
- type_t *init_type = initializer_value->value->base.type;
ir_mode *mode = get_ir_mode_storage(init_type);
- value = create_conv(dbgi, value, mode);
- value = do_strict_conv(dbgi, value);
+ node = create_conv(dbgi, node, mode);
+ node = do_strict_conv(dbgi, node);
if (declaration_kind == DECLARATION_KIND_LOCAL_VARIABLE) {
- set_value(entity->variable.v.value_number, value);
+ set_value(entity->variable.v.value_number, node);
} else {
assert(declaration_kind == DECLARATION_KIND_GLOBAL_VARIABLE);
&& get_entity_owner(irentity) != get_tls_type()) {
add_entity_linkage(irentity, IR_LINKAGE_CONSTANT);
}
- set_atomic_ent_value(irentity, value);
+ set_atomic_ent_value(irentity, node);
}
} else {
+have_initializer:
assert(declaration_kind == DECLARATION_KIND_LOCAL_VARIABLE_ENTITY ||
declaration_kind == DECLARATION_KIND_GLOBAL_VARIABLE);
false_block = new_immBlock();
create_condition_evaluation(statement->condition, true_block, false_block);
mature_immBlock(true_block);
- }
-
- /* Create the false statement.
- * Handle false before true, so if no false statement is present, then the
- * empty false block is reused as fallthrough block. */
- ir_node *fallthrough_block = NULL;
- if (statement->false_statement != NULL) {
- if (false_block != NULL) {
- mature_immBlock(false_block);
- }
- set_cur_block(false_block);
- statement_to_firm(statement->false_statement);
- if (currently_reachable()) {
- fallthrough_block = new_immBlock();
- add_immBlock_pred(fallthrough_block, new_Jmp());
- }
- } else {
- fallthrough_block = false_block;
+ mature_immBlock(false_block);
}
/* Create the true statement. */
set_cur_block(true_block);
statement_to_firm(statement->true_statement);
- if (currently_reachable()) {
- if (fallthrough_block == NULL) {
- fallthrough_block = new_immBlock();
- }
- add_immBlock_pred(fallthrough_block, new_Jmp());
+ ir_node *fallthrough_block = get_cur_block();
+
+ /* Create the false statement. */
+ set_cur_block(false_block);
+ if (statement->false_statement != NULL) {
+ statement_to_firm(statement->false_statement);
}
- /* Handle the block after the if-statement. */
- if (fallthrough_block != NULL) {
- mature_immBlock(fallthrough_block);
+ /* Handle the block after the if-statement. Minor simplification and
+ * optimisation: Reuse the false/true block as fallthrough block, if the
+ * true/false statement does not pass control to the fallthrough block, e.g.
+ * in the typical if (x) return; pattern. */
+ if (fallthrough_block) {
+ if (currently_reachable()) {
+ ir_node *const t_jump = new_r_Jmp(fallthrough_block);
+ ir_node *const f_jump = new_Jmp();
+ ir_node *const in[] = { t_jump, f_jump };
+ fallthrough_block = new_Block(2, in);
+ }
+ set_cur_block(fallthrough_block);
}
- set_cur_block(fallthrough_block);
}
/* Create a jump node which jumps into target_block, if the current block is
set_unreachable_now();
}
+/**
+ * Build a libfirm ir_switch_table from the case labels of a switch
+ * statement.  Projection number 0 is pn_Switch_default; the real cases get
+ * numbers 1..n_cases.  As a side effect each case label's pn field is set
+ * so case_label_to_firm can later create the matching Proj.
+ */
+static ir_switch_table *create_switch_table(const switch_statement_t *statement)
+{
+	/* determine number of cases */
+	size_t n_cases = 0;
+	for (case_label_statement_t *l = statement->first_case; l != NULL;
+	     l = l->next) {
+		/* default case */
+		if (l->expression == NULL)
+			continue;
+		if (l->is_empty_range)
+			continue;
+		++n_cases;
+	}
+
+	ir_switch_table *res = ir_new_switch_table(current_ir_graph, n_cases);
+	size_t i = 0;
+	for (case_label_statement_t *l = statement->first_case; l != NULL;
+	     l = l->next) {
+		if (l->expression == NULL) {
+			l->pn = pn_Switch_default;
+			continue;
+		}
+		if (l->is_empty_range)
+			continue;
+		/* GNU case ranges: min..max covered by a single table entry */
+		ir_tarval *min = fold_constant_to_tarval(l->expression);
+		ir_tarval *max = min;
+		long pn = (long) i+1;
+		if (l->end_range != NULL)
+			max = fold_constant_to_tarval(l->end_range);
+		ir_switch_table_set(res, i++, min, max, pn);
+		l->pn = pn;
+	}
+	return res;
+}
+
static void switch_statement_to_firm(switch_statement_t *statement)
{
	ir_node *first_block = NULL;
	dbg_info *dbgi = get_dbg_info(&statement->base.source_position);
-	ir_node *cond = NULL;
+	ir_node *switch_node = NULL;
	if (currently_reachable()) {
		ir_node *expression = expression_to_firm(statement->expression);
-		cond = new_d_Cond(dbgi, expression);
-		first_block = get_cur_block();
+		ir_switch_table *table = create_switch_table(statement);
+		/* one Switch output per table entry plus the default output */
+		unsigned n_outs = (unsigned)ir_switch_table_get_n_entries(table) + 1;
+
+		switch_node = new_d_Switch(dbgi, expression, n_outs, table);
+		first_block = get_cur_block();
	}
	set_unreachable_now();
-	ir_node *const old_switch_cond = current_switch_cond;
+	/* save/restore the switch context so nested switches work */
+	ir_node *const old_switch = current_switch;
	ir_node *const old_break_label = break_label;
	const bool old_saw_default_label = saw_default_label;
	saw_default_label = false;
-	current_switch_cond = cond;
+	current_switch = switch_node;
	break_label = NULL;
-	switch_statement_t *const old_switch = current_switch;
-	current_switch = statement;
-
-	/* determine a free number for the default label */
-	unsigned long num_cases = 0;
-	long default_proj_nr = 0;
-	for (case_label_statement_t *l = statement->first_case; l != NULL; l = l->next) {
-		if (l->expression == NULL) {
-			/* default case */
-			continue;
-		}
-		if (l->last_case >= l->first_case)
-			num_cases += l->last_case - l->first_case + 1;
-		if (l->last_case > default_proj_nr)
-			default_proj_nr = l->last_case;
-	}
-
-	if (default_proj_nr == LONG_MAX) {
-		/* Bad: an overflow will occur, we cannot be sure that the
-		 * maximum + 1 is a free number. Scan the values a second
-		 * time to find a free number.
-		 */
-		unsigned char *bits = xmalloc((num_cases + 7) >> 3);
-
-		memset(bits, 0, (num_cases + 7) >> 3);
-		for (case_label_statement_t *l = statement->first_case; l != NULL; l = l->next) {
-			if (l->expression == NULL) {
-				/* default case */
-				continue;
-			}
-			unsigned long start = l->first_case > 0 ? (unsigned long)l->first_case : 0;
-			if (start < num_cases && l->last_case >= 0) {
-				unsigned long end = (unsigned long)l->last_case < num_cases ?
-					(unsigned long)l->last_case : num_cases - 1;
-				for (unsigned long cns = start; cns <= end; ++cns) {
-					bits[cns >> 3] |= (1 << (cns & 7));
-				}
-			}
-		}
-		/* We look at the first num_cases constants:
-		 * Either they are dense, so we took the last (num_cases)
-		 * one, or they are not dense, so we will find one free
-		 * there...
-		 */
-		unsigned long i;
-		for (i = 0; i < num_cases; ++i)
-			if ((bits[i >> 3] & (1 << (i & 7))) == 0)
-				break;
-
-		free(bits);
-		default_proj_nr = i;
-	} else {
-		++default_proj_nr;
-	}
-	statement->default_proj_nr = default_proj_nr;
-	/* safety check: cond might already be folded to a Bad */
-	if (cond != NULL && is_Cond(cond)) {
-		set_Cond_default_proj(cond, default_proj_nr);
-	}
	statement_to_firm(statement->body);
	if (!saw_default_label && first_block != NULL) {
		set_cur_block(first_block);
-		ir_node *const proj = new_d_Proj(dbgi, cond, mode_X, default_proj_nr);
+		/* no default label: route the default output to the break label */
+		ir_node *proj = new_d_Proj(dbgi, switch_node, mode_X, pn_Switch_default);
		add_immBlock_pred(get_break_label(), proj);
	}
}
	set_cur_block(break_label);
-	assert(current_switch_cond == cond);
-	current_switch = old_switch;
-	current_switch_cond = old_switch_cond;
-	break_label = old_break_label;
-	saw_default_label = old_saw_default_label;
+	assert(current_switch == switch_node);
+	current_switch = old_switch;
+	break_label = old_break_label;
+	saw_default_label = old_saw_default_label;
}
static void case_label_to_firm(const case_label_statement_t *statement)
	if (statement->is_empty_range)
		return;
-	ir_node *block = new_immBlock();
-	/* Fallthrough from previous case */
-	jump_if_reachable(block);
+	/* without a Switch node (the switch was unreachable) there is nothing
+	 * to hook this label up to */
+	if (current_switch != NULL) {
+		ir_node *block = new_immBlock();
+		/* Fallthrough from previous case */
+		jump_if_reachable(block);
-	if (current_switch_cond != NULL) {
-		set_cur_block(get_nodes_block(current_switch_cond));
-		dbg_info *const dbgi = get_dbg_info(&statement->base.source_position);
-		if (statement->expression != NULL) {
-			long pn = statement->first_case;
-			long end_pn = statement->last_case;
-			assert(pn <= end_pn);
-			/* create jumps for all cases in the given range */
-			do {
-				ir_node *const proj = new_d_Proj(dbgi, current_switch_cond, mode_X, pn);
-				add_immBlock_pred(block, proj);
-			} while (pn++ < end_pn);
-		} else {
+		/* statement->pn was assigned by create_switch_table */
+		ir_node *const proj = new_Proj(current_switch, mode_X, statement->pn);
+		add_immBlock_pred(block, proj);
+		if (statement->expression == NULL)
			saw_default_label = true;
-			ir_node *const proj = new_d_Proj(dbgi, current_switch_cond, mode_X,
-			                                 current_switch->default_proj_nr);
-			add_immBlock_pred(block, proj);
-		}
-	}
-	mature_immBlock(block);
-	set_cur_block(block);
+		mature_immBlock(block);
+		set_cur_block(block);
+	}
	statement_to_firm(statement->statement);
}
statement_to_firm(statement->statement);
}
-static void goto_to_firm(const goto_statement_t *statement)
+/* Lower a GNU computed goto (goto *expr).  The IJmp is queued on ijmp_list;
+ * its possible target blocks are connected later when all labels are known. */
+static void computed_goto_to_firm(computed_goto_statement_t const *const statement)
{
	if (!currently_reachable())
		return;
-	if (statement->expression) {
-		ir_node *irn = expression_to_firm(statement->expression);
-		dbg_info *dbgi = get_dbg_info(&statement->base.source_position);
-		ir_node *ijmp = new_d_IJmp(dbgi, irn);
+	ir_node *const irn = expression_to_firm(statement->expression);
+	dbg_info *const dbgi = get_dbg_info(&statement->base.source_position);
+	ir_node *const ijmp = new_d_IJmp(dbgi, irn);
+
+	set_irn_link(ijmp, ijmp_list);
+	ijmp_list = ijmp;
-	set_irn_link(ijmp, ijmp_list);
-	ijmp_list = ijmp;
-	} else {
-		ir_node *block = get_label_block(statement->label);
-		ir_node *jmp = new_Jmp();
-		add_immBlock_pred(block, jmp);
-	}
	set_unreachable_now();
}
continue;
}
- if (strcmp(clobber_str, "memory") == 0) {
+ if (streq(clobber_str, "memory")) {
needs_memory = true;
continue;
}
ir_asm_constraint constraint;
constraint.pos = pos;
constraint.constraint = new_id_from_str(constraints);
- constraint.mode = NULL;
+ constraint.mode = mode_M;
tmp_in_constraints[in_size] = constraint;
ins[in_size] = expression_to_addr(expr);
/**
* Transform a statement.
*/
-static void statement_to_firm(statement_t *statement)
+static void statement_to_firm(statement_t *const stmt)
{
#ifndef NDEBUG
+	/* every statement must be transformed exactly once */
-	assert(!statement->base.transformed);
-	statement->base.transformed = true;
+	assert(!stmt->base.transformed);
+	stmt->base.transformed = true;
#endif
-	switch (statement->kind) {
-	case STATEMENT_INVALID:
-		panic("invalid statement found");
-	case STATEMENT_EMPTY:
-		/* nothing */
-		return;
-	case STATEMENT_COMPOUND:
-		compound_statement_to_firm(&statement->compound);
-		return;
-	case STATEMENT_RETURN:
-		return_statement_to_firm(&statement->returns);
-		return;
-	case STATEMENT_EXPRESSION:
-		expression_statement_to_firm(&statement->expression);
-		return;
-	case STATEMENT_IF:
-		if_statement_to_firm(&statement->ifs);
-		return;
-	case STATEMENT_WHILE:
-		while_statement_to_firm(&statement->whiles);
-		return;
-	case STATEMENT_DO_WHILE:
-		do_while_statement_to_firm(&statement->do_while);
-		return;
-	case STATEMENT_DECLARATION:
-		declaration_statement_to_firm(&statement->declaration);
-		return;
-	case STATEMENT_BREAK:
-		create_jump_statement(statement, get_break_label());
-		return;
-	case STATEMENT_CONTINUE:
-		create_jump_statement(statement, continue_label);
-		return;
-	case STATEMENT_SWITCH:
-		switch_statement_to_firm(&statement->switchs);
-		return;
-	case STATEMENT_CASE_LABEL:
-		case_label_to_firm(&statement->case_label);
-		return;
-	case STATEMENT_FOR:
-		for_statement_to_firm(&statement->fors);
-		return;
-	case STATEMENT_LABEL:
-		label_to_firm(&statement->label);
-		return;
-	case STATEMENT_GOTO:
-		goto_to_firm(&statement->gotos);
-		return;
-	case STATEMENT_ASM:
-		asm_statement_to_firm(&statement->asms);
-		return;
-	case STATEMENT_MS_TRY:
-		ms_try_statement_to_firm(&statement->ms_try);
-		return;
-	case STATEMENT_LEAVE:
-		leave_statement_to_firm(&statement->leave);
-		return;
+	/* one-line-per-case table, sorted alphabetically by statement kind */
+	switch (stmt->kind) {
+	case STATEMENT_ASM:           asm_statement_to_firm(        &stmt->asms);          return;
+	case STATEMENT_CASE_LABEL:    case_label_to_firm(           &stmt->case_label);    return;
+	case STATEMENT_COMPOUND:      compound_statement_to_firm(   &stmt->compound);      return;
+	case STATEMENT_COMPUTED_GOTO: computed_goto_to_firm(        &stmt->computed_goto); return;
+	case STATEMENT_DECLARATION:   declaration_statement_to_firm(&stmt->declaration);   return;
+	case STATEMENT_DO_WHILE:      do_while_statement_to_firm(   &stmt->do_while);      return;
+	case STATEMENT_EMPTY:         /* nothing */                                        return;
+	case STATEMENT_EXPRESSION:    expression_statement_to_firm( &stmt->expression);    return;
+	case STATEMENT_FOR:           for_statement_to_firm(        &stmt->fors);          return;
+	case STATEMENT_IF:            if_statement_to_firm(         &stmt->ifs);           return;
+	case STATEMENT_LABEL:         label_to_firm(                &stmt->label);         return;
+	case STATEMENT_LEAVE:         leave_statement_to_firm(      &stmt->leave);         return;
+	case STATEMENT_MS_TRY:        ms_try_statement_to_firm(     &stmt->ms_try);        return;
+	case STATEMENT_RETURN:        return_statement_to_firm(     &stmt->returns);       return;
+	case STATEMENT_SWITCH:        switch_statement_to_firm(     &stmt->switchs);       return;
+	case STATEMENT_WHILE:         while_statement_to_firm(      &stmt->whiles);        return;
+
+	case STATEMENT_BREAK:         create_jump_statement(stmt, get_break_label()); return;
+	case STATEMENT_CONTINUE:      create_jump_statement(stmt, continue_label);    return;
+	case STATEMENT_GOTO:          create_jump_statement(stmt, get_label_block(stmt->gotos.label)); return;
+
+	case STATEMENT_ERROR: panic("error statement found");
	}
	panic("statement not implemented");
}
* @param irg the IR-graph
* @param dec_modifiers additional modifiers
*/
-static void handle_decl_modifier_irg(ir_graph_ptr irg,
+static void handle_decl_modifier_irg(ir_graph *irg,
decl_modifiers_t decl_modifiers)
{
- if (decl_modifiers & DM_RETURNS_TWICE) {
- /* TRUE if the declaration includes __attribute__((returns_twice)) */
- add_irg_additional_properties(irg, mtp_property_returns_twice);
- }
- if (decl_modifiers & DM_NORETURN) {
- /* TRUE if the declaration includes the Microsoft
- __declspec(noreturn) specifier. */
- add_irg_additional_properties(irg, mtp_property_noreturn);
- }
- if (decl_modifiers & DM_NOTHROW) {
- /* TRUE if the declaration includes the Microsoft
- __declspec(nothrow) specifier. */
- add_irg_additional_properties(irg, mtp_property_nothrow);
- }
if (decl_modifiers & DM_NAKED) {
/* TRUE if the declaration includes the Microsoft
__declspec(naked) specifier. */
if (entity->function.statement == NULL)
return;
- if (is_main(entity) && enable_main_collect2_hack) {
- prepare_main_collect2(entity);
- }
-
inner_functions = NULL;
current_trampolines = NULL;
ir_graph *old_current_function = current_function;
current_function = irg;
+ ir_entity *const old_current_vararg_entity = current_vararg_entity;
+ current_vararg_entity = NULL;
+
set_irg_fp_model(irg, firm_fp_model);
tarval_enable_fp_ops(1);
set_irn_dbg_info(get_irg_start_block(irg),
get_entity_dbg_info(function_entity));
- ir_node *first_block = get_cur_block();
-
/* set inline flags */
if (entity->function.is_inline)
set_irg_inline_property(irg, irg_inline_recomended);
add_immBlock_pred(end_block, ret);
}
- bool has_computed_gotos = false;
for (int i = ARR_LEN(all_labels) - 1; i >= 0; --i) {
label_t *label = all_labels[i];
if (label->address_taken) {
gen_ijmp_branches(label->block);
- has_computed_gotos = true;
}
mature_immBlock(label->block);
}
- if (has_computed_gotos) {
- /* if we have computed goto's in the function, we cannot inline it */
- if (get_irg_inline_property(irg) >= irg_inline_recomended) {
- source_position_t const *const pos = &entity->base.source_position;
- warningf(WARN_OTHER, pos, "'%N' can never be inlined because it contains a computed goto", entity);
- }
- set_irg_inline_property(irg, irg_inline_forbidden);
- }
DEL_ARR_F(all_labels);
all_labels = NULL;
- mature_immBlock(first_block);
- mature_immBlock(end_block);
-
irg_finalize_cons(irg);
/* finalize the frame type */
set_type_alignment_bytes(frame_type, align_all);
irg_verify(irg, VERIFY_ENFORCE_SSA);
- current_function = old_current_function;
+ current_vararg_entity = old_current_vararg_entity;
+ current_function = old_current_function;
if (current_trampolines != NULL) {
DEL_ARR_F(current_trampolines);
continue;
if (entity->kind == ENTITY_FUNCTION) {
- if (entity->function.btk != bk_none) {
+ if (entity->function.btk != BUILTIN_NONE) {
/* builtins have no representation */
continue;
}
continue;
if (entity->kind == ENTITY_FUNCTION) {
- if (entity->function.btk != bk_none) {
+ if (entity->function.btk != BUILTIN_NONE) {
/* builtins have no representation */
continue;
}
/* just to be sure */
continue_label = NULL;
break_label = NULL;
- current_switch_cond = NULL;
+ current_switch = NULL;
current_translation_unit = unit;
init_ir_types();