char *string = obstack_finish(&symbol_obstack);
symbol_t *symbol = symbol_table_insert(string);
- lexer_token.type = symbol->ID;
- lexer_token.symbol = symbol;
+ lexer_token.kind = symbol->ID;
+ lexer_token.identifier.symbol = symbol;
if (symbol->string != string) {
obstack_free(&symbol_obstack, string);
}
}
+static string_t identify_string(char *string, size_t len)
+{
+ /* TODO hash */
+#if 0
+ const char *result = strset_insert(&stringset, string);
+ if (result != string) {
+ obstack_free(&symbol_obstack, string);
+ }
+#else
+ const char *result = string;
+#endif
+ return (string_t) {result, len};
+}
+
/**
* parse suffixes like 'LU' or 'f' after numbers
*/
}
finish_suffix:
if (obstack_object_size(&symbol_obstack) == 0) {
- lexer_token.symbol = NULL;
+ lexer_token.number.suffix.begin = NULL;
+ lexer_token.number.suffix.size = 0;
return;
}
obstack_1grow(&symbol_obstack, '\0');
+ size_t size = obstack_object_size(&symbol_obstack) - 1;
char *string = obstack_finish(&symbol_obstack);
- symbol_t *symbol = symbol_table_insert(string);
-
- if (symbol->string != string) {
- obstack_free(&symbol_obstack, string);
- }
- lexer_token.symbol = symbol;
-}
-static string_t identify_string(char *string, size_t len)
-{
- /* TODO hash */
-#if 0
- const char *result = strset_insert(&stringset, concat);
- if (result != concat) {
- obstack_free(&symbol_obstack, concat);
- }
-#else
- const char *result = string;
-#endif
- return (string_t) {result, len};
+ lexer_token.number.suffix = identify_string(string, size);
}
/**
next_char();
}
} else if (is_float) {
- errorf(&lexer_token.source_position,
+ errorf(&lexer_token.base.source_position,
"hexadecimal floatingpoint constant requires an exponent");
}
obstack_1grow(&symbol_obstack, '\0');
size_t size = obstack_object_size(&symbol_obstack) - 1;
char *string = obstack_finish(&symbol_obstack);
- lexer_token.literal = identify_string(string, size);
+ lexer_token.number.number = identify_string(string, size);
- lexer_token.type =
+ lexer_token.kind =
is_float ? T_FLOATINGPOINT_HEXADECIMAL : T_INTEGER_HEXADECIMAL;
if (!has_digits) {
- errorf(&lexer_token.source_position, "invalid number literal '0x%S'",
- &lexer_token.literal);
- lexer_token.literal.begin = "0";
- lexer_token.literal.size = 1;
+ errorf(&lexer_token.base.source_position,
+ "invalid number literal '0x%S'", &lexer_token.number.number);
+ lexer_token.number.number.begin = "0";
+ lexer_token.number.number.size = 1;
}
parse_number_suffix();
obstack_1grow(&symbol_obstack, '\0');
size_t size = obstack_object_size(&symbol_obstack) - 1;
char *string = obstack_finish(&symbol_obstack);
- lexer_token.literal = identify_string(string, size);
+ lexer_token.number.number = identify_string(string, size);
/* is it an octal number? */
if (is_float) {
- lexer_token.type = T_FLOATINGPOINT;
+ lexer_token.kind = T_FLOATINGPOINT;
} else if (string[0] == '0') {
- lexer_token.type = T_INTEGER_OCTAL;
+ lexer_token.kind = T_INTEGER_OCTAL;
/* check for invalid octal digits */
for (size_t i= 0; i < size; ++i) {
char t = string[i];
if (t >= '8')
- errorf(&lexer_token.source_position,
+ errorf(&lexer_token.base.source_position,
"invalid digit '%c' in octal number", t);
}
} else {
- lexer_token.type = T_INTEGER;
+ lexer_token.kind = T_INTEGER;
}
if (!has_digits) {
- errorf(&lexer_token.source_position, "invalid number literal '%S'",
- &lexer_token.literal);
+ errorf(&lexer_token.base.source_position, "invalid number literal '%S'",
+ &lexer_token.number.number);
}
parse_number_suffix();
}
case EOF: {
- errorf(&lexer_token.source_position, "string has no end");
- lexer_token.type = T_ERROR;
+ errorf(&lexer_token.base.source_position, "string has no end");
+ lexer_token.kind = T_ERROR;
return;
}
const size_t size = (size_t)obstack_object_size(&symbol_obstack);
char *string = obstack_finish(&symbol_obstack);
- lexer_token.type = T_STRING_LITERAL;
- lexer_token.literal = identify_string(string, size);
+ lexer_token.kind = T_STRING_LITERAL;
+ lexer_token.string.string = identify_string(string, size);
}
/**
goto end_of_wide_char_constant;
case EOF: {
- errorf(&lexer_token.source_position, "EOF while parsing character constant");
- lexer_token.type = T_ERROR;
+ errorf(&lexer_token.base.source_position,
+ "EOF while parsing character constant");
+ lexer_token.kind = T_ERROR;
return;
}
size_t size = (size_t) obstack_object_size(&symbol_obstack) - 1;
char *string = obstack_finish(&symbol_obstack);
- lexer_token.type = T_WIDE_CHARACTER_CONSTANT;
- lexer_token.literal = identify_string(string, size);
+ lexer_token.kind = T_WIDE_CHARACTER_CONSTANT;
+ lexer_token.string.string = identify_string(string, size);
if (size == 0) {
- errorf(&lexer_token.source_position, "empty character constant");
+ errorf(&lexer_token.base.source_position, "empty character constant");
}
}
static void parse_wide_string_literal(void)
{
parse_string_literal();
- if (lexer_token.type == T_STRING_LITERAL)
- lexer_token.type = T_WIDE_STRING_LITERAL;
+ if (lexer_token.kind == T_STRING_LITERAL)
+ lexer_token.kind = T_WIDE_STRING_LITERAL;
}
/**
goto end_of_char_constant;
case EOF: {
- errorf(&lexer_token.source_position, "EOF while parsing character constant");
- lexer_token.type = T_ERROR;
+ errorf(&lexer_token.base.source_position,
+ "EOF while parsing character constant");
+ lexer_token.kind = T_ERROR;
return;
}
const size_t size = (size_t)obstack_object_size(&symbol_obstack)-1;
char *const string = obstack_finish(&symbol_obstack);
- lexer_token.type = T_CHARACTER_CONSTANT;
- lexer_token.literal = identify_string(string, size);
+ lexer_token.kind = T_CHARACTER_CONSTANT;
+ lexer_token.string.string = identify_string(string, size);
if (size == 0) {
- errorf(&lexer_token.source_position, "empty character constant");
+ errorf(&lexer_token.base.source_position, "empty character constant");
}
}
MATCH_NEWLINE(break;)
case EOF: {
- errorf(&lexer_token.source_position, "at end of file while looking for comment end");
+ errorf(&lexer_token.base.source_position,
+ "at end of file while looking for comment end");
return;
}
*/
static void eat_until_newline(void)
{
- while (pp_token.type != '\n' && pp_token.type != T_EOF) {
+ while (pp_token.kind != '\n' && pp_token.kind != T_EOF) {
next_pp_token();
}
}
static void define_directive(void)
{
lexer_next_preprocessing_token();
- if (lexer_token.type != T_IDENTIFIER) {
+ if (lexer_token.kind != T_IDENTIFIER) {
parse_error("expected identifier after #define\n");
eat_until_newline();
}
*/
static void parse_line_directive(void)
{
- if (pp_token.type != T_INTEGER) {
+ if (pp_token.kind != T_INTEGER) {
parse_error("expected integer");
} else {
/* use offset -1 as this is about the next line */
- lexer_pos.lineno = atoi(pp_token.literal.begin) - 1;
+ lexer_pos.lineno = atoi(pp_token.number.number.begin) - 1;
next_pp_token();
}
- if (pp_token.type == T_STRING_LITERAL) {
- lexer_pos.input_name = pp_token.literal.begin;
+ if (pp_token.kind == T_STRING_LITERAL) {
+ lexer_pos.input_name = pp_token.string.string.begin;
next_pp_token();
}
bool unknown_pragma = true;
next_pp_token();
- if (pp_token.symbol->pp_ID == TP_STDC) {
+ if (pp_token.kind != T_IDENTIFIER) {
+ warningf(WARN_UNKNOWN_PRAGMAS, &pp_token.base.source_position,
+ "expected identifier after #pragma");
+ eat_until_newline();
+ return;
+ }
+
+ symbol_t *symbol = pp_token.identifier.symbol;
+ if (symbol->pp_ID == TP_STDC) {
stdc_pragma_kind_t kind = STDC_UNKNOWN;
/* a STDC pragma */
if (c_mode & _C99) {
next_pp_token();
- switch (pp_token.symbol->pp_ID) {
+ switch (pp_token.identifier.symbol->pp_ID) {
case TP_FP_CONTRACT:
kind = STDC_FP_CONTRACT;
break;
if (kind != STDC_UNKNOWN) {
stdc_pragma_value_kind_t value = STDC_VALUE_UNKNOWN;
next_pp_token();
- switch (pp_token.symbol->pp_ID) {
+ switch (pp_token.identifier.symbol->pp_ID) {
case TP_ON:
value = STDC_VALUE_ON;
break;
if (value != STDC_VALUE_UNKNOWN) {
unknown_pragma = false;
} else {
- errorf(&pp_token.source_position, "bad STDC pragma argument");
+ errorf(&pp_token.base.source_position,
+ "bad STDC pragma argument");
}
}
}
}
eat_until_newline();
if (unknown_pragma) {
- warningf(WARN_UNKNOWN_PRAGMAS, &pp_token.source_position, "encountered unknown #pragma");
+ warningf(WARN_UNKNOWN_PRAGMAS, &pp_token.base.source_position,
+ "encountered unknown #pragma");
}
}
*/
static void parse_preprocessor_identifier(void)
{
- assert(pp_token.type == T_IDENTIFIER);
- symbol_t *symbol = pp_token.symbol;
+ assert(pp_token.kind == T_IDENTIFIER);
+ symbol_t *symbol = pp_token.identifier.symbol;
switch (symbol->pp_ID) {
case TP_include:
{
next_pp_token();
- switch (pp_token.type) {
+ switch (pp_token.kind) {
case T_IDENTIFIER:
parse_preprocessor_identifier();
break;
#define MAYBE(ch, set_type) \
case ch: \
next_char(); \
- lexer_token.type = set_type; \
+ lexer_token.kind = set_type; \
return;
/* must use this as last thing */
case ch: \
if (c_mode & mode) { \
next_char(); \
- lexer_token.type = set_type; \
+ lexer_token.kind = set_type; \
return; \
} \
/* fallthrough */
#define ELSE(set_type) \
ELSE_CODE( \
- lexer_token.type = set_type; \
+ lexer_token.kind = set_type; \
)
void lexer_next_preprocessing_token(void)
{
while (true) {
- lexer_token.source_position = lexer_pos;
+ lexer_token.base.source_position = lexer_pos;
switch (c) {
case ' ':
break;
MATCH_NEWLINE(
- lexer_token.type = '\n';
+ lexer_token.kind = '\n';
return;
)
SYMBOL_CHARS
parse_symbol();
/* might be a wide string ( L"string" ) */
- if (lexer_token.symbol == symbol_L) {
+ if (lexer_token.identifier.symbol == symbol_L) {
switch (c) {
case '"': parse_wide_string_literal(); break;
case '\'': parse_wide_character_constant(); break;
ELSE_CODE(
put_back(c);
c = '.';
- lexer_token.type = '.';
+ lexer_token.kind = '.';
)
ELSE('.')
case '&':
ELSE_CODE(
put_back(c);
c = '%';
- lexer_token.type = '#';
+ lexer_token.kind = '#';
)
ELSE('#')
ELSE('%')
case ';':
case ',':
case '\\':
- lexer_token.type = c;
+ lexer_token.kind = c;
next_char();
return;
case EOF:
- lexer_token.type = T_EOF;
+ lexer_token.kind = T_EOF;
return;
default:
dollar_sign:
errorf(&lexer_pos, "unknown character '%c' found", c);
next_char();
- lexer_token.type = T_ERROR;
+ lexer_token.kind = T_ERROR;
return;
}
}
{
lexer_next_preprocessing_token();
- while (lexer_token.type == '\n') {
+ while (lexer_token.kind == '\n') {
newline_found:
lexer_next_preprocessing_token();
}
- if (lexer_token.type == '#') {
+ if (lexer_token.kind == '#') {
parse_preprocessor_directive();
goto newline_found;
}
static unsigned short token_anchor_set[T_LAST_TOKEN];
/** The current source position. */
-#define HERE (&token.source_position)
+#define HERE (&token.base.source_position)
/** true if we are in GCC mode. */
#define GNU_MODE ((c_mode & _GNUC) || in_gcc_extension)
res->base.kind = kind;
res->base.parent = current_parent;
- res->base.source_position = token.source_position;
+ res->base.source_position = token.base.source_position;
return res;
}
res->base.kind = kind;
res->base.type = type_error_type;
- res->base.source_position = token.source_position;
+ res->base.source_position = token.base.source_position;
return res;
}
static inline bool next_if(int const type)
{
- if (token.type == type) {
+ if (token.kind == type) {
next_token();
return true;
} else {
/**
* Adds a token type to the token type anchor set (a multi-set).
*/
-static void add_anchor_token(int token_type)
+static void add_anchor_token(int token_kind)
{
- assert(0 <= token_type && token_type < T_LAST_TOKEN);
- ++token_anchor_set[token_type];
+ assert(0 <= token_kind && token_kind < T_LAST_TOKEN);
+ ++token_anchor_set[token_kind];
}
/**
* Set the number of tokens types of the given type
* to zero and return the old count.
*/
-static int save_and_reset_anchor_state(int token_type)
+static int save_and_reset_anchor_state(int token_kind)
{
- assert(0 <= token_type && token_type < T_LAST_TOKEN);
- int count = token_anchor_set[token_type];
- token_anchor_set[token_type] = 0;
+ assert(0 <= token_kind && token_kind < T_LAST_TOKEN);
+ int count = token_anchor_set[token_kind];
+ token_anchor_set[token_kind] = 0;
return count;
}
/**
* Restore the number of token types to the given count.
*/
-static void restore_anchor_state(int token_type, int count)
+static void restore_anchor_state(int token_kind, int count)
{
- assert(0 <= token_type && token_type < T_LAST_TOKEN);
- token_anchor_set[token_type] = count;
+ assert(0 <= token_kind && token_kind < T_LAST_TOKEN);
+ token_anchor_set[token_kind] = count;
}
/**
* Remove a token type from the token type anchor set (a multi-set).
*/
-static void rem_anchor_token(int token_type)
+static void rem_anchor_token(int token_kind)
{
- assert(0 <= token_type && token_type < T_LAST_TOKEN);
- assert(token_anchor_set[token_type] != 0);
- --token_anchor_set[token_type];
+ assert(0 <= token_kind && token_kind < T_LAST_TOKEN);
+ assert(token_anchor_set[token_kind] != 0);
+ --token_anchor_set[token_kind];
}
/**
*/
static bool at_anchor(void)
{
- if (token.type < 0)
+ if (token.kind < 0)
return false;
- return token_anchor_set[token.type];
+ return token_anchor_set[token.kind];
}
/**
unsigned parenthesis_count = 0;
unsigned brace_count = 0;
unsigned bracket_count = 0;
- while (token.type != end_token ||
+ while (token.kind != end_token ||
parenthesis_count != 0 ||
brace_count != 0 ||
bracket_count != 0) {
- switch (token.type) {
+ switch (token.kind) {
case T_EOF: return;
case '(': ++parenthesis_count; break;
case '{': ++brace_count; break;
if (bracket_count > 0)
--bracket_count;
check_stop:
- if (token.type == end_token &&
+ if (token.kind == end_token &&
parenthesis_count == 0 &&
brace_count == 0 &&
bracket_count == 0)
*/
static void eat_until_anchor(void)
{
- while (token_anchor_set[token.type] == 0) {
- if (token.type == '(' || token.type == '{' || token.type == '[')
- eat_until_matching_token(token.type);
+ while (token_anchor_set[token.kind] == 0) {
+ if (token.kind == '(' || token.kind == '{' || token.kind == '[')
+ eat_until_matching_token(token.kind);
next_token();
}
}
next_if('}');
}
-#define eat(token_type) (assert(token.type == (token_type)), next_token())
+#define eat(token_kind) (assert(token.kind == (token_kind)), next_token())
/**
* Report a parse error because an expected token was not found.
*/
#define expect(expected, error_label) \
do { \
- if (UNLIKELY(token.type != (expected))) { \
+ if (UNLIKELY(token.kind != (expected))) { \
parse_error_expected(NULL, (expected), NULL); \
add_anchor_token(expected); \
eat_until_anchor(); \
rem_anchor_token(expected); \
- if (token.type != (expected)) \
+ if (token.kind != (expected)) \
goto error_label; \
} \
next_token(); \
static string_t parse_string_literals(void)
{
- assert(token.type == T_STRING_LITERAL);
- string_t result = token.literal;
+ assert(token.kind == T_STRING_LITERAL);
+ string_t result = token.string.string;
next_token();
- while (token.type == T_STRING_LITERAL) {
- warn_string_concat(&token.source_position);
- result = concat_strings(&result, &token.literal);
+ while (token.kind == T_STRING_LITERAL) {
+ warn_string_concat(&token.base.source_position);
+ result = concat_strings(&result, &token.string.string);
next_token();
}
{
attribute_argument_t *first = NULL;
attribute_argument_t **anchor = &first;
- if (token.type != ')') do {
+ if (token.kind != ')') do {
attribute_argument_t *argument = allocate_ast_zero(sizeof(*argument));
/* is it an identifier */
- if (token.type == T_IDENTIFIER
- && (look_ahead(1)->type == ',' || look_ahead(1)->type == ')')) {
- symbol_t *symbol = token.symbol;
+ if (token.kind == T_IDENTIFIER
+ && (look_ahead(1)->kind == ',' || look_ahead(1)->kind == ')')) {
+ symbol_t *symbol = token.identifier.symbol;
argument->kind = ATTRIBUTE_ARGUMENT_SYMBOL;
argument->v.symbol = symbol;
next_token();
static symbol_t *get_symbol_from_token(void)
{
- switch(token.type) {
+ switch(token.kind) {
case T_IDENTIFIER:
- return token.symbol;
+ return token.identifier.symbol;
case T_auto:
case T_char:
case T_double:
case T_volatile:
case T_inline:
/* maybe we need more tokens ... add them on demand */
- return get_token_symbol(&token);
+ return get_token_kind_symbol(token.kind);
default:
return NULL;
}
expect('(', end_error);
expect('(', end_error);
- if (token.type != ')') do {
+ if (token.kind != ')') do {
attribute_t *attribute = parse_attribute_gnu_single();
if (attribute == NULL)
goto end_error;
anchor = &(*anchor)->next;
attribute_t *attribute;
- switch (token.type) {
+ switch (token.kind) {
case T___attribute__:
attribute = parse_attribute_gnu();
if (attribute == NULL)
for (;;) {
designator_t *designator;
- switch (token.type) {
+ switch (token.kind) {
case '[':
designator = allocate_ast_zero(sizeof(designator[0]));
- designator->source_position = token.source_position;
+ designator->source_position = token.base.source_position;
next_token();
add_anchor_token(']');
designator->array_index = parse_constant_expression();
break;
case '.':
designator = allocate_ast_zero(sizeof(designator[0]));
- designator->source_position = token.source_position;
+ designator->source_position = token.base.source_position;
next_token();
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing designator",
T_IDENTIFIER, NULL);
return NULL;
}
- designator->symbol = token.symbol;
+ designator->symbol = token.identifier.symbol;
next_token();
break;
default:
{
/* there might be extra {} hierarchies */
int braces = 0;
- if (token.type == '{') {
+ if (token.kind == '{') {
warningf(WARN_OTHER, HERE, "extra curly braces around scalar initializer");
do {
eat('{');
++braces;
- } while (token.type == '{');
+ } while (token.kind == '{');
}
expression_t *expression = parse_assignment_expression();
bool additional_warning_displayed = false;
while (braces > 0) {
next_if(',');
- if (token.type != '}') {
+ if (token.kind != '}') {
if (!additional_warning_displayed) {
warningf(WARN_OTHER, HERE, "additional elements in scalar initializer");
additional_warning_displayed = true;
{
next_if('{');
- while (token.type != '}') {
- if (token.type == T_EOF)
+ while (token.kind != '}') {
+ if (token.kind == T_EOF)
return;
- if (token.type == '{') {
+ if (token.kind == '{') {
eat_block();
continue;
}
type_t *outer_type, size_t top_path_level,
parse_initializer_env_t *env)
{
- if (token.type == '}') {
+ if (token.kind == '}') {
/* empty initializer */
return create_empty_initializer();
}
while (true) {
designator_t *designator = NULL;
- if (token.type == '.' || token.type == '[') {
+ if (token.kind == '.' || token.kind == '[') {
designator = parse_designation();
goto finish_designator;
- } else if (token.type == T_IDENTIFIER && look_ahead(1)->type == ':') {
+ } else if (token.kind == T_IDENTIFIER && look_ahead(1)->kind == ':') {
/* GNU-style designator ("identifier: value") */
designator = allocate_ast_zero(sizeof(designator[0]));
- designator->source_position = token.source_position;
- designator->symbol = token.symbol;
+ designator->source_position = token.base.source_position;
+ designator->symbol = token.identifier.symbol;
eat(T_IDENTIFIER);
eat(':');
initializer_t *sub;
- if (token.type == '{') {
+ if (token.kind == '{') {
if (type != NULL && is_type_scalar(type)) {
sub = parse_scalar_initializer(type, env->must_be_constant);
} else {
sub = initializer_from_expression(outer_type, expression);
if (sub != NULL) {
next_if(',');
- if (token.type != '}') {
+ if (token.kind != '}') {
warningf(WARN_OTHER, HERE, "excessive elements in initializer for type '%T'", orig_type);
}
/* TODO: eat , ... */
ARR_APP1(initializer_t*, initializers, sub);
error_parse_next:
- if (token.type == '}') {
+ if (token.kind == '}') {
break;
}
expect(',', end_error);
- if (token.type == '}') {
+ if (token.kind == '}') {
break;
}
if (is_type_scalar(type)) {
result = parse_scalar_initializer(type, env->must_be_constant);
- } else if (token.type == '{') {
+ } else if (token.kind == '{') {
eat('{');
type_path_t path;
entity_t *entity = NULL;
attribute_t *attributes = NULL;
- if (token.type == T___attribute__) {
+ if (token.kind == T___attribute__) {
attributes = parse_attributes(NULL);
}
entity_kind_tag_t const kind = is_struct ? ENTITY_STRUCT : ENTITY_UNION;
- if (token.type == T_IDENTIFIER) {
+ if (token.kind == T_IDENTIFIER) {
/* the compound has a name, check if we have seen it already */
- symbol = token.symbol;
+ symbol = token.identifier.symbol;
entity = get_tag(symbol, kind);
next_token();
if (entity != NULL) {
if (entity->base.parent_scope != current_scope &&
- (token.type == '{' || token.type == ';')) {
+ (token.kind == '{' || token.kind == ';')) {
/* we're in an inner scope and have a definition. Shadow
* existing definition in outer scope */
entity = NULL;
- } else if (entity->compound.complete && token.type == '{') {
+ } else if (entity->compound.complete && token.kind == '{') {
source_position_t const *const ppos = &entity->base.source_position;
errorf(&pos, "multiple definitions of '%N' (previous definition %P)", entity, ppos);
/* clear members in the hope to avoid further errors */
entity->compound.members.entities = NULL;
}
}
- } else if (token.type != '{') {
+ } else if (token.kind != '{') {
char const *const msg =
is_struct ? "while parsing struct type specifier" :
"while parsing union type specifier";
append_entity(current_scope, entity);
}
- if (token.type == '{') {
+ if (token.kind == '{') {
parse_compound_type_entries(&entity->compound);
/* ISO/IEC 14882:1998(E) §7.1.3:5 */
{
eat('{');
- if (token.type == '}') {
+ if (token.kind == '}') {
errorf(HERE, "empty enum not allowed");
next_token();
return;
add_anchor_token('}');
do {
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing enum entry", T_IDENTIFIER, NULL);
eat_block();
rem_anchor_token('}');
return;
}
- entity_t *const entity = allocate_entity_zero(ENTITY_ENUM_VALUE, NAMESPACE_NORMAL, token.symbol);
+ symbol_t *symbol = token.identifier.symbol;
+ entity_t *const entity
+ = allocate_entity_zero(ENTITY_ENUM_VALUE, NAMESPACE_NORMAL, symbol);
entity->enum_value.enum_type = enum_type;
- entity->base.source_position = token.source_position;
+ entity->base.source_position = token.base.source_position;
next_token();
if (next_if('=')) {
}
record_entity(entity, false);
- } while (next_if(',') && token.type != '}');
+ } while (next_if(',') && token.kind != '}');
rem_anchor_token('}');
expect('}', end_error);
symbol_t *symbol;
eat(T_enum);
- switch (token.type) {
+ switch (token.kind) {
case T_IDENTIFIER:
- symbol = token.symbol;
+ symbol = token.identifier.symbol;
entity = get_tag(symbol, ENTITY_ENUM);
next_token();
if (entity != NULL) {
if (entity->base.parent_scope != current_scope &&
- (token.type == '{' || token.type == ';')) {
+ (token.kind == '{' || token.kind == ';')) {
/* we're in an inner scope and have a definition. Shadow
* existing definition in outer scope */
entity = NULL;
- } else if (entity->enume.complete && token.type == '{') {
+ } else if (entity->enume.complete && token.kind == '{') {
source_position_t const *const ppos = &entity->base.source_position;
errorf(&pos, "multiple definitions of '%N' (previous definition %P)", entity, ppos);
}
type->enumt.enume = &entity->enume;
type->enumt.akind = ATOMIC_TYPE_INT;
- if (token.type == '{') {
+ if (token.kind == '{') {
if (symbol != NULL) {
environment_push(entity);
}
expression_t *expression = NULL;
- switch (token.type) {
+ switch (token.kind) {
case T_IDENTIFIER:
- if (is_typedef_symbol(token.symbol)) {
+ if (is_typedef_symbol(token.identifier.symbol)) {
DECLARATION_START
type = parse_typename();
} else {
= allocate_ast_zero(sizeof(*property));
do {
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing property declspec",
T_IDENTIFIER, NULL);
goto end_error;
}
symbol_t **prop;
- symbol_t *symbol = token.symbol;
+ symbol_t *symbol = token.identifier.symbol;
if (strcmp(symbol->string, "put") == 0) {
prop = &property->put_symbol;
} else if (strcmp(symbol->string, "get") == 0) {
}
eat(T_IDENTIFIER);
expect('=', end_error);
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing property declspec",
T_IDENTIFIER, NULL);
goto end_error;
}
if (prop != NULL)
- *prop = token.symbol;
+ *prop = token.identifier.symbol;
next_token();
} while (next_if(','));
attribute_kind_t kind = ATTRIBUTE_UNKNOWN;
if (next_if(T_restrict)) {
kind = ATTRIBUTE_MS_RESTRICT;
- } else if (token.type == T_IDENTIFIER) {
- const char *name = token.symbol->string;
+ } else if (token.kind == T_IDENTIFIER) {
+ const char *name = token.identifier.symbol->string;
for (attribute_kind_t k = ATTRIBUTE_MS_FIRST; k <= ATTRIBUTE_MS_LAST;
++k) {
const char *attribute_name = get_attribute_name(k);
bool saw_error = false;
memset(specifiers, 0, sizeof(*specifiers));
- specifiers->source_position = token.source_position;
+ specifiers->source_position = token.base.source_position;
while (true) {
specifiers->attributes = parse_attributes(specifiers->attributes);
- switch (token.type) {
+ switch (token.kind) {
/* storage class */
#define MATCH_STORAGE_CLASS(token, class) \
case token: \
/* Be somewhat resilient to typos like 'unsigned lng* f()' in a
* declaration, so it doesn't generate errors about expecting '(' or
* '{' later on. */
- switch (look_ahead(1)->type) {
+ switch (look_ahead(1)->kind) {
STORAGE_CLASSES
TYPE_SPECIFIERS
case T_const:
}
}
- type_t *const typedef_type = get_typedef_type(token.symbol);
+ type_t *const typedef_type = get_typedef_type(token.identifier.symbol);
if (typedef_type == NULL) {
/* Be somewhat resilient to typos like 'vodi f()' at the beginning of a
* declaration, so it doesn't generate 'implicit int' followed by more
* errors later on. */
- token_type_t const la1_type = (token_type_t)look_ahead(1)->type;
+ token_kind_t const la1_type = (token_kind_t)look_ahead(1)->kind;
switch (la1_type) {
DECLARATION_START
case T_IDENTIFIER:
case '*': {
errorf(HERE, "%K does not name a type", &token);
- entity_t *entity =
- create_error_entity(token.symbol, ENTITY_TYPEDEF);
+ symbol_t *symbol = token.identifier.symbol;
+ entity_t *entity
+ = create_error_entity(symbol, ENTITY_TYPEDEF);
type = allocate_type_zero(TYPE_TYPEDEF);
type->typedeft.typedefe = &entity->typedefe;
type_qualifiers_t qualifiers = TYPE_QUALIFIER_NONE;
while (true) {
- switch (token.type) {
+ switch (token.kind) {
/* type qualifiers */
MATCH_TYPE_QUALIFIER(T_const, TYPE_QUALIFIER_CONST);
MATCH_TYPE_QUALIFIER(T_restrict, TYPE_QUALIFIER_RESTRICT);
*/
static void parse_identifier_list(scope_t *scope)
{
+ assert(token.kind == T_IDENTIFIER);
do {
- entity_t *const entity = allocate_entity_zero(ENTITY_PARAMETER, NAMESPACE_NORMAL, token.symbol);
- entity->base.source_position = token.source_position;
+ entity_t *const entity = allocate_entity_zero(ENTITY_PARAMETER, NAMESPACE_NORMAL, token.identifier.symbol);
+ entity->base.source_position = token.base.source_position;
/* a K&R parameter has no type, yet */
next_token();
if (scope != NULL)
append_entity(scope, entity);
- } while (next_if(',') && token.type == T_IDENTIFIER);
+ } while (next_if(',') && token.kind == T_IDENTIFIER);
}
static entity_t *parse_parameter(void)
static bool has_parameters(void)
{
/* func(void) is not a parameter */
- if (token.type == T_IDENTIFIER) {
- entity_t const *const entity = get_entity(token.symbol, NAMESPACE_NORMAL);
+ if (token.kind == T_IDENTIFIER) {
+ entity_t const *const entity
+ = get_entity(token.identifier.symbol, NAMESPACE_NORMAL);
if (entity == NULL)
return true;
if (entity->kind != ENTITY_TYPEDEF)
return true;
if (skip_typeref(entity->typedefe.type) != type_void)
return true;
- } else if (token.type != T_void) {
+ } else if (token.kind != T_void) {
return true;
}
- if (look_ahead(1)->type != ')')
+ if (look_ahead(1)->kind != ')')
return true;
next_token();
return false;
add_anchor_token(')');
int saved_comma_state = save_and_reset_anchor_state(',');
- if (token.type == T_IDENTIFIER &&
- !is_typedef_symbol(token.symbol)) {
- token_type_t la1_type = (token_type_t)look_ahead(1)->type;
+ if (token.kind == T_IDENTIFIER
+ && !is_typedef_symbol(token.identifier.symbol)) {
+ token_kind_t la1_type = (token_kind_t)look_ahead(1)->kind;
if (la1_type == ',' || la1_type == ')') {
type->kr_style_parameters = true;
parse_identifier_list(scope);
}
}
- if (token.type == ')') {
+ if (token.kind == ')') {
/* ISO/IEC 14882:1998(E) §C.1.6:1 */
if (!(c_mode & _CXX))
type->unspecified_parameters = true;
} else if (has_parameters()) {
function_parameter_t **anchor = &type->parameters;
do {
- switch (token.type) {
+ switch (token.kind) {
case T_DOTDOTDOT:
next_token();
type->variadic = true;
array->is_static = is_static;
expression_t *size = NULL;
- if (token.type == '*' && look_ahead(1)->type == ']') {
+ if (token.kind == '*' && look_ahead(1)->kind == ']') {
array->is_variable = true;
next_token();
- } else if (token.type != ']') {
+ } else if (token.kind != ']') {
size = parse_assignment_expression();
/* §6.7.5.2:1 Array size must have integer type */
for (;;) {
construct_type_t *type;
//variable_t *based = NULL; /* MS __based extension */
- switch (token.type) {
+ switch (token.kind) {
case '&':
type = parse_reference_declarator();
break;
ptr_operator_end: ;
construct_type_t *inner_types = NULL;
- switch (token.type) {
+ switch (token.kind) {
case T_IDENTIFIER:
if (env->must_be_abstract) {
errorf(HERE, "no identifier expected in typename");
} else {
- env->symbol = token.symbol;
- env->source_position = token.source_position;
+ env->symbol = token.identifier.symbol;
+ env->source_position = token.base.source_position;
}
next_token();
break;
case '(': {
/* Parenthesized declarator or function declarator? */
token_t const *const la1 = look_ahead(1);
- switch (la1->type) {
+ switch (la1->kind) {
case T_IDENTIFIER:
- if (is_typedef_symbol(la1->symbol)) {
+ if (is_typedef_symbol(la1->identifier.symbol)) {
case ')':
/* §6.7.6:2 footnote 126: Empty parentheses in a type name are
* interpreted as ``function with no parameter specification'', rather
for (;;) {
construct_type_t *type;
- switch (token.type) {
+ switch (token.kind) {
case '(': {
scope_t *scope = NULL;
if (!env->must_be_abstract) {
static bool is_declaration_specifier(const token_t *token)
{
- switch (token->type) {
+ switch (token->kind) {
DECLARATION_START
return true;
case T_IDENTIFIER:
- return is_typedef_symbol(token->symbol);
+ return is_typedef_symbol(token->identifier.symbol);
default:
return false;
add_anchor_token(';');
add_anchor_token(',');
while (true) {
- entity_t *entity = finished_declaration(ndeclaration, token.type == '=');
+ entity_t *entity = finished_declaration(ndeclaration, token.kind == '=');
- if (token.type == '=') {
+ if (token.kind == '=') {
parse_init_declarator_rest(entity);
} else if (entity->kind == ENTITY_VARIABLE) {
/* ISO/IEC 14882:1998(E) §8.5.3:3 The initializer can be omitted
parse_declaration_specifiers(&specifiers);
rem_anchor_token(';');
- if (token.type == ';') {
+ if (token.kind == ';') {
parse_anonymous_declaration_rest(&specifiers);
} else {
entity_t *entity = parse_declarator(&specifiers, flags);
/* parse declaration list */
for (;;) {
- switch (token.type) {
+ switch (token.kind) {
DECLARATION_START
/* This covers symbols, which are no type, too, and results in
* better error messages. The typical cases are misspelled type
rem_anchor_token(';');
/* must be a declaration */
- if (token.type == ';') {
+ if (token.kind == ';') {
parse_anonymous_declaration_rest(&specifiers);
return;
}
rem_anchor_token(',');
/* must be a declaration */
- switch (token.type) {
+ switch (token.kind) {
case ',':
case ';':
case '=':
/* must be a function definition */
parse_kr_declaration_list(ndeclaration);
- if (token.type != '{') {
+ if (token.kind != '{') {
parse_error_expected("while parsing function definition", '{', NULL);
eat_until_matching_token(';');
return;
do {
entity_t *entity;
- if (token.type == ':') {
+ if (token.kind == ':') {
source_position_t source_position = *HERE;
next_token();
}
}
- if (token.type == ':') {
+ if (token.kind == ':') {
source_position_t source_position = *HERE;
next_token();
expression_t *size = parse_constant_expression();
} else if (is_type_incomplete(type)) {
/* §6.7.2.1:16 flexible array member */
if (!is_type_array(type) ||
- token.type != ';' ||
- look_ahead(1)->type != '}') {
+ token.kind != ';' ||
+ look_ahead(1)->kind != '}') {
errorf(pos, "'%N' has incomplete type '%T'", entity, orig_type);
}
}
add_anchor_token('}');
for (;;) {
- switch (token.type) {
+ switch (token.kind) {
DECLARATION_START
case T___extension__:
case T_IDENTIFIER: {
static expression_t *expected_expression_error(void)
{
/* skip the error message if the error token was read */
- if (token.type != T_ERROR) {
+ if (token.kind != T_ERROR) {
errorf(HERE, "expected expression, got token %K", &token);
}
next_token();
*/
static expression_t *parse_string_literal(void)
{
- source_position_t begin = token.source_position;
- string_t res = token.literal;
- bool is_wide = (token.type == T_WIDE_STRING_LITERAL);
+ source_position_t begin = token.base.source_position;
+ string_t res = token.string.string;
+ bool is_wide = (token.kind == T_WIDE_STRING_LITERAL);
next_token();
- while (token.type == T_STRING_LITERAL
- || token.type == T_WIDE_STRING_LITERAL) {
- warn_string_concat(&token.source_position);
- res = concat_strings(&res, &token.literal);
+ while (token.kind == T_STRING_LITERAL
+ || token.kind == T_WIDE_STRING_LITERAL) {
+ warn_string_concat(&token.base.source_position);
+ res = concat_strings(&res, &token.string.string);
next_token();
- is_wide |= token.type == T_WIDE_STRING_LITERAL;
+ is_wide |= token.kind == T_WIDE_STRING_LITERAL;
}
expression_t *literal;
static void warn_traditional_suffix(void)
{
- warningf(WARN_TRADITIONAL, HERE, "traditional C rejects the '%Y' suffix", token.symbol);
+ warningf(WARN_TRADITIONAL, HERE, "traditional C rejects the '%S' suffix",
+ &token.number.suffix);
}
static void check_integer_suffix(void)
{
- symbol_t *suffix = token.symbol;
- if (suffix == NULL)
+ const string_t *suffix = &token.number.suffix;
+ if (suffix->size == 0)
return;
bool not_traditional = false;
- const char *c = suffix->string;
+ const char *c = suffix->begin;
if (*c == 'l' || *c == 'L') {
++c;
if (*c == *(c-1)) {
}
}
if (*c != '\0') {
- errorf(&token.source_position,
- "invalid suffix '%s' on integer constant", suffix->string);
+ errorf(&token.base.source_position,
+ "invalid suffix '%S' on integer constant", suffix);
} else if (not_traditional) {
warn_traditional_suffix();
}
static type_t *check_floatingpoint_suffix(void)
{
- symbol_t *suffix = token.symbol;
- type_t *type = type_double;
- if (suffix == NULL)
+ const string_t *suffix = &token.number.suffix;
+ type_t *type = type_double;
+ if (suffix->size == 0)
return type;
bool not_traditional = false;
- const char *c = suffix->string;
+ const char *c = suffix->begin;
if (*c == 'f' || *c == 'F') {
++c;
type = type_float;
type = type_long_double;
}
if (*c != '\0') {
- errorf(&token.source_position,
- "invalid suffix '%s' on floatingpoint constant", suffix->string);
+ errorf(&token.base.source_position,
+ "invalid suffix '%S' on floatingpoint constant", suffix);
} else if (not_traditional) {
warn_traditional_suffix();
}
expression_kind_t kind;
type_t *type;
- switch (token.type) {
+ switch (token.kind) {
case T_INTEGER:
kind = EXPR_LITERAL_INTEGER;
check_integer_suffix();
expression_t *literal = allocate_expression_zero(kind);
literal->base.type = type;
- literal->literal.value = token.literal;
- literal->literal.suffix = token.symbol;
+ literal->literal.value = token.number.number;
+ literal->literal.suffix = token.number.suffix;
next_token();
/* integer type depends on the size of the number and the size
{
expression_t *literal = allocate_expression_zero(EXPR_LITERAL_CHARACTER);
literal->base.type = c_mode & _CXX ? type_char : type_int;
- literal->literal.value = token.literal;
+ literal->literal.value = token.string.string;
size_t len = literal->literal.value.size;
if (len > 1) {
{
expression_t *literal = allocate_expression_zero(EXPR_LITERAL_WIDE_CHARACTER);
literal->base.type = type_int;
- literal->literal.value = token.literal;
+ literal->literal.value = token.string.string;
size_t len = wstrlen(&literal->literal.value);
if (len > 1) {
entity_t *entity;
while (true) {
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing identifier", T_IDENTIFIER, NULL);
return create_error_entity(sym_anonymous, ENTITY_VARIABLE);
}
- symbol = token.symbol;
+ symbol = token.identifier.symbol;
pos = *HERE;
next_token();
}
if (entity == NULL) {
- if (!strict_mode && token.type == '(') {
+ if (!strict_mode && token.kind == '(') {
/* an implicitly declared function */
warningf(WARN_IMPLICIT_FUNCTION_DECLARATION, &pos, "implicit declaration of function '%Y'", symbol);
entity = create_implicit_function(symbol, &pos);
static expression_t *parse_reference(void)
{
- source_position_t const pos = token.source_position;
+ source_position_t const pos = token.base.source_position;
entity_t *const entity = parse_qualified_identifier();
type_t *orig_type;
rem_anchor_token(')');
expect(')', end_error);
- if (token.type == '{') {
+ if (token.kind == '{') {
return parse_compound_literal(&pos, type);
}
static expression_t *parse_parenthesized_expression(void)
{
token_t const* const la1 = look_ahead(1);
- switch (la1->type) {
+ switch (la1->kind) {
case '{':
/* gcc extension: a statement expression */
return parse_statement_expression();
case T_IDENTIFIER:
- if (is_typedef_symbol(la1->symbol)) {
+ if (is_typedef_symbol(la1->identifier.symbol)) {
DECLARATION_START
return parse_cast();
}
designator_t *result = allocate_ast_zero(sizeof(result[0]));
result->source_position = *HERE;
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing member designator",
T_IDENTIFIER, NULL);
return NULL;
}
- result->symbol = token.symbol;
+ result->symbol = token.identifier.symbol;
next_token();
designator_t *last_designator = result;
while (true) {
if (next_if('.')) {
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing member designator",
T_IDENTIFIER, NULL);
return NULL;
}
designator_t *designator = allocate_ast_zero(sizeof(result[0]));
designator->source_position = *HERE;
- designator->symbol = token.symbol;
+ designator->symbol = token.identifier.symbol;
next_token();
last_designator->next = designator;
{
expression_t *expression;
- switch (token.type) {
+ switch (token.kind) {
case T___builtin_isgreater:
expression = allocate_expression_zero(EXPR_BINARY_ISGREATER);
break;
*/
static label_t *get_label(void)
{
- assert(token.type == T_IDENTIFIER);
+ assert(token.kind == T_IDENTIFIER);
assert(current_function != NULL);
- entity_t *label = get_entity(token.symbol, NAMESPACE_LABEL);
+ entity_t *label = get_entity(token.identifier.symbol, NAMESPACE_LABEL);
/* If we find a local label, we already created the declaration. */
if (label != NULL && label->kind == ENTITY_LOCAL_LABEL) {
if (label->base.parent_scope != current_scope) {
}
} else if (label == NULL || label->base.parent_scope != &current_function->parameters) {
/* There is no matching label in the same function, so create a new one. */
- label = allocate_entity_zero(ENTITY_LABEL, NAMESPACE_LABEL, token.symbol);
+ label = allocate_entity_zero(ENTITY_LABEL, NAMESPACE_LABEL, token.identifier.symbol);
label_push(label);
}
*/
static expression_t *parse_label_address(void)
{
- source_position_t source_position = token.source_position;
+ source_position_t source_position = token.base.source_position;
eat(T_ANDAND);
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing label address", T_IDENTIFIER, NULL);
return create_invalid_expression();
}
eat(T___noop);
- if (token.type == '(') {
+ if (token.kind == '(') {
/* parse arguments */
eat('(');
add_anchor_token(')');
add_anchor_token(',');
- if (token.type != ')') do {
+ if (token.kind != ')') do {
(void)parse_assignment_expression();
} while (next_if(','));
}
*/
static expression_t *parse_primary_expression(void)
{
- switch (token.type) {
+ switch (token.kind) {
case T_false: return parse_boolean_literal(false);
case T_true: return parse_boolean_literal(true);
case T_INTEGER:
case T_COLONCOLON:
return parse_reference();
case T_IDENTIFIER:
- if (!is_typedef_symbol(token.symbol)) {
+ if (!is_typedef_symbol(token.identifier.symbol)) {
return parse_reference();
}
/* FALLTHROUGH */
type_t *orig_type;
expression_t *expression;
- if (token.type == '(' && is_declaration_specifier(look_ahead(1))) {
+ if (token.kind == '(' && is_declaration_specifier(look_ahead(1))) {
source_position_t const pos = *HERE;
next_token();
add_anchor_token(')');
rem_anchor_token(')');
expect(')', end_error);
- if (token.type == '{') {
+ if (token.kind == '{') {
/* It was not sizeof(type) after all. It is sizeof of an expression
* starting with a compound literal */
expression = parse_compound_literal(&pos, orig_type);
static expression_t *parse_select_expression(expression_t *addr)
{
- assert(token.type == '.' || token.type == T_MINUSGREATER);
- bool select_left_arrow = (token.type == T_MINUSGREATER);
+ assert(token.kind == '.' || token.kind == T_MINUSGREATER);
+ bool select_left_arrow = (token.kind == T_MINUSGREATER);
source_position_t const pos = *HERE;
next_token();
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing select", T_IDENTIFIER, NULL);
return create_invalid_expression();
}
- symbol_t *symbol = token.symbol;
+ symbol_t *symbol = token.identifier.symbol;
next_token();
type_t *const orig_type = addr->base.type;
add_anchor_token(')');
add_anchor_token(',');
- if (token.type != ')') {
+ if (token.kind != ')') {
call_argument_t **anchor = &call->arguments;
do {
call_argument_t *argument = allocate_ast_zero(sizeof(*argument));
expression_t *true_expression = expression;
bool gnu_cond = false;
- if (GNU_MODE && token.type == ':') {
+ if (GNU_MODE && token.kind == ':') {
gnu_cond = true;
} else {
true_expression = parse_expression();
eat(T_throw);
expression_t *value = NULL;
- switch (token.type) {
+ switch (token.kind) {
EXPRESSION_START {
value = parse_assignment_expression();
/* ISO/IEC 14882:1998(E) §15.1:3 */
expression->base.type = make_pointer_type(orig_type, TYPE_QUALIFIER_NONE);
}
-#define CREATE_UNARY_EXPRESSION_PARSER(token_type, unexpression_type, sfunc) \
+#define CREATE_UNARY_EXPRESSION_PARSER(token_kind, unexpression_type, sfunc) \
static expression_t *parse_##unexpression_type(void) \
{ \
expression_t *unary_expression \
= allocate_expression_zero(unexpression_type); \
- eat(token_type); \
+ eat(token_kind); \
unary_expression->unary.value = parse_subexpression(PREC_UNARY); \
\
sfunc(&unary_expression->unary); \
CREATE_UNARY_EXPRESSION_PARSER(T_MINUSMINUS, EXPR_UNARY_PREFIX_DECREMENT,
semantic_incdec)
-#define CREATE_UNARY_POSTFIX_EXPRESSION_PARSER(token_type, unexpression_type, \
+#define CREATE_UNARY_POSTFIX_EXPRESSION_PARSER(token_kind, unexpression_type, \
sfunc) \
static expression_t *parse_##unexpression_type(expression_t *left) \
{ \
expression_t *unary_expression \
= allocate_expression_zero(unexpression_type); \
- eat(token_type); \
+ eat(token_kind); \
unary_expression->unary.value = left; \
\
sfunc(&unary_expression->unary); \
/**
* @param prec_r precedence of the right operand
*/
-#define CREATE_BINEXPR_PARSER(token_type, binexpression_type, prec_r, sfunc) \
+#define CREATE_BINEXPR_PARSER(token_kind, binexpression_type, prec_r, sfunc) \
static expression_t *parse_##binexpression_type(expression_t *left) \
{ \
expression_t *binexpr = allocate_expression_zero(binexpression_type); \
binexpr->binary.left = left; \
- eat(token_type); \
+ eat(token_kind); \
\
expression_t *right = parse_subexpression(prec_r); \
\
static expression_t *parse_subexpression(precedence_t precedence)
{
- if (token.type < 0) {
+ if (token.kind < 0) {
return expected_expression_error();
}
expression_parser_function_t *parser
- = &expression_parsers[token.type];
+ = &expression_parsers[token.kind];
expression_t *left;
if (parser->parser != NULL) {
assert(left != NULL);
while (true) {
- if (token.type < 0) {
+ if (token.kind < 0) {
return expected_expression_error();
}
- parser = &expression_parsers[token.type];
+ parser = &expression_parsers[token.kind];
if (parser->infix_parser == NULL)
break;
if (parser->infix_precedence < precedence)
* Register a parser for a prefix-like operator.
*
* @param parser the parser function
- * @param token_type the token type of the prefix token
+ * @param token_kind the token type of the prefix token
*/
static void register_expression_parser(parse_expression_function parser,
- int token_type)
+ int token_kind)
{
- expression_parser_function_t *entry = &expression_parsers[token_type];
+ expression_parser_function_t *entry = &expression_parsers[token_kind];
if (entry->parser != NULL) {
- diagnosticf("for token '%k'\n", (token_type_t)token_type);
+ diagnosticf("for token '%k'\n", (token_kind_t)token_kind);
panic("trying to register multiple expression parsers for a token");
}
entry->parser = parser;
* Register a parser for an infix operator with given precedence.
*
* @param parser the parser function
- * @param token_type the token type of the infix operator
+ * @param token_kind the token type of the infix operator
* @param precedence the precedence of the operator
*/
static void register_infix_parser(parse_expression_infix_function parser,
- int token_type, precedence_t precedence)
+ int token_kind, precedence_t precedence)
{
- expression_parser_function_t *entry = &expression_parsers[token_type];
+ expression_parser_function_t *entry = &expression_parsers[token_kind];
if (entry->infix_parser != NULL) {
- diagnosticf("for token '%k'\n", (token_type_t)token_type);
+ diagnosticf("for token '%k'\n", (token_kind_t)token_kind);
panic("trying to register multiple infix expression parsers for a "
"token");
}
asm_argument_t *result = NULL;
asm_argument_t **anchor = &result;
- while (token.type == T_STRING_LITERAL || token.type == '[') {
+ while (token.kind == T_STRING_LITERAL || token.kind == '[') {
asm_argument_t *argument = allocate_ast_zero(sizeof(argument[0]));
memset(argument, 0, sizeof(argument[0]));
if (next_if('[')) {
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing asm argument",
T_IDENTIFIER, NULL);
return NULL;
}
- argument->symbol = token.symbol;
+ argument->symbol = token.identifier.symbol;
expect(']', end_error);
}
asm_clobber_t *result = NULL;
asm_clobber_t **anchor = &result;
- while (token.type == T_STRING_LITERAL) {
+ while (token.kind == T_STRING_LITERAL) {
asm_clobber_t *clobber = allocate_ast_zero(sizeof(clobber[0]));
clobber->clobber = parse_string_literals();
expect('(', end_error);
add_anchor_token(')');
- if (token.type != T_STRING_LITERAL) {
+ if (token.kind != T_STRING_LITERAL) {
parse_error_expected("after asm(", T_STRING_LITERAL, NULL);
goto end_of_asm;
}
static statement_t *parse_label_inner_statement(statement_t const *const label, char const *const label_kind)
{
statement_t *inner_stmt;
- switch (token.type) {
+ switch (token.kind) {
case '}':
errorf(&label->base.source_position, "%s at end of compound statement", label_kind);
inner_stmt = create_invalid_statement();
eat(':');
- if (token.type == T___attribute__ && !(c_mode & _CXX)) {
+ if (token.kind == T___attribute__ && !(c_mode & _CXX)) {
parse_attributes(NULL); // TODO process attributes
}
POP_EXTENSION();
- if (token.type != ';') {
+ if (token.kind != ';') {
add_anchor_token(';');
expression_t *const cond = parse_expression();
statement->fors.condition = cond;
rem_anchor_token(';');
}
expect(';', end_error2);
- if (token.type != ')') {
+ if (token.kind != ')') {
expression_t *const step = parse_expression();
statement->fors.step = step;
mark_vars_read(step, ENT_ANY);
}
statement->gotos.expression = expression;
- } else if (token.type == T_IDENTIFIER) {
+ } else if (token.kind == T_IDENTIFIER) {
label_t *const label = get_label();
label->used = true;
statement->gotos.label = label;
eat(T_return);
expression_t *return_value = NULL;
- if (token.type != ';') {
+ if (token.kind != ';') {
return_value = parse_expression();
mark_vars_read(return_value, NULL);
}
entity_t *end = NULL;
entity_t **anchor = &begin;
do {
- if (token.type != T_IDENTIFIER) {
+ if (token.kind != T_IDENTIFIER) {
parse_error_expected("while parsing local label declaration",
T_IDENTIFIER, NULL);
goto end_error;
}
- symbol_t *symbol = token.symbol;
+ symbol_t *symbol = token.identifier.symbol;
entity_t *entity = get_entity(symbol, NAMESPACE_LABEL);
if (entity != NULL && entity->base.parent_scope == current_scope) {
source_position_t const *const ppos = &entity->base.source_position;
} else {
entity = allocate_entity_zero(ENTITY_LOCAL_LABEL, NAMESPACE_LABEL, symbol);
entity->base.parent_scope = current_scope;
- entity->base.source_position = token.source_position;
+ entity->base.source_position = token.base.source_position;
*anchor = entity;
anchor = &entity->base.next;
entity_t *entity = NULL;
symbol_t *symbol = NULL;
- if (token.type == T_IDENTIFIER) {
- symbol = token.symbol;
+ if (token.kind == T_IDENTIFIER) {
+ symbol = token.identifier.symbol;
next_token();
entity = get_entity(symbol, NAMESPACE_NORMAL);
&& entity->kind != ENTITY_NAMESPACE
&& entity->base.parent_scope == current_scope) {
if (is_entity_valid(entity)) {
- error_redefined_as_different_kind(&token.source_position,
+ error_redefined_as_different_kind(&token.base.source_position,
entity, ENTITY_NAMESPACE);
}
entity = NULL;
if (entity == NULL) {
entity = allocate_entity_zero(ENTITY_NAMESPACE, NAMESPACE_NORMAL, symbol);
- entity->base.source_position = token.source_position;
+ entity->base.source_position = token.base.source_position;
entity->base.parent_scope = current_scope;
}
- if (token.type == '=') {
+ if (token.kind == '=') {
/* TODO: parse namespace alias */
panic("namespace alias definition not supported yet");
}
/* declaration or statement */
add_anchor_token(';');
- switch (token.type) {
+ switch (token.kind) {
case T_IDENTIFIER: {
- token_type_t la1_type = (token_type_t)look_ahead(1)->type;
+ token_kind_t la1_type = (token_kind_t)look_ahead(1)->kind;
if (la1_type == ':') {
statement = parse_label_statement();
- } else if (is_typedef_symbol(token.symbol)) {
+ } else if (is_typedef_symbol(token.identifier.symbol)) {
statement = parse_declaration_statement();
} else {
/* it's an identifier, the grammar says this must be an
switch (la1_type) {
case '&':
case '*':
- if (get_entity(token.symbol, NAMESPACE_NORMAL) != NULL) {
+ if (get_entity(token.identifier.symbol, NAMESPACE_NORMAL) != NULL) {
default:
statement = parse_expression_statement();
} else {
statement_t **anchor = &statement->compound.statements;
bool only_decls_so_far = true;
- while (token.type != '}') {
- if (token.type == T_EOF) {
+ while (token.kind != '}') {
+ if (token.kind == T_EOF) {
errorf(&statement->base.source_position,
"EOF while parsing compound statement");
break;
static void parse_external(void)
{
- switch (token.type) {
+ switch (token.kind) {
case T_extern:
- if (look_ahead(1)->type == T_STRING_LITERAL) {
+ if (look_ahead(1)->kind == T_STRING_LITERAL) {
parse_linkage_specification();
} else {
DECLARATION_START_NO_EXTERN
default:
errorf(HERE, "stray %K outside of function", &token);
- if (token.type == '(' || token.type == '{' || token.type == '[')
- eat_until_matching_token(token.type);
+ if (token.kind == '(' || token.kind == '{' || token.kind == '[')
+ eat_until_matching_token(token.kind);
next_token();
return;
}
memcpy(token_anchor_copy, token_anchor_set, sizeof(token_anchor_copy));
#endif
- while (token.type != T_EOF && token.type != '}') {
+ while (token.kind != T_EOF && token.kind != '}') {
#ifndef NDEBUG
for (int i = 0; i < T_LAST_TOKEN; ++i) {
unsigned short count = token_anchor_set[i] - token_anchor_copy[i];
while (true) {
parse_externals();
- if (token.type == T_EOF)
+ if (token.kind == T_EOF)
break;
errorf(HERE, "stray %K outside of function", &token);
- if (token.type == '(' || token.type == '{' || token.type == '[')
- eat_until_matching_token(token.type);
+ if (token.kind == '(' || token.kind == '{' || token.kind == '[')
+ eat_until_matching_token(token.kind);
next_token();
}
}
static source_position_t expansion_pos;
static pp_definition_t *current_expansion = NULL;
static strset_t stringset;
-static preprocessor_token_type_t last_token = TP_ERROR;
+static preprocessor_token_kind_t last_token = TP_ERROR;
static searchpath_entry_t *searchpath;
*/
static void parse_error(const char *msg)
{
- errorf(&pp_token.source_position, "%s", msg);
+ errorf(&pp_token.base.source_position, "%s", msg);
}
static inline void next_real_char(void)
case EOF: {
source_position_t source_position;
- source_position.input_name = pp_token.source_position.input_name;
+ source_position.input_name = pp_token.base.source_position.input_name;
source_position.lineno = start_linenr;
errorf(&source_position, "string has no end");
- pp_token.type = TP_ERROR;
+ pp_token.kind = TP_ERROR;
return;
}
const size_t size = (size_t)obstack_object_size(&symbol_obstack);
char *const string = obstack_finish(&symbol_obstack);
- pp_token.type = TP_STRING_LITERAL;
- pp_token.literal = make_string(string, size);
+ pp_token.kind = TP_STRING_LITERAL;
+ pp_token.string.string = make_string(string, size);
}
/**
static void parse_wide_string_literal(void)
{
parse_string_literal();
- if (pp_token.type == TP_STRING_LITERAL)
- pp_token.type = TP_WIDE_STRING_LITERAL;
+ if (pp_token.kind == TP_STRING_LITERAL)
+ pp_token.kind = TP_WIDE_STRING_LITERAL;
}
static void parse_wide_character_constant(void)
case EOF:
parse_error("EOF while parsing character constant");
- pp_token.type = TP_ERROR;
+ pp_token.kind = TP_ERROR;
return;
default:
obstack_1grow(&symbol_obstack, '\0');
size_t size = (size_t) obstack_object_size(&symbol_obstack)-1;
char *string = obstack_finish(&symbol_obstack);
- pp_token.type = TP_WIDE_CHARACTER_CONSTANT;
- pp_token.literal = make_string(string, size);
+ pp_token.kind = TP_WIDE_CHARACTER_CONSTANT;
+ pp_token.string.string = make_string(string, size);
if (size == 0) {
parse_error("empty character constant");
case EOF: {
source_position_t source_position;
- source_position.input_name = pp_token.source_position.input_name;
+ source_position.input_name = pp_token.base.source_position.input_name;
source_position.lineno = start_linenr;
errorf(&source_position, "EOF while parsing character constant");
- pp_token.type = TP_ERROR;
+ pp_token.kind = TP_ERROR;
return;
}
}
end_of_char_constant:;
- const size_t size = (size_t)obstack_object_size(&symbol_obstack);
- const char *const string = obstack_finish(&symbol_obstack);
+ obstack_1grow(&symbol_obstack, '\0');
+ const size_t size = (size_t)obstack_object_size(&symbol_obstack) - 1;
+ char *const string = obstack_finish(&symbol_obstack);
+
+ pp_token.kind = TP_CHARACTER_CONSTANT;
+ pp_token.string.string = make_string(string, size);
- pp_token.type = TP_CHARACTER_CONSTANT;
- pp_token.literal.begin = string;
- pp_token.literal.size = size;
+ if (size == 0) {
+ parse_error("empty character constant");
+ }
}
#define SYMBOL_CHARS_WITHOUT_E_P \
goto restart;
}
pp_token = definition->token_list[definition->expand_pos];
- pp_token.source_position = expansion_pos;
+ pp_token.base.source_position = expansion_pos;
++definition->expand_pos;
- if (pp_token.type != TP_IDENTIFIER)
+ if (pp_token.kind != TP_IDENTIFIER)
return;
/* if it was an identifier then we might need to expand again */
- pp_definition_t *symbol_definition = pp_token.symbol->pp_definition;
+ pp_definition_t *symbol_definition = pp_token.identifier.symbol->pp_definition;
if (symbol_definition != NULL && !symbol_definition->is_expanding) {
symbol_definition->parent_expansion = definition;
symbol_definition->expand_pos = 0;
case EOF: {
source_position_t source_position;
- source_position.input_name = pp_token.source_position.input_name;
+ source_position.input_name = pp_token.base.source_position.input_name;
source_position.lineno = start_linenr;
errorf(&source_position, "at end of file while looking for comment end");
return;
static void eat_pp(int type)
{
(void) type;
- assert(pp_token.type == type);
+ assert(pp_token.kind == type);
next_preprocessing_token();
}
symbol_t *symbol = symbol_table_insert(string);
- pp_token.type = symbol->pp_ID;
- pp_token.symbol = symbol;
+ pp_token.kind = symbol->pp_ID;
+ pp_token.identifier.symbol = symbol;
/* we can free the memory from symbol obstack if we already had an entry in
* the symbol table */
size_t size = obstack_object_size(&symbol_obstack);
char *string = obstack_finish(&symbol_obstack);
- pp_token.type = TP_NUMBER;
- pp_token.literal.begin = string;
- pp_token.literal.size = size;
+ pp_token.kind = TP_NUMBER;
+ pp_token.number.number = make_string(string, size);
}
#define MAYBE(ch, set_type) \
case ch: \
next_char(); \
- pp_token.type = set_type; \
+ pp_token.kind = set_type; \
return;
#define ELSE_CODE(code) \
#define ELSE(set_type) \
ELSE_CODE( \
- pp_token.type = set_type; \
+ pp_token.kind = set_type; \
)
static void next_preprocessing_token(void)
info.at_line_begin = false;
info.had_whitespace = false;
restart:
- pp_token.source_position = input.position;
+ pp_token.base.source_position = input.position;
switch (input.c) {
case ' ':
case '\t':
ELSE_CODE(
put_back(input.c);
input.c = '.';
- pp_token.type = '.';
+ pp_token.kind = '.';
)
ELSE('.')
case '&':
ELSE_CODE(
put_back(input.c);
input.c = '%';
- pp_token.type = '#';
+ pp_token.kind = '#';
)
ELSE('#')
ELSE('%')
MAYBE_PROLOG
MAYBE('#', TP_HASHHASH)
ELSE_CODE(
- pp_token.type = '#';
+ pp_token.kind = '#';
)
case '?':
case ';':
case ',':
case '\\':
- pp_token.type = input.c;
+ pp_token.kind = input.c;
next_char();
return;
print_line_directive(&input.position, "2");
goto restart;
} else {
- pp_token.source_position.lineno++;
+ pp_token.base.source_position.lineno++;
info.at_line_begin = true;
- pp_token.type = TP_EOF;
+ pp_token.kind = TP_EOF;
}
return;
default:
next_char();
if (!ignore_unknown_chars) {
- errorf(&pp_token.source_position, "unknown character '%c' found\n",
- input.c);
- pp_token.type = TP_ERROR;
+ errorf(&pp_token.base.source_position,
+ "unknown character '%c' found\n", input.c);
+ pp_token.kind = TP_ERROR;
} else {
- pp_token.type = input.c;
+ pp_token.kind = input.c;
}
return;
}
static void emit_newlines(void)
{
- unsigned delta = pp_token.source_position.lineno - input.output_line;
+ unsigned delta = pp_token.base.source_position.lineno - input.output_line;
if (delta >= 9) {
fputc('\n', out);
- print_line_directive(&pp_token.source_position, NULL);
+ print_line_directive(&pp_token.base.source_position, NULL);
fputc('\n', out);
} else {
for (unsigned i = 0; i < delta; ++i) {
fputc('\n', out);
}
}
- input.output_line = pp_token.source_position.lineno;
+ input.output_line = pp_token.base.source_position.lineno;
}
static void emit_pp_token(void)
fputc(' ', out);
} else if (info.had_whitespace ||
- tokens_would_paste(last_token, pp_token.type)) {
+ tokens_would_paste(last_token, pp_token.kind)) {
fputc(' ', out);
}
- switch (pp_token.type) {
+ switch (pp_token.kind) {
case TP_IDENTIFIER:
- fputs(pp_token.symbol->string, out);
+ fputs(pp_token.identifier.symbol->string, out);
break;
case TP_NUMBER:
- fputs(pp_token.literal.begin, out);
+ fputs(pp_token.number.number.begin, out);
break;
case TP_WIDE_STRING_LITERAL:
fputc('L', out);
case TP_STRING_LITERAL:
fputc('"', out);
- fputs(pp_token.literal.begin, out);
+ fputs(pp_token.string.string.begin, out);
fputc('"', out);
break;
case TP_WIDE_CHARACTER_CONSTANT:
fputc('L', out);
case TP_CHARACTER_CONSTANT:
fputc('\'', out);
- fputs(pp_token.literal.begin, out);
+ fputs(pp_token.string.string.begin, out);
fputc('\'', out);
break;
default:
- print_pp_token_type(out, pp_token.type);
+ print_pp_token_kind(out, pp_token.kind);
break;
}
- last_token = pp_token.type;
+ last_token = pp_token.kind;
}
static void eat_pp_directive(void)
static bool pp_tokens_equal(const token_t *token1, const token_t *token2)
{
- if (token1->type != token2->type)
+ if (token1->kind != token2->kind)
return false;
- switch (token1->type) {
- case TP_HEADERNAME:
- /* TODO */
- return false;
+ switch (token1->kind) {
case TP_IDENTIFIER:
- return token1->symbol == token2->symbol;
+ return token1->identifier.symbol == token2->identifier.symbol;
case TP_NUMBER:
case TP_CHARACTER_CONSTANT:
case TP_STRING_LITERAL:
- return strings_equal(&token1->literal, &token2->literal);
+ return strings_equal(&token1->string.string, &token2->string.string);
default:
return true;
eat_pp(TP_define);
assert(obstack_object_size(&pp_obstack) == 0);
- if (pp_token.type != TP_IDENTIFIER || info.at_line_begin) {
- errorf(&pp_token.source_position,
+ if (pp_token.kind != TP_IDENTIFIER || info.at_line_begin) {
+ errorf(&pp_token.base.source_position,
"expected identifier after #define, got '%t'", &pp_token);
goto error_out;
}
- symbol_t *symbol = pp_token.symbol;
+ symbol_t *symbol = pp_token.identifier.symbol;
pp_definition_t *new_definition
= obstack_alloc(&pp_obstack, sizeof(new_definition[0]));
next_preprocessing_token();
while (true) {
- switch (pp_token.type) {
+ switch (pp_token.kind) {
case TP_DOTDOTDOT:
new_definition->is_variadic = true;
next_preprocessing_token();
- if (pp_token.type != ')') {
+ if (pp_token.kind != ')') {
errorf(&input.position,
"'...' not at end of macro argument list");
goto error_out;
}
break;
case TP_IDENTIFIER:
- obstack_ptr_grow(&pp_obstack, pp_token.symbol);
+ obstack_ptr_grow(&pp_obstack, pp_token.identifier.symbol);
next_preprocessing_token();
- if (pp_token.type == ',') {
+ if (pp_token.kind == ',') {
next_preprocessing_token();
break;
}
- if (pp_token.type != ')') {
- errorf(&pp_token.source_position,
+ if (pp_token.kind != ')') {
+ errorf(&pp_token.base.source_position,
"expected ',' or ')' after identifier, got '%t'",
&pp_token);
goto error_out;
next_preprocessing_token();
goto finish_argument_list;
default:
- errorf(&pp_token.source_position,
+ errorf(&pp_token.base.source_position,
"expected identifier, '...' or ')' in #define argument list, got '%t'",
&pp_token);
goto error_out;
{
eat_pp(TP_undef);
- if (pp_token.type != TP_IDENTIFIER) {
+ if (pp_token.kind != TP_IDENTIFIER) {
errorf(&input.position,
"expected identifier after #undef, got '%t'", &pp_token);
eat_pp_directive();
return;
}
- symbol_t *symbol = pp_token.symbol;
+ symbol_t *symbol = pp_token.identifier.symbol;
symbol->pp_definition = NULL;
next_preprocessing_token();
}
if (!info.at_line_begin) {
- warningf(WARN_OTHER, &pp_token.source_position, "extra tokens at end of #include directive");
+ warningf(WARN_OTHER, &pp_token.base.source_position,
+ "extra tokens at end of #include directive");
eat_pp_directive();
}
if (n_inputs > INCLUDE_LIMIT) {
- errorf(&pp_token.source_position, "#include nested too deeply");
+ errorf(&pp_token.base.source_position, "#include nested too deeply");
/* eat \n or EOF */
next_preprocessing_token();
return false;
push_input();
bool res = do_include(system_include, headername);
if (!res) {
- errorf(&pp_token.source_position,
+ errorf(&pp_token.base.source_position,
"failed including '%s': %s", headername, strerror(errno));
pop_restore_input();
return false;
static void parse_ifdef_ifndef_directive(void)
{
- bool is_ifndef = (pp_token.type == TP_ifndef);
+ bool is_ifndef = (pp_token.kind == TP_ifndef);
bool condition;
next_preprocessing_token();
if (skip_mode) {
eat_pp_directive();
pp_conditional_t *conditional = push_conditional();
- conditional->source_position = pp_token.source_position;
+ conditional->source_position = pp_token.base.source_position;
conditional->skip = true;
return;
}
- if (pp_token.type != TP_IDENTIFIER || info.at_line_begin) {
- errorf(&pp_token.source_position,
+ if (pp_token.kind != TP_IDENTIFIER || info.at_line_begin) {
+ errorf(&pp_token.base.source_position,
"expected identifier after #%s, got '%t'",
is_ifndef ? "ifndef" : "ifdef", &pp_token);
eat_pp_directive();
/* just take the true case in the hope to avoid further errors */
condition = true;
} else {
- symbol_t *symbol = pp_token.symbol;
+ symbol_t *symbol = pp_token.identifier.symbol;
pp_definition_t *pp_definition = symbol->pp_definition;
next_preprocessing_token();
if (!info.at_line_begin) {
- errorf(&pp_token.source_position,
+ errorf(&pp_token.base.source_position,
"extra tokens at end of #%s",
is_ifndef ? "ifndef" : "ifdef");
eat_pp_directive();
}
pp_conditional_t *conditional = push_conditional();
- conditional->source_position = pp_token.source_position;
+ conditional->source_position = pp_token.base.source_position;
conditional->condition = condition;
if (!condition) {
if (!info.at_line_begin) {
if (!skip_mode) {
- warningf(WARN_OTHER, &pp_token.source_position, "extra tokens at end of #else");
+ warningf(WARN_OTHER, &pp_token.base.source_position, "extra tokens at end of #else");
}
eat_pp_directive();
}
pp_conditional_t *conditional = conditional_stack;
if (conditional == NULL) {
- errorf(&pp_token.source_position, "#else without prior #if");
+ errorf(&pp_token.base.source_position, "#else without prior #if");
return;
}
if (conditional->in_else) {
- errorf(&pp_token.source_position,
+ errorf(&pp_token.base.source_position,
"#else after #else (condition started %P)",
conditional->source_position);
skip_mode = true;
if (!conditional->skip) {
skip_mode = conditional->condition;
}
- conditional->source_position = pp_token.source_position;
+ conditional->source_position = pp_token.base.source_position;
}
static void parse_endif_directive(void)
if (!info.at_line_begin) {
if (!skip_mode) {
- warningf(WARN_OTHER, &pp_token.source_position, "extra tokens at end of #endif");
+ warningf(WARN_OTHER, &pp_token.base.source_position, "extra tokens at end of #endif");
}
eat_pp_directive();
}
pp_conditional_t *conditional = conditional_stack;
if (conditional == NULL) {
- errorf(&pp_token.source_position, "#endif without prior #if");
+ errorf(&pp_token.base.source_position, "#endif without prior #if");
return;
}
eat_pp('#');
if (skip_mode) {
- switch (pp_token.type) {
+ switch (pp_token.kind) {
case TP_ifdef:
case TP_ifndef:
parse_ifdef_ifndef_directive();
break;
}
} else {
- switch (pp_token.type) {
+ switch (pp_token.kind) {
case TP_define:
parse_define_directive();
break;
/* the nop directive "#" */
break;
}
- errorf(&pp_token.source_position,
+ errorf(&pp_token.base.source_position,
"invalid preprocessing directive #%t", &pp_token);
eat_pp_directive();
break;
switch_input(file, filename);
while (true) {
- if (pp_token.type == '#' && info.at_line_begin) {
+ if (pp_token.kind == '#' && info.at_line_begin) {
parse_preprocessing_directive();
continue;
- } else if (pp_token.type == TP_EOF) {
+ } else if (pp_token.kind == TP_EOF) {
goto end_of_main_loop;
- } else if (pp_token.type == TP_IDENTIFIER && !in_pp_directive) {
- symbol_t *symbol = pp_token.symbol;
+ } else if (pp_token.kind == TP_IDENTIFIER && !in_pp_directive) {
+ symbol_t *symbol = pp_token.identifier.symbol;
pp_definition_t *pp_definition = symbol->pp_definition;
if (pp_definition != NULL && !pp_definition->is_expanding) {
- expansion_pos = pp_token.source_position;
+ expansion_pos = pp_token.base.source_position;
if (pp_definition->has_parameters) {
- source_position_t position = pp_token.source_position;
+ source_position_t position = pp_token.base.source_position;
add_token_info_t old_info = info;
next_preprocessing_token();
add_token_info_t new_info = info;
/* no opening brace -> no expansion */
- if (pp_token.type == '(') {
+ if (pp_token.kind == '(') {
eat_pp('(');
/* parse arguments (TODO) */
- while (pp_token.type != TP_EOF && pp_token.type != ')')
+ while (pp_token.kind != TP_EOF && pp_token.kind != ')')
next_preprocessing_token();
} else {
token_t next_token = pp_token;
/* restore identifier token */
- pp_token.type = TP_IDENTIFIER;
- pp_token.symbol = symbol;
- pp_token.source_position = position;
+ pp_token.kind = TP_IDENTIFIER;
+ pp_token.identifier.symbol = symbol;
+ pp_token.base.source_position = position;
info = old_info;
emit_pp_token();